Commit f4a5ae5
runtime: track the number of free unscavenged huge pages
This change tracks the number of potential free and unscavenged huge
pages, which will be used to inform the rate at which scavenging should
occur.

For #30333.

Change-Id: I47663e5ffb64cac44ffa10db158486783f707479
Reviewed-on: https://go-review.googlesource.com/c/go/+/170860
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
1 parent a62b572 commit f4a5ae5
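
The pattern this commit follows is a cached statistic that is updated incrementally at insert/remove time and validated against a slow full rescan in tests. As a rough illustration only (the names and types below are hypothetical, not the runtime's), the shape of that pattern is:

package main

import "fmt"

// tracker keeps a cached count of a property of its elements, updated
// incrementally, plus a slow recount used to validate the incremental
// bookkeeping. Hypothetical sketch, not runtime code.
type tracker struct {
	spans  []int // stand-in for free spans; values are "huge pages per span"
	cached int   // stand-in for mTreap.unscavHugePages
}

func (t *tracker) insert(hugePages int) {
	t.cached += hugePages // mirrors mTreap.insert's += span.hugePages()
	t.spans = append(t.spans, hugePages)
}

func (t *tracker) slowCount() int {
	// Full rescan, mirrors UnscavHugePagesSlow's walk over allspans.
	n := 0
	for _, hp := range t.spans {
		n += hp
	}
	return n
}

func main() {
	t := &tracker{}
	for _, hp := range []int{2, 0, 5} {
		t.insert(hp)
	}
	fmt.Println(t.cached == t.slowCount()) // true: cache matches recount
}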

File tree: 4 files changed, +50 −2 lines changed

src/runtime/export_test.go

Lines changed: 22 additions & 0 deletions
@@ -36,6 +36,8 @@ var Atoi32 = atoi32
 
 var Nanotime = nanotime
 
+var PhysHugePageSize = physHugePageSize
+
 type LFNode struct {
 	Next    uint64
 	Pushcnt uintptr
@@ -516,6 +518,26 @@ func MapTombstoneCheck(m map[int]int) {
 	}
 }
 
+// UnscavHugePagesSlow returns the value of mheap_.free.unscavHugePages
+// and the number of unscavenged huge pages calculated by
+// scanning the heap.
+func UnscavHugePagesSlow() (uintptr, uintptr) {
+	var base, slow uintptr
+	// Run on the system stack to avoid deadlock from stack growth
+	// trying to acquire the heap lock.
+	systemstack(func() {
+		lock(&mheap_.lock)
+		base = mheap_.free.unscavHugePages
+		for _, s := range mheap_.allspans {
+			if s.state == mSpanFree && !s.scavenged {
+				slow += s.hugePages()
+			}
+		}
+		unlock(&mheap_.lock)
+	})
+	return base, slow
+}
+
 // Span is a safe wrapper around an mspan, whose memory
 // is managed manually.
 type Span struct {

src/runtime/gc_test.go

Lines changed: 19 additions & 0 deletions
@@ -470,6 +470,25 @@ func TestReadMemStats(t *testing.T) {
 	}
 }
 
+func TestUnscavHugePages(t *testing.T) {
+	// Allocate 20 MiB and immediately free it a few times to increase
+	// the chance that unscavHugePages isn't zero and that some kind of
+	// accounting had to happen in the runtime.
+	for j := 0; j < 3; j++ {
+		var large [][]byte
+		for i := 0; i < 5; i++ {
+			large = append(large, make([]byte, runtime.PhysHugePageSize))
+		}
+		runtime.KeepAlive(large)
+		runtime.GC()
+	}
+	base, slow := runtime.UnscavHugePagesSlow()
+	if base != slow {
+		logDiff(t, "unscavHugePages", reflect.ValueOf(base), reflect.ValueOf(slow))
+		t.Fatal("unscavHugePages mismatch")
+	}
+}
+
 func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
 	typ := got.Type()
 	switch typ.Kind() {
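
One platform subtlety worth noting (an observation about the test, not part of the commit): on systems where the runtime cannot determine the physical huge page size, physHugePageSize is 0, so the allocations above are zero-length and the test degenerates to checking that both counters agree at zero. A test that wanted to exercise the accounting only where it is meaningful could guard itself like this (hypothetical, not in the commit):

	if runtime.PhysHugePageSize == 0 {
		t.Skip("physical huge page size unknown on this platform")
	}

The test as written does not skip, which still seems reasonable: the equality check between the cached and recounted values remains valid either way.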

src/runtime/mgclarge.go

Lines changed: 8 additions & 1 deletion
@@ -40,7 +40,8 @@ import (
 
 //go:notinheap
 type mTreap struct {
-	treap *treapNode
+	treap           *treapNode
+	unscavHugePages uintptr // number of unscavenged huge pages in the treap
 }
 
 //go:notinheap
@@ -378,6 +379,9 @@ func (root *mTreap) end(mask, match treapIterType) treapIter {
 
 // insert adds span to the large span treap.
 func (root *mTreap) insert(span *mspan) {
+	if !span.scavenged {
+		root.unscavHugePages += span.hugePages()
+	}
 	base := span.base()
 	var last *treapNode
 	pt := &root.treap
@@ -435,6 +439,9 @@ func (root *mTreap) insert(span *mspan) {
 }
 
 func (root *mTreap) removeNode(t *treapNode) {
+	if !t.span.scavenged {
+		root.unscavHugePages -= t.span.hugePages()
+	}
 	if t.span.base() != t.key {
 		throw("span and treap node base addresses do not match")
 	}
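
The bookkeeping above relies on span.hugePages(), which this diff does not show. Presumably it counts the whole, aligned physical huge pages contained in the span's address range; a self-contained sketch of that arithmetic (the function name and layout here are assumed, not taken from the runtime):

package main

import "fmt"

// hugePagesIn reports how many whole, aligned huge pages fit inside
// the address range [base, base+size). A sketch of the arithmetic a
// helper like (*mspan).hugePages plausibly performs.
func hugePagesIn(base, size, hugePageSize uintptr) uintptr {
	if hugePageSize == 0 {
		return 0
	}
	mask := hugePageSize - 1
	start := (base + mask) &^ mask // round start up to a huge page boundary
	end := (base + size) &^ mask   // round end down to a huge page boundary
	if start >= end {
		return 0
	}
	return (end - start) / hugePageSize
}

func main() {
	// A 5 MiB span starting 1 MiB past a 2 MiB boundary covers
	// exactly two whole 2 MiB huge pages.
	const MiB = 1 << 20
	fmt.Println(hugePagesIn(1*MiB, 5*MiB, 2*MiB)) // 2
}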

src/runtime/mheap.go

Lines changed: 1 addition & 1 deletion
@@ -59,7 +59,7 @@ type mheap struct {
 	// on the swept stack.
 	sweepSpans [2]gcSweepBuf
 
-	// _ uint32 // align uint64 fields on 32-bit for atomics
+	_ uint32 // align uint64 fields on 32-bit for atomics
 
 	// Proportional sweep
 	//
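
The mheap.go change re-enables a padding field. The background (general Go knowledge, not stated in the commit): on 32-bit platforms, 64-bit atomic operations require 8-byte alignment, which Go guarantees only for the first word in an allocation, so a struct that mixes 32-bit fields with atomically-accessed uint64 fields needs explicit padding. A standalone illustration with made-up field names:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// stats mixes a 32-bit field with a uint64 that is updated atomically.
// Without the blank uint32, the uint64 would sit at offset 4 on 32-bit
// platforms and atomic.AddUint64 would panic with an unaligned 64-bit
// atomic operation. Field names are illustrative, not mheap's.
type stats struct {
	flags      uint32
	_          uint32 // align the uint64 below to 8 bytes, as in mheap
	pagesSwept uint64
}

func main() {
	var s stats
	atomic.AddUint64(&s.pagesSwept, 1)         // safe: field is 8-byte aligned
	fmt.Println(unsafe.Offsetof(s.pagesSwept)) // 8
}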
