
Commit 31c4e09

runtime: ensure free and unscavenged spans may be backed by huge pages

This change adds a new sysHugePage function to provide the equivalent of
Linux's madvise(MADV_HUGEPAGE) support to the runtime. It then uses
sysHugePage to mark a newly-coalesced free span as backable by huge pages
to make the freeHugePages approximation a bit more accurate.

The problem being solved here is that if a large free span is composed of
many small spans which were coalesced together, then there's a chance that
they have had madvise(MADV_NOHUGEPAGE) called on them at some point, which
makes freeHugePages less accurate.

For #30333.

Change-Id: Idd4b02567619fc8d45647d9abd18da42f96f0522
Reviewed-on: https://go-review.googlesource.com/c/go/+/173338
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
1 parent 5c15ed6 commit 31c4e09


8 files changed, +44 -9 lines changed

src/runtime/mem_aix.go

Lines changed: 3 additions & 0 deletions
@@ -35,6 +35,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit

src/runtime/mem_bsd.go

Lines changed: 3 additions & 0 deletions
@@ -29,6 +29,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit

src/runtime/mem_darwin.go

Lines changed: 3 additions & 0 deletions
@@ -33,6 +33,9 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 	madvise(v, n, _MADV_FREE_REUSE)
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit

src/runtime/mem_js.go

Lines changed: 3 additions & 0 deletions
@@ -26,6 +26,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit

src/runtime/mem_linux.go

Lines changed: 12 additions & 9 deletions
@@ -117,16 +117,19 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 }
 
 func sysUsed(v unsafe.Pointer, n uintptr) {
-	if physHugePageSize != 0 {
-		// Partially undo the NOHUGEPAGE marks from sysUnused
-		// for whole huge pages between v and v+n. This may
-		// leave huge pages off at the end points v and v+n
-		// even though allocations may cover these entire huge
-		// pages. We could detect this and undo NOHUGEPAGE on
-		// the end points as well, but it's probably not worth
-		// the cost because when neighboring allocations are
-		// freed sysUnused will just set NOHUGEPAGE again.
+	// Partially undo the NOHUGEPAGE marks from sysUnused
+	// for whole huge pages between v and v+n. This may
+	// leave huge pages off at the end points v and v+n
+	// even though allocations may cover these entire huge
+	// pages. We could detect this and undo NOHUGEPAGE on
+	// the end points as well, but it's probably not worth
+	// the cost because when neighboring allocations are
+	// freed sysUnused will just set NOHUGEPAGE again.
+	sysHugePage(v, n)
+}
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+	if physHugePageSize != 0 {
 		// Round v up to a huge page boundary.
 		beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
 		// Round v+n down to a huge page boundary.
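
The hunk above ends at its last context line, inside the body of the new Linux sysHugePage. For context, here is a hedged sketch of how the complete function plausibly reads; the end computation, the beg < end guard, and the madvise(..., _MADV_HUGEPAGE) call are not shown in the hunk and are assumed from the commit message, which describes sysHugePage as the runtime's equivalent of madvise(MADV_HUGEPAGE).

// Sketch only: the lines after "Round v+n down to a huge page boundary."
// are assumed, not taken from the hunk above.
func sysHugePage(v unsafe.Pointer, n uintptr) {
	if physHugePageSize != 0 {
		// Round v up to a huge page boundary.
		beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
		// Round v+n down to a huge page boundary.
		end := (uintptr(v) + n) &^ (physHugePageSize - 1)

		// Only huge pages fully contained in [v, v+n) are marked; partial
		// huge pages at either end are left alone.
		if beg < end {
			madvise(unsafe.Pointer(beg), end-beg, _MADV_HUGEPAGE)
		}
	}
}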

src/runtime/mem_plan9.go

Lines changed: 3 additions & 0 deletions
@@ -173,6 +173,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	// sysReserve has already allocated all heap memory,
 	// but has not adjusted stats.

src/runtime/mem_windows.go

Lines changed: 3 additions & 0 deletions
@@ -81,6 +81,9 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 	}
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit

src/runtime/mheap.go

Lines changed: 14 additions & 0 deletions
@@ -502,6 +502,8 @@ func (h *mheap) coalesce(s *mspan) {
 		h.free.insert(other)
 	}
 
+	hpBefore := s.hugePages()
+
 	// Coalesce with earlier, later spans.
 	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
 		if s.scavenged == before.scavenged {
@@ -519,6 +521,18 @@ func (h *mheap) coalesce(s *mspan) {
 			realign(s, after, after)
 		}
 	}
+
+	if !s.scavenged && s.hugePages() > hpBefore {
+		// If s has grown such that it now may contain more huge pages than it
+		// did before, then mark the whole region as huge-page-backable.
+		//
+		// Otherwise, on systems where we break up huge pages (like Linux)
+		// s may not be backed by huge pages because it could be made up of
+		// pieces which are broken up in the underlying VMA. The primary issue
+		// with this is that it can lead to a poor estimate of the amount of
+		// free memory backed by huge pages for determining the scavenging rate.
+		sysHugePage(unsafe.Pointer(s.base()), s.npages*pageSize)
+	}
 }
 
 // hugePages returns the number of aligned physical huge pages in the memory
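
The new code in coalesce compares s.hugePages() before and after coalescing, and per the commit message the resulting freeHugePages estimate feeds the scavenging rate. The body of hugePages is not part of this diff; the following is a hedged sketch of what counting aligned physical huge pages in a span presumably involves (names follow the surrounding runtime code, but the actual method may differ in detail).

// Sketch only: not the diffed code. Counts physical huge pages that lie
// entirely within the span by rounding its bounds inward to
// physHugePageSize-aligned addresses.
func (s *mspan) hugePages() uintptr {
	if physHugePageSize == 0 {
		return 0
	}
	start := s.base()
	end := start + s.npages*pageSize
	// Round start up and end down to huge page boundaries.
	start = (start + physHugePageSize - 1) &^ (physHugePageSize - 1)
	end &^= physHugePageSize - 1
	if start < end {
		return (end - start) / physHugePageSize
	}
	return 0
}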
