
Commit 951dbb1

aclements authored and andybons committed
[release-branch.go1.13] runtime: grow the heap incrementally
Currently, we map and grow the heap a whole arena (64MB) at a time. Unfortunately, in order to fix #32828, we need to switch from scavenging inline with allocation back to scavenging on heap growth, but heap-growth scavenging happens in large jumps because we grow the heap in large jumps.

In order to prepare for better heap-growth scavenging, this CL separates mapping more space for the heap from actually "growing" it (tracking the new space with spans). Instead, growing the heap keeps track of the "current arena" it's growing into. It tracks that with new spans as needed, and only maps more arena space when the current arena is inadequate. The effect to the user is the same, but this will let us scavenge on much smaller increments of heap growth.

There are two slight subtleties to this change:

1. If an allocation requires mapping a new arena and that new arena isn't contiguous with the current arena, we don't want to lose the unused space in the current arena, so we have to immediately track that with a span.

2. The mapped space must be accounted as released and idle, even though it isn't actually tracked in a span.

For #32828, this makes heap-growth scavenging far more effective, especially at small heap sizes. For example, this change is necessary for TestPhysicalMemoryUtilization to pass once we remove inline scavenging.

Updates #34556

Change-Id: I300e74a0534062467e4ce91cdc3508e5ef9aa73a
Reviewed-on: https://go-review.googlesource.com/c/go/+/189957
Run-TryBot: Austin Clements <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Keith Randall <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
(cherry picked from commit f18109d)
Reviewed-on: https://go-review.googlesource.com/c/go/+/198485
Run-TryBot: Andrew Bonventre <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
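To make the "current arena" idea concrete, here is a minimal standalone sketch of the bump-pointer pattern the message describes. It is not the runtime's code: bumpArena, mapMore, the 4 KiB page size, and the 64 MiB mapping granularity are all invented for illustration. Space is mapped rarely and in large chunks, but the heap "grows" out of the current chunk in small, page-rounded increments, falling back to a new mapping only when the chunk is exhausted.

package main

import "fmt"

const pageSize = 4096 // assumed physical page size for this sketch

// round rounds n up to a multiple of a (a power of two), mirroring the
// page-rounding the grow path performs.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// bumpArena is a toy stand-in for the heap's "current arena": a mapped
// region [base, end) that growth requests are bump-allocated from.
type bumpArena struct {
	base, end uintptr
}

// grow hands out ask bytes (rounded up to a whole page) from the current
// region, calling mapMore for a fresh region only when the current one
// cannot satisfy the request.
func (a *bumpArena) grow(ask uintptr, mapMore func(uintptr) (base, size uintptr)) (uintptr, uintptr) {
	nBase := round(a.base+ask, pageSize)
	if nBase > a.end {
		// Out of room: map more space. In the real change, if the new
		// mapping is discontiguous, the leftover [a.base, a.end) is
		// first registered as a span so it is not lost.
		b, sz := mapMore(ask)
		a.base, a.end = b, b+sz
		nBase = round(a.base+ask, pageSize)
	}
	v := a.base
	a.base = nBase
	return v, nBase - v
}

func main() {
	next := uintptr(64 << 20) // fake address space for the sketch
	mapMore := func(ask uintptr) (uintptr, uintptr) {
		size := round(ask, 64<<20) // pretend mappings come in 64 MiB arenas
		base := next
		next += size
		return base, size
	}

	a := &bumpArena{}
	for i := 0; i < 3; i++ {
		v, n := a.grow(5000, mapMore) // each 5000-byte ask becomes 2 pages
		fmt.Printf("grew %d bytes at %#x\n", n, v)
	}
}

In the real change the discontiguous case additionally registers the leftover of the old arena as a span (subtlety 1 above); the sketch only notes that in a comment.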
1 parent fa325ea commit 951dbb1

File tree

2 files changed: +69 -15 lines changed


src/runtime/export_test.go

Lines changed: 3 additions & 0 deletions
@@ -345,6 +345,9 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 			slow.HeapReleased += uint64(i.span().released())
 		}
 
+		// Unused space in the current arena also counts as released space.
+		slow.HeapReleased += uint64(mheap_.curArena.end - mheap_.curArena.base)
+
 		getg().m.mallocing--
 	})
 
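As a way to read this check: ReadMemStatsSlow recomputes HeapReleased from first principles, so once grow leaves a mapped-but-unspanned tail in the current arena, that tail has to be added explicitly. Below is a small sketch of the recomputation, using invented stand-ins (releasedPerSpan, the curArena bounds passed as plain arguments) rather than the runtime's internals:

package sketch

// recomputeHeapReleased mirrors, in simplified form, the slow-path
// cross-check above: sum the released bytes of every free span, then
// add the still-unused tail of the current arena, which is mapped but
// not yet tracked by any span.
func recomputeHeapReleased(releasedPerSpan []uint64, curArenaBase, curArenaEnd uintptr) uint64 {
	var total uint64
	for _, r := range releasedPerSpan {
		total += r
	}
	total += uint64(curArenaEnd - curArenaBase)
	return total
}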
src/runtime/mheap.go

Lines changed: 66 additions & 15 deletions
@@ -186,6 +186,12 @@ type mheap struct {
 	// simply blocking GC (by disabling preemption).
 	sweepArenas []arenaIdx
 
+	// curArena is the arena that the heap is currently growing
+	// into. This should always be physPageSize-aligned.
+	curArena struct {
+		base, end uintptr
+	}
+
 	_ uint32 // ensure 64-bit alignment of central
 
 	// central free lists for small size classes.
@@ -1250,29 +1256,74 @@ HaveSpan:
 // h must be locked.
 func (h *mheap) grow(npage uintptr) bool {
 	ask := npage << _PageShift
-	v, size := h.sysAlloc(ask)
-	if v == nil {
-		print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
-		return false
-	}
 
-	// Create a fake "in use" span and free it, so that the
-	// right accounting and coalescing happens.
+	nBase := round(h.curArena.base+ask, physPageSize)
+	if nBase > h.curArena.end {
+		// Not enough room in the current arena. Allocate more
+		// arena space. This may not be contiguous with the
+		// current arena, so we have to request the full ask.
+		av, asize := h.sysAlloc(ask)
+		if av == nil {
+			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
+			return false
+		}
+
+		if uintptr(av) == h.curArena.end {
+			// The new space is contiguous with the old
+			// space, so just extend the current space.
+			h.curArena.end = uintptr(av) + asize
+		} else {
+			// The new space is discontiguous. Track what
+			// remains of the current space and switch to
+			// the new space. This should be rare.
+			if size := h.curArena.end - h.curArena.base; size != 0 {
+				h.growAddSpan(unsafe.Pointer(h.curArena.base), size)
+			}
+			// Switch to the new space.
+			h.curArena.base = uintptr(av)
+			h.curArena.end = uintptr(av) + asize
+		}
+
+		// The memory just allocated counts as both released
+		// and idle, even though it's not yet backed by spans.
+		//
+		// The allocation is always aligned to the heap arena
+		// size which is always > physPageSize, so its safe to
+		// just add directly to heap_released. Coalescing, if
+		// possible, will also always be correct in terms of
+		// accounting, because s.base() must be a physical
+		// page boundary.
+		memstats.heap_released += uint64(asize)
+		memstats.heap_idle += uint64(asize)
+
+		// Recalculate nBase
+		nBase = round(h.curArena.base+ask, physPageSize)
+	}
+
+	// Grow into the current arena.
+	v := h.curArena.base
+	h.curArena.base = nBase
+	h.growAddSpan(unsafe.Pointer(v), nBase-v)
+	return true
+}
+
+// growAddSpan adds a free span when the heap grows into [v, v+size).
+// This memory must be in the Prepared state (not Ready).
+//
+// h must be locked.
+func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
 	s := (*mspan)(h.spanalloc.alloc())
 	s.init(uintptr(v), size/pageSize)
 	h.setSpans(s.base(), s.npages, s)
 	s.state = mSpanFree
-	memstats.heap_idle += uint64(size)
-	// (*mheap).sysAlloc returns untouched/uncommitted memory.
+	// [v, v+size) is always in the Prepared state. The new span
+	// must be marked scavenged so the allocator transitions it to
+	// Ready when allocating from it.
 	s.scavenged = true
-	// s is always aligned to the heap arena size which is always > physPageSize,
-	// so its totally safe to just add directly to heap_released. Coalescing,
-	// if possible, will also always be correct in terms of accounting, because
-	// s.base() must be a physical page boundary.
-	memstats.heap_released += uint64(size)
+	// This span is both released and idle, but grow already
+	// updated both memstats.
 	h.coalesce(s)
 	h.free.insert(s)
-	return true
 }
 
 // Free the span back into the heap.
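To see the arithmetic in the new grow path with concrete numbers, here is a small standalone example. The sizes are assumptions chosen to make the rounding visible (8 KiB runtime pages on a platform with 16 KiB physical pages), and round is re-implemented locally rather than taken from the runtime:

package main

import "fmt"

// round rounds n up to a multiple of a (a power of two); it stands in
// for the runtime helper used by grow.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	// Assumed sizes for this example: 8 KiB runtime pages on a platform
	// with 16 KiB physical pages.
	const pageSize = 8192
	const physPageSize = 16384

	// Current arena: 64 MiB mapped starting at a physPageSize-aligned base.
	base := uintptr(0x4000000)
	end := base + 64<<20

	ask := uintptr(3 * pageSize) // grow(3): three runtime pages
	nBase := round(base+ask, physPageSize)

	fmt.Printf("ask  = %d bytes\n", ask)
	fmt.Printf("span = [%#x, %#x), %d bytes\n", base, nBase, nBase-base)
	fmt.Printf("fits = %v (arena ends at %#x)\n", nBase <= end, end)
	// 24576 bytes rounded up to a 16 KiB boundary becomes 32768, so the
	// freed span is one physical page larger than the literal ask.
}

The point of the worked numbers: curArena.base only ever advances by whole physical pages, which is part of what lets later heap-growth scavenging operate on these small, page-aligned increments instead of whole 64 MB arenas.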
