Commit 5c15ed6

runtime: split spans during allocation without treap removal
Now that the treap is first-fit, we can make a nice optimization: since span splitting doesn't modify the relative position of a span in the treap, we can modify a span in-place on the treap. The only caveat is that we need to update the relevant metadata.

To enable this optimization, this change introduces a mutate method on the iterator which takes a callback that is passed the iterator's span. The method records some properties of the span before it calls into the callback and then uses those records to see what changed and update treap metadata appropriately.

Change-Id: I74f7d2ee172800828434ba0194d3d78d3942acf2
Reviewed-on: https://go-review.googlesource.com/c/go/+/174879
Run-TryBot: Michael Knyszek <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
1 parent f4a5ae5 commit 5c15ed6
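The pattern the commit message describes can be seen in miniature below. This is a simplified, self-contained sketch, not the runtime's code: a node of an ordered tree caches the largest span size in its subtree, and a mutate helper applies a callback to the node's span in place, then repairs the node's key and the cached metadata on the path to the root. The span and node types, the maxPages field, and the constants are illustrative stand-ins only.

package main

import "fmt"

type span struct {
	base, npages uintptr
}

type node struct {
	key                 uintptr // mirrors s.base, like the treap key
	s                   *span
	maxPages            uintptr // cached: largest npages in this subtree
	parent, left, right *node
}

// updateMax recomputes the cached metadata from the node's span and children
// and reports whether it changed, so callers can stop walking up the tree
// once nothing above can be affected.
func (n *node) updateMax() bool {
	m := n.s.npages
	if n.left != nil && n.left.maxPages > m {
		m = n.left.maxPages
	}
	if n.right != nil && n.right.maxPages > m {
		m = n.right.maxPages
	}
	if m == n.maxPages {
		return false
	}
	n.maxPages = m
	return true
}

// mutate lets fn change the span's base and size in place, provided the span
// keeps its order relative to its neighbors (so no rebalancing is needed),
// then repairs the key and the cached metadata on the path to the root.
func mutate(n *node, fn func(*span)) {
	fn(n.s)
	n.key = n.s.base // the key may have changed along with the base
	for t := n; t != nil && t.updateMax(); t = t.parent {
	}
}

func main() {
	const pageSize = 8 << 10
	s := &span{base: 0x200000, npages: 8}
	n := &node{key: s.base, s: s, maxPages: s.npages}
	// Split off the low 3 pages for an allocation; the remainder stays put.
	mutate(n, func(s *span) {
		s.base += 3 * pageSize
		s.npages -= 3
	})
	fmt.Printf("remainder: key=%#x npages=%d maxPages=%d\n", n.key, n.s.npages, n.maxPages)
}

Returning a changed/unchanged flag from updateMax serves the same purpose as updateInvariants in the real change: the upward walk stops as soon as an ancestor's cached value is unaffected.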

2 files changed: 67 additions and 27 deletions

src/runtime/mgclarge.go

Lines changed: 34 additions & 0 deletions
@@ -377,6 +377,40 @@ func (root *mTreap) end(mask, match treapIterType) treapIter {
 	return treapIter{f, root.treap.findMaximal(f)}
 }
 
+// mutate allows one to mutate the span without removing it from the treap via a
+// callback. The span's base and size are allowed to change as long as the span
+// remains in the same order relative to its predecessor and successor.
+//
+// Note however that any operation that causes a treap rebalancing inside of fn
+// is strictly forbidden, as that may cause treap node metadata to go
+// out-of-sync.
+func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) {
+	s := i.span()
+	// Save some state about the span for later inspection.
+	hpages := s.hugePages()
+	scavenged := s.scavenged
+	// Call the mutator.
+	fn(s)
+	// Update unscavHugePages appropriately.
+	if !scavenged {
+		mheap_.free.unscavHugePages -= hpages
+	}
+	if !s.scavenged {
+		mheap_.free.unscavHugePages += s.hugePages()
+	}
+	// Update the key in case the base changed.
+	i.t.key = s.base()
+	// Updating invariants up the tree needs to happen if
+	// anything changed at all, so just go ahead and do it
+	// unconditionally.
+	//
+	// If it turns out nothing changed, it'll exit quickly.
+	t := i.t
+	for t != nil && t.updateInvariants() {
+		t = t.parent
+	}
+}
+
 // insert adds span to the large span treap.
 func (root *mTreap) insert(span *mspan) {
 	if !span.scavenged {
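As a rough model of the unscavHugePages bookkeeping that mutate performs above, the sketch below (again illustrative, not runtime code) captures a span's old contribution to a global unscavenged-huge-page counter before the callback runs, then subtracts it and adds the new contribution afterward, so the counter stays correct however the callback changed the span. The hugePagesIn helper and the 2 MiB huge page size are assumptions made for the example.

package main

import "fmt"

const hugePageSize = 2 << 20 // assume 2 MiB huge pages

type span struct {
	base, size uintptr // size in bytes
	scavenged  bool
}

// hugePagesIn counts whole huge pages fully contained in [base, base+size).
func hugePagesIn(s *span) uintptr {
	start := (s.base + hugePageSize - 1) &^ (hugePageSize - 1) // round base up
	end := (s.base + s.size) &^ (hugePageSize - 1)             // round end down
	if start >= end {
		return 0
	}
	return (end - start) / hugePageSize
}

var unscavHugePages uintptr

func mutate(s *span, fn func(*span)) {
	// Capture the span's old contribution to the counter.
	hpages := hugePagesIn(s)
	scavenged := s.scavenged
	fn(s)
	// Remove the old contribution and add the new one.
	if !scavenged {
		unscavHugePages -= hpages
	}
	if !s.scavenged {
		unscavHugePages += hugePagesIn(s)
	}
}

func main() {
	s := &span{base: 0, size: 8 << 20} // 4 huge pages, unscavenged
	unscavHugePages = hugePagesIn(s)
	// Trim 6 MiB off the front; only 1 huge page remains fully covered.
	mutate(s, func(s *span) {
		s.base += 6 << 20
		s.size -= 6 << 20
	})
	fmt.Println("unscavHugePages:", unscavHugePages) // prints 1
}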

src/runtime/mheap.go

Lines changed: 33 additions & 27 deletions
@@ -1138,40 +1138,46 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 
 HaveSpan:
 	s := t.span()
-	h.free.erase(t)
-
-	// Mark span in use.
 	if s.state != mSpanFree {
 		throw("candidate mspan for allocation is not free")
 	}
-	if s.npages < npage {
-		throw("candidate mspan for allocation is too small")
-	}
 
 	// First, subtract any memory that was released back to
-	// the OS from s. We will re-scavenge the trimmed section
-	// if necessary.
+	// the OS from s. We will add back what's left if necessary.
 	memstats.heap_released -= uint64(s.released())
 
-	if s.npages > npage {
-		// Trim extra and put it back in the heap.
-		t := (*mspan)(h.spanalloc.alloc())
-		t.init(s.base()+npage<<_PageShift, s.npages-npage)
-		s.npages = npage
-		h.setSpan(t.base()-1, s)
-		h.setSpan(t.base(), t)
-		h.setSpan(t.base()+t.npages*pageSize-1, t)
-		t.needzero = s.needzero
-		// If s was scavenged, then t may be scavenged.
-		start, end := t.physPageBounds()
-		if s.scavenged && start < end {
-			memstats.heap_released += uint64(end - start)
-			t.scavenged = true
-		}
-		s.state = mSpanManual // prevent coalescing with s
-		t.state = mSpanManual
-		h.freeSpanLocked(t, false, false, s.unusedsince)
-		s.state = mSpanFree
+	if s.npages == npage {
+		h.free.erase(t)
+	} else if s.npages > npage {
+		// Trim off the lower bits and make that our new span.
+		// Do this in-place since this operation does not
+		// affect the original span's location in the treap.
+		n := (*mspan)(h.spanalloc.alloc())
+		h.free.mutate(t, func(s *mspan) {
+			n.init(s.base(), npage)
+			s.npages -= npage
+			s.startAddr = s.base() + npage*pageSize
+			h.setSpan(s.base()-1, n)
+			h.setSpan(s.base(), s)
+			h.setSpan(n.base(), n)
+			n.needzero = s.needzero
+			// n may not be big enough to actually be scavenged, but that's fine.
+			// We still want it to appear to be scavenged so that we can do the
+			// right bookkeeping later on in this function (i.e. sysUsed).
+			n.scavenged = s.scavenged
+			// Check if s is still scavenged.
+			if s.scavenged {
+				start, end := s.physPageBounds()
+				if start < end {
+					memstats.heap_released += uint64(end - start)
+				} else {
+					s.scavenged = false
+				}
+			}
+		})
+		s = n
+	} else {
+		throw("candidate mspan for allocation is too small")
	}
 	// "Unscavenge" s only AFTER splitting so that
 	// we only sysUsed whatever we actually need.
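The address arithmetic in the new split path is easy to check in isolation. The sketch below is a simplified stand-alone model, assuming the runtime's 8 KiB page size; the span type and split helper are invented for illustration. The allocation takes the low npage pages at the old base, while the remainder keeps its slot in the treap and simply starts npage*pageSize bytes higher.

package main

import "fmt"

const pageSize = 8 << 10 // assume 8 KiB pages

type span struct {
	startAddr uintptr
	npages    uintptr
}

// split carves the low npage pages off s and returns them as a new span,
// shrinking s in place. It assumes s.npages > npage.
func split(s *span, npage uintptr) *span {
	n := &span{startAddr: s.startAddr, npages: npage}
	s.npages -= npage
	s.startAddr += npage * pageSize
	return n
}

func main() {
	s := &span{startAddr: 0x200000, npages: 16}
	n := split(s, 5)
	fmt.Printf("alloc:     base=%#x npages=%d\n", n.startAddr, n.npages)
	fmt.Printf("remainder: base=%#x npages=%d\n", s.startAddr, s.npages)
	// remainder base = 0x200000 + 5*8192 = 0x20a000
}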
