
Commit 2ab6d01

runtime: throw if scavenge necessary during coalescing
Currently, when coalescing, if two adjacent spans are scavenged, we subtract their sizes from memstats and re-scavenge the new combined span. This is wasteful, however, since the realignment semantics make this case of having to re-scavenge impossible.

In realign() inside of coalesce() there was also a bug: on systems where physPageSize > pageSize we wouldn't realign, because a condition had the wrong sign. The wasteful re-scavenging has been masking this bug the whole time, so this change fixes that first.

This change then gets rid of the needsScavenge logic and instead checks explicitly for the possibility of unscavenged pages near the physical page boundary. If the possibility exists, it throws. The intent of throwing here is to catch changes to the runtime which cause this invariant to no longer hold, at which point it would likely be appropriate to scavenge the additional pages (and only the additional pages).

Change-Id: I185e3d7b53e36e90cf9ace5fa297a9e8008d75f3
Reviewed-on: https://go-review.googlesource.com/c/go/+/158377
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
1 parent 512b3c6 commit 2ab6d01
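
Editor's note: to make the invariant behind the new throw concrete, here is a minimal, self-contained sketch (not runtime code) of the physical-page-boundary check. The 8 KiB runtime page and 16 KiB physical page sizes, the toy span type, and its physPageBounds helper are all made up for illustration; the real check lives in mspan.physPageBounds and the merge closure shown in the diff below.

package main

import "fmt"

// Hypothetical sizes chosen so that pageSize < physPageSize, which is the
// case the new check guards.
const (
	pageSize     = 8 << 10  // runtime page size (assumed)
	physPageSize = 16 << 10 // physical page size (assumed)
)

// span is a toy model of an mspan: npages runtime pages starting at base.
type span struct {
	base   uintptr
	npages uintptr
}

func (s span) limit() uintptr { return s.base + s.npages*pageSize }

// physPageBounds mirrors the idea of mspan.physPageBounds: round the span's
// base up and its limit down to physical page boundaries, giving the largest
// region that can be released to the OS in whole physical pages.
func (s span) physPageBounds() (start, end uintptr) {
	start = (s.base + physPageSize - 1) &^ (physPageSize - 1) // round up
	end = s.limit() &^ (physPageSize - 1)                     // round down
	return
}

func main() {
	// Two adjacent spans whose shared boundary (24 KiB) is not aligned to a
	// 16 KiB physical page. Coalescing two *scavenged* spans like these would
	// leave the physical page straddling the boundary unreleased, which is
	// exactly the situation the new code throws on.
	a := span{base: 0, npages: 3}         // [0, 24 KiB)
	b := span{base: a.limit(), npages: 3} // [24 KiB, 48 KiB)

	_, aEnd := a.physPageBounds()   // scavengable end of a, rounded down: 16 KiB
	bStart, _ := b.physPageBounds() // scavengable start of b, rounded up: 32 KiB
	fmt.Println("boundary aligned:", aEnd == bStart) // false
}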

1 file changed

src/runtime/mheap.go

Lines changed: 29 additions & 32 deletions
@@ -428,28 +428,40 @@ func (s *mspan) physPageBounds() (uintptr, uintptr) {
 }
 
 func (h *mheap) coalesce(s *mspan) {
-	// We scavenge s at the end after coalescing if s or anything
-	// it merged with is marked scavenged.
-	needsScavenge := false
-	prescavenged := s.released() // number of bytes already scavenged.
-
 	// merge is a helper which merges other into s, deletes references to other
 	// in heap metadata, and then discards it. other must be adjacent to s.
-	merge := func(other *mspan) {
+	merge := func(a, b, other *mspan) {
+		// Caller must ensure a.startAddr < b.startAddr and that either a or
+		// b is s. a and b must be adjacent. other is whichever of the two is
+		// not s.
+
+		if pageSize < physPageSize && a.scavenged && b.scavenged {
+			// If we're merging two scavenged spans on systems where
+			// pageSize < physPageSize, then their boundary should always be on
+			// a physical page boundary, due to the realignment that happens
+			// during coalescing. Throw if this case is no longer true, which
+			// means the implementation should probably be changed to scavenge
+			// along the boundary.
+			_, start := a.physPageBounds()
+			end, _ := b.physPageBounds()
+			if start != end {
+				println("runtime: a.base=", hex(a.base()), "a.npages=", a.npages)
+				println("runtime: b.base=", hex(b.base()), "b.npages=", b.npages)
+				println("runtime: physPageSize=", physPageSize, "pageSize=", pageSize)
+				throw("neighboring scavenged spans boundary is not a physical page boundary")
+			}
+		}
+
 		// Adjust s via base and npages and also in heap metadata.
 		s.npages += other.npages
 		s.needzero |= other.needzero
-		if other.startAddr < s.startAddr {
+		if a == s {
+			h.setSpan(s.base()+s.npages*pageSize-1, s)
+		} else {
 			s.startAddr = other.startAddr
 			h.setSpan(s.base(), s)
-		} else {
-			h.setSpan(s.base()+s.npages*pageSize-1, s)
 		}
 
-		// If before or s are scavenged, then we need to scavenge the final coalesced span.
-		needsScavenge = needsScavenge || other.scavenged || s.scavenged
-		prescavenged += other.released()
-
 		// The size is potentially changing so the treap needs to delete adjacent nodes and
 		// insert back as a combined node.
 		if other.scavenged {
@@ -468,9 +480,9 @@ func (h *mheap) coalesce(s *mspan) {
 		// b is s. a and b must be adjacent. other is whichever of the two is
 		// not s.
 
-		// If pageSize <= physPageSize then spans are always aligned
+		// If pageSize >= physPageSize then spans are always aligned
 		// to physical page boundaries, so just exit.
-		if pageSize <= physPageSize {
+		if pageSize >= physPageSize {
			return
 		}
 		// Since we're resizing other, we must remove it from the treap.
@@ -505,7 +517,7 @@ func (h *mheap) coalesce(s *mspan) {
 	// Coalesce with earlier, later spans.
 	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
 		if s.scavenged == before.scavenged {
-			merge(before)
+			merge(before, s, before)
 		} else {
 			realign(before, s, before)
 		}
@@ -514,26 +526,11 @@ func (h *mheap) coalesce(s *mspan) {
 	// Now check to see if next (greater addresses) span is free and can be coalesced.
 	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
 		if s.scavenged == after.scavenged {
-			merge(after)
+			merge(s, after, after)
 		} else {
 			realign(s, after, after)
 		}
 	}
-
-	if needsScavenge {
-		// When coalescing spans, some physical pages which
-		// were not returned to the OS previously because
-		// they were only partially covered by the span suddenly
-		// become available for scavenging. We want to make sure
-		// those holes are filled in, and the span is properly
-		// scavenged. Rather than trying to detect those holes
-		// directly, we collect how many bytes were already
-		// scavenged above and subtract that from heap_released
-		// before re-scavenging the entire newly-coalesced span,
-		// which will implicitly bump up heap_released.
-		memstats.heap_released -= uint64(prescavenged)
-		s.scavenge()
-	}
 }
 
 func (s *mspan) scavenge() uintptr {
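
Editor's note: the other behavioral detail of the diff is the new merge(a, b, other) calling convention, where the caller passes the two spans in address order so the helper decides which end of s to re-map by identity rather than by comparing startAddr. The toy model below illustrates that idea only; the names, the explicit s parameter, and the map standing in for h.setSpan are all invented for this sketch and are not the runtime's implementation (in the real code, merge is a closure over s).

package main

import "fmt"

const pageSize = 8 << 10 // hypothetical runtime page size

// toySpan models just the fields the merge helper touches.
type toySpan struct {
	startAddr uintptr
	npages    uintptr
}

// spanMap stands in for the heap's page-to-span mapping (h.setSpan).
var spanMap = map[uintptr]*toySpan{}

func setSpan(addr uintptr, s *toySpan) { spanMap[addr/pageSize] = s }

// merge mirrors the new convention: a is the lower span, b the higher one,
// s the survivor, and other the span being absorbed. Because the caller
// fixes the order, the helper no longer compares addresses to decide which
// end of s needs its mapping updated.
func merge(a, b, s, other *toySpan) {
	s.npages += other.npages
	if a == s {
		// s grows upward: only the mapping for its new last page changes.
		setSpan(s.startAddr+s.npages*pageSize-1, s)
	} else {
		// s grows downward: it takes other's base, so re-map the first page.
		s.startAddr = other.startAddr
		setSpan(s.startAddr, s)
	}
}

func main() {
	lo := &toySpan{startAddr: 0, npages: 2}
	hi := &toySpan{startAddr: 2 * pageSize, npages: 3}
	// Coalesce hi into lo, analogous to the diff's merge(s, after, after) call.
	merge(lo, hi, lo, hi)
	fmt.Println("merged span pages:", lo.npages) // 5
}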
