
Commit d57056b

runtime: don't free stack spans during GC
Memory for stacks is manually managed by the runtime and, currently
(with one exception) we free stack spans immediately when the last
stack on a span is freed. However, the garbage collector assumes that
spans can never transition from non-free to free during scan or mark.
This disagreement makes it possible for the garbage collector to mark
uninitialized objects and is blocking us from re-enabling the bad
pointer test in the garbage collector (issue #9880).

For example, the following sequence will result in marking an
uninitialized object:

1. scanobject loads a pointer slot out of the object it's scanning.
   This happens to be one of the special pointers from the heap into a
   stack. Call the pointer p and suppose it points into X's stack.

2. X, running on another thread, grows its stack and frees its old
   stack.

3. The old stack happens to be large or was the last stack in its
   span, so X frees this span, setting it to state _MSpanFree.

4. The span gets reused as a heap span.

5. scanobject calls heapBitsForObject, which loads the span containing
   p, which is now in state _MSpanInUse, but doesn't necessarily have
   an object at p. The not-object at p gets marked, and at this point
   all sorts of things can go wrong.

We already have a partial solution to this. When shrinking a stack,
we put the old stack on a queue to be freed at the end of garbage
collection. This was done to address exactly this problem, but wasn't
a complete solution.

This commit generalizes this solution to both shrinking and growing
stacks. For stacks that fit in the stack pool, we simply don't free
the span, even if its reference count reaches zero. It's fine to
reuse the span for other stacks, and this enables that. At the end of
GC, we sweep for cached stack spans with a zero reference count and
free them. For larger stacks, we simply queue the stack span to be
freed at the end of GC. Ideally, we would reuse these large stack
spans the way we can reuse small stack spans, but that's a more
invasive change that will have to wait until after the freeze.

Fixes #11267.

Change-Id: Ib7f2c5da4845cc0268e8dc098b08465116972a71
Reviewed-on: https://go-review.googlesource.com/11502
Reviewed-by: Russ Cox <[email protected]>
1 parent f73b2fc commit d57056b
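To make the new policy concrete before reading the diffs, here is a
minimal, self-contained sketch in ordinary Go. This is not runtime
code: span, gcRunning, freeQueue, freeStackSpan, and endOfGC are
hypothetical stand-ins for mspan, the gcphase != _GCoff check,
stackFreeQueue, stackfree, and freeStackSpans respectively.

package main

import "fmt"

// Hypothetical, simplified stand-ins for runtime internals: span
// models an mspan, and gcRunning models gcphase != _GCoff.
type span struct{ id int }

var (
	gcRunning bool
	freeQueue []*span // plays the role of stackFreeQueue
)

// freeStackSpan applies the commit's policy: free a span immediately
// only while sweeping; otherwise defer the free to the end of the cycle,
// so a span never transitions to free while GC is scanning or marking.
func freeStackSpan(s *span) {
	if !gcRunning {
		fmt.Printf("span %d freed immediately (sweeping)\n", s.id)
		return
	}
	freeQueue = append(freeQueue, s)
	fmt.Printf("span %d queued until GC ends\n", s.id)
}

// endOfGC drains the queue, playing the role of freeStackSpans.
func endOfGC() {
	for _, s := range freeQueue {
		fmt.Printf("span %d freed at end of GC\n", s.id)
	}
	freeQueue = nil
	gcRunning = false
}

func main() {
	gcRunning = true
	freeStackSpan(&span{id: 1}) // deferred: GC is marking
	endOfGC()                   // now freeing is safe
	freeStackSpan(&span{id: 2}) // freed immediately: we're sweeping
}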

File tree

3 files changed (+79, -34 lines)

src/runtime/mgc.go
src/runtime/mheap.go
src/runtime/stack1.go

src/runtime/mgc.go

Lines changed: 5 additions & 1 deletion
@@ -1386,7 +1386,11 @@ func gcMark(start_time int64) {
 		traceGCScanDone()
 	}
 
-	shrinkfinish()
+	// TODO(austin): This doesn't have to be done during STW, as
+	// long as we block the next GC cycle until this is done. Move
+	// it after we start the world, but before dropping worldsema.
+	// (See issue #11465.)
+	freeStackSpans()
 
 	cachestats()
src/runtime/mheap.go

Lines changed: 14 additions & 0 deletions
@@ -75,6 +75,20 @@ var mheap_ mheap
 // either one of the MHeap's free lists or one of the
 // MCentral's span lists. We use empty MSpan structures as list heads.
 
+// An MSpan representing actual memory has state _MSpanInUse,
+// _MSpanStack, or _MSpanFree. Transitions between these states are
+// constrained as follows:
+//
+// * A span may transition from free to in-use or stack during any GC
+//   phase.
+//
+// * During sweeping (gcphase == _GCoff), a span may transition from
+//   in-use to free (as a result of sweeping) or stack to free (as a
+//   result of stacks being freed).
+//
+// * During GC (gcphase != _GCoff), a span *must not* transition from
+//   stack or in-use to free. Because concurrent GC may read a pointer
+//   and then look up its span, the span state must be monotonic.
 const (
 	_MSpanInUse = iota // allocated for garbage collected heap
 	_MSpanStack        // allocated for use by stack allocator
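The transition rules in the comment above can be read as a small state
machine. Here is a hedged sketch that merely encodes the documented
constraints; spanState, transitionAllowed, and the constant names are
hypothetical (the real runtime checks gcphase directly rather than
calling anything like this):

package main

import "fmt"

// spanState mirrors the runtime's _MSpanFree/_MSpanInUse/_MSpanStack.
type spanState int

const (
	spanFree spanState = iota
	spanInUse
	spanStack
)

// transitionAllowed encodes the documented constraints: free -> in-use
// or free -> stack is legal in any phase; in-use/stack -> free is legal
// only while sweeping (gcphase == _GCoff), so that span state is
// monotonic while GC may read a pointer and then look up its span.
func transitionAllowed(from, to spanState, gcRunning bool) bool {
	switch {
	case from == spanFree && (to == spanInUse || to == spanStack):
		return true
	case (from == spanInUse || from == spanStack) && to == spanFree:
		return !gcRunning
	default:
		return false
	}
}

func main() {
	fmt.Println(transitionAllowed(spanFree, spanStack, true))  // true: any phase
	fmt.Println(transitionAllowed(spanStack, spanFree, true))  // false: forbidden during GC
	fmt.Println(transitionAllowed(spanStack, spanFree, false)) // true: sweeping
}

Note that the only phase-dependent edge is the one into the free state,
which is exactly the transition this commit stops making during GC.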

src/runtime/stack1.go

Lines changed: 60 additions & 33 deletions
@@ -44,7 +44,9 @@ const (
 var stackpool [_NumStackOrders]mspan
 var stackpoolmu mutex
 
-var stackfreequeue stack
+// List of stack spans to be freed at the end of GC. Protected by
+// stackpoolmu.
+var stackFreeQueue mspan
 
 // Cached value of haveexperiment("framepointer")
 var framepointer_enabled bool
@@ -56,6 +58,7 @@ func stackinit() {
 	for i := range stackpool {
 		mSpanList_Init(&stackpool[i])
 	}
+	mSpanList_Init(&stackFreeQueue)
 }
 
 // Allocates a stack from the free pool. Must be called with
@@ -108,8 +111,22 @@ func stackpoolfree(x gclinkptr, order uint8) {
 	x.ptr().next = s.freelist
 	s.freelist = x
 	s.ref--
-	if s.ref == 0 {
-		// span is completely free - return to heap
+	if gcphase == _GCoff && s.ref == 0 {
+		// Span is completely free. Return it to the heap
+		// immediately if we're sweeping.
+		//
+		// If GC is active, we delay the free until the end of
+		// GC to avoid the following type of situation:
+		//
+		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
+		// 2) The stack that pointer points to is copied
+		// 3) The old stack is freed
+		// 4) The containing span is marked free
+		// 5) GC attempts to mark the SudoG.elem pointer. The
+		//    marking fails because the pointer looks like a
+		//    pointer into a free span.
+		//
+		// By not freeing, we prevent step #4 until GC is done.
 		mSpanList_Remove(s)
 		s.freelist = 0
 		mHeap_FreeStack(&mheap_, s)
@@ -302,7 +319,21 @@ func stackfree(stk stack, n uintptr) {
 			println(hex(s.start<<_PageShift), v)
 			throw("bad span state")
 		}
-		mHeap_FreeStack(&mheap_, s)
+		if gcphase == _GCoff {
+			// Free the stack immediately if we're
+			// sweeping.
+			mHeap_FreeStack(&mheap_, s)
+		} else {
+			// Otherwise, add it to a list of stack spans
+			// to be freed at the end of GC.
+			//
+			// TODO(austin): Make it possible to re-use
+			// these spans as stacks, like we do for small
+			// stack spans. (See issue #11466.)
+			lock(&stackpoolmu)
+			mSpanList_Insert(&stackFreeQueue, s)
+			unlock(&stackpoolmu)
+		}
 	}
 }
 
@@ -613,25 +644,7 @@ func copystack(gp *g, newsize uintptr) {
 	if stackPoisonCopy != 0 {
 		fillstack(old, 0xfc)
 	}
-	if newsize > oldsize {
-		// growing, free stack immediately
-		stackfree(old, oldsize)
-	} else {
-		// shrinking, queue up free operation. We can't actually free the stack
-		// just yet because we might run into the following situation:
-		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
-		// 2) The stack that pointer points to is shrunk
-		// 3) The old stack is freed
-		// 4) The containing span is marked free
-		// 5) GC attempts to mark the SudoG.elem pointer. The marking fails because
-		//    the pointer looks like a pointer into a free span.
-		// By not freeing, we prevent step #4 until GC is done.
-		lock(&stackpoolmu)
-		*(*stack)(unsafe.Pointer(old.lo)) = stackfreequeue
-		*(*uintptr)(unsafe.Pointer(old.lo + ptrSize)) = oldsize
-		stackfreequeue = old
-		unlock(&stackpoolmu)
-	}
+	stackfree(old, oldsize)
 }
 
 // round x up to a power of 2.
@@ -868,18 +881,32 @@ func shrinkstack(gp *g) {
 	casgstatus(gp, _Gcopystack, oldstatus)
 }
 
-// Do any delayed stack freeing that was queued up during GC.
-func shrinkfinish() {
+// freeStackSpans frees unused stack spans at the end of GC.
+func freeStackSpans() {
 	lock(&stackpoolmu)
-	s := stackfreequeue
-	stackfreequeue = stack{}
-	unlock(&stackpoolmu)
-	for s.lo != 0 {
-		t := *(*stack)(unsafe.Pointer(s.lo))
-		n := *(*uintptr)(unsafe.Pointer(s.lo + ptrSize))
-		stackfree(s, n)
-		s = t
+
+	// Scan stack pools for empty stack spans.
+	for order := range stackpool {
+		list := &stackpool[order]
+		for s := list.next; s != list; {
+			next := s.next
+			if s.ref == 0 {
+				mSpanList_Remove(s)
+				s.freelist = 0
+				mHeap_FreeStack(&mheap_, s)
+			}
+			s = next
+		}
 	}
+
+	// Free queued stack spans.
+	for stackFreeQueue.next != &stackFreeQueue {
+		s := stackFreeQueue.next
+		mSpanList_Remove(s)
+		mHeap_FreeStack(&mheap_, s)
+	}
+
+	unlock(&stackpoolmu)
 }
 
 //go:nosplit
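The pool-scanning loop in freeStackSpans uses a common idiom for
deleting from an intrusive circular list while iterating over it:
capture next before possibly unlinking the current node. A standalone
illustration of that pattern follows; the node type and list helpers
are simplified stand-ins for mspan and the runtime's mSpanList_*
functions, not the real implementations.

package main

import "fmt"

// node stands in for mspan: the runtime's span lists are circular
// doubly-linked lists whose head is an empty sentinel node.
type node struct {
	next, prev *node
	ref        int
}

func listInit(head *node) { head.next, head.prev = head, head }

func listRemove(n *node) {
	n.prev.next = n.next
	n.next.prev = n.prev
	n.next, n.prev = nil, nil
}

func listInsert(head, n *node) {
	n.next = head.next
	n.prev = head
	head.next.prev = n
	head.next = n
}

func main() {
	head := &node{}
	listInit(head)
	for _, ref := range []int{0, 2, 0} {
		listInsert(head, &node{ref: ref})
	}
	// Same traversal shape as freeStackSpans: save next before a
	// possible removal, so unlinking the current node (which clears
	// its links) cannot break the iteration.
	for s := head.next; s != head; {
		next := s.next
		if s.ref == 0 {
			listRemove(s) // the runtime would then call mHeap_FreeStack
		}
		s = next
	}
	count := 0
	for s := head.next; s != head; s = s.next {
		count++
	}
	fmt.Println("remaining:", count) // 1: only the span still referenced
}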
