Skip to content

Commit e500ffd

Browse files
committed
runtime: track all heap arenas in a slice
Currently, there's no efficient way to iterate over the Go heap. We're going to need this for fast free page sweeping, so this CL adds a slice of all allocated heap arenas. This will also be useful for generational GC.

For #18155.

Change-Id: I58d126cfb9c3f61b3125d80b74ccb1b2169efbcc
Reviewed-on: https://go-review.googlesource.com/c/138076
Run-TryBot: Austin Clements <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Rick Hudson <[email protected]>
1 parent b2df0bd commit e500ffd

File tree

2 files changed

+31
-1
lines changed

2 files changed

+31
-1
lines changed

src/runtime/malloc.go

Lines changed: 21 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -641,6 +641,27 @@ mapped:
641641
}
642642
}
643643

644+
// Add the arena to the arenas list.
645+
if len(h.allArenas) == cap(h.allArenas) {
646+
size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
647+
if size == 0 {
648+
size = physPageSize
649+
}
650+
newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
651+
if newArray == nil {
652+
throw("out of memory allocating allArenas")
653+
}
654+
oldSlice := h.allArenas
655+
*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
656+
copy(h.allArenas, oldSlice)
657+
// Do not free the old backing array because
658+
// there may be concurrent readers. Since we
659+
// double the array each time, this can lead
660+
// to at most 2x waste.
661+
}
662+
h.allArenas = h.allArenas[:len(h.allArenas)+1]
663+
h.allArenas[len(h.allArenas)-1] = ri
664+
644665
// Store atomically just in case an object from the
645666
// new heap arena becomes visible before the heap lock
646667
// is released (which shouldn't happen, but there's

src/runtime/mheap.go

Lines changed: 10 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -133,7 +133,16 @@ type mheap struct {
133133
// (the actual arenas). This is only used on 32-bit.
134134
arena linearAlloc
135135

136-
// _ uint32 // ensure 64-bit alignment of central
136+
// allArenas is the arenaIndex of every mapped arena. This can
137+
// be used to iterate through the address space.
138+
//
139+
// Access is protected by mheap_.lock. However, since this is
140+
// append-only and old backing arrays are never freed, it is
141+
// safe to acquire mheap_.lock, copy the slice header, and
142+
// then release mheap_.lock.
143+
allArenas []arenaIdx
144+
145+
_ uint32 // ensure 64-bit alignment of central
137146

138147
// central free lists for small size classes.
139148
// the padding makes sure that the mcentrals are

0 commit comments

Comments (0)