Skip to content

Commit ad863ba

Browse files
committed
runtime: break down memstats.gc_sys
This change breaks apart gc_sys into three distinct pieces. Two of those pieces come from heap_sys, since they are allocated from the page heap. The rest comes from memory mapped via e.g. persistentalloc, which better fits the purpose of a sysMemStat. Also, gc_sys is renamed to gcMiscSys.

Change-Id: I098789170052511e7b31edbcdc9a53e5c24573f7
Reviewed-on: https://go-review.googlesource.com/c/go/+/246973
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Go Bot <[email protected]>
Trust: Michael Knyszek <[email protected]>
Reviewed-by: Michael Pratt <[email protected]>
1 parent 39e335a commit ad863ba

File tree

7 files changed

+39
-27
lines changed

7 files changed

+39
-27
lines changed

src/runtime/heapdump.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -540,6 +540,9 @@ func dumpms() {
540540
}
541541

542542
func dumpmemstats() {
543+
// These ints should be identical to the exported
544+
// MemStats structure and should be ordered the same
545+
// way too.
543546
dumpint(tagMemStats)
544547
dumpint(memstats.alloc)
545548
dumpint(memstats.total_alloc)
@@ -560,7 +563,7 @@ func dumpmemstats() {
560563
dumpint(memstats.mcache_inuse)
561564
dumpint(memstats.mcache_sys.load())
562565
dumpint(memstats.buckhash_sys.load())
563-
dumpint(memstats.gc_sys.load())
566+
dumpint(memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse)
564567
dumpint(memstats.other_sys.load())
565568
dumpint(memstats.next_gc)
566569
dumpint(memstats.last_gc_unix)

src/runtime/malloc.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -743,9 +743,9 @@ mapped:
743743
throw("arena already initialized")
744744
}
745745
var r *heapArena
746-
r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
746+
r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
747747
if r == nil {
748-
r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
748+
r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
749749
if r == nil {
750750
throw("out of memory allocating heap arena metadata")
751751
}
@@ -757,7 +757,7 @@ mapped:
757757
if size == 0 {
758758
size = physPageSize
759759
}
760-
newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
760+
newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
761761
if newArray == nil {
762762
throw("out of memory allocating allArenas")
763763
}

src/runtime/mcheckmark.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ func startCheckmarks() {
4141

4242
if bitmap == nil {
4343
// Allocate bitmap on first use.
44-
bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gc_sys))
44+
bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
4545
if bitmap == nil {
4646
throw("out of memory allocating checkmarks bitmap")
4747
}

src/runtime/mfinal.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
8888
lock(&finlock)
8989
if finq == nil || finq.cnt == uint32(len(finq.fin)) {
9090
if finc == nil {
91-
finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
91+
finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
9292
finc.alllink = allfin
9393
allfin = finc
9494
if finptrmask[0] == 0 {

src/runtime/mheap.go

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -713,7 +713,7 @@ func (h *mheap) init() {
713713
h.central[i].mcentral.init(spanClass(i))
714714
}
715715

716-
h.pages.init(&h.lock, &memstats.gc_sys)
716+
h.pages.init(&h.lock, &memstats.gcMiscSys)
717717
}
718718

719719
// reclaim sweeps and reclaims at least npage pages into the heap.
@@ -1230,8 +1230,10 @@ HaveSpan:
12301230
atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
12311231
case spanAllocStack:
12321232
atomic.Xadd64(&memstats.stacks_inuse, int64(nbytes))
1233-
case spanAllocPtrScalarBits, spanAllocWorkBuf:
1234-
memstats.gc_sys.add(int64(nbytes))
1233+
case spanAllocWorkBuf:
1234+
atomic.Xadd64(&memstats.gcWorkBufInUse, int64(nbytes))
1235+
case spanAllocPtrScalarBits:
1236+
atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, int64(nbytes))
12351237
}
12361238
if typ.manual() {
12371239
// Manually managed memory doesn't count toward heap_sys.
@@ -1406,8 +1408,10 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
14061408
atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
14071409
case spanAllocStack:
14081410
atomic.Xadd64(&memstats.stacks_inuse, -int64(nbytes))
1409-
case spanAllocPtrScalarBits, spanAllocWorkBuf:
1410-
memstats.gc_sys.add(-int64(nbytes))
1411+
case spanAllocWorkBuf:
1412+
atomic.Xadd64(&memstats.gcWorkBufInUse, -int64(nbytes))
1413+
case spanAllocPtrScalarBits:
1414+
atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, -int64(nbytes))
14111415
}
14121416
if typ.manual() {
14131417
// Manually managed memory doesn't count toward heap_sys, so add it back.
@@ -1956,7 +1960,7 @@ func newArenaMayUnlock() *gcBitsArena {
19561960
var result *gcBitsArena
19571961
if gcBitsArenas.free == nil {
19581962
unlock(&gcBitsArenas.lock)
1959-
result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
1963+
result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
19601964
if result == nil {
19611965
throw("runtime: cannot allocate memory")
19621966
}

src/runtime/mspanset.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ retry:
102102
if newCap == 0 {
103103
newCap = spanSetInitSpineCap
104104
}
105-
newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
105+
newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
106106
if b.spineCap != 0 {
107107
// Blocks are allocated off-heap, so
108108
// no write barriers.
@@ -283,7 +283,7 @@ func (p *spanSetBlockAlloc) alloc() *spanSetBlock {
283283
if s := (*spanSetBlock)(p.stack.pop()); s != nil {
284284
return s
285285
}
286-
return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
286+
return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
287287
}
288288

289289
// free returns a spanSetBlock back to the pool.

src/runtime/mstats.go

Lines changed: 18 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -44,15 +44,17 @@ type mstats struct {
4444

4545
// Statistics about allocation of low-level fixed-size structures.
4646
// Protected by FixAlloc locks.
47-
stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
48-
stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
49-
mspan_inuse uint64 // mspan structures
50-
mspan_sys sysMemStat
51-
mcache_inuse uint64 // mcache structures
52-
mcache_sys sysMemStat
53-
buckhash_sys sysMemStat // profiling bucket hash table
54-
gc_sys sysMemStat // updated atomically or during STW
55-
other_sys sysMemStat // updated atomically or during STW
47+
stacks_inuse uint64 // bytes in manually-managed stack spans; updated atomically or during STW
48+
stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
49+
mspan_inuse uint64 // mspan structures
50+
mspan_sys sysMemStat
51+
mcache_inuse uint64 // mcache structures
52+
mcache_sys sysMemStat
53+
buckhash_sys sysMemStat // profiling bucket hash table
54+
gcWorkBufInUse uint64 // updated atomically or during STW
55+
gcProgPtrScalarBitsInUse uint64 // updated atomically or during STW
56+
gcMiscSys sysMemStat // updated atomically or during STW
57+
other_sys sysMemStat // updated atomically or during STW
5658

5759
// Statistics about the garbage collector.
5860

@@ -472,7 +474,10 @@ func readmemstats_m(stats *MemStats) {
472474
stats.MCacheInuse = memstats.mcache_inuse
473475
stats.MCacheSys = memstats.mcache_sys.load()
474476
stats.BuckHashSys = memstats.buckhash_sys.load()
475-
stats.GCSys = memstats.gc_sys.load()
477+
// MemStats defines GCSys as an aggregate of all memory related
478+
// to the memory management system, but we track this memory
479+
// at a more granular level in the runtime.
480+
stats.GCSys = memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
476481
stats.OtherSys = memstats.other_sys.load()
477482
stats.NextGC = memstats.next_gc
478483
stats.LastGC = memstats.last_gc_unix
@@ -557,11 +562,11 @@ func updatememstats() {
557562
memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
558563
memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
559564
memstats.sys = memstats.heap_sys.load() + memstats.stacks_sys.load() + memstats.mspan_sys.load() +
560-
memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gc_sys.load() +
565+
memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gcMiscSys.load() +
561566
memstats.other_sys.load()
562567

563-
// We also count stacks_inuse as sys memory.
564-
memstats.sys += memstats.stacks_inuse
568+
// We also count stacks_inuse, gcWorkBufInUse, and gcProgPtrScalarBitsInUse as sys memory.
569+
memstats.sys += memstats.stacks_inuse + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
565570

566571
// Calculate memory allocator stats.
567572
// During program execution we only count number of frees and amount of freed memory.

0 commit comments

Comments
 (0)