Commit eaa1c87 (parent d56199d)

runtime: remove periodic scavenging

This change removes the periodic scavenger, which goes over every span
in the heap and scavenges it if it hasn't been used for 5 minutes. It
should no longer be necessary once background scavenging (a follow-up
change) is in place.

For #30333.

Change-Id: Ic3a1a4e85409dc25719ba4593a3b60273a4c71e0
Reviewed-on: https://go-review.googlesource.com/c/go/+/143157
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
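
Note: after this change, user code can still force a scavenge via
runtime/debug.FreeOSMemory, which (see the mheap.go diff below) runs a GC
and then scavenges every free span. A minimal sketch of exercising that
path; the allocation sizes are arbitrary, illustrative values:

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	// Allocate ~64 MiB and drop the reference so there are free spans
	// for the scavenger to return to the OS.
	garbage := make([][]byte, 64)
	for i := range garbage {
		garbage[i] = make([]byte, 1<<20)
	}
	garbage = nil

	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)

	// Runs a GC, then the forced scavenge path added in this commit:
	// scavengeAll -> scavengeLocked(^uintptr(0), false).
	debug.FreeOSMemory()

	runtime.ReadMemStats(&after)
	fmt.Printf("HeapReleased: %d -> %d bytes\n", before.HeapReleased, after.HeapReleased)
}

Running this with GODEBUG=gctrace=1 should also print the "forced scvg:"
lines added in this diff.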

File tree: 2 files changed, +34 −77 lines


src/runtime/mheap.go

Lines changed: 34 additions & 55 deletions
@@ -394,7 +394,6 @@ type mspan struct {
 	divShift2   uint8    // for divide by elemsize - divMagic.shift2
 	scavenged   bool     // whether this span has had its pages released to the OS
 	elemsize    uintptr  // computed from sizeclass or from npages
-	unusedsince int64    // first time spotted by gc in mspanfree state
 	limit       uintptr  // end of data in span
 	speciallock mutex    // guards specials list
 	specials    *special // linked list of special records sorted by offset.
@@ -1209,10 +1208,9 @@ HaveSpan:
 		// Also, scavenge may cause coalescing, so prevent
 		// coalescing with s by temporarily changing its state.
 		s.state = mSpanManual
-		h.scavengeLocked(s.npages * pageSize)
+		h.scavengeLocked(s.npages*pageSize, true)
 		s.state = mSpanFree
 	}
-	s.unusedsince = 0

 	h.setSpans(s.base(), npage, s)

@@ -1243,7 +1241,7 @@ func (h *mheap) grow(npage uintptr) bool {
 	// is proportional to the number of sysUnused() calls rather than
 	// the number of pages released, so we make fewer of those calls
 	// with larger spans.
-	h.scavengeLocked(size)
+	h.scavengeLocked(size, true)

 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
@@ -1253,7 +1251,7 @@
 	atomic.Store(&s.sweepgen, h.sweepgen)
 	s.state = mSpanInUse
 	h.pagesInUse += uint64(s.npages)
-	h.freeSpanLocked(s, false, true, 0)
+	h.freeSpanLocked(s, false, true)
 	return true
 }

@@ -1283,7 +1281,7 @@ func (h *mheap) freeSpan(s *mspan, large bool) {
 			// heap_scan changed.
 			gcController.revise()
 		}
-		h.freeSpanLocked(s, true, true, 0)
+		h.freeSpanLocked(s, true, true)
 		unlock(&h.lock)
 	})
 }
@@ -1304,12 +1302,12 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	lock(&h.lock)
 	*stat -= uint64(s.npages << _PageShift)
 	memstats.heap_sys += uint64(s.npages << _PageShift)
-	h.freeSpanLocked(s, false, true, 0)
+	h.freeSpanLocked(s, false, true)
 	unlock(&h.lock)
 }

 // s must be on the busy list or unlinked.
-func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
+func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 	switch s.state {
 	case mSpanManual:
 		if s.allocCount != 0 {
@@ -1337,13 +1335,6 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	}
 	s.state = mSpanFree

-	// Stamp newly unused spans. The scavenger will use that
-	// info to potentially give back some pages to the OS.
-	s.unusedsince = unusedsince
-	if unusedsince == 0 {
-		s.unusedsince = nanotime()
-	}
-
 	// Coalesce span with neighbors.
 	h.coalesce(s)

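
Note: the stamping removed here was one half of a protocol: freeSpanLocked
recorded when a span became unused, and the periodic scavenger (deleted
below in scavengeAllLocked and in proc.go) compared that stamp against a
limit. A standalone sketch of the idea, with illustrative types rather
than runtime internals:

package main

import (
	"fmt"
	"time"
)

type span struct {
	unusedsince int64 // nanoseconds; 0 means "stamp me now"
}

// freeSpan mirrors the deleted stamping in freeSpanLocked.
func freeSpan(s *span, unusedsince int64) {
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = time.Now().UnixNano()
	}
}

// shouldScavenge mirrors the deleted check in scavengeAllLocked.
func shouldScavenge(s *span, now, limit int64) bool {
	return now-s.unusedsince > limit
}

func main() {
	var s span
	freeSpan(&s, 0)
	limit := int64(5 * 60 * 1e9) // the 5-minute threshold sysmon used
	now := s.unusedsince + limit + 1
	fmt.Println(shouldScavenge(&s, now, limit)) // true once the limit passes
}
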
@@ -1353,15 +1344,23 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i

 // scavengeLocked scavenges nbytes worth of spans in the free treap by
 // starting from the span with the highest base address and working down.
-// It then takes those spans and places them in scav. h must be locked.
-func (h *mheap) scavengeLocked(nbytes uintptr) {
+// It then takes those spans and places them in scav.
+//
+// useCredit determines whether a scavenging call should use the credit
+// system. In general, useCredit should be true except in special
+// circumstances.
+//
+// Returns the amount of memory scavenged in bytes. h must be locked.
+func (h *mheap) scavengeLocked(nbytes uintptr, useCredit bool) uintptr {
 	// Use up scavenge credit if there's any available.
-	if nbytes > h.scavengeCredit {
-		nbytes -= h.scavengeCredit
-		h.scavengeCredit = 0
-	} else {
-		h.scavengeCredit -= nbytes
-		return
+	if useCredit {
+		if nbytes > h.scavengeCredit {
+			nbytes -= h.scavengeCredit
+			h.scavengeCredit = 0
+		} else {
+			h.scavengeCredit -= nbytes
+			return nbytes
+		}
 	}
 	released := uintptr(0)
 	// Iterate over spans with huge pages first, then spans without.
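
Note: to see the credit accounting above in isolation, here is a
standalone sketch of the same flow; the scavenger type and releasePages
helper are illustrative stand-ins, not runtime internals:

package main

import "fmt"

type scavenger struct {
	credit uintptr // bytes released beyond what earlier requests asked for
}

// scavenge mirrors the control flow of scavengeLocked(nbytes, true):
// spend banked credit first, release only the remainder, and bank any
// over-release for the next call.
func (s *scavenger) scavenge(nbytes uintptr) uintptr {
	if nbytes > s.credit {
		nbytes -= s.credit
		s.credit = 0
	} else {
		s.credit -= nbytes
		return nbytes // fully covered by credit; nothing to release
	}
	released := releasePages(nbytes)
	if released > nbytes {
		s.credit += released - nbytes // over-scavenged: bank the surplus
	}
	return released
}

// releasePages stands in for the treap walk: spans are released whole,
// so pretend memory comes back in 64 KiB units, rounding requests up.
func releasePages(nbytes uintptr) uintptr {
	const span = 64 << 10
	return (nbytes + span - 1) / span * span
}

func main() {
	var s scavenger
	fmt.Println(s.scavenge(100<<10), s.credit) // 131072 28672: released 128 KiB, banked 28 KiB
	fmt.Println(s.scavenge(16<<10), s.credit)  // 16384 12288: paid entirely from credit
}

The credit exists because spans are released whole, so a request for
nbytes may release more than asked; banking the surplus lets later calls
return early without doing any releasing work.
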
@@ -1388,60 +1387,41 @@
 			h.free.insert(s)
 		}
 	}
-	// If we over-scavenged, turn that extra amount into credit.
-	if released > nbytes {
-		h.scavengeCredit += released - nbytes
-	}
-}
-
-// scavengeAll visits each node in the unscav treap and scavenges the
-// treapNode's span. It then removes the scavenged span from
-// unscav and adds it into scav before continuing. h must be locked.
-func (h *mheap) scavengeAllLocked(now, limit uint64) uintptr {
-	// Iterate over the unscavenged spans in the treap scavenging spans
-	// if unused for at least limit time.
-	released := uintptr(0)
-	for t := h.free.start(treapIterScav, 0); t.valid(); {
-		s := t.span()
-		n := t.next()
-		if (now - uint64(s.unusedsince)) > limit {
-			start, end := s.physPageBounds()
-			if start < end {
-				h.free.erase(t)
-				released += s.scavenge()
-				// See (*mheap).scavengeLocked.
-				h.coalesce(s)
-				h.free.insert(s)
-			}
+	if useCredit {
+		// If we over-scavenged, turn that extra amount into credit.
+		if released > nbytes {
+			h.scavengeCredit += released - nbytes
 		}
-		t = n
 	}
 	return released
 }

-func (h *mheap) scavengeAll(k int32, now, limit uint64) {
+// scavengeAll visits each node in the free treap and scavenges the
+// treapNode's span. It then removes the scavenged span from
+// unscav and adds it into scav before continuing.
+func (h *mheap) scavengeAll() {
 	// Disallow malloc or panic while holding the heap lock. We do
 	// this here because this is an non-mallocgc entry-point to
 	// the mheap API.
 	gp := getg()
 	gp.m.mallocing++
 	lock(&h.lock)
-	released := h.scavengeAllLocked(now, limit)
+	released := h.scavengeLocked(^uintptr(0), false)
 	unlock(&h.lock)
 	gp.m.mallocing--

 	if debug.gctrace > 0 {
 		if released > 0 {
-			print("scvg", k, ": ", released>>20, " MB released\n")
+			print("forced scvg: ", released>>20, " MB released\n")
 		}
-		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
+		print("forced scvg: inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
 	}
 }

 //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
 func runtime_debug_freeOSMemory() {
 	GC()
-	systemstack(func() { mheap_.scavengeAll(-1, ^uint64(0), 0) })
+	systemstack(func() { mheap_.scavengeAll() })
 }

 // Initialize a new span with the given start and npages.
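
Note: the ^uintptr(0) argument in the new scavengeAll is a bitwise NOT of
zero, i.e. every bit set, the maximum value a uintptr can hold, so the
call asks scavengeLocked to release as many bytes as it can find, while
useCredit=false keeps this forced pass from consuming or minting credit.
A tiny demonstration of the sentinel value:

package main

import "fmt"

func main() {
	max := ^uintptr(0)       // all bits set: the largest uintptr value
	fmt.Printf("%#x\n", max) // 0xffffffffffffffff on 64-bit platforms
	fmt.Println(max+1 == 0)  // true: unsigned arithmetic wraps to zero
}
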
@@ -1456,7 +1436,6 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.spanclass = 0
 	span.elemsize = 0
 	span.state = mSpanDead
-	span.unusedsince = 0
 	span.scavenged = false
 	span.speciallock.key = 0
 	span.specials = nil

src/runtime/proc.go

Lines changed: 0 additions & 22 deletions
@@ -4282,19 +4282,6 @@ func sysmon() {
 	checkdead()
 	unlock(&sched.lock)

-	// If a heap span goes unused for 5 minutes after a garbage collection,
-	// we hand it back to the operating system.
-	scavengelimit := int64(5 * 60 * 1e9)
-
-	if debug.scavenge > 0 {
-		// Scavenge-a-lot for testing.
-		forcegcperiod = 10 * 1e6
-		scavengelimit = 20 * 1e6
-	}
-
-	lastscavenge := nanotime()
-	nscavenge := 0
-
 	lasttrace := int64(0)
 	idle := 0 // how many cycles in succession we had not wokeup somebody
 	delay := uint32(0)
@@ -4316,9 +4303,6 @@
 		// Make wake-up period small enough
 		// for the sampling to be correct.
 		maxsleep := forcegcperiod / 2
-		if scavengelimit < forcegcperiod {
-			maxsleep = scavengelimit / 2
-		}
 		shouldRelax := true
 		if osRelaxMinNS > 0 {
 			next := timeSleepUntil()
@@ -4381,12 +4365,6 @@
 			injectglist(&list)
 			unlock(&forcegc.lock)
 		}
-		// scavenge heap once in a while
-		if lastscavenge+scavengelimit/2 < now {
-			mheap_.scavengeAll(int32(nscavenge), uint64(now), uint64(scavengelimit))
-			lastscavenge = now
-			nscavenge++
-		}
 		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
 			lasttrace = now
 			schedtrace(debug.scheddetail > 0)
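
Note: the deleted sysmon logic amounted to the cadence sketched below:
run a full scavenging pass whenever half of scavengelimit has elapsed
since the previous pass. The scavenge call is stubbed out and the clock
simulated; this is a reading of the removed code, not runtime internals:

package main

import (
	"fmt"
	"time"
)

func main() {
	// 5 minutes in nanoseconds, matching the removed scavengelimit.
	scavengelimit := int64(5 * 60 * 1e9)
	start := time.Now().UnixNano()
	lastscavenge := start
	nscavenge := 0

	// Simulate sysmon wakeups 3 minutes apart.
	for i := 0; i < 4; i++ {
		now := start + int64(i)*3*60*1e9
		if lastscavenge+scavengelimit/2 < now {
			// Here the real sysmon called
			// mheap_.scavengeAll(int32(nscavenge), uint64(now), uint64(scavengelimit)).
			fmt.Printf("scvg%d: pass over spans unused for >= %v\n",
				nscavenge, time.Duration(scavengelimit))
			lastscavenge = now
			nscavenge++
		}
	}
}
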
