@@ -30,12 +30,11 @@ const minPhysPageSize = 4096
 //go:notinheap
 type mheap struct {
 	lock      mutex
-	free      mTreap    // free and non-scavenged spans
-	scav      mTreap    // free and scavenged spans
-	busy      mSpanList // busy list of spans
-	sweepgen  uint32    // sweep generation, see comment in mspan
-	sweepdone uint32    // all spans are swept
-	sweepers  uint32    // number of active sweepone calls
+	free      mTreap // free and non-scavenged spans
+	scav      mTreap // free and scavenged spans
+	sweepgen  uint32 // sweep generation, see comment in mspan
+	sweepdone uint32 // all spans are swept
+	sweepers  uint32 // number of active sweepone calls
 
 	// allspans is a slice of all mspans ever created. Each mspan
 	// appears exactly once.
@@ -676,7 +675,7 @@ func (h *mheap) init() {
 	h.spanalloc.zero = false
 
 	// h->mapcache needs no init
-	h.busy.init()
+
 	for i := range h.central {
 		h.central[i].mcentral.init(spanClass(i))
 	}
@@ -893,8 +892,6 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 			mheap_.largealloc += uint64(s.elemsize)
 			mheap_.nlargealloc++
 			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
-			// Swept spans are at the end of lists.
-			h.busy.insertBack(s)
 		}
 	}
 	// heap_scan and heap_live were updated.
@@ -1199,9 +1196,6 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
 		memstats.heap_idle += uint64(s.npages << _PageShift)
 	}
 	s.state = mSpanFree
-	if s.inList() {
-		h.busy.remove(s)
-	}
 
 	// Stamp newly unused spans. The scavenger will use that
 	// info to potentially give back some pages to the OS.