Commit 33dfd35

runtime: remove old page allocator
This change removes the old page allocator from the runtime.

Updates #35112.

Change-Id: Ib20e1c030f869b6318cd6f4288a9befdbae1b771
Reviewed-on: https://go-review.googlesource.com/c/go/+/195700
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
1 parent e6135c2 commit 33dfd35

File tree: 8 files changed, +27 -1606 lines

src/runtime/export_test.go
Lines changed: 3 additions & 175 deletions

@@ -12,8 +12,6 @@ import (
 	"unsafe"
 )
 
-const OldPageAllocator = oldPageAllocator
-
 var Fadd64 = fadd64
 var Fsub64 = fsub64
 var Fmul64 = fmul64

@@ -356,15 +354,9 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		slow.BySize[i].Frees = bySize[i].Frees
 	}
 
-	if oldPageAllocator {
-		for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
-			slow.HeapReleased += uint64(i.span().released())
-		}
-	} else {
-		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
-			slow.HeapReleased += uint64(pg) * pageSize
-		}
+	for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+		pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+		slow.HeapReleased += uint64(pg) * pageSize
 	}
 
 	// Unused space in the current arena also counts as released space.

@@ -543,170 +535,6 @@ func MapTombstoneCheck(m map[int]int) {
 	}
 }
 
-// UnscavHugePagesSlow returns the value of mheap_.freeHugePages
-// and the number of unscavenged huge pages calculated by
-// scanning the heap.
-func UnscavHugePagesSlow() (uintptr, uintptr) {
-	var base, slow uintptr
-	// Run on the system stack to avoid deadlock from stack growth
-	// trying to acquire the heap lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		base = mheap_.free.unscavHugePages
-		for _, s := range mheap_.allspans {
-			if s.state.get() == mSpanFree && !s.scavenged {
-				slow += s.hugePages()
-			}
-		}
-		unlock(&mheap_.lock)
-	})
-	return base, slow
-}
-
-// Span is a safe wrapper around an mspan, whose memory
-// is managed manually.
-type Span struct {
-	*mspan
-}
-
-func AllocSpan(base, npages uintptr, scavenged bool) Span {
-	var s *mspan
-	systemstack(func() {
-		lock(&mheap_.lock)
-		s = (*mspan)(mheap_.spanalloc.alloc())
-		unlock(&mheap_.lock)
-	})
-	s.init(base, npages)
-	s.scavenged = scavenged
-	return Span{s}
-}
-
-func (s *Span) Free() {
-	systemstack(func() {
-		lock(&mheap_.lock)
-		mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
-		unlock(&mheap_.lock)
-	})
-	s.mspan = nil
-}
-
-func (s Span) Base() uintptr {
-	return s.mspan.base()
-}
-
-func (s Span) Pages() uintptr {
-	return s.mspan.npages
-}
-
-type TreapIterType treapIterType
-
-const (
-	TreapIterScav TreapIterType = TreapIterType(treapIterScav)
-	TreapIterHuge               = TreapIterType(treapIterHuge)
-	TreapIterBits               = treapIterBits
-)
-
-type TreapIterFilter treapIterFilter
-
-func TreapFilter(mask, match TreapIterType) TreapIterFilter {
-	return TreapIterFilter(treapFilter(treapIterType(mask), treapIterType(match)))
-}
-
-func (s Span) MatchesIter(mask, match TreapIterType) bool {
-	return treapFilter(treapIterType(mask), treapIterType(match)).matches(s.treapFilter())
-}
-
-type TreapIter struct {
-	treapIter
-}
-
-func (t TreapIter) Span() Span {
-	return Span{t.span()}
-}
-
-func (t TreapIter) Valid() bool {
-	return t.valid()
-}
-
-func (t TreapIter) Next() TreapIter {
-	return TreapIter{t.next()}
-}
-
-func (t TreapIter) Prev() TreapIter {
-	return TreapIter{t.prev()}
-}
-
-// Treap is a safe wrapper around mTreap for testing.
-//
-// It must never be heap-allocated because mTreap is
-// notinheap.
-//
-//go:notinheap
-type Treap struct {
-	mTreap
-}
-
-func (t *Treap) Start(mask, match TreapIterType) TreapIter {
-	return TreapIter{t.start(treapIterType(mask), treapIterType(match))}
-}
-
-func (t *Treap) End(mask, match TreapIterType) TreapIter {
-	return TreapIter{t.end(treapIterType(mask), treapIterType(match))}
-}
-
-func (t *Treap) Insert(s Span) {
-	// mTreap uses a fixalloc in mheap_ for treapNode
-	// allocation which requires the mheap_ lock to manipulate.
-	// Locking here is safe because the treap itself never allocs
-	// or otherwise ends up grabbing this lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.insert(s.mspan)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) Find(npages uintptr) TreapIter {
-	return TreapIter{t.find(npages)}
-}
-
-func (t *Treap) Erase(i TreapIter) {
-	// mTreap uses a fixalloc in mheap_ for treapNode
-	// freeing which requires the mheap_ lock to manipulate.
-	// Locking here is safe because the treap itself never allocs
-	// or otherwise ends up grabbing this lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.erase(i.treapIter)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) RemoveSpan(s Span) {
-	// See Erase about locking.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.removeSpan(s.mspan)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) Size() int {
-	i := 0
-	t.mTreap.treap.walkTreap(func(t *treapNode) {
-		i++
-	})
-	return i
-}
-
-func (t *Treap) CheckInvariants() {
-	t.mTreap.treap.walkTreap(checkTreapNode)
-	t.mTreap.treap.validateInvariants()
-}
-
 func RunGetgThreadSwitchTest() {
 	// Test that getg works correctly with thread switch.
 	// With gccgo, if we generate getg inlined, the backend
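The substantive change above is in ReadMemStatsSlow: with the treap gone, HeapReleased is recomputed by walking every page-allocator chunk, popcounting its scavenge bitmap, and multiplying by the page size. A minimal sketch of that accounting pattern follows, using simplified stand-in types (scavBitmap, chunkPages, and popcnt here are illustrative; the runtime's unexported pallocData.popcntRange additionally takes a bit range):

// A minimal sketch, not the runtime's real code: scavBitmap, chunkPages,
// and popcnt stand in for the unexported pallocData, pallocChunkPages,
// and popcntRange.
package main

import (
	"fmt"
	"math/bits"
)

const (
	pageSize   = 8192 // bytes per runtime page
	chunkPages = 512  // pages tracked per chunk
)

// scavBitmap models one chunk's scavenge bitmap: bit i set means
// page i of the chunk has been released back to the OS.
type scavBitmap [chunkPages / 64]uint64

// popcnt counts the scavenged pages in a chunk by popcounting
// every word of its bitmap.
func (b *scavBitmap) popcnt() int {
	n := 0
	for _, w := range b {
		n += bits.OnesCount64(w)
	}
	return n
}

func main() {
	chunks := make([]scavBitmap, 4)
	chunks[1][0] = 0xFF // mark 8 pages of chunk 1 as scavenged

	// Mirror of the new ReadMemStatsSlow loop: released bytes =
	// scavenged-page count times the page size.
	var released uint64
	for i := range chunks {
		released += uint64(chunks[i].popcnt()) * pageSize
	}
	fmt.Println(released) // 8 * 8192 = 65536
}

Working a 64-bit word at a time with math/bits.OnesCount64 keeps this slow-path scan cheap even though it visits every chunk.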

src/runtime/gc_test.go
Lines changed: 0 additions & 23 deletions

@@ -464,29 +464,6 @@ func TestReadMemStats(t *testing.T) {
 	}
 }
 
-func TestUnscavHugePages(t *testing.T) {
-	if !runtime.OldPageAllocator {
-		// This test is only relevant for the old page allocator.
-		return
-	}
-	// Allocate 20 MiB and immediately free it a few times to increase
-	// the chance that unscavHugePages isn't zero and that some kind of
-	// accounting had to happen in the runtime.
-	for j := 0; j < 3; j++ {
-		var large [][]byte
-		for i := 0; i < 5; i++ {
-			large = append(large, make([]byte, runtime.PhysHugePageSize))
-		}
-		runtime.KeepAlive(large)
-		runtime.GC()
-	}
-	base, slow := runtime.UnscavHugePagesSlow()
-	if base != slow {
-		logDiff(t, "unscavHugePages", reflect.ValueOf(base), reflect.ValueOf(slow))
-		t.Fatal("unscavHugePages mismatch")
-	}
-}
-
 func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
 	typ := got.Type()
 	switch typ.Kind() {
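The deleted TestUnscavHugePages followed a common runtime testing pattern: exercise the allocator, then cross-check a counter maintained incrementally (base) against one recomputed from scratch by a full scan (slow). A hedged sketch of that pattern against an ordinary data structure (counterSet is invented for illustration, not a runtime type):

// Sketch of the base-vs-slow consistency check, outside the runtime.
// counterSet is a made-up type: it keeps an incremental count that the
// test validates against a full recount, just as TestUnscavHugePages
// compared mheap_.free.unscavHugePages against a heap scan.
package counterset

import "testing"

type counterSet struct {
	items map[int]bool
	count int // updated incrementally on every successful Add
}

func (s *counterSet) Add(k int) {
	if s.items == nil {
		s.items = make(map[int]bool)
	}
	if !s.items[k] {
		s.items[k] = true
		s.count++
	}
}

// slowCount recomputes the count from scratch, playing the role of
// the "slow" heap scan.
func (s *counterSet) slowCount() int {
	n := 0
	for range s.items {
		n++
	}
	return n
}

func TestCountConsistency(t *testing.T) {
	var s counterSet
	for i := 0; i < 100; i++ {
		s.Add(i % 10) // duplicates must not inflate the incremental count
	}
	if base, slow := s.count, s.slowCount(); base != slow {
		t.Fatalf("count mismatch: incremental=%d recomputed=%d", base, slow)
	}
}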

src/runtime/malloc.go
Lines changed: 0 additions & 3 deletions

@@ -317,9 +317,6 @@ const (
 	//
 	// This should agree with minZeroPage in the compiler.
 	minLegalPointer uintptr = 4096
-
-	// Whether to use the old page allocator or not.
-	oldPageAllocator = false
 )
 
 // physPageSize is the size in bytes of the OS's physical pages.
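The deleted constant explains why carrying both allocators in the tree was cheap: oldPageAllocator was a boolean constant set to false, so branches like "if oldPageAllocator { ... }" were resolved at compile time and the dead arm was eliminated from the binary. A small sketch of that constant feature-flag pattern, with illustrative names:

// Sketch of the constant feature-flag pattern used by oldPageAllocator.
// useOldPath is illustrative; any branch on a false constant is removed
// by the compiler, so the old code path adds no size or runtime cost.
package main

import "fmt"

const useOldPath = false // flip while migrating; delete when the old path goes

func process(n int) int {
	if useOldPath {
		return n * 2 // old implementation: dead code while useOldPath is false
	}
	return n + n // new implementation
}

func main() {
	fmt.Println(process(21)) // 42; only the live branch exists in the binary
}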

src/runtime/malloc_test.go
Lines changed: 0 additions & 4 deletions

@@ -177,10 +177,6 @@ func TestPhysicalMemoryUtilization(t *testing.T) {
 }
 
 func TestScavengedBitsCleared(t *testing.T) {
-	if OldPageAllocator {
-		// This test is only relevant for the new page allocator.
-		return
-	}
 	var mismatches [128]BitsMismatch
 	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
 		t.Errorf("uncleared scavenged bits")
