@@ -12,8 +12,6 @@ import (
 	"unsafe"
 )
 
-const OldPageAllocator = oldPageAllocator
-
 var Fadd64 = fadd64
 var Fsub64 = fsub64
 var Fmul64 = fmul64
@@ -356,15 +354,9 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 			slow.BySize[i].Frees = bySize[i].Frees
 		}
 
-		if oldPageAllocator {
-			for i := mheap_.free.start(0, 0); i.valid(); i = i.next() {
-				slow.HeapReleased += uint64(i.span().released())
-			}
-		} else {
-			for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-				pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
-				slow.HeapReleased += uint64(pg) * pageSize
-			}
+		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
+			pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
+			slow.HeapReleased += uint64(pg) * pageSize
 		}
 
 		// Unused space in the current arena also counts as released space.
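Note on the ReadMemStatsSlow hunk above: with the treap-based allocator removed, HeapReleased is recomputed by walking the page allocator's chunk index and popcounting each chunk's scavenged bitmap; every set bit is one page returned to the OS, so the bit count times pageSize gives the released bytes. Below is a minimal, self-contained Go sketch of just that counting step. chunkBits, popcnt, and the sample bit patterns are hypothetical stand-ins for the runtime's scavenged bitmap and its popcntRange(0, pallocChunkPages) call, and the 8 KiB page size is an assumption about the target platform.

package main

import (
	"fmt"
	"math/bits"
)

// chunkBits stands in for one chunk's scavenged bitmap: one bit per page,
// set when the page has been returned to the OS. 8 words x 64 bits = 512
// pages, matching pallocChunkPages in the runtime at the time.
type chunkBits [8]uint64

// popcnt counts the set bits across the whole chunk, mirroring what
// popcntRange(0, pallocChunkPages) computes in the hunk above.
func (c *chunkBits) popcnt() uint {
	n := uint(0)
	for _, w := range c {
		n += uint(bits.OnesCount64(w))
	}
	return n
}

func main() {
	const pageSize = 8192 // assumed 8 KiB runtime page size
	// Two hypothetical chunks with a few scavenged pages each.
	chunks := []chunkBits{
		{0: 0xFF},        // 8 scavenged pages
		{0: 0xF, 1: 0x3}, // 4 + 2 scavenged pages
	}
	var heapReleased uint64
	for i := range chunks {
		heapReleased += uint64(chunks[i].popcnt()) * pageSize
	}
	fmt.Println("HeapReleased (bytes):", heapReleased) // (8+6)*8192 = 114688
}

Compared with the old path, which iterated a treap of free spans and asked each span how much of it was released, a per-chunk bitmap popcount only touches a flat array of chunks, which is presumably why the slow-path accounting gets shorter here.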
@@ -543,170 +535,6 @@ func MapTombstoneCheck(m map[int]int) {
 	}
 }
 
-// UnscavHugePagesSlow returns the value of mheap_.freeHugePages
-// and the number of unscavenged huge pages calculated by
-// scanning the heap.
-func UnscavHugePagesSlow() (uintptr, uintptr) {
-	var base, slow uintptr
-	// Run on the system stack to avoid deadlock from stack growth
-	// trying to acquire the heap lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		base = mheap_.free.unscavHugePages
-		for _, s := range mheap_.allspans {
-			if s.state.get() == mSpanFree && !s.scavenged {
-				slow += s.hugePages()
-			}
-		}
-		unlock(&mheap_.lock)
-	})
-	return base, slow
-}
-
-// Span is a safe wrapper around an mspan, whose memory
-// is managed manually.
-type Span struct {
-	*mspan
-}
-
-func AllocSpan(base, npages uintptr, scavenged bool) Span {
-	var s *mspan
-	systemstack(func() {
-		lock(&mheap_.lock)
-		s = (*mspan)(mheap_.spanalloc.alloc())
-		unlock(&mheap_.lock)
-	})
-	s.init(base, npages)
-	s.scavenged = scavenged
-	return Span{s}
-}
-
-func (s *Span) Free() {
-	systemstack(func() {
-		lock(&mheap_.lock)
-		mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
-		unlock(&mheap_.lock)
-	})
-	s.mspan = nil
-}
-
-func (s Span) Base() uintptr {
-	return s.mspan.base()
-}
-
-func (s Span) Pages() uintptr {
-	return s.mspan.npages
-}
-
-type TreapIterType treapIterType
-
-const (
-	TreapIterScav TreapIterType = TreapIterType(treapIterScav)
-	TreapIterHuge               = TreapIterType(treapIterHuge)
-	TreapIterBits               = treapIterBits
-)
-
-type TreapIterFilter treapIterFilter
-
-func TreapFilter(mask, match TreapIterType) TreapIterFilter {
-	return TreapIterFilter(treapFilter(treapIterType(mask), treapIterType(match)))
-}
-
-func (s Span) MatchesIter(mask, match TreapIterType) bool {
-	return treapFilter(treapIterType(mask), treapIterType(match)).matches(s.treapFilter())
-}
-
-type TreapIter struct {
-	treapIter
-}
-
-func (t TreapIter) Span() Span {
-	return Span{t.span()}
-}
-
-func (t TreapIter) Valid() bool {
-	return t.valid()
-}
-
-func (t TreapIter) Next() TreapIter {
-	return TreapIter{t.next()}
-}
-
-func (t TreapIter) Prev() TreapIter {
-	return TreapIter{t.prev()}
-}
-
-// Treap is a safe wrapper around mTreap for testing.
-//
-// It must never be heap-allocated because mTreap is
-// notinheap.
-//
-//go:notinheap
-type Treap struct {
-	mTreap
-}
-
-func (t *Treap) Start(mask, match TreapIterType) TreapIter {
-	return TreapIter{t.start(treapIterType(mask), treapIterType(match))}
-}
-
-func (t *Treap) End(mask, match TreapIterType) TreapIter {
-	return TreapIter{t.end(treapIterType(mask), treapIterType(match))}
-}
-
-func (t *Treap) Insert(s Span) {
-	// mTreap uses a fixalloc in mheap_ for treapNode
-	// allocation which requires the mheap_ lock to manipulate.
-	// Locking here is safe because the treap itself never allocs
-	// or otherwise ends up grabbing this lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.insert(s.mspan)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) Find(npages uintptr) TreapIter {
-	return TreapIter{t.find(npages)}
-}
-
-func (t *Treap) Erase(i TreapIter) {
-	// mTreap uses a fixalloc in mheap_ for treapNode
-	// freeing which requires the mheap_ lock to manipulate.
-	// Locking here is safe because the treap itself never allocs
-	// or otherwise ends up grabbing this lock.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.erase(i.treapIter)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) RemoveSpan(s Span) {
-	// See Erase about locking.
-	systemstack(func() {
-		lock(&mheap_.lock)
-		t.removeSpan(s.mspan)
-		unlock(&mheap_.lock)
-	})
-	t.CheckInvariants()
-}
-
-func (t *Treap) Size() int {
-	i := 0
-	t.mTreap.treap.walkTreap(func(t *treapNode) {
-		i++
-	})
-	return i
-}
-
-func (t *Treap) CheckInvariants() {
-	t.mTreap.treap.walkTreap(checkTreapNode)
-	t.mTreap.treap.validateInvariants()
-}
-
 func RunGetgThreadSwitchTest() {
 	// Test that getg works correctly with thread switch.
 	// With gccgo, if we generate getg inlined, the backend