@@ -973,6 +973,84 @@ func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
973
973
return
974
974
}
975
975
976
+ // tryAllocMSpan attempts to allocate an mspan object from
977
+ // the P-local cache, but may fail.
978
+ //
979
+ // h need not be locked.
980
+ //
981
+ // This caller must ensure that its P won't change underneath
982
+ // it during this function. Currently to ensure that we enforce
983
+ // that the function is run on the system stack, because that's
984
+ // the only place it is used now. In the future, this requirement
985
+ // may be relaxed if its use is necessary elsewhere.
986
+ //
987
+ //go:systemstack
988
+ func (h * mheap ) tryAllocMSpan () * mspan {
989
+ pp := getg ().m .p .ptr ()
990
+ // If we don't have a p or the cache is empty, we can't do
991
+ // anything here.
992
+ if pp == nil || pp .mspancache .len == 0 {
993
+ return nil
994
+ }
995
+ // Pull off the last entry in the cache.
996
+ s := pp .mspancache .buf [pp .mspancache .len - 1 ]
997
+ pp .mspancache .len --
998
+ return s
999
+ }
1000
+
1001
// allocMSpanLocked allocates an mspan object.
//
// h must be locked.
//
// allocMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
// Running on the system stack also ensures that we won't
// switch Ps during this function. See tryAllocMSpan for details.
//
//go:systemstack
func (h *mheap) allocMSpanLocked() *mspan {
	pp := getg().m.p.ptr()
	if pp == nil {
		// We don't have a p so just do the normal thing:
		// allocate directly from the heap's span fixalloc.
		return (*mspan)(h.spanalloc.alloc())
	}
	// Refill the cache if necessary. We fill it to half of the
	// buffer's capacity — presumably so that subsequent frees
	// still have room to land in the cache; confirm against the
	// sizing of mspancache.buf.
	if pp.mspancache.len == 0 {
		const refillCount = len(pp.mspancache.buf) / 2
		for i := 0; i < refillCount; i++ {
			pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
		}
		pp.mspancache.len = refillCount
	}
	// Pull off the last entry in the cache.
	s := pp.mspancache.buf[pp.mspancache.len-1]
	pp.mspancache.len--
	return s
}
1030
+
1031
+ // freeMSpanLocked free an mspan object.
1032
+ //
1033
+ // h must be locked.
1034
+ //
1035
+ // freeMSpanLocked must be called on the system stack because
1036
+ // its caller holds the heap lock. See mheap for details.
1037
+ // Running on the system stack also ensures that we won't
1038
+ // switch Ps during this function. See tryAllocMSpan for details.
1039
+ //
1040
+ //go:systemstack
1041
+ func (h * mheap ) freeMSpanLocked (s * mspan ) {
1042
+ pp := getg ().m .p .ptr ()
1043
+ // First try to free the mspan directly to the cache.
1044
+ if pp != nil && pp .mspancache .len < len (pp .mspancache .buf ) {
1045
+ pp .mspancache .buf [pp .mspancache .len ] = s
1046
+ pp .mspancache .len ++
1047
+ return
1048
+ }
1049
+ // Failing that (or if we don't have a p), just free it to
1050
+ // the heap.
1051
+ h .spanalloc .free (unsafe .Pointer (s ))
1052
+ }
1053
+
976
1054
// allocSpan allocates an mspan which owns npages worth of memory.
977
1055
//
978
1056
// If manual == false, allocSpan allocates a heap span of class spanclass
@@ -995,6 +1073,9 @@ func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysS
995
1073
gp := getg ()
996
1074
base , scav := uintptr (0 ), uintptr (0 )
997
1075
1076
+ // Try to allocate a cached span.
1077
+ s = h .tryAllocMSpan ()
1078
+
998
1079
// We failed to do what we need to do without the lock.
999
1080
lock (& h .lock )
1000
1081
@@ -1014,6 +1095,11 @@ func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysS
1014
1095
throw ("grew heap, but no adequate free space found" )
1015
1096
1016
1097
HaveBase:
1098
+ if s == nil {
1099
+ // We failed to get an mspan earlier, so grab
1100
+ // one now that we have the heap lock.
1101
+ s = h .allocMSpanLocked ()
1102
+ }
1017
1103
if ! manual {
1018
1104
// This is a heap span, so we should do some additional accounting
1019
1105
// which may only be done with the heap locked.
@@ -1036,9 +1122,6 @@ HaveBase:
1036
1122
gcController .revise ()
1037
1123
}
1038
1124
}
1039
-
1040
- // Allocate an mspan object before releasing the lock.
1041
- s = (* mspan )(h .spanalloc .alloc ())
1042
1125
unlock (& h .lock )
1043
1126
1044
1127
// Initialize the span.
@@ -1294,7 +1377,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
1294
1377
1295
1378
// Free the span structure. We no longer have a use for it.
1296
1379
s .state .set (mSpanDead )
1297
- h .spanalloc . free ( unsafe . Pointer ( s ) )
1380
+ h .freeMSpanLocked ( s )
1298
1381
}
1299
1382
1300
1383
// scavengeAll visits each node in the free treap and scavenges the
0 commit comments