@@ -159,9 +159,8 @@ func lock2(l *mutex) {
 
 	k8 := key8(&l.key)
 
-	var v8 uint8
 	// Speculative grab for lock.
-	v8 = atomic.Xchg8(k8, mutexLocked)
+	v8 := atomic.Xchg8(k8, mutexLocked)
 	if v8&mutexLocked == 0 {
 		if v8&mutexSleeping != 0 {
 			atomic.Or8(k8, mutexSleeping)
@@ -183,11 +182,13 @@ func lock2(l *mutex) {
 	v := atomic.Loaduintptr(&l.key)
 tryAcquire:
 	for i := 0; ; i++ {
-		for v&mutexLocked == 0 {
+		if v&mutexLocked == 0 {
 			if weSpin {
-				next := (v &^ mutexMMask) | (v & (mutexMMask &^ mutexSpinning)) | mutexLocked
-				if next&^mutexMMask != 0 {
-					next |= mutexSleeping
+				next := (v &^ mutexSpinning) | mutexSleeping | mutexLocked
+				if next&^mutexMMask == 0 {
+					// The fast-path Xchg8 may have cleared mutexSleeping. Fix
+					// the hint so unlock2 knows when to use its slow path.
+					next = next &^ mutexSleeping
 				}
 				if atomic.Casuintptr(&l.key, v, next) {
 					timer.end()
@@ -201,6 +202,7 @@ tryAcquire:
 				}
 			}
 			v = atomic.Loaduintptr(&l.key)
+			continue tryAcquire
 		}
 
 		if !weSpin && v&mutexSpinning == 0 && atomic.Casuintptr(&l.key, v, v|mutexSpinning) {
@@ -214,35 +216,36 @@ tryAcquire:
 				v = atomic.Loaduintptr(&l.key)
 				continue tryAcquire
 			} else if i < spin+mutexPassiveSpinCount {
-				osyield() // TODO: Consider removing this step. See https://go.dev/issue/69268
+				osyield() // TODO: Consider removing this step. See https://go.dev/issue/69268.
 				v = atomic.Loaduintptr(&l.key)
 				continue tryAcquire
 			}
 		}
 
 		// Go to sleep
-		for v&mutexLocked != 0 {
-			// Store the current head of the list of sleeping Ms in our gp.m.mWaitList.next field
-			gp.m.mWaitList.next = mutexWaitListHead(v)
-
-			// Pack a (partial) pointer to this M with the current lock state bits
-			next := (uintptr(unsafe.Pointer(gp.m)) &^ mutexMMask) | v&mutexMMask | mutexSleeping
-			if weSpin { // If we were spinning, prepare to retire
-				next = next &^ mutexSpinning
-			}
+		if v&mutexLocked == 0 {
+			throw("runtime·lock: sleeping while lock is available")
+		}
 
-			if atomic.Casuintptr(&l.key, v, next) {
-				weSpin = false
-				// We've pushed ourselves onto the stack of waiters. Wait.
-				semasleep(-1)
-				atTail = gp.m.mWaitList.next == 0 // we were at risk of starving
-				gp.m.mWaitList.next = 0
-				i = 0
-				v = atomic.Loaduintptr(&l.key)
-				continue tryAcquire
-			}
-			v = atomic.Loaduintptr(&l.key)
+		// Store the current head of the list of sleeping Ms in our gp.m.mWaitList.next field
+		gp.m.mWaitList.next = mutexWaitListHead(v)
+
+		// Pack a (partial) pointer to this M with the current lock state bits
+		next := (uintptr(unsafe.Pointer(gp.m)) &^ mutexMMask) | v&mutexMMask | mutexSleeping
+		if weSpin { // If we were spinning, prepare to retire
+			next = next &^ mutexSpinning
+		}
+
+		if atomic.Casuintptr(&l.key, v, next) {
+			weSpin = false
+			// We've pushed ourselves onto the stack of waiters. Wait.
+			semasleep(-1)
+			atTail = gp.m.mWaitList.next == 0 // we were at risk of starving
+			i = 0
 		}
+
+		gp.m.mWaitList.next = 0
+		v = atomic.Loaduintptr(&l.key)
 	}
 }
 
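For readers tracing the new spinning path: the change hinges on the lock word carrying both state bits and a packed pointer to the head of the waiter stack, so the fast-path Xchg8 rewrites the whole low byte and can silently drop the mutexSleeping hint. The sketch below is not the runtime's code — `repairAndLock` and the fake M address are invented for illustration, and it runs as ordinary user code — but it reproduces the bit arithmetic of the new `next` computation, including the `next&^mutexMMask == 0` check that keeps the sleeping hint accurate.

```go
package main

import "fmt"

// Lock-word layout mirroring the scheme in the diff: the low byte holds the
// state bits and the upper bits hold a (partial) pointer to the head of the
// stack of sleeping Ms.
const (
	mutexLocked   uintptr = 1 << 0
	mutexSleeping uintptr = 1 << 1
	mutexSpinning uintptr = 1 << 2
	mutexMMask    uintptr = (1 << 8) - 1 // byte that never holds pointer bits
)

// repairAndLock builds the CAS operand from the diff's spinning path: take
// the lock, retire the spin bit, and restore the sleeping hint that a
// fast-path Xchg8 (which rewrites the whole low byte) may have wiped out.
func repairAndLock(v uintptr) uintptr {
	next := (v &^ mutexSpinning) | mutexSleeping | mutexLocked
	if next&^mutexMMask == 0 {
		// No Ms are packed above the low byte, so keep the sleeping hint
		// clear and let unlock2 keep using its fast path.
		next = next &^ mutexSleeping
	}
	return next
}

func main() {
	// A parked M: Ms are pointer-aligned, so the low byte of the address is
	// zero and free to carry state bits. (The address is made up.)
	const fakeM uintptr = 0xc0004000

	// Case 1: a waiter is packed, but a fast-path Xchg8 cleared the low
	// byte, losing the mutexSleeping hint. The repair must set it again so
	// unlock2 knows to take its slow path and wake the waiter.
	v := fakeM | mutexSpinning
	fmt.Printf("with waiter:    %#x -> %#x\n", v, repairAndLock(v)) // sleeping set

	// Case 2: no waiters are packed. Setting the hint here would send every
	// future unlock2 down its slow path for nothing.
	v = mutexSpinning
	fmt.Printf("without waiter: %#x -> %#x\n", v, repairAndLock(v)) // sleeping clear
}
```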