Skip to content

Commit dfb7073

Browse files
rhyshgopherbot
authored and committed
runtime: use semaphore structure for futex locks
Prepare the futex-based implementation of lock2 to maintain a list of waiting Ms. Beyond storing an muintptr in the mutex's key field, we now must never overwrite that field (even for a moment) without taking its current value into account. The semaphore-based implementation of lock2 already has that behavior. Reuse that structure. For #66999 Change-Id: I23b6f6bacb276fe33c6aed5c0571161a7e71fe6c Reviewed-on: https://go-review.googlesource.com/c/go/+/585636 Reviewed-by: Dmitri Shuralyov <[email protected]> Auto-Submit: Rhys Hiltner <[email protected]> Reviewed-by: Michael Pratt <[email protected]> LUCI-TryBot-Result: Go LUCI <[email protected]>
1 parent be0b569 commit dfb7073

File tree

1 file changed

+52
-51
lines changed

1 file changed

+52
-51
lines changed

src/runtime/lock_futex.go

Lines changed: 52 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -23,19 +23,20 @@ import (
2323
// If any procs are sleeping on addr, wake up at most cnt.
2424

2525
const (
26-
mutex_unlocked = 0
27-
mutex_locked = 1
28-
mutex_sleeping = 2
26+
mutex_locked = 0x1
27+
mutex_sleeping = 0x2 // Ensure futex's low 32 bits won't be all zeros
2928

3029
active_spin = 4
3130
active_spin_cnt = 30
3231
passive_spin = 1
3332
)
3433

35-
// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
36-
// mutex_sleeping means that there is presumably at least one sleeping thread.
37-
// Note that there can be spinning threads during all states - they do not
38-
// affect mutex's state.
34+
// The mutex.key holds two state flags in its lowest bits: When the mutex_locked
35+
// bit is set, the mutex is locked. When the mutex_sleeping bit is set, a thread
36+
// is waiting in futexsleep for the mutex to be available. These flags operate
37+
// independently: a thread can enter lock2, observe that another thread is
38+
// already asleep, and immediately try to grab the lock anyway without waiting
39+
// for its "fair" turn.
3940

4041
// We use the uintptr mutex.key and note.key as a uint32.
4142
//
@@ -54,27 +55,16 @@ func lock(l *mutex) {
5455

5556
func lock2(l *mutex) {
5657
gp := getg()
57-
5858
if gp.m.locks < 0 {
5959
throw("runtime·lock: lock count")
6060
}
6161
gp.m.locks++
6262

6363
// Speculative grab for lock.
64-
v := atomic.Xchg(key32(&l.key), mutex_locked)
65-
if v == mutex_unlocked {
64+
if atomic.Casuintptr(&l.key, 0, mutex_locked) {
6665
return
6766
}
6867

69-
// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
70-
// depending on whether there is a thread sleeping
71-
// on this mutex. If we ever change l->key from
72-
// MUTEX_SLEEPING to some other value, we must be
73-
// careful to change it back to MUTEX_SLEEPING before
74-
// returning, to ensure that the sleeping thread gets
75-
// its wakeup call.
76-
wait := v
77-
7868
timer := &lockTimer{lock: l}
7969
timer.begin()
8070
// On uniprocessors, no point spinning.
@@ -83,37 +73,39 @@ func lock2(l *mutex) {
8373
if ncpu > 1 {
8474
spin = active_spin
8575
}
86-
for {
87-
// Try for lock, spinning.
88-
for i := 0; i < spin; i++ {
89-
for l.key == mutex_unlocked {
90-
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
91-
timer.end()
92-
return
93-
}
76+
Loop:
77+
for i := 0; ; i++ {
78+
v := atomic.Loaduintptr(&l.key)
79+
if v&mutex_locked == 0 {
80+
// Unlocked. Try to lock.
81+
if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
82+
timer.end()
83+
return
9484
}
95-
procyield(active_spin_cnt)
85+
i = 0
9686
}
97-
98-
// Try for lock, rescheduling.
99-
for i := 0; i < passive_spin; i++ {
100-
for l.key == mutex_unlocked {
101-
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
102-
timer.end()
103-
return
87+
if i < spin {
88+
procyield(active_spin_cnt)
89+
} else if i < spin+passive_spin {
90+
osyield()
91+
} else {
92+
// Someone else has it.
93+
for {
94+
head := v &^ (mutex_locked | mutex_sleeping)
95+
if atomic.Casuintptr(&l.key, v, head|mutex_locked|mutex_sleeping) {
96+
break
97+
}
98+
v = atomic.Loaduintptr(&l.key)
99+
if v&mutex_locked == 0 {
100+
continue Loop
104101
}
105102
}
106-
osyield()
107-
}
108-
109-
// Sleep.
110-
v = atomic.Xchg(key32(&l.key), mutex_sleeping)
111-
if v == mutex_unlocked {
112-
timer.end()
113-
return
103+
if v&mutex_locked != 0 {
104+
// Queued. Wait.
105+
futexsleep(key32(&l.key), uint32(v), -1)
106+
i = 0
107+
}
114108
}
115-
wait = mutex_sleeping
116-
futexsleep(key32(&l.key), mutex_sleeping, -1)
117109
}
118110
}
119111

@@ -122,12 +114,21 @@ func unlock(l *mutex) {
122114
}
123115

124116
func unlock2(l *mutex) {
125-
v := atomic.Xchg(key32(&l.key), mutex_unlocked)
126-
if v == mutex_unlocked {
127-
throw("unlock of unlocked lock")
128-
}
129-
if v == mutex_sleeping {
130-
futexwakeup(key32(&l.key), 1)
117+
for {
118+
v := atomic.Loaduintptr(&l.key)
119+
if v == mutex_locked {
120+
if atomic.Casuintptr(&l.key, mutex_locked, 0) {
121+
break
122+
}
123+
} else if v&mutex_locked == 0 {
124+
throw("unlock of unlocked lock")
125+
} else {
126+
// Other M's are waiting for the lock.
127+
if atomic.Casuintptr(&l.key, v, v&^mutex_locked) {
128+
futexwakeup(key32(&l.key), 1)
129+
break
130+
}
131+
}
131132
}
132133

133134
gp := getg()

0 commit comments

Comments
 (0)