Skip to content

Commit 3f4be12

Browse files
rhyshgopherbot
authored and committed
Revert "runtime: use semaphore structure for futex locks"
This reverts commit dfb7073 (CL 585636). Reason for revert: This is part of a patch series that changed the handling of contended lock2/unlock2 calls, reducing the maximum throughput of contended runtime.mutex values, and causing a performance regression on applications where that is (or became) the bottleneck. Updates #66999 Updates #67585 Change-Id: I3483bf0b85ba0b77204032a68b7cbe93f142703e Reviewed-on: https://go-review.googlesource.com/c/go/+/589098 LUCI-TryBot-Result: Go LUCI <[email protected]> Reviewed-by: Than McIntosh <[email protected]> Reviewed-by: Michael Pratt <[email protected]> Auto-Submit: Rhys Hiltner <[email protected]>
1 parent afbbc28 commit 3f4be12

File tree

1 file changed

+51
-52
lines changed

1 file changed

+51
-52
lines changed

src/runtime/lock_futex.go

Lines changed: 51 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -23,20 +23,19 @@ import (
2323
// If any procs are sleeping on addr, wake up at most cnt.
2424

2525
const (
26-
mutex_locked = 0x1
27-
mutex_sleeping = 0x2 // Ensure futex's low 32 bits won't be all zeros
26+
mutex_unlocked = 0
27+
mutex_locked = 1
28+
mutex_sleeping = 2
2829

2930
active_spin = 4
3031
active_spin_cnt = 30
3132
passive_spin = 1
3233
)
3334

34-
// The mutex.key holds two state flags in its lowest bits: When the mutex_locked
35-
// bit is set, the mutex is locked. When the mutex_sleeping bit is set, a thread
36-
// is waiting in futexsleep for the mutex to be available. These flags operate
37-
// independently: a thread can enter lock2, observe that another thread is
38-
// already asleep, and immediately try to grab the lock anyway without waiting
39-
// for its "fair" turn.
35+
// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
36+
// mutex_sleeping means that there is presumably at least one sleeping thread.
37+
// Note that there can be spinning threads during all states - they do not
38+
// affect mutex's state.
4039

4140
// We use the uintptr mutex.key and note.key as a uint32.
4241
//
@@ -55,16 +54,27 @@ func lock(l *mutex) {
5554

5655
func lock2(l *mutex) {
5756
gp := getg()
57+
5858
if gp.m.locks < 0 {
5959
throw("runtime·lock: lock count")
6060
}
6161
gp.m.locks++
6262

6363
// Speculative grab for lock.
64-
if atomic.Casuintptr(&l.key, 0, mutex_locked) {
64+
v := atomic.Xchg(key32(&l.key), mutex_locked)
65+
if v == mutex_unlocked {
6566
return
6667
}
6768

69+
// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
70+
// depending on whether there is a thread sleeping
71+
// on this mutex. If we ever change l->key from
72+
// MUTEX_SLEEPING to some other value, we must be
73+
// careful to change it back to MUTEX_SLEEPING before
74+
// returning, to ensure that the sleeping thread gets
75+
// its wakeup call.
76+
wait := v
77+
6878
timer := &lockTimer{lock: l}
6979
timer.begin()
7080
// On uniprocessors, no point spinning.
@@ -73,39 +83,37 @@ func lock2(l *mutex) {
7383
if ncpu > 1 {
7484
spin = active_spin
7585
}
76-
Loop:
77-
for i := 0; ; i++ {
78-
v := atomic.Loaduintptr(&l.key)
79-
if v&mutex_locked == 0 {
80-
// Unlocked. Try to lock.
81-
if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
82-
timer.end()
83-
return
86+
for {
87+
// Try for lock, spinning.
88+
for i := 0; i < spin; i++ {
89+
for l.key == mutex_unlocked {
90+
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
91+
timer.end()
92+
return
93+
}
8494
}
85-
i = 0
86-
}
87-
if i < spin {
8895
procyield(active_spin_cnt)
89-
} else if i < spin+passive_spin {
90-
osyield()
91-
} else {
92-
// Someone else has it.
93-
for {
94-
head := v &^ (mutex_locked | mutex_sleeping)
95-
if atomic.Casuintptr(&l.key, v, head|mutex_locked|mutex_sleeping) {
96-
break
97-
}
98-
v = atomic.Loaduintptr(&l.key)
99-
if v&mutex_locked == 0 {
100-
continue Loop
96+
}
97+
98+
// Try for lock, rescheduling.
99+
for i := 0; i < passive_spin; i++ {
100+
for l.key == mutex_unlocked {
101+
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
102+
timer.end()
103+
return
101104
}
102105
}
103-
if v&mutex_locked != 0 {
104-
// Queued. Wait.
105-
futexsleep(key32(&l.key), uint32(v), -1)
106-
i = 0
107-
}
106+
osyield()
108107
}
108+
109+
// Sleep.
110+
v = atomic.Xchg(key32(&l.key), mutex_sleeping)
111+
if v == mutex_unlocked {
112+
timer.end()
113+
return
114+
}
115+
wait = mutex_sleeping
116+
futexsleep(key32(&l.key), mutex_sleeping, -1)
109117
}
110118
}
111119

@@ -114,21 +122,12 @@ func unlock(l *mutex) {
114122
}
115123

116124
func unlock2(l *mutex) {
117-
for {
118-
v := atomic.Loaduintptr(&l.key)
119-
if v == mutex_locked {
120-
if atomic.Casuintptr(&l.key, mutex_locked, 0) {
121-
break
122-
}
123-
} else if v&mutex_locked == 0 {
124-
throw("unlock of unlocked lock")
125-
} else {
126-
// Other M's are waiting for the lock.
127-
if atomic.Casuintptr(&l.key, v, v&^mutex_locked) {
128-
futexwakeup(key32(&l.key), 1)
129-
break
130-
}
131-
}
125+
v := atomic.Xchg(key32(&l.key), mutex_unlocked)
126+
if v == mutex_unlocked {
127+
throw("unlock of unlocked lock")
128+
}
129+
if v == mutex_sleeping {
130+
futexwakeup(key32(&l.key), 1)
132131
}
133132

134133
gp := getg()

0 commit comments

Comments
 (0)