Skip to content

Commit 18c2461

Browse files
rhyshmknyszek
authored and committed
runtime: allow futex OSes to use sema-based mutex
Implement sema{create,sleep,wakeup} in terms of the futex syscall when available. Split the lock2/unlock2 implementations out of lock_sema.go and lock_futex.go (which they shared with runtime.note) to allow swapping in new implementations of those. Let futex-based platforms use the semaphore-based mutex implementation. Control that via the new "spinbitmutex" GOEXPERMENT value, disabled by default. This lays the groundwork for a "spinbit" mutex implementation; it does not include the new mutex implementation. For #68578. Change-Id: I091289c85124212a87abec7079ecbd9e610b4270 Reviewed-on: https://go-review.googlesource.com/c/go/+/622996 Reviewed-by: Michael Knyszek <[email protected]> Reviewed-by: Cherry Mui <[email protected]> LUCI-TryBot-Result: Go LUCI <[email protected]>
1 parent 252e9de commit 18c2461

13 files changed

+347
-251
lines changed

src/internal/goexperiment/exp_spinbitmutex_off.go

Lines changed: 8 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/internal/goexperiment/exp_spinbitmutex_on.go

Lines changed: 8 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/internal/goexperiment/flags.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,4 +118,8 @@ type Flags struct {
118118

119119
// SwissMap enables the SwissTable-based map implementation.
120120
SwissMap bool
121+
122+
// SpinbitMutex enables the new "spinbit" mutex implementation on supported
123+
// platforms. See https://go.dev/issue/68578.
124+
SpinbitMutex bool
121125
}

src/runtime/lock_futex.go

Lines changed: 30 additions & 123 deletions
Original file line numberDiff line numberDiff line change
@@ -11,136 +11,13 @@ import (
1111
"unsafe"
1212
)
1313

14-
// This implementation depends on OS-specific implementations of
15-
//
16-
// futexsleep(addr *uint32, val uint32, ns int64)
17-
// Atomically,
18-
// if *addr == val { sleep }
19-
// Might be woken up spuriously; that's allowed.
20-
// Don't sleep longer than ns; ns < 0 means forever.
21-
//
22-
// futexwakeup(addr *uint32, cnt uint32)
23-
// If any procs are sleeping on addr, wake up at most cnt.
24-
25-
const (
26-
mutex_unlocked = 0
27-
mutex_locked = 1
28-
mutex_sleeping = 2
29-
30-
active_spin = 4
31-
active_spin_cnt = 30
32-
passive_spin = 1
33-
)
34-
35-
// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
36-
// mutex_sleeping means that there is presumably at least one sleeping thread.
37-
// Note that there can be spinning threads during all states - they do not
38-
// affect mutex's state.
39-
4014
// We use the uintptr mutex.key and note.key as a uint32.
4115
//
4216
//go:nosplit
4317
func key32(p *uintptr) *uint32 {
4418
return (*uint32)(unsafe.Pointer(p))
4519
}
4620

47-
func mutexContended(l *mutex) bool {
48-
return atomic.Load(key32(&l.key)) > mutex_locked
49-
}
50-
51-
func lock(l *mutex) {
52-
lockWithRank(l, getLockRank(l))
53-
}
54-
55-
func lock2(l *mutex) {
56-
gp := getg()
57-
58-
if gp.m.locks < 0 {
59-
throw("runtime·lock: lock count")
60-
}
61-
gp.m.locks++
62-
63-
// Speculative grab for lock.
64-
v := atomic.Xchg(key32(&l.key), mutex_locked)
65-
if v == mutex_unlocked {
66-
return
67-
}
68-
69-
// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
70-
// depending on whether there is a thread sleeping
71-
// on this mutex. If we ever change l->key from
72-
// MUTEX_SLEEPING to some other value, we must be
73-
// careful to change it back to MUTEX_SLEEPING before
74-
// returning, to ensure that the sleeping thread gets
75-
// its wakeup call.
76-
wait := v
77-
78-
timer := &lockTimer{lock: l}
79-
timer.begin()
80-
// On uniprocessors, no point spinning.
81-
// On multiprocessors, spin for ACTIVE_SPIN attempts.
82-
spin := 0
83-
if ncpu > 1 {
84-
spin = active_spin
85-
}
86-
for {
87-
// Try for lock, spinning.
88-
for i := 0; i < spin; i++ {
89-
for l.key == mutex_unlocked {
90-
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
91-
timer.end()
92-
return
93-
}
94-
}
95-
procyield(active_spin_cnt)
96-
}
97-
98-
// Try for lock, rescheduling.
99-
for i := 0; i < passive_spin; i++ {
100-
for l.key == mutex_unlocked {
101-
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
102-
timer.end()
103-
return
104-
}
105-
}
106-
osyield()
107-
}
108-
109-
// Sleep.
110-
v = atomic.Xchg(key32(&l.key), mutex_sleeping)
111-
if v == mutex_unlocked {
112-
timer.end()
113-
return
114-
}
115-
wait = mutex_sleeping
116-
futexsleep(key32(&l.key), mutex_sleeping, -1)
117-
}
118-
}
119-
120-
func unlock(l *mutex) {
121-
unlockWithRank(l)
122-
}
123-
124-
func unlock2(l *mutex) {
125-
v := atomic.Xchg(key32(&l.key), mutex_unlocked)
126-
if v == mutex_unlocked {
127-
throw("unlock of unlocked lock")
128-
}
129-
if v == mutex_sleeping {
130-
futexwakeup(key32(&l.key), 1)
131-
}
132-
133-
gp := getg()
134-
gp.m.mLockProfile.recordUnlock(l)
135-
gp.m.locks--
136-
if gp.m.locks < 0 {
137-
throw("runtime·unlock: lock count")
138-
}
139-
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
140-
gp.stackguard0 = stackPreempt
141-
}
142-
}
143-
14421
// One-time notifications.
14522
func noteclear(n *note) {
14623
n.key = 0
@@ -254,3 +131,33 @@ func beforeIdle(int64, int64) (*g, bool) {
254131
}
255132

256133
func checkTimeouts() {}
134+
135+
//go:nosplit
136+
func semacreate(mp *m) {}
137+
138+
//go:nosplit
139+
func semasleep(ns int64) int32 {
140+
mp := getg().m
141+
142+
for v := atomic.Xadd(&mp.waitsema, -1); ; v = atomic.Load(&mp.waitsema) {
143+
if int32(v) >= 0 {
144+
return 0
145+
}
146+
futexsleep(&mp.waitsema, v, ns)
147+
if ns >= 0 {
148+
if int32(v) >= 0 {
149+
return 0
150+
} else {
151+
return -1
152+
}
153+
}
154+
}
155+
}
156+
157+
//go:nosplit
158+
func semawakeup(mp *m) {
159+
v := atomic.Xadd(&mp.waitsema, 1)
160+
if v == 0 {
161+
futexwakeup(&mp.waitsema, 1)
162+
}
163+
}

src/runtime/lock_futex_tristate.go

Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
// Copyright 2011 The Go Authors. All rights reserved.
2+
// Use of this source code is governed by a BSD-style
3+
// license that can be found in the LICENSE file.
4+
5+
//go:build (dragonfly || freebsd || linux) && !goexperiment.spinbitmutex
6+
7+
package runtime
8+
9+
import (
10+
"internal/runtime/atomic"
11+
)
12+
13+
// This implementation depends on OS-specific implementations of
14+
//
15+
// futexsleep(addr *uint32, val uint32, ns int64)
16+
// Atomically,
17+
// if *addr == val { sleep }
18+
// Might be woken up spuriously; that's allowed.
19+
// Don't sleep longer than ns; ns < 0 means forever.
20+
//
21+
// futexwakeup(addr *uint32, cnt uint32)
22+
// If any procs are sleeping on addr, wake up at most cnt.
23+
24+
const (
25+
mutex_unlocked = 0
26+
mutex_locked = 1
27+
mutex_sleeping = 2
28+
29+
active_spin = 4
30+
active_spin_cnt = 30
31+
passive_spin = 1
32+
)
33+
34+
// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
35+
// mutex_sleeping means that there is presumably at least one sleeping thread.
36+
// Note that there can be spinning threads during all states - they do not
37+
// affect mutex's state.
38+
39+
type mWaitList struct{}
40+
41+
func mutexContended(l *mutex) bool {
42+
return atomic.Load(key32(&l.key)) > mutex_locked
43+
}
44+
45+
func lock(l *mutex) {
46+
lockWithRank(l, getLockRank(l))
47+
}
48+
49+
func lock2(l *mutex) {
50+
gp := getg()
51+
52+
if gp.m.locks < 0 {
53+
throw("runtime·lock: lock count")
54+
}
55+
gp.m.locks++
56+
57+
// Speculative grab for lock.
58+
v := atomic.Xchg(key32(&l.key), mutex_locked)
59+
if v == mutex_unlocked {
60+
return
61+
}
62+
63+
// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
64+
// depending on whether there is a thread sleeping
65+
// on this mutex. If we ever change l->key from
66+
// MUTEX_SLEEPING to some other value, we must be
67+
// careful to change it back to MUTEX_SLEEPING before
68+
// returning, to ensure that the sleeping thread gets
69+
// its wakeup call.
70+
wait := v
71+
72+
timer := &lockTimer{lock: l}
73+
timer.begin()
74+
// On uniprocessors, no point spinning.
75+
// On multiprocessors, spin for ACTIVE_SPIN attempts.
76+
spin := 0
77+
if ncpu > 1 {
78+
spin = active_spin
79+
}
80+
for {
81+
// Try for lock, spinning.
82+
for i := 0; i < spin; i++ {
83+
for l.key == mutex_unlocked {
84+
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
85+
timer.end()
86+
return
87+
}
88+
}
89+
procyield(active_spin_cnt)
90+
}
91+
92+
// Try for lock, rescheduling.
93+
for i := 0; i < passive_spin; i++ {
94+
for l.key == mutex_unlocked {
95+
if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
96+
timer.end()
97+
return
98+
}
99+
}
100+
osyield()
101+
}
102+
103+
// Sleep.
104+
v = atomic.Xchg(key32(&l.key), mutex_sleeping)
105+
if v == mutex_unlocked {
106+
timer.end()
107+
return
108+
}
109+
wait = mutex_sleeping
110+
futexsleep(key32(&l.key), mutex_sleeping, -1)
111+
}
112+
}
113+
114+
func unlock(l *mutex) {
115+
unlockWithRank(l)
116+
}
117+
118+
func unlock2(l *mutex) {
119+
v := atomic.Xchg(key32(&l.key), mutex_unlocked)
120+
if v == mutex_unlocked {
121+
throw("unlock of unlocked lock")
122+
}
123+
if v == mutex_sleeping {
124+
futexwakeup(key32(&l.key), 1)
125+
}
126+
127+
gp := getg()
128+
gp.m.mLockProfile.recordUnlock(l)
129+
gp.m.locks--
130+
if gp.m.locks < 0 {
131+
throw("runtime·unlock: lock count")
132+
}
133+
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
134+
gp.stackguard0 = stackPreempt
135+
}
136+
}

src/runtime/lock_js.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,8 @@ const (
2626
passive_spin = 1
2727
)
2828

29+
type mWaitList struct{}
30+
2931
func mutexContended(l *mutex) bool {
3032
return false
3133
}

0 commit comments

Comments
 (0)