@@ -23,20 +23,19 @@ import (
23
23
// If any procs are sleeping on addr, wake up at most cnt.
24
24
25
25
const (
	// Lock states stored in mutex.key (used as a uint32 via key32).
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	// Spinning parameters for lock2: spin actively active_spin times
	// (procyield of active_spin_cnt iterations each), then yield the OS
	// thread passive_spin times before sleeping in the futex.
	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
40
39
41
40
// We use the uintptr mutex.key and note.key as a uint32.
42
41
//
@@ -55,16 +54,27 @@ func lock(l *mutex) {
55
54
56
55
func lock2 (l * mutex ) {
57
56
gp := getg ()
57
+
58
58
if gp .m .locks < 0 {
59
59
throw ("runtime·lock: lock count" )
60
60
}
61
61
gp .m .locks ++
62
62
63
63
// Speculative grab for lock.
64
- if atomic .Casuintptr (& l .key , 0 , mutex_locked ) {
64
+ v := atomic .Xchg (key32 (& l .key ), mutex_locked )
65
+ if v == mutex_unlocked {
65
66
return
66
67
}
67
68
69
+ // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
70
+ // depending on whether there is a thread sleeping
71
+ // on this mutex. If we ever change l->key from
72
+ // MUTEX_SLEEPING to some other value, we must be
73
+ // careful to change it back to MUTEX_SLEEPING before
74
+ // returning, to ensure that the sleeping thread gets
75
+ // its wakeup call.
76
+ wait := v
77
+
68
78
timer := & lockTimer {lock : l }
69
79
timer .begin ()
70
80
// On uniprocessors, no point spinning.
@@ -73,39 +83,37 @@ func lock2(l *mutex) {
73
83
if ncpu > 1 {
74
84
spin = active_spin
75
85
}
76
- Loop:
77
- for i := 0 ; ; i ++ {
78
- v := atomic . Loaduintptr ( & l . key )
79
- if v & mutex_locked == 0 {
80
- // Unlocked. Try to lock.
81
- if atomic . Casuintptr ( & l . key , v , v | mutex_locked ) {
82
- timer . end ()
83
- return
86
+ for {
87
+ // Try for lock, spinning.
88
+ for i := 0 ; i < spin ; i ++ {
89
+ for l . key == mutex_unlocked {
90
+ if atomic . Cas ( key32 ( & l . key ), mutex_unlocked , wait ) {
91
+ timer . end ()
92
+ return
93
+ }
84
94
}
85
- i = 0
86
- }
87
- if i < spin {
88
95
procyield (active_spin_cnt )
89
- } else if i < spin + passive_spin {
90
- osyield ()
91
- } else {
92
- // Someone else has it.
93
- for {
94
- head := v &^ (mutex_locked | mutex_sleeping )
95
- if atomic .Casuintptr (& l .key , v , head | mutex_locked | mutex_sleeping ) {
96
- break
97
- }
98
- v = atomic .Loaduintptr (& l .key )
99
- if v & mutex_locked == 0 {
100
- continue Loop
96
+ }
97
+
98
+ // Try for lock, rescheduling.
99
+ for i := 0 ; i < passive_spin ; i ++ {
100
+ for l .key == mutex_unlocked {
101
+ if atomic .Cas (key32 (& l .key ), mutex_unlocked , wait ) {
102
+ timer .end ()
103
+ return
101
104
}
102
105
}
103
- if v & mutex_locked != 0 {
104
- // Queued. Wait.
105
- futexsleep (key32 (& l .key ), uint32 (v ), - 1 )
106
- i = 0
107
- }
106
+ osyield ()
108
107
}
108
+
109
+ // Sleep.
110
+ v = atomic .Xchg (key32 (& l .key ), mutex_sleeping )
111
+ if v == mutex_unlocked {
112
+ timer .end ()
113
+ return
114
+ }
115
+ wait = mutex_sleeping
116
+ futexsleep (key32 (& l .key ), mutex_sleeping , - 1 )
109
117
}
110
118
}
111
119
@@ -114,21 +122,12 @@ func unlock(l *mutex) {
114
122
}
115
123
116
124
func unlock2 (l * mutex ) {
117
- for {
118
- v := atomic .Loaduintptr (& l .key )
119
- if v == mutex_locked {
120
- if atomic .Casuintptr (& l .key , mutex_locked , 0 ) {
121
- break
122
- }
123
- } else if v & mutex_locked == 0 {
124
- throw ("unlock of unlocked lock" )
125
- } else {
126
- // Other M's are waiting for the lock.
127
- if atomic .Casuintptr (& l .key , v , v &^mutex_locked ) {
128
- futexwakeup (key32 (& l .key ), 1 )
129
- break
130
- }
131
- }
125
+ v := atomic .Xchg (key32 (& l .key ), mutex_unlocked )
126
+ if v == mutex_unlocked {
127
+ throw ("unlock of unlocked lock" )
128
+ }
129
+ if v == mutex_sleeping {
130
+ futexwakeup (key32 (& l .key ), 1 )
132
131
}
133
132
134
133
gp := getg ()
0 commit comments