@@ -43,30 +43,90 @@ impl Condvar {
}

thread_local! {
-	/// We track the set of locks currently held by a reference to their `MutexMetadata`
-	static MUTEXES_HELD: RefCell<HashSet<Arc<MutexMetadata>>> = RefCell::new(HashSet::new());
+	/// We track the set of locks currently held by a reference to their `LockMetadata`
+	static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
}
-static MUTEX_IDX: AtomicUsize = AtomicUsize::new(0);
+static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

-/// Metadata about a single mutex, by id, the set of things locked-before it, and the backtrace of
+/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
/// when the Mutex itself was constructed.
-struct MutexMetadata {
-	mutex_idx: u64,
-	locked_before: StdMutex<HashSet<Arc<MutexMetadata>>>,
+struct LockMetadata {
+	lock_idx: u64,
+	locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
	#[cfg(feature = "backtrace")]
-	mutex_construction_bt: Backtrace,
+	lock_construction_bt: Backtrace,
}
-impl PartialEq for MutexMetadata {
-	fn eq(&self, o: &MutexMetadata) -> bool { self.mutex_idx == o.mutex_idx }
+impl PartialEq for LockMetadata {
+	fn eq(&self, o: &LockMetadata) -> bool { self.lock_idx == o.lock_idx }
}
-impl Eq for MutexMetadata {}
-impl std::hash::Hash for MutexMetadata {
-	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); }
+impl Eq for LockMetadata {}
+impl std::hash::Hash for LockMetadata {
+	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
+}
+
+impl LockMetadata {
+	fn new() -> LockMetadata {
+		LockMetadata {
+			locked_before: StdMutex::new(HashSet::new()),
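+			// Relaxed ordering is sufficient here: `fetch_add` is still atomic, and the
+			// counter is only used to hand out a unique id per lock.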
+			lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
+			#[cfg(feature = "backtrace")]
+			lock_construction_bt: Backtrace::new(),
+		}
+	}
+
+	// Returns true if the lock was not already held by this thread, i.e. this was not a
+	// recursive acquisition (only relevant for read locks, which may be taken recursively).
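+	// Each lock's `locked_before` set records the locks which were already held when it was
+	// acquired. If the lock we are about to take appears in the `locked_before` set of a
+	// currently-held lock, the two locks have been taken in both orders at some point, i.e.
+	// there is a lockorder inversion (a potential deadlock).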
+	fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
+		let mut inserted = false;
+		LOCKS_HELD.with(|held| {
+			// For each lock which is currently locked, check that no lock's locked-before
+			// set includes the lock we're about to lock, which would imply a lockorder
+			// inversion.
+			for locked in held.borrow().iter() {
+				if read && *locked == *this {
+					// Recursive read locks are explicitly allowed
+					return;
+				}
+			}
+			for locked in held.borrow().iter() {
+				if !read && *locked == *this {
+					panic!("Tried to lock a lock while it was held!");
+				}
+				for locked_dep in locked.locked_before.lock().unwrap().iter() {
+					if *locked_dep == *this {
+						#[cfg(feature = "backtrace")]
+						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
+						#[cfg(not(feature = "backtrace"))]
+						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+					}
+				}
+				// Insert any already-held locks in our locked-before set.
+				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+			}
+			held.borrow_mut().insert(Arc::clone(this));
+			inserted = true;
+		});
+		inserted
+	}
+
+	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
+	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
+
+	fn try_locked(this: &Arc<LockMetadata>) {
+		LOCKS_HELD.with(|held| {
+			// Since a try-lock will simply fail if the lock is held already, we do not
+			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
+			// succeeds, we do consider it to have created lockorder dependencies.
+			for locked in held.borrow().iter() {
+				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+			}
+			held.borrow_mut().insert(Arc::clone(this));
+		});
+	}
}

pub struct Mutex<T: Sized> {
	inner: StdMutex<T>,
-	deps: Arc<MutexMetadata>,
+	deps: Arc<LockMetadata>,
}

#[must_use = "if unused the Mutex will immediately unlock"]
@@ -88,7 +148,7 @@ impl<'a, T: Sized> MutexGuard<'a, T> {

impl<T: Sized> Drop for MutexGuard<'_, T> {
	fn drop(&mut self) {
-		MUTEXES_HELD.with(|held| {
+		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.mutex.deps);
		});
	}
@@ -110,104 +170,195 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {

impl<T> Mutex<T> {
	pub fn new(inner: T) -> Mutex<T> {
-		Mutex {
-			inner: StdMutex::new(inner),
-			deps: Arc::new(MutexMetadata {
-				locked_before: StdMutex::new(HashSet::new()),
-				mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
-				#[cfg(feature = "backtrace")]
-				mutex_construction_bt: Backtrace::new(),
-			}),
-		}
+		Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
	}

	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
-		MUTEXES_HELD.with(|held| {
-			// For each mutex which is currently locked, check that no mutex's locked-before
-			// set includes the mutex we're about to lock, which would imply a lockorder
-			// inversion.
-			for locked in held.borrow().iter() {
-				for locked_dep in locked.locked_before.lock().unwrap().iter() {
-					if *locked_dep == self.deps {
-						#[cfg(feature = "backtrace")]
-						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
-						#[cfg(not(feature = "backtrace"))]
-						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
-					}
-				}
-				// Insert any already-held mutexes in our locked-before set.
-				self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-			}
-			held.borrow_mut().insert(Arc::clone(&self.deps));
-		});
+		LockMetadata::pre_lock(&self.deps);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
	}

	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
		if res.is_ok() {
-			MUTEXES_HELD.with(|held| {
-				// Since a try-lock will simply fail if the lock is held already, we do not
-				// consider try-locks to ever generate lockorder inversions. However, if a try-lock
-				// succeeds, we do consider it to have created lockorder dependencies.
-				for locked in held.borrow().iter() {
-					self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-				}
-				held.borrow_mut().insert(Arc::clone(&self.deps));
-			});
+			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}

-pub struct RwLock<T: ?Sized> {
-	inner: StdRwLock<T>
+pub struct RwLock<T: Sized> {
+	inner: StdRwLock<T>,
+	deps: Arc<LockMetadata>,
}

-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-	lock: StdRwLockReadGuard<'a, T>,
+pub struct RwLockReadGuard<'a, T: Sized + 'a> {
+	lock: &'a RwLock<T>,
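+	// Whether this guard is the one that inserted `lock` into this thread's LOCKS_HELD set;
+	// only that guard removes the entry again on drop, so recursive read guards leave it alone.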
+	first_lock: bool,
+	guard: StdRwLockReadGuard<'a, T>,
}

-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
-	lock: StdRwLockWriteGuard<'a, T>,
+pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
+	lock: &'a RwLock<T>,
+	guard: StdRwLockWriteGuard<'a, T>,
}

-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
-		&self.lock.deref()
+		&self.guard.deref()
	}
}

-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
+	fn drop(&mut self) {
+		if !self.first_lock {
+			// Note that it's not strictly true that the first taken read lock will get unlocked
+			// last, but in practice our locks are always taken as RAII, so it should basically
+			// always be true.
+			return;
+		}
+		LOCKS_HELD.with(|held| {
+			held.borrow_mut().remove(&self.lock.deps);
+		});
+	}
+}
+
+impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
-		&self.lock.deref()
+		&self.guard.deref()
+	}
+}
+
+impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
+	fn drop(&mut self) {
+		LOCKS_HELD.with(|held| {
+			held.borrow_mut().remove(&self.lock.deps);
+		});
	}
}

-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
-		self.lock.deref_mut()
+		self.guard.deref_mut()
	}
}

impl<T> RwLock<T> {
	pub fn new(inner: T) -> RwLock<T> {
-		RwLock { inner: StdRwLock::new(inner) }
+		RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
	}

	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
-		self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+		let first_lock = LockMetadata::pre_read_lock(&self.deps);
+		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
	}

	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		LockMetadata::pre_lock(&self.deps);
+		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
+		if res.is_ok() {
+			LockMetadata::try_locked(&self.deps);
+		}
+		res
+	}
+}
+
+#[test]
+#[should_panic]
+fn recursive_lock_fail() {
+	let mutex = Mutex::new(());
+	let _a = mutex.lock().unwrap();
+	let _b = mutex.lock().unwrap();
+}
+
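+// Recursive read locks are explicitly allowed by the tracker, so taking the same RwLock's
+// read lock twice on one thread must not panic.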
+#[test]
+fn recursive_read() {
+	let lock = RwLock::new(());
+	let _a = lock.read().unwrap();
+	let _b = lock.read().unwrap();
+}
+
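+// Take a then b to record the order a-before-b, then take b then a, which the tracker must
+// reject as a lockorder inversion even though no actual deadlock occurs in this test.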
+#[test]
+#[should_panic]
+fn lockorder_fail() {
+	let a = Mutex::new(());
+	let b = Mutex::new(());
+	{
+		let _a = a.lock().unwrap();
+		let _b = b.lock().unwrap();
+	}
+	{
+		let _b = b.lock().unwrap();
+		let _a = a.lock().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.write().unwrap();
+	}
+	{
+		let _b = b.write().unwrap();
+		let _a = a.write().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn read_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
+}
+
+#[test]
+fn read_recursive_no_lockorder() {
+	// Like the above, but note that no lockorder is implied when we recursively read-lock a
+	// RwLock, causing this to pass just fine.
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	let _outer = a.read().unwrap();
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
+}
+
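+// Lockorder is tracked across read and write acquisitions of the same RwLocks, so mixing them
+// in opposite orders is still detected as an inversion.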
+#[test]
+#[should_panic]
+fn read_write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.write().unwrap();
	}
}