+pub use ::alloc::sync::Arc;
+use core::ops::{Deref, DerefMut};
+use core::time::Duration;
+
+use std::collections::HashSet;
+use std::cell::RefCell;
+
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use std::sync::Mutex as StdMutex;
+use std::sync::MutexGuard as StdMutexGuard;
+use std::sync::RwLock as StdRwLock;
+use std::sync::RwLockReadGuard as StdRwLockReadGuard;
+use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
+use std::sync::Condvar as StdCondvar;
+
+#[cfg(feature = "backtrace")]
+use backtrace::Backtrace;
+
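+/// A simplified `LockResult`: poisoning information from the underlying std lock is discarded.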
+pub type LockResult<Guard> = Result<Guard, ()>;
+
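+/// A `std::sync::Condvar` wrapper which consumes our `MutexGuard` and rebuilds it on wake, so the
+/// returned guard still points at the owning `Mutex` and its lockorder metadata.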
+pub struct Condvar {
+    inner: StdCondvar,
+}
+
+impl Condvar {
+    pub fn new() -> Condvar {
+        Condvar { inner: StdCondvar::new() }
+    }
+
+    pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
+        let mutex: &'a Mutex<T> = guard.mutex;
+        self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
+    }
+
+    #[allow(unused)]
+    pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
+        let mutex = guard.mutex;
+        self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
+    }
+
+    pub fn notify_all(&self) { self.inner.notify_all(); }
+}
| 44 | + |
| 45 | +thread_local! { |
| 46 | + /// We track the set of locks currently held by a reference to their `MutexMetadata` |
| 47 | + static MUTEXES_HELD: RefCell<HashSet<Arc<MutexMetadata>>> = RefCell::new(HashSet::new()); |
| 48 | +} |
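+/// Monotonically-increasing counter used to assign each `Mutex` a unique `mutex_idx`.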
+static MUTEX_IDX: AtomicUsize = AtomicUsize::new(0);
+
+/// Metadata about a single mutex: its unique id, the set of mutexes which were locked before it,
+/// and the backtrace captured when the mutex itself was constructed (with the `backtrace` feature).
+struct MutexMetadata {
+    mutex_idx: u64,
+    locked_before: StdMutex<HashSet<Arc<MutexMetadata>>>,
+    #[cfg(feature = "backtrace")]
+    mutex_construction_bt: Backtrace,
+}
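+// `MutexMetadata` is compared and hashed purely by its unique `mutex_idx`, so entries can be
+// stored in the `HashSet`s above.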
+impl PartialEq for MutexMetadata {
+    fn eq(&self, o: &MutexMetadata) -> bool { self.mutex_idx == o.mutex_idx }
+}
+impl Eq for MutexMetadata {}
+impl std::hash::Hash for MutexMetadata {
+    fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); }
+}
+
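+/// A `std::sync::Mutex` wrapper which panics at lock time if acquiring this lock, given the locks
+/// already held by the current thread, would invert a lock ordering observed earlier in the
+/// program's execution.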
+pub struct Mutex<T: Sized> {
+    inner: StdMutex<T>,
+    deps: Arc<MutexMetadata>,
+}
+
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MutexGuard<'a, T: Sized + 'a> {
+    mutex: &'a Mutex<T>,
+    lock: StdMutexGuard<'a, T>,
+}
+
+impl<'a, T: Sized> MutexGuard<'a, T> {
+    fn into_inner(self) -> StdMutexGuard<'a, T> {
+        // We cannot simply move `self.lock` out of `self` because `MutexGuard` implements `Drop`
+        // (E0509), so copy the field out and `forget` the guard to skip its `Drop` impl.
+        unsafe {
+            let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
+            std::mem::forget(self);
+            v
+        }
+    }
+}
+
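+// Releasing the guard removes this mutex from the thread's set of currently-held locks.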
+impl<T: Sized> Drop for MutexGuard<'_, T> {
+    fn drop(&mut self) {
+        MUTEXES_HELD.with(|held| {
+            held.borrow_mut().remove(&self.mutex.deps);
+        });
+    }
+}
+
+impl<T: Sized> Deref for MutexGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.lock.deref()
+    }
+}
+
+impl<T: Sized> DerefMut for MutexGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.lock.deref_mut()
+    }
+}
+
+impl<T> Mutex<T> {
+    pub fn new(inner: T) -> Mutex<T> {
+        Mutex {
+            inner: StdMutex::new(inner),
+            deps: Arc::new(MutexMetadata {
+                locked_before: StdMutex::new(HashSet::new()),
+                mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
+                #[cfg(feature = "backtrace")]
+                mutex_construction_bt: Backtrace::new(),
+            }),
+        }
+    }
+
+    pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
+        MUTEXES_HELD.with(|held| {
+            // For each mutex which is currently locked, check that no mutex's locked-before
+            // set includes the mutex we're about to lock, which would imply a lockorder
+            // inversion.
+            for locked in held.borrow().iter() {
+                for locked_dep in locked.locked_before.lock().unwrap().iter() {
+                    if *locked_dep == self.deps {
+                        #[cfg(feature = "backtrace")]
+                        panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
+                        #[cfg(not(feature = "backtrace"))]
+                        panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+                    }
+                }
+                // Insert any already-held mutexes in our locked-before set.
+                self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
+            }
+            held.borrow_mut().insert(Arc::clone(&self.deps));
+        });
+        self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
+    }
+
+    pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
+        let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
+        if res.is_ok() {
+            MUTEXES_HELD.with(|held| {
+                // Since a try-lock will simply fail if the lock is held already, we do not
+                // consider try-locks to ever generate lockorder inversions. However, if a try-lock
+                // succeeds, we do consider it to have created lockorder dependencies.
+                for locked in held.borrow().iter() {
+                    self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
+                }
+                held.borrow_mut().insert(Arc::clone(&self.deps));
+            });
+        }
+        res
+    }
+}
+
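+/// A `std::sync::RwLock` wrapper. Unlike `Mutex` above, it is a plain pass-through and does not
+/// participate in lockorder tracking.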
+pub struct RwLock<T: ?Sized> {
+    inner: StdRwLock<T>
+}
+
+pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
+    lock: StdRwLockReadGuard<'a, T>,
+}
+
+pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
+    lock: StdRwLockWriteGuard<'a, T>,
+}
+
+impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.lock.deref()
+    }
+}
+
+impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.lock.deref()
+    }
+}
+
+impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.lock.deref_mut()
+    }
+}
+
+impl<T> RwLock<T> {
+    pub fn new(inner: T) -> RwLock<T> {
+        RwLock { inner: StdRwLock::new(inner) }
+    }
+
+    pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
+        self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+    }
+
+    pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
+        self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+    }
+
+    pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
+        self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+    }
+}
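For context, here is a minimal sketch (not part of the diff) of the behaviour the wrapper enables: taking two of these mutexes in opposite orders, even at different times and without an actual deadlock, should trip the lockorder panic in `lock()`. The test name below is illustrative.

```rust
#[cfg(test)]
mod tests {
    use super::Mutex;

    #[test]
    #[should_panic]
    fn detects_lockorder_inversion() {
        let a = Mutex::new(0u32);
        let b = Mutex::new(0u32);
        // Establish the order "a before b": while `a` is held, locking `b` records `a` in
        // `b`'s locked-before set.
        {
            let _a = a.lock().unwrap();
            let _b = b.lock().unwrap();
        }
        // Lock in the opposite order. `a.lock()` sees that the currently-held `b` already has
        // `a` in its locked-before set and panics, even though no deadlock occurs here.
        {
            let _b = b.lock().unwrap();
            let _a = a.lock().unwrap();
        }
    }
}
```

Note that `try_lock` deliberately never triggers this panic: a failed try-lock cannot deadlock, so a successful one only records new lockorder dependencies for later checks.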