Skip to content

Commit 6090d9e

Browse files
committed
Test if a given mutex is locked by the current thread in tests
In anticipation of the next commit(s) adding threaded tests, we need to ensure our lockorder checks work correctly with multiple threads. Sadly, we currently have tests in the form `assert!(mutex.try_lock().is_ok())` to assert that a given mutex is not locked by the caller of a function; with multiple threads, such a `try_lock` can fail because *another* thread holds the lock, not because the caller does, making the assertion spuriously fail. The fix is rather simple given that we already track the mutexes locked by each thread in our `debug_sync` logic - simply replace the check with a new extension trait which (for test builds) checks the locked state by looking only at what was locked by the current thread.
1 parent 9422370 commit 6090d9e

File tree

6 files changed

+115
-19
lines changed

6 files changed

+115
-19
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 11 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ use crate::prelude::*;
7070
use core::{cmp, mem};
7171
use core::cell::RefCell;
7272
use crate::io::Read;
73-
use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock};
73+
use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
7474
use core::sync::atomic::{AtomicUsize, Ordering};
7575
use core::time::Duration;
7676
use core::ops::Deref;
@@ -1218,13 +1218,10 @@ macro_rules! handle_error {
12181218
match $internal {
12191219
Ok(msg) => Ok(msg),
12201220
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
1221-
#[cfg(any(feature = "_test_utils", test))]
1222-
{
1223-
// In testing, ensure there are no deadlocks where the lock is already held upon
1224-
// entering the macro.
1225-
debug_assert!($self.pending_events.try_lock().is_ok());
1226-
debug_assert!($self.per_peer_state.try_write().is_ok());
1227-
}
1221+
// In testing, ensure there are no deadlocks where the lock is already held upon
1222+
// entering the macro.
1223+
debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
1224+
debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
12281225

12291226
let mut msg_events = Vec::with_capacity(2);
12301227

@@ -3743,17 +3740,12 @@ where
37433740
/// Fails an HTLC backwards to the sender of it to us.
37443741
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
37453742
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
3746-
#[cfg(any(feature = "_test_utils", test))]
3747-
{
3748-
// Ensure that the peer state channel storage lock is not held when calling this
3749-
// function.
3750-
// This ensures that future code doesn't introduce a lock_order requirement for
3751-
// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
3752-
// this function with any `per_peer_state` peer lock aquired would.
3753-
let per_peer_state = self.per_peer_state.read().unwrap();
3754-
for (_, peer) in per_peer_state.iter() {
3755-
debug_assert!(peer.try_lock().is_ok());
3756-
}
3743+
// Ensure that no peer state channel storage lock is held when calling this function.
3744+
// This ensures that future code doesn't introduce a lock-order requirement for
3745+
// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
3746+
// this function with any `per_peer_state` peer lock acquired would.
3747+
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
3748+
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
37573749
}
37583750

37593751
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can

lightning/src/sync/debug_sync.rs

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@ use std::sync::Condvar as StdCondvar;
1414

1515
use crate::prelude::HashMap;
1616

17+
use super::{LockTestExt, LockHeldState};
18+
1719
#[cfg(feature = "backtrace")]
1820
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
1921

@@ -168,6 +170,18 @@ impl LockMetadata {
168170
fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
169171
fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
170172

173+
fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
174+
let mut res = LockHeldState::NotHeldByThread;
175+
LOCKS_HELD.with(|held| {
176+
for (locked_idx, _locked) in held.borrow().iter() {
177+
if *locked_idx == this.lock_idx {
178+
res = LockHeldState::HeldByThread;
179+
}
180+
}
181+
});
182+
res
183+
}
184+
171185
fn try_locked(this: &Arc<LockMetadata>) {
172186
LOCKS_HELD.with(|held| {
173187
// Since a try-lock will simply fail if the lock is held already, we do not
@@ -248,6 +262,13 @@ impl<T> Mutex<T> {
248262
}
249263
}
250264

265+
impl <T> LockTestExt for Mutex<T> {
266+
#[inline]
267+
fn held_by_thread(&self) -> LockHeldState {
268+
LockMetadata::held_by_thread(&self.deps)
269+
}
270+
}
271+
251272
pub struct RwLock<T: Sized> {
252273
inner: StdRwLock<T>,
253274
deps: Arc<LockMetadata>,
@@ -332,4 +353,11 @@ impl<T> RwLock<T> {
332353
}
333354
}
334355

356+
impl <T> LockTestExt for RwLock<T> {
357+
#[inline]
358+
fn held_by_thread(&self) -> LockHeldState {
359+
LockMetadata::held_by_thread(&self.deps)
360+
}
361+
}
362+
335363
pub type FairRwLock<T> = RwLock<T>;

lightning/src/sync/fairrwlock.rs

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
22
use std::sync::atomic::{AtomicUsize, Ordering};
3+
use super::{LockHeldState, LockTestExt};
34

45
/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
56
/// Linux with pthreads under the hood, readers trivially and completely starve writers).
@@ -48,3 +49,11 @@ impl<T> FairRwLock<T> {
4849
self.lock.try_write()
4950
}
5051
}
52+
53+
impl<T> LockTestExt for FairRwLock<T> {
54+
#[inline]
55+
fn held_by_thread(&self) -> LockHeldState {
56+
// fairrwlock is only built in non-test modes, so we should never support tests.
57+
LockHeldState::Unsupported
58+
}
59+
}

lightning/src/sync/mod.rs

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,16 @@
1+
#[allow(dead_code)] // Depending on the compilation flags some variants are never used
2+
#[derive(Debug, PartialEq, Eq)]
3+
pub(crate) enum LockHeldState {
4+
HeldByThread,
5+
NotHeldByThread,
6+
#[cfg(any(feature = "_bench_unstable", not(test)))]
7+
Unsupported,
8+
}
9+
10+
pub(crate) trait LockTestExt {
11+
fn held_by_thread(&self) -> LockHeldState;
12+
}
13+
114
#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
215
mod debug_sync;
316
#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
@@ -11,6 +24,19 @@ pub(crate) mod fairrwlock;
1124
#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
1225
pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
1326

27+
#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
28+
mod ext_impl {
29+
use super::*;
30+
impl<T> LockTestExt for Mutex<T> {
31+
#[inline]
32+
fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
33+
}
34+
impl<T> LockTestExt for RwLock<T> {
35+
#[inline]
36+
fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
37+
}
38+
}
39+
1440
#[cfg(not(feature = "std"))]
1541
mod nostd_sync;
1642
#[cfg(not(feature = "std"))]

lightning/src/sync/nostd_sync.rs

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ pub use ::alloc::sync::Arc;
22
use core::ops::{Deref, DerefMut};
33
use core::time::Duration;
44
use core::cell::{RefCell, Ref, RefMut};
5+
use super::{LockTestExt, LockHeldState};
56

67
pub type LockResult<Guard> = Result<Guard, ()>;
78

@@ -61,6 +62,14 @@ impl<T> Mutex<T> {
6162
}
6263
}
6364

65+
impl<T> LockTestExt for Mutex<T> {
66+
#[inline]
67+
fn held_by_thread(&self) -> LockHeldState {
68+
if self.lock().is_err() { return LockHeldState::HeldByThread; }
69+
else { return LockHeldState::NotHeldByThread; }
70+
}
71+
}
72+
6473
pub struct RwLock<T: ?Sized> {
6574
inner: RefCell<T>
6675
}
@@ -116,4 +125,12 @@ impl<T> RwLock<T> {
116125
}
117126
}
118127

128+
impl<T> LockTestExt for RwLock<T> {
129+
#[inline]
130+
fn held_by_thread(&self) -> LockHeldState {
131+
if self.write().is_err() { return LockHeldState::HeldByThread; }
132+
else { return LockHeldState::NotHeldByThread; }
133+
}
134+
}
135+
119136
pub type FairRwLock<T> = RwLock<T>;

lightning/src/sync/test_lockorder_checks.rs

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,10 @@
11
use crate::sync::debug_sync::{RwLock, Mutex};
22

3+
use super::{LockHeldState, LockTestExt};
4+
5+
use std::sync::Arc;
6+
use std::thread;
7+
38
#[test]
49
#[should_panic]
510
#[cfg(not(feature = "backtrace"))]
@@ -92,3 +97,22 @@ fn read_write_lockorder_fail() {
9297
let _a = a.write().unwrap();
9398
}
9499
}
100+
101+
#[test]
102+
fn test_thread_locked_state() {
103+
let mtx = Arc::new(Mutex::new(()));
104+
let mtx_ref = Arc::clone(&mtx);
105+
assert_eq!(mtx.held_by_thread(), LockHeldState::NotHeldByThread);
106+
107+
let lck = mtx.lock().unwrap();
108+
assert_eq!(mtx.held_by_thread(), LockHeldState::HeldByThread);
109+
110+
let thrd = std::thread::spawn(move || {
111+
assert_eq!(mtx_ref.held_by_thread(), LockHeldState::NotHeldByThread);
112+
});
113+
thrd.join().unwrap();
114+
assert_eq!(mtx.held_by_thread(), LockHeldState::HeldByThread);
115+
116+
std::mem::drop(lck);
117+
assert_eq!(mtx.held_by_thread(), LockHeldState::NotHeldByThread);
118+
}

0 commit comments

Comments
 (0)