Skip to content

Commit 497c017

Browse files
committed
Remove largely useless checks in chanmon_consistency fuzzer
When reloading nodes A or C, the chanmon_consistency fuzzer currently calls `get_and_clear_pending_msg_events` on the node, potentially causing additional `ChannelMonitor` or `ChannelManager` updates, just to check that no unexpected messages are generated. There's not much reason to do so: the fuzzer could always swap in a different command that calls the same method, and the additional checking requires some weird monitor-persistence introspection. Here we simplify the fuzzer by simply removing this logic.
1 parent 0043bf8 commit 497c017

File tree

1 file changed

+6
-65
lines changed

1 file changed

+6
-65
lines changed

fuzz/src/chanmon_consistency.rs

Lines changed: 6 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,6 @@ struct TestChainMonitor {
125125
// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
126126
// fully-serialized monitor state here, as well as the corresponding update_id.
127127
pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
128-
pub should_update_manager: atomic::AtomicBool,
129128
}
130129
impl TestChainMonitor {
131130
pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
@@ -135,7 +134,6 @@ impl TestChainMonitor {
135134
keys,
136135
persister,
137136
latest_monitors: Mutex::new(HashMap::new()),
138-
should_update_manager: atomic::AtomicBool::new(false),
139137
}
140138
}
141139
}
@@ -146,7 +144,6 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
146144
if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
147145
panic!("Already had monitor pre-watch_channel");
148146
}
149-
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
150147
self.chain_monitor.watch_channel(funding_txo, monitor)
151148
}
152149

@@ -162,7 +159,6 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
162159
let mut ser = VecWriter(Vec::new());
163160
deserialized_monitor.write(&mut ser).unwrap();
164161
map_entry.insert((update.update_id, ser.0));
165-
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
166162
self.chain_monitor.update_channel(funding_txo, update)
167163
}
168164

@@ -878,54 +874,6 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
878874
} }
879875
}
880876

881-
macro_rules! drain_msg_events_on_disconnect {
882-
($counterparty_id: expr) => { {
883-
if $counterparty_id == 0 {
884-
for event in nodes[0].get_and_clear_pending_msg_events() {
885-
match event {
886-
events::MessageSendEvent::UpdateHTLCs { .. } => {},
887-
events::MessageSendEvent::SendRevokeAndACK { .. } => {},
888-
events::MessageSendEvent::SendChannelReestablish { .. } => {},
889-
events::MessageSendEvent::SendChannelReady { .. } => {},
890-
events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
891-
events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
892-
assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
893-
},
894-
_ => if out.may_fail.load(atomic::Ordering::Acquire) {
895-
return;
896-
} else {
897-
panic!("Unhandled message event")
898-
},
899-
}
900-
}
901-
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
902-
ab_events.clear();
903-
ba_events.clear();
904-
} else {
905-
for event in nodes[2].get_and_clear_pending_msg_events() {
906-
match event {
907-
events::MessageSendEvent::UpdateHTLCs { .. } => {},
908-
events::MessageSendEvent::SendRevokeAndACK { .. } => {},
909-
events::MessageSendEvent::SendChannelReestablish { .. } => {},
910-
events::MessageSendEvent::SendChannelReady { .. } => {},
911-
events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
912-
events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
913-
assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
914-
},
915-
_ => if out.may_fail.load(atomic::Ordering::Acquire) {
916-
return;
917-
} else {
918-
panic!("Unhandled message event")
919-
},
920-
}
921-
}
922-
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
923-
bc_events.clear();
924-
cb_events.clear();
925-
}
926-
} }
927-
}
928-
929877
macro_rules! process_events {
930878
($node: expr, $fail: expr) => { {
931879
// In case we get 256 payments we may have a hash collision, resulting in the
@@ -1101,11 +1049,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
11011049
if !chan_a_disconnected {
11021050
nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
11031051
chan_a_disconnected = true;
1104-
drain_msg_events_on_disconnect!(0);
1105-
}
1106-
if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
1107-
node_a_ser.0.clear();
1108-
nodes[0].write(&mut node_a_ser).unwrap();
1052+
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
1053+
ab_events.clear();
1054+
ba_events.clear();
11091055
}
11101056
let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
11111057
nodes[0] = new_node_a;
@@ -1134,11 +1080,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
11341080
if !chan_b_disconnected {
11351081
nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
11361082
chan_b_disconnected = true;
1137-
drain_msg_events_on_disconnect!(2);
1138-
}
1139-
if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
1140-
node_c_ser.0.clear();
1141-
nodes[2].write(&mut node_c_ser).unwrap();
1083+
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
1084+
bc_events.clear();
1085+
cb_events.clear();
11421086
}
11431087
let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
11441088
nodes[2] = new_node_c;
@@ -1306,13 +1250,10 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
13061250

13071251
node_a_ser.0.clear();
13081252
nodes[0].write(&mut node_a_ser).unwrap();
1309-
monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
13101253
node_b_ser.0.clear();
13111254
nodes[1].write(&mut node_b_ser).unwrap();
1312-
monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
13131255
node_c_ser.0.clear();
13141256
nodes[2].write(&mut node_c_ser).unwrap();
1315-
monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
13161257
}
13171258
}
13181259

0 commit comments

Comments (0)