@@ -125,7 +125,6 @@ struct TestChainMonitor {
 	// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
 	// fully-serialized monitor state here, as well as the corresponding update_id.
 	pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
-	pub should_update_manager: atomic::AtomicBool,
 }
 impl TestChainMonitor {
 	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
@@ -135,7 +134,6 @@ impl TestChainMonitor {
 			keys,
 			persister,
 			latest_monitors: Mutex::new(HashMap::new()),
-			should_update_manager: atomic::AtomicBool::new(false),
 		}
 	}
 }
@@ -146,7 +144,6 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
 			panic!("Already had monitor pre-watch_channel");
 		}
-		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
 		self.chain_monitor.watch_channel(funding_txo, monitor)
 	}
 
@@ -162,7 +159,6 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
-		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
 		self.chain_monitor.update_channel(funding_txo, update)
 	}
 
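The four hunks above remove the `should_update_manager` side-flag from `TestChainMonitor`; what remains is the cache of the latest fully-serialized monitor state, keyed by funding outpoint, that `reload_node!` restarts from. A minimal sketch of that caching pattern, with a hypothetical `MonitorCache` type standing in for the fuzzer's struct (a `u64` key stands in for `OutPoint`):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical stand-in for TestChainMonitor's latest_monitors field: on every
// watch/update call the freshest serialized state is stored with its update_id,
// so a simulated restart can deserialize from the newest snapshot.
struct MonitorCache {
	latest: Mutex<HashMap<u64, (u64, Vec<u8>)>>, // funding key -> (update_id, bytes)
}

impl MonitorCache {
	fn record(&self, funding_key: u64, update_id: u64, serialized: Vec<u8>) {
		// Overwriting is intentional: only the newest state matters for reload.
		self.latest.lock().unwrap().insert(funding_key, (update_id, serialized));
	}

	fn snapshot(&self, funding_key: u64) -> Option<(u64, Vec<u8>)> {
		self.latest.lock().unwrap().get(&funding_key).cloned()
	}
}
```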
@@ -1101,11 +1097,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 				if !chan_a_disconnected {
 					nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
-					drain_msg_events_on_disconnect!(0);
-				}
-				if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
-					node_a_ser.0.clear();
-					nodes[0].write(&mut node_a_ser).unwrap();
+					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+					ab_events.clear();
+					ba_events.clear();
 				}
 				let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
 				nodes[0] = new_node_a;
@@ -1134,11 +1128,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 				if !chan_b_disconnected {
 					nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
 					chan_b_disconnected = true;
-					drain_msg_events_on_disconnect!(2);
-				}
-				if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
-					node_c_ser.0.clear();
-					nodes[2].write(&mut node_c_ser).unwrap();
+					push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+					bc_events.clear();
+					cb_events.clear();
 				}
 				let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
 				nodes[2] = new_node_c;
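Both restart paths above now do the same bookkeeping unconditionally: the surviving node's pending message events are parked via `push_excess_b_events!`, and the in-flight queues for that link are cleared, since a restarted node forgets anything it had not yet processed. A minimal sketch of that shape with hypothetical queues (none of these names are from the fuzzer):

```rust
use std::collections::VecDeque;

// Hypothetical model of one peer link in the fuzzer's message plumbing.
struct PeerLink {
	excess_events: VecDeque<String>,  // parked events from the surviving node
	to_restarted: VecDeque<String>,   // in-flight messages toward the restarting node
	from_restarted: VecDeque<String>, // in-flight messages from the restarting node
}

impl PeerLink {
	// Mirrors the reload bookkeeping above: park whatever the survivor already
	// queued, then drop both in-flight queues, since the restarted node will
	// renegotiate state on reconnect rather than resume these messages.
	fn on_peer_restart(&mut self, survivor_pending: impl IntoIterator<Item = String>) {
		self.excess_events.extend(survivor_pending);
		self.to_restarted.clear();
		self.from_restarted.clear();
	}
}
```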
@@ -1304,15 +1296,18 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 			_ => test_return!(),
 		}
 
-		node_a_ser.0.clear();
-		nodes[0].write(&mut node_a_ser).unwrap();
-		monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
-		node_b_ser.0.clear();
-		nodes[1].write(&mut node_b_ser).unwrap();
-		monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
-		node_c_ser.0.clear();
-		nodes[2].write(&mut node_c_ser).unwrap();
-		monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
+		if nodes[0].get_and_clear_needs_persistence() {
+			node_a_ser.0.clear();
+			nodes[0].write(&mut node_a_ser).unwrap();
+		}
+		if nodes[1].get_and_clear_needs_persistence() {
+			node_b_ser.0.clear();
+			nodes[1].write(&mut node_b_ser).unwrap();
+		}
+		if nodes[2].get_and_clear_needs_persistence() {
+			node_c_ser.0.clear();
+			nodes[2].write(&mut node_c_ser).unwrap();
+		}
 	}
 }
 
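With the monitor-side flag gone, the managers are reserialized only when `get_and_clear_needs_persistence()` reports a change. A minimal sketch of that get-and-clear gating, assuming a hypothetical `Manager` in place of LDK's `ChannelManager`:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Hypothetical manager that tracks its own dirty bit, as the real
// ChannelManager does after this change.
struct Manager {
	needs_persistence: AtomicBool,
}

impl Manager {
	fn mark_dirty(&self) {
		self.needs_persistence.store(true, Ordering::Release);
	}

	// Returns the old value and clears the flag in one atomic step, so an
	// update landing between check and clear is never silently dropped: it
	// simply re-sets the flag for the next pass.
	fn get_and_clear_needs_persistence(&self) -> bool {
		self.needs_persistence.swap(false, Ordering::AcqRel)
	}

	fn write(&self, buf: &mut Vec<u8>) {
		buf.extend_from_slice(b"serialized-manager-state");
	}
}

// Usage mirroring the end-of-iteration persistence pass above.
fn persist_if_needed(mgr: &Manager, ser: &mut Vec<u8>) {
	if mgr.get_and_clear_needs_persistence() {
		ser.clear();
		mgr.write(ser);
	}
}
```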