
Commit d609779

Aditya Sharma authored and committed
functional_tests: Add test_peer_storage to confirm that we recover from peer storage and sweep funds correctly.
1 parent ca1e043 commit d609779

1 file changed: +138 -0

lightning/src/ln/functional_tests.rs

@@ -174,6 +174,144 @@ fn test_funding_exceeds_no_wumbo_limit() {
 	}
 }
 
+#[test]
+fn test_peer_storage() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let (persister, chain_monitor);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes_0_deserialized;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let nodes_0_serialized = nodes[0].node.encode();
+
+	let (_a, _b, _channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	send_payment(&nodes[0], &vec!(&nodes[1])[..], 1000);
+	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10000);
+	send_payment(&nodes[0], &vec!(&nodes[1])[..], 9999);
+
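+	// Disconnect and reconnect the peers a couple of times; the reconnection
+	// handshake is what carries the peer storage exchange.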
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Reconnect peers
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert_eq!(reestablish_1.len(), 1);
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_2.len(), 1);
+
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Reconnect peers to see if we send YourPeerStorage
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert_eq!(reestablish_1.len(), 1);
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_2.len(), 1);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Let's drop the monitor and clear the chain_monitor as well.
+	nodes[0].chain_source.remove_watched_txn_and_outputs(
+		OutPoint { txid: funding_tx.txid(), index: 0 },
+		funding_tx.output[0].script_pubkey.clone()
+	);
+
+	reload_node_with_stubs!(nodes[0], &nodes_0_serialized, &[], &[], persister, chain_monitor, nodes_0_deserialized);
+
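+	// nodes[0] now runs from the stale serialized ChannelManager (taken before
+	// the channel existed) with no ChannelMonitors; stub monitors must be
+	// recovered via peer storage.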
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_1.len(), 1);
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	// nodes[0] lost its channel state, so it has nothing to reestablish.
+	assert_eq!(reestablish_2.len(), 0);
+
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_1[0]);
+
+	// nodes[0] will generate a bogus channel_reestablish and an error message
+	// so that the peer closes the channel.
+	let mut closing_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(closing_events.len(), 2);
+	let nodes_2_event = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut closing_events);
+	let nodes_0_event = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut closing_events);
+
+	match nodes_2_event {
+		MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
+			assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
+			nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), msg)
+		},
+		_ => panic!("Unexpected event"),
+	}
+
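+	// The remaining event is nodes[0]'s error message for the channel it no
+	// longer knows about; delivering it to nodes[1] triggers a force-close.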
+	let mut err_msgs_0 = Vec::with_capacity(1);
+	if let MessageSendEvent::HandleError { ref action, .. } = nodes_0_event {
+		match action {
+			&ErrorAction::SendErrorMessage { ref msg } => {
+				assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
+				err_msgs_0.push(msg.clone());
+			},
+			_ => panic!("Unexpected event!"),
+		}
+	} else {
+		panic!("Unexpected event!");
+	}
+	assert_eq!(err_msgs_0.len(), 1);
+	nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+
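+	// On receiving the error, nodes[1] force-closes and broadcasts its latest
+	// commitment transaction.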
+	let commitment_tx = {
+		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 1);
+		node_txn.remove(0)
+	};
+
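+	// Mine the commitment transaction in a block on both nodes.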
+	let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx]);
+	connect_block(&nodes[1], &block);
+	connect_block(&nodes[0], &block);
+	check_closed_broadcast!(nodes[1], true);
+
+	let events_2 = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events_2.len(), 1);
+	match events_2[0] {
+		Event::ChannelClosed {..} => {}, // If we actually processed we'd receive the payment
+		_ => panic!("Unexpected event"),
+	}
+
+	check_added_monitors!(nodes[1], 1);
+
+	// let (txo, cid) = nodes[0].chain_monitor.chain_monitor.list_stub_monitors()[0];
+	// let monitor = nodes[0].chain_monitor.chain_monitor.get_stub_monitor(txo).unwrap();
+	// let total_claimable_balance = monitor.get_claimable_balances();
+
+	// assert_eq!(cid, _channel_id);
+	// println!("\n CLAIMABLE BALANCE {:?} \n CLAIMABLE OUTPOINTS {:?} \n", total_claimable_balance, monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints);
+	// for (i, j) in nodes[1].chain_source.watched_txn.lock().unwrap().iter() {
+	// 	println!("watched txns {:?} {:?}", i, j);
+	// }
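+	// Give nodes[0] enough confirmations to sweep its funds from the
+	// commitment transaction.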
+	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+}
+
 fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
 	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
 	// but only for them. Because some LSPs do it with some level of trust of the clients (for a
