Skip to content

Commit 26aafd0

Browse files
committed
Test basic HTLC claim behavior from monitor -> manager on reorg
1 parent dd01e31 commit 26aafd0

File tree

3 files changed

+170
-0
lines changed

3 files changed

+170
-0
lines changed

lightning/src/ln/functional_test_utils.rs

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -662,6 +662,20 @@ macro_rules! expect_payment_sent {
662662
}
663663
}
664664

665+
macro_rules! expect_payment_failed {
	($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr) => {
		// A failed payment must surface exactly one PaymentFailed event, carrying the
		// payment hash we expect and the expected rejected_by_dest flag.
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		if let Event::PaymentFailed { ref payment_hash, rejected_by_dest, .. } = events[0] {
			assert_eq!(*payment_hash, $expected_payment_hash);
			assert_eq!(rejected_by_dest, $rejected_by_dest);
		} else {
			panic!("Unexpected event");
		}
	}
}
678+
665679
pub fn send_along_route_with_hash<'a, 'b>(origin_node: &Node<'a, 'b>, route: Route, expected_route: &[&Node<'a, 'b>], recv_value: u64, our_payment_hash: PaymentHash) {
666680
let mut payment_event = {
667681
origin_node.node.send_payment(route, our_payment_hash).unwrap();

lightning/src/ln/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,3 +32,5 @@ mod wire;
3232
mod functional_tests;
3333
#[cfg(test)]
3434
mod chanmon_update_fail_tests;
35+
#[cfg(test)]
36+
mod reorg_tests;

lightning/src/ln/reorg_tests.rs

Lines changed: 154 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,154 @@
1+
//! Further functional tests which test blockchain reorganizations.
2+
3+
use ln::channelmonitor::ANTI_REORG_DELAY;
4+
use ln::features::InitFeatures;
5+
use ln::msgs::{ChannelMessageHandler, ErrorAction, HTLCFailChannelUpdate};
6+
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
7+
8+
use bitcoin::util::hash::BitcoinHash;
9+
use bitcoin::blockdata::block::{Block, BlockHeader};
10+
11+
use std::default::Default;
12+
13+
use ln::functional_test_utils::*;
14+
15+
// Our on-chain HTLC-claim learning has a few properties worth testing which are tested in the
// first four tests below:
// * If an upstream HTLC is claimed with a preimage (both against our own commitment
//   transaction and our counterparty's), we claim it backwards immediately.
// * If an upstream HTLC is claimed with a timeout, we delay ANTI_REORG_DELAY before failing
//   it backwards to ensure our counterparty can't claim with a preimage in a reorg.
21+
22+
/// Exercises node 1's handling of an on-chain-resolved HTLC on the node-1 <-> node-2 channel,
/// and checks that the result is correctly propagated back to node 0.
///
/// * `local_commitment`: if true, node 1's own commitment transaction (plus its HTLC-Timeout)
///   confirms on-chain; otherwise node 2's commitment transaction does.
/// * `claim`: if true, the confirmed blocks are reorged out before ANTI_REORG_DELAY and node 2's
///   HTLC-claim transaction confirms instead, so node 1 must claim backwards with the preimage;
///   if false, ANTI_REORG_DELAY blocks pass and node 1 must fail the HTLC backwards.
fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
	// Test learning HTLC preimage from an Offered HTLC after a reorg
	let node_cfgs = create_node_cfgs(3);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Build the route 0 -> 1 -> 2; only chan_2 (1 <-> 2) is force-closed below.
	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());

	let (our_payment_preimage, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

	// Provide preimage to node 2 by claiming payment
	nodes[2].node.claim_funds(our_payment_preimage, &None, 1000000);
	check_added_monitors!(nodes[2], 1);
	// Drain node 2's fulfill message without delivering it to node 1 — node 1 must learn the
	// resolution from the chain, not from an off-chain update.
	get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	// `headers` records each connected header so the claim path can disconnect them in reverse.
	let mut headers = Vec::new();
	let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
	// `claim_txn` is the transaction set to confirm after the reorg when `claim` is set.
	let claim_txn = if local_commitment {
		// Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
		let node_1_commitment_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
		assert_eq!(node_1_commitment_txn.len(), 2); // 1 local commitment tx, 1 Outbound HTLC-Timeout
		assert_eq!(node_1_commitment_txn[0].output.len(), 2); // to-self and Offered HTLC (to-remote/to-node-3 is dust)
		check_spends!(node_1_commitment_txn[0], chan_2.3);
		check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0].clone());

		// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
		nodes[2].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
		check_closed_broadcast!(nodes[2], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
		let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
		assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
		check_spends!(node_2_commitment_txn[1], chan_2.3);
		check_spends!(node_2_commitment_txn[2], node_2_commitment_txn[1].clone());
		check_spends!(node_2_commitment_txn[0], node_1_commitment_txn[0]);

		// Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
		nodes[1].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);

		// ...but return node 1's commitment tx in case claim is set and we're preparing to reorg
		vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
	} else {
		// Broadcast node 2 commitment txn
		let node_2_commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
		assert_eq!(node_2_commitment_txn.len(), 2); // 1 local commitment tx, 1 Received HTLC-Claim
		assert_eq!(node_2_commitment_txn[0].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
		check_spends!(node_2_commitment_txn[0], chan_2.3);
		check_spends!(node_2_commitment_txn[1], node_2_commitment_txn[0].clone());

		// Give node 1 node 2's commitment transaction and get its response (timing the HTLC out)
		nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
		let node_1_commitment_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_1_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Timeout, ChannelManager: 1 local commitment tx, 1 Offered HTLC-Timeout
		assert_eq!(node_1_commitment_txn[1].output.len(), 2); // to-local and Offered HTLC (to-remote is dust)
		check_spends!(node_1_commitment_txn[1], chan_2.3);
		check_spends!(node_1_commitment_txn[2], node_1_commitment_txn[1].clone());
		check_spends!(node_1_commitment_txn[0], node_2_commitment_txn[0]);

		// Confirm node 2's commitment txn (and node 1's HTLC-Timeout) on node 1
		nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
		// ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
		node_2_commitment_txn
	};
	check_closed_broadcast!(nodes[1], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
	headers.push(header.clone());
	// At CHAN_CONFIRM_DEPTH + 1 we have a confirmation count of 1, so CHAN_CONFIRM_DEPTH +
	// ANTI_REORG_DELAY - 1 will give us a confirmation count of ANTI_REORG_DELAY - 1.
	for i in CHAN_CONFIRM_DEPTH + 2..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1 {
		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[1].block_notifier.block_connected_checked(&header, i, &vec![], &[0; 0]);
		headers.push(header.clone());
	}
	// One confirmation short of ANTI_REORG_DELAY, node 1 must not have acted yet.
	check_added_monitors!(nodes[1], 0);
	assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);

	if claim {
		// Now reorg back to CHAN_CONFIRM_DEPTH and confirm node 2's broadcasted transactions:
		for (height, header) in (CHAN_CONFIRM_DEPTH + 1..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1).zip(headers.iter()).rev() {
			nodes[1].block_notifier.block_disconnected(&header, height);
		}

		header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[1].block_notifier.block_connected(&Block { header, txdata: claim_txn }, CHAN_CONFIRM_DEPTH + 1);

		// ChannelManager only polls ManyChannelMonitor::get_and_clear_pending_htlcs_updated when we
		// probe it for events, so we probe non-message events here (which should still end up empty):
		assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
	} else {
		// Confirm the timeout tx and check that we fail the HTLC backwards
		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[1].block_notifier.block_connected_checked(&header, CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY, &vec![], &[0; 0]);
		expect_pending_htlcs_forwardable!(nodes[1]);
	}

	check_added_monitors!(nodes[1], 1);
	// Which should result in an immediate claim/fail of the HTLC:
	let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	if claim {
		assert_eq!(htlc_updates.update_fulfill_htlcs.len(), 1);
		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fulfill_htlcs[0]);
	} else {
		assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	}
	commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false, true);
	if claim {
		expect_payment_sent!(nodes[0], our_payment_preimage);
	} else {
		// On the failure path, node 0 must also learn the channel is permanently gone.
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		if let MessageSendEvent::PaymentFailureNetworkUpdate { update: HTLCFailChannelUpdate::ChannelClosed { ref is_permanent, .. } } = events[0] {
			assert!(is_permanent);
		} else { panic!("Unexpected event!"); }
		expect_payment_failed!(nodes[0], our_payment_hash, false);
	}
}
138+
139+
#[test]
fn test_onchain_htlc_claim_reorg_local_commitment() {
	// Node 1's own commitment tx confirms, then a reorg lets node 2's on-chain claim win.
	do_test_onchain_htlc_reorg(true, true);
}
143+
#[test]
fn test_onchain_htlc_timeout_delay_local_commitment() {
	// Node 1's own commitment tx confirms; after ANTI_REORG_DELAY the HTLC is failed backwards.
	do_test_onchain_htlc_reorg(true, false);
}
147+
#[test]
fn test_onchain_htlc_claim_reorg_remote_commitment() {
	// Node 2's commitment tx confirms, then a reorg lets node 2's on-chain claim win.
	do_test_onchain_htlc_reorg(false, true);
}
151+
#[test]
fn test_onchain_htlc_timeout_delay_remote_commitment() {
	// Node 2's commitment tx confirms; after ANTI_REORG_DELAY the HTLC is failed backwards.
	do_test_onchain_htlc_reorg(false, false);
}

0 commit comments

Comments
 (0)