Skip to content

Commit 188a3fa

Browse files
committed
Check for timing-out HTLCs in remote unrevoked commitments
This resolves a TODO/issue in would_broadcast_at_height where we will not fail a channel with HTLCs which time out in remote broadcastable transactions.
1 parent 5bb9292 commit 188a3fa

File tree

2 files changed

+153
-30
lines changed

2 files changed

+153
-30
lines changed

src/ln/channelmonitor.rs

Lines changed: 55 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1568,6 +1568,7 @@ impl ChannelMonitor {
15681568
if let Some(transaction_output_index) = htlc.transaction_output_index {
15691569
if let &Some((ref their_sig, ref our_sig)) = sigs {
15701570
if htlc.offered {
1571+
log_trace!(self, "Broadcasting HTLC-Timeout transaction against local commitment transactions");
15711572
let mut htlc_timeout_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
15721573

15731574
htlc_timeout_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
@@ -1584,6 +1585,7 @@ impl ChannelMonitor {
15841585
res.push(htlc_timeout_tx);
15851586
} else {
15861587
if let Some(payment_preimage) = self.payment_preimages.get(&htlc.payment_hash) {
1588+
log_trace!(self, "Broadcasting HTLC-Success transaction against local commitment transactions");
15871589
let mut htlc_success_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
15881590

15891591
htlc_success_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
@@ -1618,6 +1620,7 @@ impl ChannelMonitor {
16181620
// weren't yet included in our commitment transaction(s).
16191621
if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
16201622
if local_tx.txid == commitment_txid {
1623+
log_trace!(self, "Got latest local commitment tx broadcast, searching for available HTLCs to claim");
16211624
match self.key_storage {
16221625
Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
16231626
let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key));
@@ -1632,6 +1635,7 @@ impl ChannelMonitor {
16321635
}
16331636
if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
16341637
if local_tx.txid == commitment_txid {
1638+
log_trace!(self, "Got previous local commitment tx broadcast, searching for available HTLCs to claim");
16351639
match self.key_storage {
16361640
Storage::Local { ref delayed_payment_base_key, ref prev_latest_per_commitment_point, .. } => {
16371641
let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, prev_latest_per_commitment_point, &Some(*delayed_payment_base_key));
@@ -1784,44 +1788,66 @@ impl ChannelMonitor {
17841788
}
17851789

17861790
pub(super) fn would_broadcast_at_height(&self, height: u32) -> bool {
1787-
// TODO: We need to consider HTLCs which weren't included in latest local commitment
1788-
// transaction (or in any of the latest two local commitment transactions). This probably
1789-
// needs to use the same logic as the revoked-tx-announe logic - checking the last two
1790-
// remote commitment transactions. This probably has implications for what data we need to
1791-
// store in local commitment transactions.
1791+
// We need to consider all HTLCs which are:
1792+
// * in any unrevoked remote commitment transaction, as they could broadcast said
1793+
// transactions and we'd end up in a race, or
1794+
// * are in our latest local commitment transaction, as this is the thing we will
1795+
// broadcast if we go on-chain.
17921796
// Note that we consider HTLCs which were below dust threshold here - while they don't
17931797
// strictly imply that we need to fail the channel, we need to go ahead and fail them back
17941798
// to the source, and if we don't fail the channel we will have to ensure that the next
17951799
// updates that peer sends us are update_fails, failing the channel if not. It's probably
17961800
// easier to just fail the channel as this case should be rare enough anyway.
1801+
macro_rules! scan_commitment {
1802+
($htlcs: expr, $local_tx: expr) => {
1803+
for ref htlc in $htlcs {
1804+
// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
1805+
// chain with enough room to claim the HTLC without our counterparty being able to
1806+
// time out the HTLC first.
1807+
// For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
1808+
// concern is being able to claim the corresponding inbound HTLC (on another
1809+
// channel) before it expires. In fact, we don't even really care if our
1810+
// counterparty here claims such an outbound HTLC after it expired as long as we
1811+
// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
1812+
// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
1813+
// we give ourselves a few blocks of headroom after expiration before going
1814+
// on-chain for an expired HTLC.
1815+
// Note that, to avoid a potential attack whereby a node delays claiming an HTLC
1816+
// from us until we've reached the point where we go on-chain with the
1817+
// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
1818+
// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
1819+
// aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
1820+
// inbound_cltv == height + CLTV_CLAIM_BUFFER
1821+
// outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
1822+
// HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
1823+
// HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
1824+
let htlc_outbound = $local_tx == htlc.offered;
1825+
if ( htlc_outbound && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
1826+
(!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
1827+
log_info!(self, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry);
1828+
return true;
1829+
}
1830+
}
1831+
}
1832+
}
1833+
17971834
if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
1798-
for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() {
1799-
// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
1800-
// chain with enough room to claim the HTLC without our counterparty being able to
1801-
// time out the HTLC first.
1802-
// For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
1803-
// concern is being able to claim the corresponding inbound HTLC (on another
1804-
// channel) before it expires. In fact, we don't even really care if our
1805-
// counterparty here claims such an outbound HTLC after it expired as long as we
1806-
// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
1807-
// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
1808-
// we give ourselves a few blocks of headroom after expiration before going
1809-
// on-chain for an expired HTLC.
1810-
// Note that, to avoid a potential attack whereby a node delays claiming an HTLC
1811-
// from us until we've reached the point where we go on-chain with the
1812-
// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
1813-
// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
1814-
// aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
1815-
// inbound_cltv == height + CLTV_CLAIM_BUFFER
1816-
// outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFER <= inbound_cltv - CLTV_CLAIM_BUFFER
1817-
// HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFER <= inbound_cltv - outbound_cltv
1818-
// HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFER <= CLTV_EXPIRY_DELTA
1819-
if ( htlc.offered && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
1820-
(!htlc.offered && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
1821-
return true;
1835+
scan_commitment!(cur_local_tx.htlc_outputs.iter().map(|&(ref a, _, _)| a), true);
1836+
}
1837+
1838+
if let Storage::Local { ref current_remote_commitment_txid, ref prev_remote_commitment_txid, .. } = self.key_storage {
1839+
if let &Some(ref txid) = current_remote_commitment_txid {
1840+
if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
1841+
scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
1842+
}
1843+
}
1844+
if let &Some(ref txid) = prev_remote_commitment_txid {
1845+
if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
1846+
scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
18221847
}
18231848
}
18241849
}
1850+
18251851
false
18261852
}
18271853

src/ln/functional_tests.rs

Lines changed: 98 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6084,10 +6084,107 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
60846084
check_closed_broadcast!(nodes[1]);
60856085
}
60866086

6087+
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
6088+
let mut nodes = create_network(2);
6089+
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6090+
6091+
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV).unwrap();
6092+
let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
6093+
nodes[0].node.send_payment(route, payment_hash).unwrap();
6094+
check_added_monitors!(nodes[0], 1);
6095+
6096+
let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6097+
6098+
// As far as A is concerned, the HTLC is now present only in the latest remote commitment
6099+
// transaction, however it is not in A's latest local commitment, so we can just broadcast that
6100+
// to "time out" the HTLC.
6101+
6102+
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6103+
for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
6104+
nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
6105+
header.prev_blockhash = header.bitcoin_hash();
6106+
}
6107+
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
6108+
check_closed_broadcast!(nodes[0]);
6109+
}
6110+
6111+
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
6112+
let nodes = create_network(3);
6113+
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
6114+
6115+
// Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
6116+
// in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
6117+
// Also optionally test that we *don't* fail the channel in case the commitment transaction was
6118+
// actually revoked.
6119+
let htlc_value = if use_dust { 50000 } else { 3000000 };
6120+
let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
6121+
assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash, htlc_value));
6122+
expect_pending_htlcs_forwardable!(nodes[1]);
6123+
check_added_monitors!(nodes[1], 1);
6124+
6125+
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6126+
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
6127+
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
6128+
check_added_monitors!(nodes[0], 1);
6129+
let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6130+
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
6131+
check_added_monitors!(nodes[1], 1);
6132+
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1).unwrap();
6133+
check_added_monitors!(nodes[1], 1);
6134+
let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6135+
6136+
if check_revoke_no_close {
6137+
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
6138+
check_added_monitors!(nodes[0], 1);
6139+
}
6140+
6141+
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6142+
for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
6143+
nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
6144+
header.prev_blockhash = header.bitcoin_hash();
6145+
}
6146+
if !check_revoke_no_close {
6147+
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
6148+
check_closed_broadcast!(nodes[0]);
6149+
} else {
6150+
let events = nodes[0].node.get_and_clear_pending_events();
6151+
assert_eq!(events.len(), 1);
6152+
match events[0] {
6153+
Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
6154+
assert_eq!(payment_hash, our_payment_hash);
6155+
assert!(rejected_by_dest);
6156+
},
6157+
_ => panic!("Unexpected event"),
6158+
}
6159+
}
6160+
}
6161+
6162+
// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
6163+
// There are only a few cases to test here:
6164+
// * it's not really normative behavior, but we test that below-dust HTLCs "included" in
6165+
// broadcastable commitment transactions result in channel closure,
6166+
// * it's included in an unrevoked-but-previous remote commitment transaction,
6167+
// * it's included in the latest remote or local commitment transactions.
6168+
// We test each of the three possible commitment transactions individually and use both dust and
6169+
// non-dust HTLCs.
6170+
// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
6171+
// assume they are handled the same across all six cases, as both outbound and inbound failures are
6172+
// tested for at least one of the cases in other tests.
60876173
#[test]
6088-
fn htlc_claim_local_commitment_only() {
6174+
fn htlc_claim_single_commitment_only_a() {
60896175
do_htlc_claim_local_commitment_only(true);
60906176
do_htlc_claim_local_commitment_only(false);
6177+
6178+
do_htlc_claim_current_remote_commitment_only(true);
6179+
do_htlc_claim_current_remote_commitment_only(false);
6180+
}
6181+
6182+
#[test]
6183+
fn htlc_claim_single_commitment_only_b() {
6184+
do_htlc_claim_previous_remote_commitment_only(true, false);
6185+
do_htlc_claim_previous_remote_commitment_only(false, false);
6186+
do_htlc_claim_previous_remote_commitment_only(true, true);
6187+
do_htlc_claim_previous_remote_commitment_only(false, true);
60916188
}
60926189

60936190
fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<HTLCFailChannelUpdate>)

0 commit comments

Comments
 (0)