
Commit 14fd4a2

Handle double-HTLC-claims without failing the backwards channel
When receiving an update_fulfill_htlc message, we immediately forward the claim backwards along the payment path before waiting for a full commitment_signed dance. This is great, but can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects, reconnects, and then has to re-send its update_fulfill_htlc message again.

While there was code to handle this, it treated it as a channel error on the inbound channel, which is incorrect - this is an expected, albeit incredibly rare, condition. Instead, we handle these double-claims correctly, simply ignoring them.

With debug_assertions enabled, we also check that the previous close of the same HTLC was a fulfill, and that we are not moving from an HTLC failure to an HTLC claim after it's too late.

A test is also added, which hits all three failure cases in `Channel::get_update_fulfill_htlc`.

Found by the chanmon_consistency fuzzer.
1 parent 431f807
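The change can be sketched in simplified form as follows. This is illustrative only and not the actual `Channel` code from this commit; `DummyChannel`, `pending_inbound_htlc_ids`, and the return type are made-up stand-ins. The idea: a re-sent update_fulfill_htlc for an HTLC we already claimed is simply ignored, while a debug-only set of previously fulfilled HTLC IDs lets us assert that the duplicate really does correspond to an earlier fulfill.

use std::collections::HashSet;

struct DummyChannel {
	// HTLCs still pending on the inbound channel (illustrative stand-in for real channel state).
	pending_inbound_htlc_ids: HashSet<u64>,
	// Debug/test-only record of HTLC IDs we have already generated a fulfill for.
	historical_inbound_htlc_fulfills: HashSet<u64>,
}

impl DummyChannel {
	/// Returns Some(htlc_id) when a new claim should be generated, or None when the
	/// claim is a harmless duplicate that should be ignored (previously a channel error).
	fn get_update_fulfill_htlc(&mut self, htlc_id: u64) -> Option<u64> {
		if !self.pending_inbound_htlc_ids.remove(&htlc_id) {
			// Double-claim case: the HTLC was already resolved. In debug builds, check
			// that the earlier resolution was indeed a fulfill, then ignore the claim.
			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id));
			return None;
		}
		self.historical_inbound_htlc_fulfills.insert(htlc_id);
		Some(htlc_id)
	}
}

fn main() {
	let mut pending = HashSet::new();
	pending.insert(42u64);
	let mut chan = DummyChannel {
		pending_inbound_htlc_ids: pending,
		historical_inbound_htlc_fulfills: HashSet::new(),
	};
	assert_eq!(chan.get_update_fulfill_htlc(42), Some(42)); // first claim succeeds
	assert_eq!(chan.get_update_fulfill_htlc(42), None);     // re-sent claim is ignored
}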

File tree

2 files changed: +139, -3 lines


lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 94 additions & 0 deletions
@@ -2215,3 +2215,97 @@ fn channel_holding_cell_serialize() {
 	do_channel_holding_cell_serialize(true, false);
 	do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
 }
+
+#[derive(PartialEq)]
+enum HTLCStatusAtDupClaim {
+	Received,
+	HoldingCell,
+	Cleared,
+}
+fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim) {
+	// When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
+	// along the payment path before waiting for a full commitment_signed dance. This is great, but
+	// can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
+	// reconnects, and then has to re-send its update_fulfill_htlc message again.
+	// In previous code, we didn't handle the double-claim correctly, spuriously closing the
+	// channel on which the inbound HTLC was received.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+	let preimage = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000).0;
+
+	let mut as_raa = None;
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
+		// awaiting a remote revoke_and_ack from nodes[0].
+		let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
+			&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+		nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+		check_added_monitors!(nodes[1], 1);
+
+		let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+		check_added_monitors!(nodes[0], 1);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+		check_added_monitors!(nodes[0], 1);
+
+		as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+	}
+
+	assert!(nodes[2].node.claim_funds(preimage));
+	check_added_monitors!(nodes[2], 1);
+	let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+	check_added_monitors!(nodes[1], 1);
+
+	let mut bs_updates = None;
+	if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], preimage);
+		if htlc_status == HTLCStatusAtDupClaim::Cleared {
+			commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+		}
+	} else {
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
+		check_added_monitors!(nodes[1], 1);
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], preimage);
+	}
+	if htlc_status != HTLCStatusAtDupClaim::Cleared {
+		commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+	}
+}
+
+#[test]
+fn test_reconnect_dup_htlc_claims() {
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared);
+}

lightning/src/ln/channel.rs

Lines changed: 45 additions & 3 deletions
@@ -444,6 +444,15 @@ pub(super) struct Channel<Signer: Sign> {
 	///
 	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
 	pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+
+	#[cfg(any(test, feature = "fuzztarget"))]
+	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
+	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
+	// disconnected and reconnected, they may re-broadcast their update_fulfill_htlc,
+	// causing a double-claim. This is fine, but as a sanity check in our failure to
+	// generate the second claim, we check here that the original was a claim, and that we
+	// aren't now trying to fulfill a failed HTLC.
+	historical_inbound_htlc_fulfills: HashSet<u64>,
 }

 #[cfg(any(test, feature = "fuzztarget"))]
@@ -645,6 +654,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		})
 	}

@@ -890,6 +902,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		};

 		Ok(chan)
@@ -1249,8 +1264,8 @@ impl<Signer: Sign> Channel<Signer> {
 						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
 						} else {
 							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+							debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
 						}
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
 						return Ok((None, None));
 					},
 					_ => {
@@ -1263,7 +1278,9 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return Ok((None, None));
 		}

 		// Now update local state:
@@ -1285,7 +1302,8 @@ impl<Signer: Sign> Channel<Signer> {
 					if htlc_id_arg == htlc_id {
 						// Make sure we don't leave latest_monitor_update_id incremented here:
 						self.latest_monitor_update_id -= 1;
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
+						#[cfg(any(test, feature = "fuzztarget"))]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 						return Ok((None, None));
 					}
 				},
@@ -1305,8 +1323,12 @@ impl<Signer: Sign> Channel<Signer> {
 			self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 			});
+			#[cfg(any(test, feature = "fuzztarget"))]
+			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 			return Ok((None, Some(monitor_update)));
 		}
+		#[cfg(any(test, feature = "fuzztarget"))]
+		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

 		{
 			let htlc = &mut self.pending_inbound_htlcs[pending_idx];
@@ -4690,6 +4712,13 @@ impl<Signer: Sign> Writeable for Channel<Signer> {

 		self.channel_update_status.write(writer)?;

+		#[cfg(any(test, feature = "fuzztarget"))]
+		(self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
+		#[cfg(any(test, feature = "fuzztarget"))]
+		for htlc in self.historical_inbound_htlc_fulfills.iter() {
+			htlc.write(writer)?;
+		}
+
 		write_tlv_fields!(writer, {
 			(0, self.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -4882,6 +4911,16 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>

 		let channel_update_status = Readable::read(reader)?;

+		#[cfg(any(test, feature = "fuzztarget"))]
+		let mut historical_inbound_htlc_fulfills = HashSet::new();
+		#[cfg(any(test, feature = "fuzztarget"))]
+		{
+			let htlc_fulfills_len: u64 = Readable::read(reader)?;
+			for _ in 0..htlc_fulfills_len {
+				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
+			}
+		}
+
 		let mut announcement_sigs = None;
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -4973,6 +5012,9 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills,
 		})
 	}
 }
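The test-only serialization added in the two hunks above writes the HashSet<u64> as a u64 count followed by each HTLC ID, ahead of the TLV fields, and reads it back the same way. As a rough, standalone illustration of that pattern, here is a round trip; the helper names and the big-endian encoding are assumptions for this sketch, not LDK's actual `Writeable`/`Readable` implementations.

use std::collections::HashSet;

// Decode a big-endian u64 at the given byte offset.
fn read_u64_at(buf: &[u8], off: usize) -> u64 {
	let mut b = [0u8; 8];
	b.copy_from_slice(&buf[off..off + 8]);
	u64::from_be_bytes(b)
}

// Write the set as a u64 count followed by each element.
fn write_fulfills(set: &HashSet<u64>, out: &mut Vec<u8>) {
	out.extend_from_slice(&(set.len() as u64).to_be_bytes());
	for id in set.iter() {
		out.extend_from_slice(&id.to_be_bytes());
	}
}

// Read the count, then each element; a duplicate entry would indicate corruption,
// mirroring the assert! in the deserialization hunk above.
fn read_fulfills(buf: &[u8]) -> HashSet<u64> {
	let len = read_u64_at(buf, 0) as usize;
	let mut set = HashSet::new();
	for i in 0..len {
		assert!(set.insert(read_u64_at(buf, 8 + i * 8)));
	}
	set
}

fn main() {
	let set: HashSet<u64> = vec![1u64, 7, 42].into_iter().collect();
	let mut buf = Vec::new();
	write_fulfills(&set, &mut buf);
	assert_eq!(read_fulfills(&buf), set);
}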
