Commit db5ddcb
Handle double-HTLC-claims without failing the backwards channel
When receiving an update_fulfill_htlc message, we immediately forward the claim backwards along the payment path before waiting for a full commitment_signed dance. This is great, but can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects, reconnects, and then has to re-send its update_fulfill_htlc message again.

While there was code to handle this, it treated it as a channel error on the inbound channel, which is incorrect - this is an expected, albeit incredibly rare, condition. Instead, we handle these double-claims correctly, simply ignoring them.

With debug_assertions enabled, we also check that the previous close of the same HTLC was a fulfill, and that we are not moving from an HTLC failure to an HTLC claim after it's too late.

A test is also added, which hits all three failure cases in `Channel::get_update_fulfill_htlc`.

Found by the chanmon_consistency fuzzer.
1 parent f472907 commit db5ddcb
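To illustrate the behavior described in the commit message, here is a minimal, self-contained sketch (not LDK's actual API) of the idea: remember which inbound HTLC IDs were already fulfilled, so that a re-sent update_fulfill_htlc after a reconnect is simply ignored rather than treated as a channel error. The `Channel`, `ClaimResult`, and `get_update_fulfill_htlc` shapes below are hypothetical simplifications of the real code in lightning/src/ln/channel.rs.

use std::collections::HashSet;

/// Hypothetical, simplified stand-in for the real `Channel` state machine.
struct Channel {
	/// HTLC IDs that still have a pending inbound HTLC we can claim.
	pending_inbound_htlcs: HashSet<u64>,
	/// Only tracked with debug_assertions: every HTLC ID we ever fulfilled,
	/// so a duplicate claim can be distinguished from a claim-after-failure.
	#[cfg(debug_assertions)]
	historical_inbound_htlc_fulfills: HashSet<u64>,
}

enum ClaimResult {
	/// First time we saw this claim: generate an update_fulfill_htlc backwards.
	NewClaim,
	/// Peer re-sent the claim after a reconnect: do nothing, but don't close the channel.
	DuplicateClaim,
}

impl Channel {
	fn get_update_fulfill_htlc(&mut self, htlc_id: u64) -> ClaimResult {
		if !self.pending_inbound_htlcs.remove(&htlc_id) {
			// The HTLC was already removed. Previously this was treated as a
			// channel error; instead we just ignore it, asserting (in debug
			// builds only) that the earlier removal really was a fulfill.
			#[cfg(debug_assertions)]
			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id));
			return ClaimResult::DuplicateClaim;
		}
		#[cfg(debug_assertions)]
		self.historical_inbound_htlc_fulfills.insert(htlc_id);
		ClaimResult::NewClaim
	}
}

fn main() {
	let mut chan = Channel {
		pending_inbound_htlcs: [42u64].into_iter().collect(),
		#[cfg(debug_assertions)]
		historical_inbound_htlc_fulfills: HashSet::new(),
	};
	assert!(matches!(chan.get_update_fulfill_htlc(42), ClaimResult::NewClaim));
	// Simulate the peer re-sending update_fulfill_htlc after a reconnect.
	assert!(matches!(chan.get_update_fulfill_htlc(42), ClaimResult::DuplicateClaim));
}

The actual change below follows the same pattern: the duplicate-claim paths return Ok((None, None)) instead of an error, and the debug-only historical_inbound_htlc_fulfills set backs the sanity checks.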

2 files changed: +122 −3 lines

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 94 additions & 0 deletions
@@ -2183,3 +2183,97 @@ fn channel_holding_cell_serialize() {
 	do_channel_holding_cell_serialize(true, false);
 	do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
 }
+
+#[derive(PartialEq)]
+enum HTLCStatusAtDupClaim {
+	Received,
+	HoldingCell,
+	Cleared,
+}
+fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim) {
+	// When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
+	// along the payment path before waiting for a full commitment_signed dance. This is great, but
+	// can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
+	// reconnects, and then has to re-send its update_fulfill_htlc message again.
+	// In previous code, we didn't handle the double-claim correctly, spuriously closing the
+	// channel on which the inbound HTLC was received.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+	let preimage = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000).0;
+
+	let mut as_raa = None;
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
+		// awaiting a remote revoke_and_ack from nodes[0].
+		let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
+			&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+		nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+		check_added_monitors!(nodes[1], 1);
+
+		let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+		check_added_monitors!(nodes[0], 1);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+		check_added_monitors!(nodes[0], 1);
+
+		as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+	}
+
+	assert!(nodes[2].node.claim_funds(preimage));
+	check_added_monitors!(nodes[2], 1);
+	let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+	check_added_monitors!(nodes[1], 1);
+
+	let mut bs_updates = None;
+	if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], preimage);
+		if htlc_status == HTLCStatusAtDupClaim::Cleared {
+			commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+		}
+	} else {
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
+		check_added_monitors!(nodes[1], 1);
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], preimage);
+	}
+	if htlc_status != HTLCStatusAtDupClaim::Cleared {
+		commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+	}
+}
+
+#[test]
+fn test_reconnect_dup_htlc_claims() {
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared);
+}

lightning/src/ln/channel.rs

Lines changed: 28 additions & 3 deletions
@@ -443,6 +443,15 @@ pub(super) struct Channel<Signer: Sign> {
 	///
 	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
 	pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+
+	#[cfg(debug_assertions)]
+	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
+	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
+	// disconnected and reconnected, they may re-broadcast their update_fulfill_htlc,
+	// causing a double-claim. This is fine, but as a sanity check in our failure to
+	// generate the second claim, we check here that the original was a claim, and that we
+	// aren't now trying to fulfill a failed HTLC.
+	historical_inbound_htlc_fulfills: HashSet<u64>,
 }

 #[cfg(any(test, feature = "fuzztarget"))]
@@ -644,6 +653,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

 			workaround_lnd_bug_4006: None,
+
+			#[cfg(debug_assertions)]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		})
 	}

@@ -889,6 +901,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

 			workaround_lnd_bug_4006: None,
+
+			#[cfg(debug_assertions)]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		};

 		Ok(chan)
@@ -1255,8 +1270,8 @@ impl<Signer: Sign> Channel<Signer> {
 						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
 						} else {
 							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+							debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
 						}
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
 						return Ok((None, None));
 					},
 					_ => {
@@ -1269,7 +1284,9 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(debug_assertions)]
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return Ok((None, None));
 		}

 		// Now update local state:
@@ -1291,7 +1308,8 @@ impl<Signer: Sign> Channel<Signer> {
 					if htlc_id_arg == htlc_id {
 						// Make sure we don't leave latest_monitor_update_id incremented here:
 						self.latest_monitor_update_id -= 1;
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
+						#[cfg(debug_assertions)]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 						return Ok((None, None));
 					}
 				},
@@ -1311,8 +1329,12 @@ impl<Signer: Sign> Channel<Signer> {
 			self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 			});
+			#[cfg(debug_assertions)]
+			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 			return Ok((None, Some(monitor_update)));
 		}
+		#[cfg(debug_assertions)]
+		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

 		{
 			let htlc = &mut self.pending_inbound_htlcs[pending_idx];
@@ -4920,6 +4942,9 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

 			workaround_lnd_bug_4006: None,
+
+			#[cfg(debug_assertions)]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		})
 	}
 }
