Skip to content

Commit 574554b

Browse files
committed
Enable decoding new incoming HTLC onions when fully committed
This commit ensures that all new incoming HTLCs going forward will have their onion decoded once they become fully committed, in order to decide how we should proceed with each one. As a result, we'll obtain `HTLCHandlingFailed` events for _any_ failed HTLC that comes across a channel. We will now start writing channels with the new serialization version (4), and we will still be able to downgrade back to the commit that introduced it, since reading version 4 is supported. Note that existing pending inbound HTLCs may already have their resolution determined if they were received in a previous version of LDK; we must continue to support those until we no longer allow downgrading beyond this commit.
1 parent 99b4a03 commit 574554b

11 files changed

+327
-272
lines changed

lightning/src/events/mod.rs

+4-2
Original file line number | Diff line number | Diff line change
@@ -1123,9 +1123,11 @@ pub enum Event {
11231123
/// * When an unknown SCID is requested for forwarding a payment.
11241124
/// * Expected MPP amount has already been reached
11251125
/// * The HTLC has timed out
1126+
/// * The HTLC failed to meet the forwarding requirements (i.e. insufficient fees paid, or a
1127+
/// CLTV that is too soon)
11261128
///
1127-
/// This event, however, does not get generated if an HTLC fails to meet the forwarding
1128-
/// requirements (i.e. insufficient fees paid, or a CLTV that is too soon).
1129+
/// The list above is not meant to cover all scenarios. This event should be expected for each
1130+
/// incoming HTLC that has been fully committed but we failed to handle.
11291131
HTLCHandlingFailed {
11301132
/// The channel over which the HTLC was received.
11311133
prev_channel_id: ChannelId,

lightning/src/ln/blinded_payment_tests.rs

+54-10
Original file line number | Diff line number | Diff line change
@@ -277,8 +277,10 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
277277
// We need the session priv to construct a bogus onion packet later.
278278
*nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]);
279279
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
280-
let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
281-
let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
280+
let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
281+
let chan_upd_1_2 = chan_1_2.0.contents;
282+
let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
283+
let chan_upd_2_3 = chan_2_3.0.contents;
282284

283285
let amt_msat = 5000;
284286
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
@@ -330,18 +332,27 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
330332
check_added_monitors!(nodes[1], 0);
331333
do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true);
332334

335+
expect_pending_htlcs_forwardable!(nodes[1]);
336+
check_added_monitors!(nodes[1], 1);
337+
333338
if intro_fails {
334339
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
335340
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
336341
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
342+
let failed_destination = match check {
343+
ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion,
344+
ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::FailedPayment { payment_hash },
345+
ForwardCheckFail::OutboundChannelCheck =>
346+
HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 },
347+
};
348+
expect_htlc_handling_failed_destinations!(
349+
nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()]
350+
);
337351
expect_payment_failed_conditions(&nodes[0], payment_hash, false,
338352
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
339353
return
340354
}
341355

342-
expect_pending_htlcs_forwardable!(nodes[1]);
343-
check_added_monitors!(nodes[1], 1);
344-
345356
let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
346357
let mut update_add = &mut updates_1_2.update_add_htlcs[0];
347358

@@ -351,6 +362,17 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
351362
check_added_monitors!(nodes[2], 0);
352363
do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true);
353364

365+
expect_pending_htlcs_forwardable!(nodes[2]);
366+
let failed_destination = match check {
367+
ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion,
368+
ForwardCheckFail::OutboundChannelCheck =>
369+
HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 },
370+
};
371+
expect_htlc_handling_failed_destinations!(
372+
nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()]
373+
);
374+
check_added_monitors!(nodes[2], 1);
375+
354376
let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
355377
let update_malformed = &mut updates.update_fail_malformed_htlcs[0];
356378
assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
@@ -410,7 +432,10 @@ fn failed_backwards_to_intro_node() {
410432
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
411433
check_added_monitors!(nodes[2], 0);
412434
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true);
413-
nodes[2].node.process_pending_htlc_forwards();
435+
436+
expect_pending_htlcs_forwardable!(nodes[2]);
437+
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]);
438+
check_added_monitors(&nodes[2], 1);
414439

415440
let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
416441
let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0];
@@ -486,7 +511,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
486511
// intro node will error backwards.
487512
$curr_node.node.peer_disconnected(&$next_node.node.get_our_node_id());
488513
expect_pending_htlcs_forwardable!($curr_node);
489-
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
514+
expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(),
490515
vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]);
491516
},
492517
ProcessPendingHTLCsCheck::FwdChannelClosed => {
@@ -502,12 +527,12 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
502527
crate::events::Event::ChannelClosed { .. } => {},
503528
_ => panic!("Unexpected event {:?}", events),
504529
}
530+
check_closed_broadcast(&$curr_node, 1, true);
531+
check_added_monitors!($curr_node, 1);
505532

506533
$curr_node.node.process_pending_htlc_forwards();
507-
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
534+
expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(),
508535
vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]);
509-
check_closed_broadcast(&$curr_node, 1, true);
510-
check_added_monitors!($curr_node, 1);
511536
$curr_node.node.process_pending_htlc_forwards();
512537
},
513538
}
@@ -593,6 +618,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) {
593618
};
594619
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
595620
commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
621+
expect_pending_htlcs_forwardable!(nodes[1]);
596622

597623
let events = nodes[1].node.get_and_clear_pending_events();
598624
assert_eq!(events.len(), 1);
@@ -894,13 +920,19 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
894920
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
895921
check_added_monitors!(nodes[2], 0);
896922
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
923+
expect_pending_htlcs_forwardable!(nodes[2]);
924+
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]);
925+
check_added_monitors(&nodes[2], 1);
897926
},
898927
ReceiveCheckFail::ReceiveRequirements => {
899928
let update_add = &mut payment_event_1_2.msgs[0];
900929
update_add.amount_msat -= 1;
901930
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
902931
check_added_monitors!(nodes[2], 0);
903932
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
933+
expect_pending_htlcs_forwardable!(nodes[2]);
934+
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
935+
check_added_monitors(&nodes[2], 1);
904936
},
905937
ReceiveCheckFail::ChannelCheck => {
906938
nodes[2].node.close_channel(&chan_id_1_2, &nodes[1].node.get_our_node_id()).unwrap();
@@ -914,6 +946,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
914946

915947
nodes[2].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
916948
commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false);
949+
expect_pending_htlcs_forwardable!(nodes[2]);
950+
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
951+
check_added_monitors(&nodes[2], 1);
917952
},
918953
ReceiveCheckFail::ProcessPendingHTLCsCheck => {
919954
assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV);
@@ -929,6 +964,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
929964
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
930965
check_added_monitors!(nodes[2], 0);
931966
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
967+
expect_pending_htlcs_forwardable!(nodes[2]);
968+
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
969+
check_added_monitors(&nodes[2], 1);
932970
}
933971
}
934972

@@ -1129,6 +1167,12 @@ fn min_htlc() {
11291167
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]);
11301168
check_added_monitors!(nodes[1], 0);
11311169
do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true);
1170+
expect_pending_htlcs_forwardable!(nodes[1]);
1171+
expect_htlc_handling_failed_destinations!(
1172+
nodes[1].node.get_and_clear_pending_events(),
1173+
&[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }]
1174+
);
1175+
check_added_monitors(&nodes[1], 1);
11321176
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
11331177
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
11341178
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);

lightning/src/ln/channel.rs

+10-44
Original file line number | Diff line number | Diff line change
@@ -4179,8 +4179,7 @@ impl<SP: Deref> Channel<SP> where
41794179
}
41804180

41814181
pub fn update_add_htlc<F: Deref>(
4182-
&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
4183-
fee_estimator: &LowerBoundedFeeEstimator<F>,
4182+
&mut self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>,
41844183
) -> Result<(), ChannelError> where F::Target: FeeEstimator {
41854184
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
41864185
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
@@ -4280,21 +4279,15 @@ impl<SP: Deref> Channel<SP> where
42804279
return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
42814280
}
42824281

4283-
if self.context.channel_state.is_local_shutdown_sent() {
4284-
if let PendingHTLCStatus::Forward(_) = pending_forward_status {
4285-
panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
4286-
}
4287-
}
4288-
42894282
// Now update local state:
42904283
self.context.next_counterparty_htlc_id += 1;
42914284
self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
42924285
htlc_id: msg.htlc_id,
42934286
amount_msat: msg.amount_msat,
42944287
payment_hash: msg.payment_hash,
42954288
cltv_expiry: msg.cltv_expiry,
4296-
state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
4297-
pending_htlc_status: pending_forward_status
4289+
state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending {
4290+
update_add_htlc: msg.clone(),
42984291
}),
42994292
});
43004293
Ok(())
@@ -6171,7 +6164,7 @@ impl<SP: Deref> Channel<SP> where
61716164
};
61726165
let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
61736166
if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
6174-
let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
6167+
let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat;
61756168
if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
61766169
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
61776170
on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
@@ -6191,7 +6184,7 @@ impl<SP: Deref> Channel<SP> where
61916184

61926185
let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
61936186
if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
6194-
let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
6187+
let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat;
61956188
if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
61966189
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
61976190
on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
@@ -6225,11 +6218,11 @@ impl<SP: Deref> Channel<SP> where
62256218
// side, only on the sender's. Note that with anchor outputs we are no longer as
62266219
// sensitive to fee spikes, so we need to account for them.
62276220
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
6228-
let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
6221+
let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
62296222
if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
62306223
remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
62316224
}
6232-
if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
6225+
if pending_remote_value_msat.saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
62336226
log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
62346227
return Err(("Fee spike buffer violation", 0x1000|7));
62356228
}
@@ -8453,18 +8446,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
84538446
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
84548447
// called.
84558448

8456-
let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
8457-
InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
8458-
InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
8459-
matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
8460-
},
8461-
_ => false,
8462-
}) {
8463-
SERIALIZATION_VERSION
8464-
} else {
8465-
MIN_SERIALIZATION_VERSION
8466-
};
8467-
write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
8449+
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
84688450

84698451
// `user_id` used to be a single u64 value. In order to remain backwards compatible with
84708452
// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
@@ -8522,27 +8504,11 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
85228504
&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
85238505
&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
85248506
1u8.write(writer)?;
8525-
if version_to_write <= 3 {
8526-
if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8527-
pending_htlc_status.write(writer)?;
8528-
} else {
8529-
panic!();
8530-
}
8531-
} else {
8532-
htlc_resolution.write(writer)?;
8533-
}
8507+
htlc_resolution.write(writer)?;
85348508
},
85358509
&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
85368510
2u8.write(writer)?;
8537-
if version_to_write <= 3 {
8538-
if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8539-
pending_htlc_status.write(writer)?;
8540-
} else {
8541-
panic!();
8542-
}
8543-
} else {
8544-
htlc_resolution.write(writer)?;
8545-
}
8511+
htlc_resolution.write(writer)?;
85468512
},
85478513
&InboundHTLCState::Committed => {
85488514
3u8.write(writer)?;

0 commit comments

Comments (0)