diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index 850a0786671..4ed72902d52 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -592,23 +592,19 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let error_message = "Channel force-closed"; + + let message = "Channel force-closed".to_owned(); nodes[1] .node - .force_close_broadcasting_latest_txn( - &chan.2, - &nodes[0].node.get_our_node_id(), - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone()) .unwrap(); - check_closed_event!( - nodes[1], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [nodes[0].node.get_our_node_id()], - 100000 - ); + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); // Set the store's directory to read-only, which should result in @@ -640,23 +636,19 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let error_message = "Channel force-closed"; + + let message = "Channel force-closed".to_owned(); nodes[1] .node - .force_close_broadcasting_latest_txn( - &chan.2, - &nodes[0].node.get_our_node_id(), - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone()) .unwrap(); - check_closed_event!( - nodes[1], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [nodes[0].node.get_our_node_id()], - 100000 - ); + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap(); diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index c69652c1455..c6617e8be1e 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -137,6 +137,8 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Check that the persisted channel data is empty before any channels are // open. let mut persisted_chan_data_0 = @@ -178,22 +180,14 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { // Force close because cooperative close doesn't result in any persisted // updates. 
- let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); + let chan_id = nodes[0].node.list_channels()[0].channel_id; nodes[0] .node - .force_close_broadcasting_latest_txn( - &nodes[0].node.list_channels()[0].channel_id, - &nodes[1].node.get_our_node_id(), - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone()) .unwrap(); - check_closed_event!( - nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [nodes[1].node.get_our_node_id()], - 100000 - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index bb290deba9c..8e18fca14bb 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1458,12 +1458,13 @@ mod tests { // Test that monitors with pending_claims are persisted on every block. // Now, close channel_2 i.e. b/w node-0 and node-2 to create pending_claim in node[0]. + let message = "Channel force-closed".to_owned(); nodes[0] .node - .force_close_broadcasting_latest_txn(&channel_2, &node_c_id, "closed".to_string()) + .force_close_broadcasting_latest_txn(&channel_2, &node_c_id, message.clone()) .unwrap(); let closure_reason = - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(&nodes[0], 1, closure_reason, false, [node_c_id], 1000000); check_closed_broadcast(&nodes[0], 1, true); let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 5a4a94b1641..368442884bd 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3551,7 +3551,11 @@ impl ChannelMonitorImpl { F::Target: FeeEstimator, L::Target: Logger, { - let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }); + let reason = ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + message: "ChannelMonitor-initiated commitment transaction broadcast".to_owned(), + }; + let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(reason); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests( claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster, diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d01af737c32..d81de931009 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -318,23 +318,30 @@ pub enum ClosureReason { /// [`UntrustedString`]: crate::util::string::UntrustedString peer_msg: UntrustedString, }, - /// Closure generated from [`ChannelManager::force_close_channel`], called by the user. + /// Closure generated from [`ChannelManager::force_close_broadcasting_latest_txn`] or + /// [`ChannelManager::force_close_all_channels_broadcasting_latest_txn`], called by the user. /// - /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel. 
+ /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn + /// [`ChannelManager::force_close_all_channels_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_all_channels_broadcasting_latest_txn HolderForceClosed { /// Whether or not the latest transaction was broadcasted when the channel was force /// closed. /// - /// Channels closed using [`ChannelManager::force_close_broadcasting_latest_txn`] will have - /// this field set to true, whereas channels closed using [`ChannelManager::force_close_without_broadcasting_txn`] - /// or force-closed prior to being funded will have this field set to false. + /// This will be set to `Some(true)` for any channels closed after their funding + /// transaction was (or might have been) broadcasted, and `Some(false)` for any channels + /// closed prior to their funding transaction being broadcasted. /// /// This will be `None` for objects generated or written by LDK 0.0.123 and /// earlier. - /// - /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn. - /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn. broadcasted_latest_txn: Option<bool>, + /// The error message provided to [`ChannelManager::force_close_broadcasting_latest_txn`] or + /// [`ChannelManager::force_close_all_channels_broadcasting_latest_txn`]. + /// + /// This will be the empty string for objects generated or written by LDK 0.1 and earlier. + /// + /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn + /// [`ChannelManager::force_close_all_channels_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_all_channels_broadcasting_latest_txn + message: String, }, /// The channel was closed after negotiating a cooperative close and we've now broadcasted /// the cooperative close transaction. Note the shutdown may have been initiated by us. @@ -356,7 +363,8 @@ pub enum ClosureReason { /// commitment transaction came from our counterparty, but it may also have come from /// a copy of our own `ChannelMonitor`. CommitmentTxConfirmed, - /// The funding transaction failed to confirm in a timely manner on an inbound channel. + /// The funding transaction failed to confirm in a timely manner on an inbound channel or the + /// counterparty failed to fund the channel in a timely manner. FundingTimedOut, /// Closure generated from processing an event, likely a HTLC forward/relay/reception. ProcessingError { @@ -383,6 +391,12 @@ pub enum ClosureReason { /// The counterparty requested a cooperative close of a channel that had not been funded yet. /// The channel has been immediately closed. CounterpartyCoopClosedUnfundedChannel, + /// We requested a cooperative close of a channel that had not been funded yet. + /// The channel has been immediately closed. + /// + /// Note that events containing this variant will be lost on downgrade to a version of LDK + /// prior to 0.2. + LocallyCoopClosedUnfundedChannel, /// Another channel in the same funding batch closed before the funding transaction /// was ready to be broadcast.
FundingBatchClosure, @@ -412,12 +426,13 @@ impl core::fmt::Display for ClosureReason { ClosureReason::CounterpartyForceClosed { peer_msg } => { f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg)) }, - ClosureReason::HolderForceClosed { broadcasted_latest_txn } => { - f.write_str("user force-closed the channel")?; + ClosureReason::HolderForceClosed { broadcasted_latest_txn, message } => { + f.write_str("user force-closed the channel with the message \"")?; + f.write_str(message)?; if let Some(brodcasted) = broadcasted_latest_txn { write!( f, - " and {} the latest transaction", + "\" and {} the latest transaction", if *brodcasted { "broadcasted" } else { "elected not to broadcast" } ) } else { @@ -454,6 +469,9 @@ impl core::fmt::Display for ClosureReason { ClosureReason::CounterpartyCoopClosedUnfundedChannel => { f.write_str("the peer requested the unfunded channel be closed") }, + ClosureReason::LocallyCoopClosedUnfundedChannel => { + f.write_str("we requested the unfunded channel be closed") + }, ClosureReason::FundingBatchClosure => { f.write_str("another channel in the same funding batch closed") }, @@ -472,7 +490,10 @@ impl core::fmt::Display for ClosureReason { impl_writeable_tlv_based_enum_upgradable!(ClosureReason, (0, CounterpartyForceClosed) => { (1, peer_msg, required) }, (1, FundingTimedOut) => {}, - (2, HolderForceClosed) => { (1, broadcasted_latest_txn, option) }, + (2, HolderForceClosed) => { + (1, broadcasted_latest_txn, option), + (3, message, (default_value, String::new())), + }, (6, CommitmentTxConfirmed) => {}, (4, LegacyCooperativeClosure) => {}, (8, ProcessingError) => { (1, err, required) }, @@ -487,6 +508,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason, (0, peer_feerate_sat_per_kw, required), (2, required_feerate_sat_per_kw, required), }, + (25, LocallyCoopClosedUnfundedChannel) => {}, ); /// The type of HTLC handling performed in [`Event::HTLCHandlingFailed`]. @@ -1461,7 +1483,7 @@ pub enum Event { /// /// To accept the request (and in the case of a dual-funded channel, not contribute funds), /// call [`ChannelManager::accept_inbound_channel`]. - /// To reject the request, call [`ChannelManager::force_close_without_broadcasting_txn`]. + /// To reject the request, call [`ChannelManager::force_close_broadcasting_latest_txn`]. /// Note that a ['ChannelClosed`] event will _not_ be triggered if the channel is rejected. /// /// The event is only triggered when a new open channel request is received and the @@ -1472,27 +1494,27 @@ pub enum Event { /// returning `Err(ReplayEvent ())`) and won't be persisted across restarts. /// /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn + /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels OpenChannelRequest { /// The temporary channel ID of the channel requested to be opened. /// /// When responding to the request, the `temporary_channel_id` should be passed /// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept, - /// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject. 
+ /// or through [`ChannelManager::force_close_broadcasting_latest_txn`] to reject. /// /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn + /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn temporary_channel_id: ChannelId, /// The node_id of the counterparty requesting to open the channel. /// /// When responding to the request, the `counterparty_node_id` should be passed /// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to - /// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the - /// request. + /// accept the request, or through [`ChannelManager::force_close_broadcasting_latest_txn`] + /// to reject the request. /// /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn + /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn counterparty_node_id: PublicKey, /// The channel value of the requested channel. funding_satoshis: u64, diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index b667007295a..69abfbf3312 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -1011,17 +1011,18 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) { // Route an HTLC and set the signer as unavailable. let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let error_message = "Channel force-closed"; if remote_commitment { + let message = "Channel force-closed".to_owned(); // Make the counterparty broadcast its latest commitment. 
nodes[1] .node - .force_close_broadcasting_latest_txn(&chan_id, &node_a_id, error_message.to_string()) + .force_close_broadcasting_latest_txn(&chan_id, &node_a_id, message.clone()) .unwrap(); check_added_monitors(&nodes[1], 1); check_closed_broadcast(&nodes[1], 1, true); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100_000); } else { nodes[0].disable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignHolderCommitment); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index ef8f256ed5e..6f36ea43605 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -261,8 +261,12 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } // ...and make sure we can force-close a frozen channel - let err_msg = "Channel force-closed".to_owned(); - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, err_msg).unwrap(); + let message = "Channel force-closed".to_owned(); + let reason = ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + message: message.clone(), + }; + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); @@ -270,7 +274,6 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // PaymentPathFailed event assert_eq!(nodes[0].node.list_channels().len(), 0); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } @@ -3757,27 +3760,30 @@ fn do_test_durable_preimages_on_closed_channel( let _ = get_revoke_commit_msgs!(nodes[1], node_c_id); let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); - let err_msg = "Channel force-closed".to_owned(); if close_chans_before_reload { if !close_only_a { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let message = "Channel force-closed".to_owned(); nodes[1] .node - .force_close_broadcasting_latest_txn(&chan_id_bc, &node_c_id, err_msg.clone()) + .force_close_broadcasting_latest_txn(&chan_id_bc, &node_c_id, message.clone()) .unwrap(); check_closed_broadcast(&nodes[1], 1, true); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let message = "Channel force-closed".to_owned(); nodes[1] .node - .force_close_broadcasting_latest_txn(&chan_id_ab, &node_a_id, err_msg) + .force_close_broadcasting_latest_txn(&chan_id_ab, &node_a_id, message.clone()) .unwrap(); check_closed_broadcast(&nodes[1], 1, true); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } @@ -3799,8 +3805,11 @@ fn do_test_durable_preimages_on_closed_channel( } let err_msg = "Channel force-closed".to_owned(); + let reason = ClosureReason::HolderForceClosed { + 
broadcasted_latest_txn: Some(true), + message: err_msg.clone(), + }; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, err_msg).unwrap(); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_closing_tx.len(), 1); @@ -3964,10 +3973,13 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { if close_during_reload { // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded // (as learned about during the on-reload block connection). + let reason = ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + message: msg.clone(), + }; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); @@ -4292,12 +4304,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + let message = "Channel force-closed".to_owned(); nodes[0] .node - .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, String::new()) + .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); check_added_monitors!(nodes[0], 1); - let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4367,12 +4380,13 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let message = "Channel force-closed".to_owned(); nodes[0] .node - .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, String::new()) + .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); check_added_monitors!(nodes[0], 1); - let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b516961d100..a52c11a26a8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1775,11 +1775,9 @@ where } } - pub fn force_shutdown( - &mut self, should_broadcast: bool, closure_reason: ClosureReason, - ) -> ShutdownResult { + pub fn force_shutdown(&mut self, closure_reason: ClosureReason) -> ShutdownResult { let (funding, context) = self.funding_and_context_mut(); - context.force_shutdown(funding, should_broadcast, closure_reason) + context.force_shutdown(funding, closure_reason) } #[rustfmt::skip] @@ -2937,11 +2935,9 @@ where for (idx, outp) in signing_session.unsigned_tx().outputs().enumerate() { if outp.script_pubkey() == &expected_spk && outp.value() == self.funding.get_value_satoshis() { if output_index.is_some() { - return Err(ChannelError::Close( - ( - "Multiple outputs matched the expected script and value".to_owned(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))); + let msg = "Multiple outputs matched the expected script and value"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } output_index = Some(idx as u16); } @@ -2949,11 +2945,9 @@ where let outpoint = if let Some(output_index) = output_index { OutPoint { txid: signing_session.unsigned_tx().compute_txid(), index: output_index } } else { - return Err(ChannelError::Close( - ( - "No output matched the funding script_pubkey".to_owned(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))); + let msg = "No output matched the funding script_pubkey"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); }; self.funding.channel_transaction_parameters.funding_outpoint = Some(outpoint); @@ -2961,9 +2955,9 @@ where let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger); let commitment_signed = match commitment_signed { Ok(commitment_signed) => commitment_signed, - Err(err) => { + Err(e) => { self.funding.channel_transaction_parameters.funding_outpoint = None; - return Err(ChannelError::Close((err.to_string(), ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }))); + return Err(e) }, }; @@ -2974,10 +2968,9 @@ where false, "Zero inputs were provided & zero witnesses were provided, but a count mismatch was somehow found", ); - return Err(ChannelError::Close(( - "V2 channel rejected due to sender error".into(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) } - ))); + let msg = "V2 channel rejected due to sender error"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } None } else { @@ -2999,10 +2992,9 @@ where false, "We don't support users 
providing inputs but somehow we had more than zero inputs", ); - return Err(ChannelError::Close(( - "V2 channel rejected due to sender error".into(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) } - ))); + let msg = "V2 channel rejected due to sender error"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); }; let mut channel_state = ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); @@ -5346,13 +5338,73 @@ where self.unbroadcasted_funding_txid(funding).filter(|_| self.is_batch_funding()) } - /// Gets the latest commitment transaction and any dependent transactions for relay (forcing - /// shutdown of this channel - no more calls into this Channel may be made afterwards except - /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). - /// Also returns the list of payment_hashes for channels which we can safely fail backwards - /// immediately (others we will have to allow to time out). + /// Shuts down this channel (no more calls into this Channel may be made afterwards except + /// those explicitly stated to be allowed after shutdown, eg some simple getters). + /// + /// Only allowed for channels which have never been used (i.e. have never broadcasted their funding + /// transaction). + fn abandon_unfunded_chan( + &mut self, funding: &FundingScope, mut closure_reason: ClosureReason, + ) -> ShutdownResult { + assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete)); + assert!(!self.is_funding_broadcast()); + + let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid(funding); + let unbroadcasted_funding_tx = self.unbroadcasted_funding(funding); + + let monitor_update = if let Some(funding_txo) = funding.get_funding_txo() { + // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady), + // returning a channel monitor update here would imply a channel monitor update before + // we even registered the channel monitor to begin with, which is invalid. + // Thus, if we aren't actually at a point where we could conceivably broadcast the + // funding transaction, don't return a funding txo (which prevents providing the + // monitor update to the user, even if we return one). + // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. + if !self.channel_state.is_pre_funded_state() { + self.latest_monitor_update_id += 1; + let update = ChannelMonitorUpdate { + update_id: self.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { + should_broadcast: false, + }], + channel_id: Some(self.channel_id()), + }; + Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), update)) + } else { + None + } + } else { + None + }; + + if let ClosureReason::HolderForceClosed { ref mut broadcasted_latest_txn, ..
} = + &mut closure_reason { *broadcasted_latest_txn = Some(false); } + + self.channel_state = ChannelState::ShutdownComplete; + self.update_time_counter += 1; + ShutdownResult { + closure_reason, + monitor_update, + dropped_outbound_htlcs: Vec::new(), + unbroadcasted_batch_funding_txid, + channel_id: self.channel_id, + user_channel_id: self.user_id, + channel_capacity_satoshis: funding.get_value_satoshis(), + counterparty_node_id: self.counterparty_node_id, + unbroadcasted_funding_tx, + is_manual_broadcast: self.is_manual_broadcast, + channel_funding_txo: funding.get_funding_txo(), + last_local_balance_msat: funding.value_to_self_msat, + } + } + + /// Shuts down this channel (no more calls into this Channel may be made afterwards except + /// those explicitly stated to be allowed after shutdown, eg some simple getters). pub fn force_shutdown( - &mut self, funding: &FundingScope, should_broadcast: bool, closure_reason: ClosureReason, + &mut self, funding: &FundingScope, mut closure_reason: ClosureReason, ) -> ShutdownResult { // Note that we MUST only generate a monitor update that indicates force-closure - we're // called during initialization prior to the chain_monitor in the encompassing ChannelManager // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete)); + if !self.is_funding_broadcast() { + return self.abandon_unfunded_chan(funding, closure_reason); + } + // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and // return them to fail the payment. let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len()); @@ -5390,7 +5446,7 @@ let update = ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { - should_broadcast, + should_broadcast: true, }], channel_id: Some(self.channel_id()), }; @@ -5404,6 +5460,12 @@ let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid(funding); let unbroadcasted_funding_tx = self.unbroadcasted_funding(funding); + if let ClosureReason::HolderForceClosed { ref mut broadcasted_latest_txn, ..
} = + &mut closure_reason + { + *broadcasted_latest_txn = Some(true); + } + self.channel_state = ChannelState::ShutdownComplete; self.update_time_counter += 1; ShutdownResult { @@ -5556,11 +5618,11 @@ where let channel_parameters = &funding.channel_transaction_parameters; ecdsa.sign_counterparty_commitment(channel_parameters, &counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx) .map(|(signature, _)| signature) - .map_err(|_| ChannelError::Close( - ( - "Failed to get signatures for new commitment_signed".to_owned(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))) + .map_err(|()| { + let msg = "Failed to get signatures for new commitment_signed"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + ChannelError::Close((msg.to_owned(), reason)) + }) }, // TODO (taproot|arik) #[cfg(taproot)] @@ -5581,10 +5643,10 @@ where if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT) ) { debug_assert!(false); - return Err(ChannelError::Close(("Tried to get an initial commitment_signed messsage at a time other than \ - immediately after initial handshake completion (or tried to get funding_created twice)".to_string(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } - ))); + let msg = "Tried to get an initial commitment_signed messsage at a time other than \ + immediately after initial handshake completion (or tried to get funding_created twice)"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } let signature = match self.get_initial_counterparty_commitment_signature(funding, logger) { @@ -6049,6 +6111,14 @@ where SP::Target: SignerProvider, ::EcdsaSigner: EcdsaChannelSigner, { + pub fn context(&self) -> &ChannelContext { + &self.context + } + + pub fn force_shutdown(&mut self, closure_reason: ClosureReason) -> ShutdownResult { + self.context.force_shutdown(&self.funding, closure_reason) + } + #[rustfmt::skip] fn check_remote_fee( channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator, @@ -6713,11 +6783,9 @@ where if !self.context.channel_state.is_interactive_signing() || self.context.channel_state.is_their_tx_signatures_sent() { - return Err(ChannelError::Close( - ( - "Received initial commitment_signed before funding transaction constructed or after peer's tx_signatures received!".to_owned(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))); + let msg = "Received initial commitment_signed before funding transaction constructed or after peer's tx_signatures received!"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } let holder_commitment_point = &mut self.holder_commitment_point.clone(); @@ -7652,22 +7720,18 @@ where if let Some(ref mut signing_session) = self.interactive_tx_signing_session { if msg.tx_hash != signing_session.unsigned_tx().compute_txid() { - return Err(ChannelError::Close( - ( - "The txid for the transaction does not match".to_string(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))); + let msg = "The txid for the transaction does not match"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } // We need to close the channel if our peer hasn't sent their commitment signed already. 
// Technically we'd wait on having an initial monitor persisted, so we shouldn't be broadcasting // the transaction, but this may risk losing funds for a manual broadcast if we continue. if !signing_session.has_received_commitment_signed() { - return Err(ChannelError::Close( - ( - "Received tx_signatures before initial commitment_signed".to_string(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))); + let msg = "Received tx_signatures before initial commitment_signed"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } if msg.witnesses.len() != signing_session.remote_inputs_count() { @@ -7678,11 +7742,9 @@ where for witness in &msg.witnesses { if witness.is_empty() { - return Err(ChannelError::Close( - ( - "Unexpected empty witness in tx_signatures received".to_string(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))); + let msg = "Unexpected empty witness in tx_signatures received"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } // TODO(dual_funding): Check all sigs are SIGHASH_ALL. @@ -7718,10 +7780,9 @@ where self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); Ok((funding_tx_opt, holder_tx_signatures_opt)) } else { - Err(ChannelError::Close(( - "Unexpected tx_signatures. No funding transaction awaiting signatures".to_string(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))) + let msg = "Unexpected tx_signatures. No funding transaction awaiting signatures"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + return Err(ChannelError::Close((msg.to_owned(), reason))); } } @@ -8129,7 +8190,7 @@ where (closing_signed, signed_tx, shutdown_result) } Err(err) => { - let shutdown = self.context.force_shutdown(&self.funding, true, ClosureReason::ProcessingError {err: err.to_string()}); + let shutdown = self.context.force_shutdown(&self.funding, ClosureReason::ProcessingError {err: err.to_string()}); (None, None, Some(shutdown)) } } @@ -8297,11 +8358,6 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. - /// - /// Some links printed in log lines are included here to check them during build (when run with - /// `cargo doc --document-private-items`): - /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and - /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`]. #[rustfmt::skip] pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, @@ -8339,18 +8395,17 @@ where if msg.next_remote_commitment_number > our_commitment_transaction { macro_rules! 
log_and_panic { ($err_msg: expr) => { - log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id)); - panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id)); + log_error!(logger, $err_msg); + panic!($err_msg); } } log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\ - If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\ - ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\ - ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\ - Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\ - See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info."); + If you have restored from an old backup and wish to claim any available funds, you should restart with\n\ + an empty ChannelManager and no ChannelMonitors, reconnect to peer(s), ensure they've force-closed all of your\n\ + previous channels and that the closure transaction(s) have confirmed on-chain,\n\ + then restart with an empty ChannelManager and the latest ChannelMonitors that you do have."); } } @@ -11211,6 +11266,10 @@ impl OutboundV1Channel where SP::Target: SignerProvider, { + pub fn abandon_unfunded_chan(&mut self, closure_reason: ClosureReason) -> ShutdownResult { + self.context.abandon_unfunded_chan(&self.funding, closure_reason) + } + #[allow(dead_code)] // TODO(dual_funding): Remove once opending V2 channels is enabled. 
#[rustfmt::skip] pub fn new( @@ -12056,10 +12115,10 @@ where outputs_to_contribute: Vec::new(), expected_remote_shared_funding_output: Some((funding.get_funding_redeemscript().to_p2wsh(), funding.get_value_satoshis())), } - ).map_err(|_| ChannelError::Close(( - "V2 channel rejected due to sender error".into(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) } - )))?); + ).map_err(|err| { + let reason = ClosureReason::ProcessingError { err: err.to_string() }; + ChannelError::Close((err.to_string(), reason)) + })?); let unfunded_context = UnfundedChannelContext { unfunded_channel_age_ticks: 0, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8a904a90e64..f53bc687d65 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -179,8 +179,10 @@ use crate::io::Read; use crate::prelude::*; use crate::sync::{Arc, FairRwLock, LockHeldState, LockTestExt, Mutex, RwLock, RwLockReadGuard}; use bitcoin::hex::impl_fmt_traits; + use core::borrow::Borrow; use core::cell::RefCell; +use core::convert::Infallible; use core::ops::Deref; use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use core::time::Duration; @@ -877,7 +879,6 @@ struct MsgHandleErrInternal { shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { - #[inline] fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self { Self { err: LightningError { @@ -890,11 +891,11 @@ impl MsgHandleErrInternal { shutdown_finish: None, } } - #[inline] + fn from_no_close(err: msgs::LightningError) -> Self { Self { err, closes_channel: false, shutdown_finish: None } } - #[inline] + fn from_finish_shutdown( err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option, @@ -914,46 +915,42 @@ impl MsgHandleErrInternal { shutdown_finish: Some((shutdown_res, channel_update)), } } - #[inline] - #[rustfmt::skip] + fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self { - Self { - err: match err { - ChannelError::Warn(msg) => LightningError { - err: msg.clone(), - action: msgs::ErrorAction::SendWarningMessage { - msg: msgs::WarningMessage { - channel_id, - data: msg - }, - log_level: Level::Warn, - }, - }, - ChannelError::WarnAndDisconnect(msg) => LightningError { - err: msg.clone(), - action: msgs::ErrorAction::DisconnectPeerWithWarning { - msg: msgs::WarningMessage { - channel_id, - data: msg - }, - }, + let err = match err { + ChannelError::Warn(msg) => LightningError { + err: msg.clone(), + action: msgs::ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { channel_id, data: msg }, + log_level: Level::Warn, }, - ChannelError::Ignore(msg) => LightningError { - err: msg, - action: msgs::ErrorAction::IgnoreError, + }, + ChannelError::WarnAndDisconnect(msg) => LightningError { + err: msg.clone(), + action: msgs::ErrorAction::DisconnectPeerWithWarning { + msg: msgs::WarningMessage { channel_id, data: msg }, }, - ChannelError::Close((msg, _)) | ChannelError::SendError(msg) => LightningError { - err: msg.clone(), - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { - channel_id, - data: msg - }, - }, + }, + ChannelError::Ignore(msg) => { + LightningError { err: msg, action: msgs::ErrorAction::IgnoreError } + }, + ChannelError::Close((msg, _)) | ChannelError::SendError(msg) => LightningError { + err: msg.clone(), + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id, data: msg }, }, }, - closes_channel: false, - 
shutdown_finish: None, + }; + Self { err, closes_channel: false, shutdown_finish: None } + } + + fn dont_send_error_message(&mut self) { + match &mut self.err.action { + msgs::ErrorAction::DisconnectPeer { msg } => *msg = None, + msgs::ErrorAction::SendErrorMessage { msg: _ } => { + self.err.action = msgs::ErrorAction::IgnoreError; + }, + _ => {}, } } @@ -2007,7 +2004,7 @@ where /// match event { /// Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => { /// if !is_trusted(counterparty_node_id) { -/// match channel_manager.force_close_without_broadcasting_txn( +/// match channel_manager.force_close_broadcasting_latest_txn( /// &temporary_channel_id, &counterparty_node_id, error_message.to_string() /// ) { /// Ok(()) => println!("Rejecting channel {}", temporary_channel_id), @@ -3165,24 +3162,40 @@ macro_rules! handle_error { /// /// Note that this step can be skipped if the channel was never opened (through the creation of a /// [`ChannelMonitor`]/channel funding transaction) to begin with. +/// +/// For non-coop-close cases, you should generally prefer to call `convert_channel_err` and +/// [`handle_error`] instead (which delegate to this and [`ChannelManager::finish_close_channel`]), +/// as they ensure the relevant messages go out as well. In a coop close case, calling this +/// directly avoids duplicate error messages. #[rustfmt::skip] macro_rules! locked_close_channel { - ($self: ident, $peer_state: expr, $channel_context: expr, $channel_funding: expr, $shutdown_res_mut: expr) => {{ + ($self: ident, $chan_context: expr, UNFUNDED) => {{ + $self.short_to_chan_info.write().unwrap().remove(&$chan_context.outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$chan_context.outbound_scid_alias()); + debug_assert!(alias_removed); + }}; + ($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{ if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() { handle_new_monitor_update!($self, funding_txo, update, $peer_state, - $channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER); + $funded_chan.context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER); } // If there's a possibility that we need to generate further monitor updates for this // channel, we need to store the last update_id of it. However, we don't want to insert // into the map (which prevents the `PeerState` from being cleaned up) for channels that // never even got confirmations (which would open us up to DoS attacks). 
- let update_id = $channel_context.get_latest_monitor_update_id(); - if $channel_funding.get_funding_tx_confirmation_height().is_some() || $channel_context.minimum_depth($channel_funding) == Some(0) || update_id > 1 { - let chan_id = $channel_context.channel_id(); + let update_id = $funded_chan.context.get_latest_monitor_update_id(); + let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); + if $funded_chan.funding.get_funding_tx_confirmation_height().is_some() || $funded_chan.context.minimum_depth(&$funded_chan.funding) == Some(0) || update_id > 1 { + let chan_id = $funded_chan.context.channel_id(); $peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id); } - let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = $channel_funding.get_short_channel_id() { + if let Some(short_id) = $funded_chan.funding.get_short_channel_id() { short_to_chan_info.remove(&short_id); } else { // If the channel was never confirmed on-chain prior to its closure, remove the @@ -3191,11 +3204,11 @@ macro_rules! locked_close_channel { // also don't want a counterparty to be able to trivially cause a memory leak by simply // opening a million channels with us which are closed before we ever reach the funding // stage. - let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias()); + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$funded_chan.context.outbound_scid_alias()); debug_assert!(alias_removed); } - short_to_chan_info.remove(&$channel_context.outbound_scid_alias()); - for scid in $channel_context.historical_scids() { + short_to_chan_info.remove(&$funded_chan.context.outbound_scid_alias()); + for scid in $funded_chan.context.historical_scids() { short_to_chan_info.remove(scid); } }} @@ -3204,7 +3217,7 @@ macro_rules! locked_close_channel { /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error) #[rustfmt::skip] macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $context: expr, $funding: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => { + ($self: ident, $peer_state: expr, $err: expr, $chan: expr, $close: expr, $locked_close: expr, $channel_id: expr, _internal) => { match $err { ChannelError::Warn(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id)) @@ -3216,12 +3229,12 @@ macro_rules! convert_channel_err { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id)) }, ChannelError::Close((msg, reason)) => { - let logger = WithChannelContext::from(&$self.logger, &$context, None); - log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg); - let mut shutdown_res = $context.force_shutdown($funding, true, reason); - locked_close_channel!($self, $peer_state, $context, $funding, &mut shutdown_res); + let (mut shutdown_res, chan_update) = $close(reason); + let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None); + log_error!(logger, "Closed channel {} due to close-required error: {}", $channel_id, msg); + $locked_close(&mut shutdown_res, $chan); let err = - MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update); + MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, chan_update); (true, err) }, ChannelError::SendError(msg) => { @@ -3229,20 +3242,30 @@ macro_rules! 
convert_channel_err { }, } }; - ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, $channel_id: expr, FUNDED_CHANNEL) => { - convert_channel_err!($self, $peer_state, $err, $funded_channel.context, &$funded_channel.funding, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast(&$funded_channel).ok() }) - }; - ($self: ident, $peer_state: expr, $err: expr, $context: expr, $funding: expr, $channel_id: expr, UNFUNDED_CHANNEL) => { - convert_channel_err!($self, $peer_state, $err, $context, $funding, $channel_id, MANUAL_CHANNEL_UPDATE, None) - }; + ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, $channel_id: expr, FUNDED_CHANNEL) => { { + let mut do_close = |reason| { + ( + $funded_channel.force_shutdown(reason), + $self.get_channel_update_for_broadcast(&$funded_channel).ok(), + ) + }; + let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| { + locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED); + }; + convert_channel_err!($self, $peer_state, $err, $funded_channel, do_close, locked_close, $channel_id, _internal) + } }; + ($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => { { + let mut do_close = |reason| { ($channel.force_shutdown(reason), None) }; + let locked_close = |_, chan: &mut Channel<_>| { locked_close_channel!($self, chan.context(), UNFUNDED); }; + convert_channel_err!($self, $peer_state, $err, $channel, do_close, locked_close, $channel_id, _internal) + } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr) => { match $channel.as_funded_mut() { Some(funded_channel) => { convert_channel_err!($self, $peer_state, $err, funded_channel, $channel_id, FUNDED_CHANNEL) }, None => { - let (funding, context) = $channel.funding_and_context_mut(); - convert_channel_err!($self, $peer_state, $err, context, funding, $channel_id, UNFUNDED_CHANNEL) + convert_channel_err!($self, $peer_state, $err, $channel, $channel_id, UNFUNDED_CHANNEL) }, } }; @@ -3282,20 +3305,6 @@ macro_rules! try_channel_entry { }; } -macro_rules! remove_channel_entry { - ($self: ident, $peer_state: expr, $entry: expr, $shutdown_res_mut: expr) => {{ - let channel = $entry.remove_entry().1; - locked_close_channel!( - $self, - $peer_state, - &channel.context(), - channel.funding(), - $shutdown_res_mut - ); - channel - }}; -} - macro_rules! 
send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ $pending_msg_events.push(MessageSendEvent::SendChannelReady { @@ -4084,11 +4093,11 @@ where } #[rustfmt::skip] - fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { + fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new(); - let mut shutdown_result = None; + let mut shutdown_result = Ok(()); { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -4099,7 +4108,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(channel_id.clone()) { + match peer_state.channel_by_id.entry(*chan_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let funding_txo_opt = chan.funding.get_funding_txo(); @@ -4125,17 +4134,19 @@ where peer_state_lock, peer_state, per_peer_state, chan); } } else { - let mut shutdown_res = chan_entry.get_mut() - .force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }); - remove_channel_entry!(self, peer_state, chan_entry, shutdown_res); - shutdown_result = Some(shutdown_res); + let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; + let err = ChannelError::Close((reason.to_string(), reason)); + let mut chan = chan_entry.remove(); + let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan, chan_id); + e.dont_send_error_message(); + shutdown_result = Err(e); } }, hash_map::Entry::Vacant(_) => { return Err(APIError::ChannelUnavailable { err: format!( "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id, + chan_id, counterparty_node_id, ) }); }, @@ -4145,13 +4156,11 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *chan_id }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } - if let Some(shutdown_result) = shutdown_result { - self.finish_close_channel(shutdown_result); - } + let _ = handle_error!(self, shutdown_result, *counterparty_node_id); Ok(()) } @@ -4310,7 +4319,7 @@ where } } } - let mut shutdown_results = Vec::new(); + let mut shutdown_results: Vec<(Result, _)> = Vec::new(); if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid { let mut funding_batch_states = self.funding_batch_states.lock().unwrap(); let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten(); @@ -4320,9 +4329,10 @@ where if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { - let mut close_res = chan.force_shutdown(false, ClosureReason::FundingBatchClosure); - locked_close_channel!(self, &mut *peer_state, chan.context(), chan.funding(), close_res); - shutdown_results.push(close_res); + let reason = ClosureReason::FundingBatchClosure; + let err = ChannelError::Close((reason.to_string(), reason)); + let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan, &channel_id); + shutdown_results.push((Err(e), counterparty_node_id)); } } has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); @@ -4359,91 +4369,76 @@ where }, None)); } } - for shutdown_result in shutdown_results.drain(..) { - self.finish_close_channel(shutdown_result); + for (err, counterparty_node_id) in shutdown_results.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); } } /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. 
#[rustfmt::skip] - fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool) - -> Result { + fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason) + -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(peer_node_id) .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?; - let (update_opt, counterparty_node_id) = { - let mut peer_state = peer_state_mutex.lock().unwrap(); - let closure_reason = if let Some(peer_msg) = peer_msg { - ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) } - } else { - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) } - }; - let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); - if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id.clone()) { - log_error!(logger, "Force-closing channel {}", channel_id); - let (mut shutdown_res, update_opt) = match chan_entry.get_mut().as_funded_mut() { - Some(chan) => { - ( - chan.context.force_shutdown(&chan.funding, broadcast, closure_reason), - self.get_channel_update_for_broadcast(&chan).ok(), - ) - }, - None => { - // Unfunded channel has no update - (chan_entry.get_mut().force_shutdown(false, closure_reason), None) - }, - }; - let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res); - mem::drop(peer_state); - mem::drop(per_peer_state); - self.finish_close_channel(shutdown_res); - (update_opt, chan.context().get_counterparty_node_id()) - } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { - log_error!(logger, "Force-closing channel {}", &channel_id); - // N.B. that we don't send any channel close event here: we - // don't have a user_channel_id, and we never sent any opening - // events anyway. - (None, *peer_node_id) - } else { - return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) }); - } + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); + + let is_from_counterparty = matches!(reason, ClosureReason::CounterpartyForceClosed { .. }); + let message = match &reason { + ClosureReason::HolderForceClosed { message, .. } => message.clone(), + _ => reason.to_string(), }; - if let Some(update) = update_opt { - // If we have some Channel Update to broadcast, we cache it and broadcast it later. - let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - Ok(counterparty_node_id) + if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { + log_error!(logger, "Force-closing channel {}", channel_id); + let err = ChannelError::Close((message, reason)); + let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan, channel_id); + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + if is_from_counterparty { + // If the peer is the one who asked us to force-close, don't reply with a fresh + // error message. 
+ e.dont_send_error_message(); + } + let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id); + Ok(()) + } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { + log_error!(logger, "Force-closing inbound channel request {}", &channel_id); + if !is_from_counterparty { + peer_state.pending_msg_events.push( + MessageSendEvent::HandleError { + node_id: *peer_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: message } + }, + } + ); + } + // N.B. that we don't send any channel close event here: we + // don't have a user_channel_id, and we never sent any opening + // events anyway. + Ok(()) + } else { + Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) }) + } } #[rustfmt::skip] - fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String) + fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); log_debug!(self.logger, "Force-closing channel, The error message sent to the peer : {}", error_message); - match self.force_close_channel_with_peer(channel_id, &counterparty_node_id, None, broadcast) { - Ok(counterparty_node_id) => { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - peer_state.pending_msg_events.push( - MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message } - }, - } - ); - } - Ok(()) - }, - Err(e) => Err(e) - } + // No matter what value for `broadcast_latest_txn` we set here, `Channel` will override it + // and set the appropriate value. + let reason = ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + message: error_message, + }; + self.force_close_channel_with_peer(channel_id, &counterparty_node_id, reason) } /// Force closes a channel, immediately broadcasting the latest local transaction(s), @@ -4457,23 +4452,7 @@ where pub fn force_close_broadcasting_latest_txn( &self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String, ) -> Result<(), APIError> { - self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message) - } - - /// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting - /// the latest local transaction(s). - /// - /// The provided `error_message` is sent to connected peers for closing channels and should - /// be a human-readable description of what went wrong. - /// - /// Fails if `channel_id` is unknown to the manager, or if the - /// `counterparty_node_id` isn't the counterparty of the corresponding channel. - /// You can always broadcast the latest local transaction(s) via - /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. 
- pub fn force_close_without_broadcasting_txn( - &self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String, - ) -> Result<(), APIError> { - self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message) + self.force_close_sending_error(channel_id, counterparty_node_id, error_message) } /// Force close all channels, immediately broadcasting the latest local commitment transaction @@ -4491,21 +4470,6 @@ where } } - /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest - /// local transaction(s). - /// - /// The provided `error_message` is sent to connected peers for closing channels and - /// should be a human-readable description of what went wrong. - pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) { - for chan in self.list_channels() { - let _ = self.force_close_without_broadcasting_txn( - &chan.channel_id, - &chan.counterparty.node_id, - error_message.clone(), - ); - } - } - /// Initiate a splice, to change the channel capacity of an existing funded channel. /// After completion of splicing, the funding transaction will be replaced by a new one, spending the old funding transaction, /// with optional extra inputs (splice-in) and/or extra outputs (splice-out or change). @@ -5585,12 +5549,12 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { { + macro_rules! abandon_chan { ($err: expr, $api_err: expr, $chan: expr) => { { let counterparty; let err = if let ChannelError::Close((msg, reason)) = $err { let channel_id = $chan.context.channel_id(); counterparty = $chan.context.get_counterparty_node_id(); - let shutdown_res = $chan.context.force_shutdown(&$chan.funding, false, reason); + let shutdown_res = $chan.abandon_unfunded_chan(reason); MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None) } else { unreachable!(); }; @@ -5610,7 +5574,7 @@ where Err(err) => { let chan_err = ChannelError::close(err.to_owned()); let api_err = APIError::APIMisuseError { err: err.to_owned() }; - return close_chan!(chan_err, api_err, chan); + return abandon_chan!(chan_err, api_err, chan); }, } @@ -5620,7 +5584,7 @@ where Ok(funding_msg) => (chan, funding_msg), Err((mut chan, chan_err)) => { let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() }; - return close_chan!(chan_err, api_err, chan); + return abandon_chan!(chan_err, api_err, chan); } } }, @@ -5649,7 +5613,7 @@ where let chan_err = ChannelError::close(err.to_owned()); let api_err = APIError::APIMisuseError { err: err.to_owned() }; chan.unset_funding_info(); - return close_chan!(chan_err, api_err, chan); + return abandon_chan!(chan_err, api_err, chan); }, hash_map::Entry::Vacant(e) => { if let Some(msg) = msg_opt { @@ -5879,7 +5843,7 @@ where channels_to_remove.extend(temporary_channels.iter() .map(|(&chan_id, &node_id)| (chan_id, node_id)) ); - let mut shutdown_results = Vec::new(); + let mut shutdown_results: Vec<(Result, _)> = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); for (channel_id, counterparty_node_id) in channels_to_remove { @@ -5887,25 +5851,17 @@ where .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state))) .map(|(mut chan, mut peer_state)| { - let closure_reason 
= ClosureReason::ProcessingError { err: e.clone() }; - let mut close_res = chan.force_shutdown(false, closure_reason); - locked_close_channel!(self, peer_state, chan.context(), chan.funding(), close_res); - shutdown_results.push(close_res); - peer_state.pending_msg_events.push(MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { - channel_id, - data: "Failed to fund channel".to_owned(), - } - }, - }); + let reason = ClosureReason::ProcessingError { err: e.clone() }; + let err = ChannelError::Close((e.clone(), reason)); + let (_, e) = + convert_channel_err!(self, peer_state, err, &mut chan, &channel_id); + shutdown_results.push((Err(e), counterparty_node_id)); }); } } mem::drop(funding_batch_states); - for shutdown_result in shutdown_results.drain(..) { - self.finish_close_channel(shutdown_result); + for (err, counterparty_node_id) in shutdown_results { + let _ = handle_error!(self, err, counterparty_node_id); } } result @@ -7044,7 +7000,6 @@ where let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); let mut pending_peers_awaiting_removal = Vec::new(); - let mut shutdown_channels = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -7140,19 +7095,11 @@ where log_error!(logger, "Force-closing pending channel with ID {} for not establishing in a timely manner", context.channel_id()); - let mut close_res = chan.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }); - let (funding, context) = chan.funding_and_context_mut(); - locked_close_channel!(self, peer_state, context, funding, close_res); - shutdown_channels.push(close_res); - pending_msg_events.push(MessageSendEvent::HandleError { - node_id: context.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { - channel_id: context.channel_id(), - data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(), - }, - }, - }); + let reason = ClosureReason::FundingTimedOut; + let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); + let err = ChannelError::Close((msg, reason)); + let (_, e) = convert_channel_err!(self, peer_state, err, chan, chan_id); + handle_errors.push((Err(e), counterparty_node_id)); false } else { true @@ -7245,14 +7192,10 @@ where self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } - for (err, counterparty_node_id) in handle_errors.drain(..) { + for (err, counterparty_node_id) in handle_errors { let _ = handle_error!(self, err, counterparty_node_id); } - for shutdown_res in shutdown_channels { - self.finish_close_channel(shutdown_res); - } - #[cfg(feature = "std")] let duration_since_epoch = std::time::SystemTime::now() .duration_since(std::time::SystemTime::UNIX_EPOCH) @@ -8369,14 +8312,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ &open_channel_msg, user_channel_id, &config, best_block_height, &self.logger, - ).map_err(|_| MsgHandleErrInternal::from_chan_no_close( - ChannelError::Close( - ( - "V2 channel rejected due to sender error".into(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ) - ), *temporary_channel_id) - ).map(|channel| { + ).map_err(|e| { + let channel_id = open_channel_msg.common_fields.temporary_channel_id; + MsgHandleErrInternal::from_chan_no_close(e, channel_id) + }).map(|channel| { let message_send_event = MessageSendEvent::SendAcceptChannelV2 { node_id: channel.context.get_counterparty_node_id(), msg: channel.accept_inbound_dual_funded_channel() @@ -8736,7 +8675,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { Ok(res) => res, - Err((mut inbound_chan, err)) => { + Err((inbound_chan, err)) => { // We've already removed this inbound channel from the map in `PeerState` // above so at this point we just need to clean up any lingering entries // concerning this channel as it is safe to do so. @@ -8744,7 +8683,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Really we should be returning the channel_id the peer expects based // on their funding info here, but they're horribly confused anyway, so // there's not a lot we can do to save them. - return Err(convert_channel_err!(self, peer_state, err, inbound_chan.context, &inbound_chan.funding, &msg.temporary_channel_id, UNFUNDED_CHANNEL).1); + let mut chan = Channel::from(inbound_chan); + return Err(convert_channel_err!(self, peer_state, err, &mut chan, &msg.temporary_channel_id, UNFUNDED_CHANNEL).1); }, } }, @@ -8766,7 +8706,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Thus, we must first unset the funding outpoint on the channel. let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(); - return Err(convert_channel_err!(self, peer_state, err, chan.context, &chan.funding, &funded_channel_id, UNFUNDED_CHANNEL).1); + let mut chan = Channel::from(chan); + return Err(convert_channel_err!(self, peer_state, err, &mut chan, &funded_channel_id, UNFUNDED_CHANNEL).1); } } } match peer_state.channel_by_id.entry(funded_channel_id) { @@ -9027,11 +8968,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let (msg_send_event_opt, signing_session_opt) = match chan_entry.get_mut().as_unfunded_v2_mut() { Some(chan) => chan.tx_complete(msg) .into_msg_send_event_or_signing_session(counterparty_node_id), - None => try_channel_entry!(self, peer_state, Err(ChannelError::Close( - ( - "Got a tx_complete message with no interactive transaction construction expected or in-progress".into(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))), chan_entry) + None => { + let msg = "Got a tx_complete message with no interactive transaction construction expected or in-progress"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + let err = ChannelError::Close((msg.to_owned(), reason)); + try_channel_entry!(self, peer_state, Err(err), chan_entry) + }, }; if let Some(msg_send_event) = msg_send_event_opt { peer_state.pending_msg_events.push(msg_send_event); @@ -9099,11 +9041,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - None => try_channel_entry!(self, peer_state, Err(ChannelError::Close( - ( - "Got an unexpected tx_signatures message".into(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ))), chan_entry) + None => { + let msg = "Got an unexpected tx_signatures message"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + let err = ChannelError::Close((msg.to_owned(), reason)); + try_channel_entry!(self, peer_state, Err(err), chan_entry) + }, } Ok(()) }, @@ -9235,8 +9178,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_shutdown( &self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown, ) -> Result<(), MsgHandleErrInternal> { - let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new(); - let mut finish_shutdown = None; + let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { @@ -9302,12 +9244,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ None, ); log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); - let mut close_res = chan_entry.get_mut().force_shutdown( - false, - ClosureReason::CounterpartyCoopClosedUnfundedChannel, - ); - remove_channel_entry!(self, peer_state, chan_entry, close_res); - finish_shutdown = Some(close_res); + let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; + let err = ChannelError::Close((reason.to_string(), reason)); + let mut chan = chan_entry.remove(); + let (_, mut e) = + convert_channel_err!(self, peer_state, err, &mut chan, &msg.channel_id); + e.dont_send_error_message(); + return Err(e); }, } } else { @@ -9322,9 +9265,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ChannelClosed); self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } - if let Some(shutdown_res) = finish_shutdown { - self.finish_close_channel(shutdown_res); - } Ok(()) } @@ -9365,8 +9305,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // fully delete it from tracking (the channel monitor is still around to // watch for old state broadcasts)! 
debug_assert!(tx.is_some()); - let channel = remove_channel_entry!(self, peer_state, chan_entry, close_res); - (tx, Some(channel), Some(close_res)) + locked_close_channel!(self, peer_state, chan, close_res, FUNDED); + (tx, Some(chan_entry.remove()), Some(close_res)) } else { debug_assert!(tx.is_none()); (tx, None, None) @@ -9589,12 +9529,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } else { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); - try_channel_entry!(self, peer_state, Err(ChannelError::Close( - ( - "Channel ID was a duplicate".to_owned(), - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - ) - )), chan_entry) + let msg = "Channel ID was a duplicate"; + let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; + let err = ChannelError::Close((msg.to_owned(), reason)); + try_channel_entry!(self, peer_state, Err(err), chan_entry) } } else if let Some(monitor_update) = monitor_update_opt { handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, @@ -10302,7 +10240,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn process_pending_monitor_events(&self) -> bool { debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock - let mut failed_channels = Vec::new(); + let mut failed_channels: Vec<(Result, _)> = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { @@ -10330,33 +10268,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let pending_msg_events = &mut peer_state.pending_msg_events; - if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) { + if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(channel_id) { let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. 
} = monitor_event { reason } else { - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } - }; - let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone()); - let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res); - failed_channels.push(shutdown_res); - if let Some(funded_chan) = chan.as_funded() { - if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) { - let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); + ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + message: "Legacy ChannelMonitor closure".to_owned() } - pending_msg_events.push(MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::DisconnectPeer { - msg: Some(msgs::ErrorMessage { - channel_id: funded_chan.context.channel_id(), - data: reason.to_string() - }) - }, - }); - } + }; + let err = ChannelError::Close((reason.to_string(), reason)); + let mut chan = chan_entry.remove(); + let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan, &channel_id); + failed_channels.push((Err(e), counterparty_node_id)); } } }, @@ -10367,8 +10291,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } - for failure in failed_channels.drain(..) { - self.finish_close_channel(failure); + for (err, counterparty_node_id) in failed_channels { + let _ = handle_error!(self, err, counterparty_node_id); } has_pending_monitor_events @@ -10544,10 +10468,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ _ => unblock_chan(chan, &mut peer_state.pending_msg_events), }; if let Some(mut shutdown_result) = shutdown_result { - let context = &chan.context(); + let context = chan.context(); let logger = WithChannelContext::from(&self.logger, context, None); log_trace!(logger, "Removing channel {} now that the signer is unblocked", context.channel_id()); - locked_close_channel!(self, peer_state, context, chan.funding(), shutdown_result); + if let Some(funded_channel) = chan.as_funded_mut() { + locked_close_channel!(self, peer_state, funded_channel, shutdown_result, FUNDED); + } else { + locked_close_channel!(self, chan.context(), UNFUNDED); + } shutdown_results.push(shutdown_result); false } else { @@ -10590,7 +10518,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } debug_assert_eq!(shutdown_result_opt.is_some(), funded_chan.is_shutdown()); if let Some(mut shutdown_result) = shutdown_result_opt { - locked_close_channel!(self, peer_state, &funded_chan.context, &funded_chan.funding, shutdown_result); + locked_close_channel!(self, peer_state, funded_chan, shutdown_result, FUNDED); shutdown_results.push(shutdown_result); } if let Some(tx) = tx_opt { @@ -11827,7 +11755,7 @@ where fn peer_disconnected(&self, counterparty_node_id: PublicKey) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify( self, || NotifyOption::SkipPersistHandleEvents); - let mut failed_channels = Vec::new(); + let mut failed_channels: Vec<(Result, _)> = Vec::new(); let mut per_peer_state = self.per_peer_state.write().unwrap(); let remove_peer = { log_debug!( @@ -11839,16 +11767,16 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.retain(|_, chan| { + peer_state.channel_by_id.retain(|chan_id, chan| { let logger = WithChannelContext::from(&self.logger, &chan.context(), None); if chan.peer_disconnected_is_resumable(&&logger) { return true; } // Clean up for removal. - let mut close_res = chan.force_shutdown(false, ClosureReason::DisconnectedPeer); - let (funding, context) = chan.funding_and_context_mut(); - locked_close_channel!(self, peer_state, &context, funding, close_res); - failed_channels.push(close_res); + let reason = ClosureReason::DisconnectedPeer; + let err = ChannelError::Close((reason.to_string(), reason)); + let (_, e) = convert_channel_err!(self, peer_state, err, chan, chan_id); + failed_channels.push((Err(e), counterparty_node_id)); false }); // Note that we don't bother generating any events for pre-accept channels - @@ -11923,8 +11851,8 @@ where } mem::drop(per_peer_state); - for failure in failed_channels.drain(..) { - self.finish_close_channel(failure); + for (err, counterparty_node_id) in failed_channels.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); } } @@ -12391,16 +12319,16 @@ where // during initialization prior to the chain_monitor being fully configured in some cases. // See the docs for `ChannelManagerReadArgs` for more. - let mut failed_channels = Vec::new(); + let mut failed_channels: Vec<(Result, _)> = Vec::new(); let mut timed_out_htlcs = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); - for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.retain(|_, chan| { + peer_state.channel_by_id.retain(|chan_id, chan| { match chan.as_funded_mut() { // Retain unfunded channels. None => true, @@ -12518,25 +12446,16 @@ where } else if let Err(reason) = res { // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. 
- let reason_message = format!("{}", reason); - let mut close_res = funded_channel.context.force_shutdown(&funded_channel.funding, true, reason); - locked_close_channel!(self, peer_state, &funded_channel.context, &funded_channel.funding, close_res); - failed_channels.push(close_res); - if let Ok(update) = self.get_channel_update_for_broadcast(&funded_channel) { - let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - pending_msg_events.push(MessageSendEvent::HandleError { - node_id: funded_channel.context.get_counterparty_node_id(), - action: msgs::ErrorAction::DisconnectPeer { - msg: Some(msgs::ErrorMessage { - channel_id: funded_channel.context.channel_id(), - data: reason_message, - }) - }, - }); + let err = ChannelError::Close((reason.to_string(), reason)); + let (_, e) = convert_channel_err!( + self, + peer_state, + err, + funded_channel, + chan_id, + FUNDED_CHANNEL + ); + failed_channels.push((Err(e), *counterparty_node_id)); return false; } true @@ -12596,8 +12515,8 @@ where }); } - for failure in failed_channels { - self.finish_close_channel(failure); + for (failure, counterparty_node_id) in failed_channels { + let _ = handle_error!(self, failure, counterparty_node_id); } for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { @@ -13083,6 +13002,9 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let peer_msg = UntrustedString(msg.data.clone()); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg }; + if msg.channel_id.is_zero() { let channel_ids: Vec = { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -13097,7 +13019,7 @@ where }; for channel_id in channel_ids { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, Some(&msg.data), true); + let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, reason.clone()); } } else { { @@ -13133,7 +13055,7 @@ where } // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, Some(&msg.data), true); + let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, reason); } } @@ -14709,7 +14631,7 @@ where log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } - let mut shutdown_result = channel.context.force_shutdown(&channel.funding, true, ClosureReason::OutdatedChannelManager); + let mut shutdown_result = channel.force_shutdown(ClosureReason::OutdatedChannelManager); if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } @@ -14782,7 +14704,6 @@ where // If we were persisted and shut down while the initial ChannelMonitor persistence // was in-progress, we never broadcasted the funding transaction and can still // safely discard the channel. 
- let _ = channel.context.force_shutdown(&channel.funding, false, ClosureReason::DisconnectedPeer); channel_closures.push_back((events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), @@ -16497,9 +16418,11 @@ mod tests { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000); // Confirm that the channel_update was not sent immediately to node[1] but was cached. let node_1_events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -16556,10 +16479,14 @@ mod tests { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); let chan_id = nodes[0].node.list_channels()[0].channel_id; - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) + .unwrap(); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1_000_000); { // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been @@ -16697,8 +16624,6 @@ mod tests { check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key); - check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key); - check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key); check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key); @@ -16729,8 +16654,6 @@ mod tests { check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id); - check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id); - check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id); check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], 
&ChannelConfig::default()), channel_id, counterparty_node_id); @@ -17034,16 +16957,20 @@ mod tests { let user_config = test_default_channel_config(); let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfg, &node_chanmgr); - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); // Open a channel, immediately disconnect each other, and broadcast Alice's latest state. let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) + .unwrap(); check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000); { let txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8ca290ef165..24aae650f86 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -760,10 +760,10 @@ pub fn test_duplicate_htlc_different_direction_onchain() { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, MessageSendEvent::HandleError { node_id, - action: msgs::ErrorAction::DisconnectPeer { ref msg }, + action: msgs::ErrorAction::SendErrorMessage { ref msg }, } => { assert_eq!(node_id, node_b_id); - assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain."); + assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain."); }, MessageSendEvent::UpdateHTLCs { ref node_id, @@ -984,11 +984,14 @@ pub fn channel_monitor_network_test() { send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); // Simple case with no pending HTLCs: - let err = "Channel force-closed".to_string(); - nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, err).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone()) + .unwrap(); check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); { let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE); @@ -1013,10 +1016,10 @@ pub fn channel_monitor_network_test() { // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not // broadcasted until we reach the timelock time). 
- let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); nodes[1] .node - .force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, error_message.to_string()) + .force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); @@ -1034,7 +1037,8 @@ pub fn channel_monitor_network_test() { check_closed_broadcast!(nodes[2], true); assert_eq!(nodes[1].node.list_channels().len(), 0); assert_eq!(nodes[2].node.list_channels().len(), 1); - let node_b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let node_b_reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[1], 1, node_b_reason, [node_c_id], 100000); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); @@ -1064,8 +1068,11 @@ pub fn channel_monitor_network_test() { // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) - let err = "Channel force-closed".to_string(); - nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, err).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[2] + .node + .force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, message.clone()) + .unwrap(); check_added_monitors(&nodes[2], 1); check_closed_broadcast!(nodes[2], true); let node2_commitment_txid; @@ -1084,7 +1091,8 @@ pub fn channel_monitor_network_test() { check_closed_broadcast!(nodes[3], true); assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); - let node_c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let node_c_reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[2], 1, node_c_reason, [node_d_id], 100000); check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); @@ -1108,7 +1116,7 @@ pub fn channel_monitor_network_test() { }; match events[0] { MessageSendEvent::HandleError { - action: ErrorAction::DisconnectPeer { .. }, + action: ErrorAction::SendErrorMessage { .. }, node_id, } => { assert_eq!(node_id, node_e_id); @@ -1143,7 +1151,7 @@ pub fn channel_monitor_network_test() { }; match events[0] { MessageSendEvent::HandleError { - action: ErrorAction::DisconnectPeer { .. }, + action: ErrorAction::SendErrorMessage { .. }, node_id, } => { assert_eq!(node_id, node_d_id); @@ -1929,7 +1937,7 @@ pub fn test_htlc_on_chain_success() { let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, .. } => {}, + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, .. } => {}, _ => panic!("Unexpected event"), } @@ -2505,7 +2513,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( match nodes_2_event { MessageSendEvent::HandleError { action: - ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, + ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, .. 
} => { assert_eq!(channel_id, chan_2.2); @@ -2752,20 +2760,17 @@ pub fn test_htlc_ignore_latest_remote_commitment() { return; } let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3; - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); route_payment(&nodes[0], &[&nodes[1]], 10000000); + let chan_id = nodes[0].node.list_channels()[0].channel_id; nodes[0] .node - .force_close_broadcasting_latest_txn( - &nodes[0].node.list_channels()[0].channel_id, - &node_b_id, - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone()) .unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); @@ -2832,15 +2837,15 @@ pub fn test_force_close_fail_back() { // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); let channel_id = payment_event.commitment_msg[0].channel_id; nodes[2] .node - .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()) + .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors(&nodes[2], 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[2], 1, reason, [node_b_id], 100000); let commitment_tx = { @@ -3880,11 +3885,14 @@ pub fn test_claim_sizeable_push_msat() { let node_a_id = nodes[0].node.get_our_node_id(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - let err = "Channel force-closed".to_string(); - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, err).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone()) + .unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -3914,13 +3922,16 @@ pub fn test_claim_on_remote_sizeable_push_msat() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let err = "Channel force-closed".to_string(); + let message = "Channel force-closed".to_owned(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &node_b_id, err).unwrap(); + nodes[0] + .node + 
.force_close_broadcasting_latest_txn(&chan.2, &node_b_id, message.clone()) + .unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4382,7 +4393,7 @@ pub fn test_onchain_to_onchain_claim() { match nodes_2_event { MessageSendEvent::HandleError { - action: ErrorAction::DisconnectPeer { .. }, + action: ErrorAction::SendErrorMessage { .. }, node_id: _, } => {}, _ => panic!("Unexpected event"), @@ -7313,12 +7324,12 @@ pub fn test_channel_conf_timeout() { assert_eq!(close_ev.len(), 1); match close_ev[0] { MessageSendEvent::HandleError { - action: ErrorAction::DisconnectPeer { ref msg }, + action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => { assert_eq!(*node_id, node_a_id); assert_eq!( - msg.as_ref().unwrap().data, + msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks" ); }, @@ -8522,15 +8533,15 @@ fn do_test_onchain_htlc_settlement_after_close( force_closing_node = 1; counterparty_node = 0; } - let err = "Channel force-closed".to_string(); + let message = "Channel force-closed".to_owned(); let counterparty_node_id = nodes[counterparty_node].node.get_our_node_id(); nodes[force_closing_node] .node - .force_close_broadcasting_latest_txn(&chan_ab.2, &counterparty_node_id, err) + .force_close_broadcasting_latest_txn(&chan_ab.2, &counterparty_node_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[force_closing_node], true); check_added_monitors(&nodes[force_closing_node], 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[force_closing_node], 1, reason, [counterparty_node_id], 100000); if go_onchain_before_fulfill { @@ -9258,9 +9269,9 @@ pub fn test_invalid_funding_tx() { assert_eq!(events_2.len(), 1); if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] { assert_eq!(*node_id, node_a_id); - if let msgs::ErrorAction::DisconnectPeer { msg } = action { + if let msgs::ErrorAction::SendErrorMessage { msg } = action { assert_eq!( - msg.as_ref().unwrap().data, + msg.data, "Channel closed because of an exception: ".to_owned() + expected_err ); } else { @@ -9397,11 +9408,14 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t nodes[1].node.peer_disconnected(node_c_id); nodes[2].node.peer_disconnected(node_b_id); - let err = "Channel force-closed".to_string(); - nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &node_c_id, err).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&channel_id, &node_c_id, message.clone()) + .unwrap(); check_closed_broadcast!(nodes[1], true); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000); check_added_monitors(&nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -10674,11 +10688,11 @@ pub fn 
test_non_final_funding_tx() { }, _ => panic!(), } - let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned(); - let reason = ClosureReason::ProcessingError { err }; + let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final"; + let reason = ClosureReason::ProcessingError { err: err.to_owned() }; let event = ExpectedCloseEvent::from_id_reason(temp_channel_id, false, reason); check_closed_events(&nodes[0], &[event]); - assert_eq!(get_err_msg(&nodes[0], &node_b_id).data, "Failed to fund channel"); + assert_eq!(get_err_msg(&nodes[0], &node_b_id).data, err); } #[xtest(feature = "_externalize_tests")] @@ -11003,7 +11017,7 @@ pub fn test_remove_expired_outbound_unfunded_channels() { }, _ => panic!("Unexpected event"), } - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + let reason = ClosureReason::FundingTimedOut; check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } @@ -11067,7 +11081,7 @@ pub fn test_remove_expired_inbound_unfunded_channels() { }, _ => panic!("Unexpected event"), } - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + let reason = ClosureReason::FundingTimedOut; check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } @@ -11106,7 +11120,7 @@ pub fn test_channel_close_when_not_timely_accepted() { // Since we disconnected from peer and did not connect back within time, // we should have forced-closed the channel by now. - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + let reason = ClosureReason::FundingTimedOut; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); assert_eq!(nodes[0].node.list_channels().len(), 0); @@ -11404,14 +11418,10 @@ pub fn test_close_in_funding_batch() { _ => panic!("Unexpected message."), } - // We broadcast the commitment transaction as part of the force-close. - { - let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast(); - assert_eq!(broadcasted_txs.len(), 1); - assert!(broadcasted_txs[0].compute_txid() != tx.compute_txid()); - assert_eq!(broadcasted_txs[0].input.len(), 1); - assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.compute_txid()); - } + // Because the funding was never broadcasted, we should never bother to broadcast the + // commitment transactions either. + let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(broadcasted_txs.len(), 0); // All channels in the batch should close immediately. check_closed_events( @@ -11510,14 +11520,10 @@ pub fn test_batch_funding_close_after_funding_signed() { _ => panic!("Unexpected message."), } - // We broadcast the commitment transaction as part of the force-close. - { - let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast(); - assert_eq!(broadcasted_txs.len(), 1); - assert!(broadcasted_txs[0].compute_txid() != tx.compute_txid()); - assert_eq!(broadcasted_txs[0].input.len(), 1); - assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.compute_txid()); - } + // Because the funding was never broadcasted, we should never bother to broadcast the + // commitment transactions either. + let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(broadcasted_txs.len(), 0); // All channels in the batch should close immediately. 
check_closed_events( @@ -11544,7 +11550,8 @@ pub fn test_batch_funding_close_after_funding_signed() { assert!(nodes[0].node.list_channels().is_empty()); } -fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) { +#[xtest(feature = "_externalize_tests")] +pub fn test_funding_and_commitment_tx_confirm_same_block() { // Tests that a node will forget the channel (when it only requires 1 confirmation) if the // funding and commitment transaction confirm in the same block. let chanmon_cfgs = create_chanmon_cfgs(2); @@ -11558,6 +11565,9 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen ); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0); let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.compute_txid(), @@ -11567,52 +11577,30 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen assert_eq!(nodes[0].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 1); - let (closing_node, other_node) = - if confirm_remote_commitment { (&nodes[1], &nodes[0]) } else { (&nodes[0], &nodes[1]) }; - let closing_node_id = closing_node.node.get_our_node_id(); - let other_node_id = other_node.node.get_our_node_id(); - - let err = "Channel force-closed".to_string(); - closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node_id, err).unwrap(); - let mut msg_events = closing_node.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - match msg_events.pop().unwrap() { - MessageSendEvent::HandleError { - action: msgs::ErrorAction::SendErrorMessage { .. }, - .. 
- } => {}, - _ => panic!("Unexpected event"), - } - check_added_monitors(closing_node, 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; - check_closed_event(closing_node, 1, reason, false, &[other_node_id], 1_000_000); - let commitment_tx = { - let mut txn = closing_node.tx_broadcaster.txn_broadcast(); + let mon = get_monitor!(nodes[0], chan_id); + let mut txn = mon.unsafe_get_latest_holder_commitment_txn(&nodes[0].logger); assert_eq!(txn.len(), 1); - let commitment_tx = txn.pop().unwrap(); - check_spends!(commitment_tx, funding_tx); - commitment_tx + txn.pop().unwrap() }; mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]); mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]); - check_closed_broadcast(other_node, 1, true); - check_added_monitors(other_node, 1); + check_closed_broadcast(&nodes[0], 1, true); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 1_000_000); + + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event(other_node, 1, reason, false, &[closing_node_id], 1_000_000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 1_000_000); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); } -#[xtest(feature = "_externalize_tests")] -pub fn test_funding_and_commitment_tx_confirm_same_block() { - do_test_funding_and_commitment_tx_confirm_same_block(false); - do_test_funding_and_commitment_tx_confirm_same_block(true); -} - #[xtest(feature = "_externalize_tests")] pub fn test_accept_inbound_channel_errors_queued() { // For manually accepted inbound channels, tests that a close error is correctly handled diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 0a89899f118..5e97808537c 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -182,10 +182,12 @@ fn archive_fully_resolved_monitors() { let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), "".to_owned()).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1_000_000); let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(commitment_tx.len(), 1); @@ -926,12 +928,13 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { // First confirm the commitment transaction on nodes[0], which should leave us with three // claimable balances. 
- let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32; - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1000000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1000000); let commitment_tx = { let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(txn.len(), 1); @@ -2334,11 +2337,12 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool // ensures that the HTLC timeout package is held until we reach its expiration height. let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); route_payment(&nodes[0], &[&nodes[1]], 10_000_000); - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_added_monitors(&nodes[0], 1); check_closed_broadcast(&nodes[0], 1, true); - check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(&nodes[0], 1, reason, false, [nodes[1].node.get_our_node_id()], 100000); let commitment_tx = { @@ -3051,11 +3055,15 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c // Confirm the counterparty's commitment and reload the monitor (either before or after) such // that we arrive at the correct `counterparty_payment_script` after the reload. - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) + .unwrap(); check_added_monitors(&nodes[0], 1); check_closed_broadcast(&nodes[0], 1, true); - check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(&nodes[0], 1, reason, false, [nodes[1].node.get_our_node_id()], 100000); let commitment_tx = { @@ -3273,8 +3281,9 @@ fn test_update_replay_panics() { // Create some updates to apply let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); let (payment_preimage_2, payment_hash_2, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), "".to_owned()).unwrap(); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let message = "Channel force-closed".to_owned(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), message.clone()).unwrap(); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[1], 1, reason, false, &[nodes[0].node.get_our_node_id()], 100_000); check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 9fde71ad72e..fc545424265 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -1199,15 +1199,18 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( let node_b_id = nodes[1].node.get_our_node_id(); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); - let error_message = "Channel force-closed".to_string(); + let message = "Channel force-closed".to_owned(); // Route a payment, but force-close the channel before the HTLC fulfill message arrives at // nodes[0]. let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, error_message).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone()) + .unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); nodes[0].node.peer_disconnected(node_b_id); @@ -4199,9 +4202,13 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let reason = PaymentFailureReason::RecipientRejected; pass_failed_payment_back(&nodes[0], &[path_a, path_b], false, hash, reason); } else { - let err = "Channel force-closed".to_string(); - nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &node_d_id, err).unwrap(); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let message = "Channel force-closed".to_owned(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_bd, &node_d_id, message.clone()) + .unwrap(); + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(&nodes[1], 1, reason, false, [node_d_id], 1000000); check_closed_broadcast(&nodes[1], 1, true); let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 623774acb5e..484643473ac 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -999,7 +999,7 @@ fn test_0conf_close_no_early_chan_update() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); // This is the default but we force it on anyway 
chan_config.channel_handshake_config.announce_for_forwarding = true; @@ -1008,9 +1008,9 @@ fn test_0conf_close_no_early_chan_update() { // We can use the channel immediately, but won't generate a channel_update until we get confs send_payment(&nodes[0], &[&nodes[1]], 100_000); - nodes[0].node.force_close_all_channels_broadcasting_latest_txn(error_message.to_string()); + nodes[0].node.force_close_all_channels_broadcasting_latest_txn(message.clone()); check_added_monitors!(nodes[0], 1); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); let _ = get_err_msg(&nodes[0], &node_b_id); } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 17028b36ae5..ff7aee8b3b1 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -640,15 +640,20 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, std::mem::forget(nodes); } } else { - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); assert!(!not_stale, "We only care about the stale case when not testing panicking"); - nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()) + .unwrap(); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 1000000); + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1000000); { - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 0); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(node_txn.len(), 1); } for msg in nodes[0].node.get_and_clear_pending_msg_events() { @@ -1066,14 +1071,18 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); let _ = nodes[2].node.get_and_clear_pending_msg_events(); - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); - nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[2] + .node + .force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id(), message.clone()) + .unwrap(); let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 }); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event!(nodes[2], 1, reason, [nodes[1].node.get_our_node_id()], 100000); check_closed_broadcast!(nodes[2], true); let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); diff --git a/lightning/src/ln/reorg_tests.rs 
b/lightning/src/ln/reorg_tests.rs index 0ae3048215e..da6f25c4d8c 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -653,19 +653,30 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor // Route an HTLC which we will claim onchain with the preimage. let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); // Force close with the latest counterparty commitment, confirm it, and reorg it with the latest // holder commitment. - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) + .unwrap(); check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + message: message.clone(), + }; + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100000); - nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), message.clone()) + .unwrap(); check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[1], 1, reason, false, &[nodes[0].node.get_our_node_id()], 100000); let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); @@ -737,13 +748,14 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa // commitment is still valid (unrevoked). nodes[1].node().handle_update_fee(nodes[0].node.get_our_node_id(), &update_fee); let _last_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[0], commit_sig, false, true, false, true); - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); // Force close with the latest commitment, confirm it, and reorg it with the previous commitment. 
- nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100000); let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); @@ -803,7 +815,6 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c // Route a payment so we have an HTLC to claim as well. let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let error_message = "Channel force-closed"; if revoked_counterparty_commitment { // Trigger a fee update such that we advance the state. We will have B broadcast its state @@ -846,10 +857,15 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c }; // B will also broadcast its own commitment. - nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), message.clone()) + .unwrap(); check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100_000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[1], 1, reason, false, &[nodes[0].node.get_our_node_id()], 100_000); let commitment_b = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 9826b8a39cd..8000392fe7d 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -337,7 +337,7 @@ fn close_on_unfunded_channel() { let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[0].node.close_channel(&chan_id, &node_b_id).unwrap(); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; check_closed_event!(nodes[0], 1, reason, [node_b_id], 1_000_000); } @@ -351,14 +351,14 @@ fn expect_channel_shutdown_state_with_force_closure() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let error_message = "Channel force-closed"; + let message = "Channel force-closed".to_owned(); expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown); expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown); nodes[1] .node - .force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, error_message.to_string()) + .force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); 
@@ -376,7 +376,7 @@ fn expect_channel_shutdown_state_with_force_closure() { assert!(nodes[1].node.list_channels().is_empty()); check_closed_broadcast!(nodes[0], true); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); - let reason_b = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason_b = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 5b283aee890..97a7687cb7b 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1279,10 +1279,14 @@ mod tests { let node_id_1 = nodes[1].node.get_our_node_id(); let chan_id = nodes[0].node.list_channels()[0].channel_id; - let err_msg = "Channel force-closed".to_string(); - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_id_1, err_msg).unwrap(); + let message = "Channel force-closed".to_owned(); + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan_id, &node_id_1, message.clone()) + .unwrap(); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -1316,11 +1320,17 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let err_msg = "Channel force-closed".to_string(); + + let message = "Channel force-closed".to_owned(); let node_id_0 = nodes[0].node.get_our_node_id(); - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_id_0, err_msg).unwrap(); - let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan.2, &node_id_0, message.clone()) + .unwrap(); + let reason = + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[1], 1, reason, false, &[node_id_0], 100000); + { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();