From 1780ce4e5abe34df9e39cbbc7c069d810ed16ff5 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Sun, 5 Jan 2025 22:24:18 -0600 Subject: [PATCH 01/19] Hide ChannelPhase::Funded behind as_funded method Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, introduce ChannelPhase::as_funded and ChannelPhase::as_funded_mut for use in ChannelManager when a Channel (to be later renamed FundedChannel) is needed. --- lightning/src/ln/chanmon_update_fail_tests.rs | 4 +- lightning/src/ln/channel.rs | 16 ++ lightning/src/ln/channelmanager.rs | 219 ++++++++++-------- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/functional_tests.rs | 33 +-- 5 files changed, 150 insertions(+), 126 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index fcc1f8f5a64..89a69321bc6 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields}; -use crate::ln::channel::{AnnouncementSigsState, ChannelPhase}; +use crate::ln::channel::AnnouncementSigsState; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; @@ -98,7 +98,7 @@ fn test_monitor_and_persister_update_fail() { { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) { + if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2).as_funded_mut() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f86a42c6e02..3ba2d270b4b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1152,6 +1152,22 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::UnfundedV2(ref mut chan) => &mut chan.context, } } + + pub fn as_funded(&self) -> Option<&Channel> { + if let ChannelPhase::Funded(channel) = self { + Some(channel) + } else { + None + } + } + + pub fn as_funded_mut(&mut self) -> Option<&mut Channel> { + if let ChannelPhase::Funded(channel) = self { + Some(channel) + } else { + None + } + } } /// Contains all state common to unfunded inbound/outbound channels. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 21ab790bf9c..79b2f429831 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3229,7 +3229,10 @@ macro_rules! 
handle_monitor_update_completion { for (channel_id, counterparty_node_id, _) in removed_batch_state { if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(&channel_id) + .and_then(ChannelPhase::as_funded_mut) + { batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding()); chan.set_batch_ready(); let mut pending_events = $self.pending_events.lock().unwrap(); @@ -3704,11 +3707,8 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; res.extend(peer_state.channel_by_id.iter() - .filter_map(|(chan_id, phase)| match phase { - // Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded. - ChannelPhase::Funded(chan) => Some((chan_id, chan)), - _ => None, - }) + // Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded. + .filter_map(|(chan_id, phase)| phase.as_funded().map(|chan| (chan_id, chan))) .filter(f) .map(|(_channel_id, channel)| { ChannelDetails::from_channel_context(&channel.context, best_block_height, @@ -3836,7 +3836,7 @@ where match peer_state.channel_by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let funding_txo_opt = chan.context.get_funding_txo(); let their_features = &peer_state.latest_features; let (shutdown_msg, mut monitor_update_opt, htlcs) = @@ -3964,7 +3964,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(mut chan_phase) => { - if let ChannelPhase::Funded(chan) = chan_phase.get_mut() { + if let Some(chan) = chan_phase.get_mut().as_funded_mut() { handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); return; @@ -4102,15 +4102,14 @@ where let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) { log_error!(logger, "Force-closing channel {}", channel_id); - let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut() { - ChannelPhase::Funded(ref mut chan) => { + let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut().as_funded_mut() { + Some(chan) => { ( chan.context.force_shutdown(broadcast, closure_reason), self.get_channel_update_for_broadcast(&chan).ok(), ) }, - ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) | - ChannelPhase::UnfundedV2(_) => { + None => { // Unfunded channel has no update (chan_phase_entry.get_mut().context_mut().force_shutdown(false, closure_reason), None) }, @@ -4272,9 +4271,7 @@ where } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.get_mut(&channel_id).and_then( - |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None } - ) { + match peer_state.channel_by_id.get_mut(&channel_id).and_then(ChannelPhase::as_funded_mut) { None => None, Some(chan) => Some(callback(chan)), } @@ -4567,8 +4564,8 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut 
*peer_state_lock; if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) { - match chan_phase_entry.get_mut() { - ChannelPhase::Funded(chan) => { + match chan_phase_entry.get_mut().as_funded_mut() { + Some(chan) => { if !chan.context.is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } @@ -4599,7 +4596,7 @@ where None => {}, } }, - _ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}), + None => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}), }; } else { // The channel was likely removed after we fetched the id from the @@ -5413,7 +5410,7 @@ where if !channel_phase.context_mut().update_config(&config) { continue; } - if let ChannelPhase::Funded(channel) = channel_phase { + if let Some(channel) = channel_phase.as_funded() { if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); @@ -5501,18 +5498,19 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get(next_hop_channel_id) { - Some(ChannelPhase::Funded(chan)) => { + Some(chan) => if let Some(chan) = chan.as_funded() { if !chan.context.is_usable() { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", next_hop_channel_id) }) } chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias()) + } else { + return Err(APIError::ChannelUnavailable { + err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.", + next_hop_channel_id, next_node_id) + }) }, - Some(_) => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.", - next_hop_channel_id, next_node_id) - }), None => { let error = format!("Channel with id {} not found for the passed counterparty node_id {}", next_hop_channel_id, next_node_id); @@ -5521,7 +5519,7 @@ where return Err(APIError::ChannelUnavailable { err: error }) - } + }, } }; @@ -5912,8 +5910,9 @@ where // applying non-strict forwarding. // The channel with the least amount of outbound liquidity will be used to maximize the // probability of being able to successfully forward a subsequent HTLC. - let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase { - ChannelPhase::Funded(chan) => { + let maybe_optimal_channel = peer_state.channel_by_id.values_mut() + .filter_map(ChannelPhase::as_funded_mut) + .filter_map(|chan| { let balances = chan.context.get_available_balances(&self.fee_estimator); if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat && outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat && @@ -5922,14 +5921,16 @@ where } else { None } - }, - _ => None, - }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c); + }) + .min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c); let optimal_channel = match maybe_optimal_channel { Some(chan) => chan, None => { // Fall back to the specified channel to return an appropriate error. 
- if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(&forward_chan_id) + .and_then(ChannelPhase::as_funded_mut) + { chan } else { forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); @@ -5957,7 +5958,10 @@ where panic!("Stated return value requirements in send_htlc() were not met"); } - if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(&forward_chan_id) + .and_then(ChannelPhase::as_funded_mut) + { let failure_code = 0x1000|7; let data = self.get_htlc_inbound_temp_fail_data(failure_code); failed_forwards.push((htlc_source, payment_hash, @@ -5975,7 +5979,10 @@ where panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); }, HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => { - if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(&forward_chan_id) + .and_then(ChannelPhase::as_funded_mut) + { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id)) @@ -5985,7 +5992,10 @@ where } }, HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { - if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(&forward_chan_id) + .and_then(ChannelPhase::as_funded_mut) + { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); let res = chan.queue_fail_malformed_htlc( @@ -6001,7 +6011,10 @@ where if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { if let Err(e) = queue_fail_htlc_res { if let ChannelError::Ignore(msg) = e { - if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(&forward_chan_id) + .and_then(ChannelPhase::as_funded_mut) + { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); } @@ -6309,7 +6322,10 @@ where if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(&channel_id) + .and_then(ChannelPhase::as_funded_mut) + { handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); } else { let update_actions = peer_state.monitor_update_blocked_actions @@ -6369,9 +6385,9 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map( - |(chan_id, phase)| if let 
ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None } - ) { + for (chan_id, chan) in peer_state.channel_by_id.iter_mut() + .filter_map(|(chan_id, phase)| phase.as_funded_mut().map(|chan| (chan_id, chan))) + { let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { anchor_feerate } else { @@ -6748,7 +6764,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(chan_phase_entry) => { - if let ChannelPhase::Funded(_chan) = chan_phase_entry.get() { + if let Some(_chan) = chan_phase_entry.get().as_funded() { let failure_code = 0x1000|7; let data = self.get_htlc_inbound_temp_fail_data(failure_code); (failure_code, data) @@ -7081,7 +7097,7 @@ where if let Some(peer_state_lock) = peer_state_opt.as_mut() { let peer_state = &mut **peer_state_lock; if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger); @@ -7583,7 +7599,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ return; } - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) { + if let Some(chan) = peer_state.channel_by_id + .get_mut(channel_id) + .and_then(ChannelPhase::as_funded_mut) + { if chan.is_awaiting_monitor_update() { log_trace!(logger, "Channel is open and awaiting update, resuming it"); handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); @@ -8125,7 +8144,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); } - if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) { + if let Some(chan) = e.insert(ChannelPhase::Funded(chan)).as_funded_mut() { handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); } else { @@ -8172,8 +8191,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // We really should be able to insert here without doing a second // lookup, but sadly rust stdlib doesn't currently allow keeping // the original Entry around with the value removed. - let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan)); - if let ChannelPhase::Funded(ref mut chan) = &mut chan { + let chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan)); + if let Some(chan) = chan.as_funded_mut() { handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); } else { unreachable!(); } Ok(()) @@ -8366,9 +8385,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - let channel_phase = chan_phase_entry.get_mut(); + let channel_phase = chan_phase_entry.get_mut().as_funded_mut(); match channel_phase { - ChannelPhase::Funded(chan) => { + Some(chan) => { let logger = WithChannelContext::from(&self.logger, &chan.context, None); let (tx_signatures_opt, funding_tx_opt) = try_chan_phase_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_phase_entry); if let Some(tx_signatures) = tx_signatures_opt { @@ -8385,7 +8404,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - _ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close( + None => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close( ( "Got an unexpected tx_signatures message".into(), ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, @@ -8476,7 +8495,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); let announcement_sigs_opt = try_chan_phase_entry!(self, peer_state, chan.channel_ready(&msg, &self.node_signer, self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry); @@ -8531,9 +8550,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) { - let phase = chan_phase_entry.get_mut(); - match phase { - ChannelPhase::Funded(chan) => { + match chan_phase_entry.get_mut().as_funded_mut() { + Some(chan) => { if !chan.received_shutdown() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.", @@ -8561,12 +8579,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ peer_state_lock, peer_state, per_peer_state, chan); } }, - ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) | - ChannelPhase::UnfundedV2(_) => { - let context = phase.context_mut(); + None => { + let context = chan_phase_entry.get_mut().context_mut(); let logger = WithChannelContext::from(&self.logger, context, None); log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); - let mut close_res = phase.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel); + let mut close_res = context.force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel); remove_channel_phase!(self, peer_state, chan_phase_entry, close_res); finish_shutdown = Some(close_res); }, @@ -8599,7 +8616,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_phase_entry); debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown()); @@ -8635,8 +8652,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); } - if let Some(ChannelPhase::Funded(chan)) = chan_option { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + if let Some(chan) = chan_option.as_ref().and_then(ChannelPhase::as_funded) { + if let Ok(update) = self.get_channel_update_for_broadcast(chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -8674,7 +8691,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let mut pending_forward_info = match decoded_hop_res { Ok((next_hop, shared_secret, next_packet_pk_opt)) => self.construct_pending_htlc_status( @@ -8746,7 +8763,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let res = try_chan_phase_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_phase_entry); if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { let logger = WithChannelContext::from(&self.logger, &chan.context, None); @@ -8795,7 +8812,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { try_chan_phase_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry); } else { return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close( @@ -8824,7 +8841,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); try_chan_phase_entry!(self, peer_state, Err(chan_err), chan_phase_entry); } - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { try_chan_phase_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry); } else { return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close( @@ -8848,7 +8865,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); let funding_txo = chan.context.get_funding_txo(); @@ -9065,7 +9082,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); let funding_txo_opt = chan.context.get_funding_txo(); let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt { @@ -9105,7 +9122,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); try_chan_phase_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry); } else { @@ -9129,7 +9146,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { if !chan.context.is_usable() { return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } @@ -9171,7 +9188,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { if chan.context.get_counterparty_node_id() != *counterparty_node_id { if chan.context.should_announce() { // If the announcement is about a channel of ours which is public, some @@ -9222,7 +9239,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { // Currently, we expect all holding cell update_adds to be dropped on peer // disconnect, so Channel's reestablish will never hand us any holding cell // freed HTLCs to fail backwards. If in the future we no longer drop pending @@ -9353,8 +9370,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut shutdown_res = chan_phase_entry.get_mut().context_mut().force_shutdown(false, reason.clone()); let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res); failed_channels.push(shutdown_res); - if let ChannelPhase::Funded(chan) = chan_phase { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + if let Some(chan) = chan_phase.as_funded() { + if let Ok(update) = self.get_channel_update_for_broadcast(chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -9414,9 +9431,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ 'chan_loop: loop { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state: &mut PeerState<_> = &mut *peer_state_lock; - for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map( - |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None } - ) { + for (channel_id, chan) in peer_state.channel_by_id + .iter_mut() + .filter_map(|(chan_id, phase)| phase.as_funded_mut().map(|chan| (chan_id, chan))) + { let counterparty_node_id = chan.context.get_counterparty_node_id(); let funding_txo = chan.context.get_funding_txo(); let (monitor_opt, holding_cell_failed_htlcs) = @@ -9591,8 +9609,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|channel_id, phase| { - match phase { - ChannelPhase::Funded(chan) => { + match phase.as_funded_mut() { + Some(chan) => { let logger = WithChannelContext::from(&self.logger, &chan.context, None); match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) { Ok((msg_opt, tx_opt, shutdown_result_opt)) => { @@ -9630,7 +9648,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - _ => true, // Retain unfunded channels if present. + None => true, // Retain unfunded channels if present. } }); } @@ -10539,9 +10557,7 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for chan in peer_state.channel_by_id.values().filter_map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ) { + for chan in peer_state.channel_by_id.values().filter_map(ChannelPhase::as_funded) { for (htlc_source, _) in chan.inflight_htlc_sources() { if let HTLCSource::OutboundRoute { path, .. 
} = htlc_source { inflight_htlcs.process_path(path, self.get_our_node_id()); @@ -10620,7 +10636,7 @@ where if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry( channel_id) { - if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", @@ -10916,7 +10932,7 @@ where for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) { + for chan in peer_state.channel_by_id.values().filter_map(ChannelPhase::as_funded) { let txid_opt = chan.context.get_funding_txo(); let height_opt = chan.context.get_funding_tx_confirmation_height(); let hash_opt = chan.context.get_funding_tx_confirmed_in(); @@ -10973,11 +10989,10 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, phase| { - match phase { + match phase.as_funded_mut() { // Retain unfunded channels. - ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) | - ChannelPhase::UnfundedV2(_) => true, - ChannelPhase::Funded(channel) => { + None => true, + Some(channel) => { let res = f(channel); if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { for (source, payment_hash) in timed_out_pending_htlcs.drain(..) 
{ @@ -11716,7 +11731,10 @@ where let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; } let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) { + if let Some(chan) = peer_state.channel_by_id + .get(&msg.channel_id) + .and_then(ChannelPhase::as_funded) + { if let Some(msg) = chan.get_outbound_shutdown() { peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: counterparty_node_id, @@ -12745,9 +12763,11 @@ where serializable_peer_count += 1; } - number_of_funded_channels += peer_state.channel_by_id.iter().filter( - |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false } - ).count(); + number_of_funded_channels += peer_state.channel_by_id + .values() + .filter_map(ChannelPhase::as_funded) + .filter(|chan| chan.context.is_funding_broadcast()) + .count(); } (number_of_funded_channels as u64).write(writer)?; @@ -12755,11 +12775,11 @@ where for (_, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for channel in peer_state.channel_by_id.iter().filter_map( - |(_, phase)| if let ChannelPhase::Funded(channel) = phase { - if channel.context.is_funding_broadcast() { Some(channel) } else { None } - } else { None } - ) { + for channel in peer_state.channel_by_id + .values() + .filter_map(ChannelPhase::as_funded) + .filter(|channel| channel.context.is_funding_broadcast()) + { channel.write(writer)?; } } @@ -13565,7 +13585,7 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; for phase in peer_state.channel_by_id.values() { - if let ChannelPhase::Funded(chan) = phase { + if let Some(chan) = phase.as_funded() { let logger = WithChannelContext::from(&args.logger, &chan.context, None); // Channels that were persisted have to be funded, otherwise they should have been @@ -14024,7 +14044,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (chan_id, phase) in peer_state.channel_by_id.iter_mut() { - if let ChannelPhase::Funded(chan) = phase { + if let Some(chan) = phase.as_funded_mut() { let logger = WithChannelContext::from(&args.logger, &chan.context, None); if chan.context.outbound_scid_alias() == 0 { let mut outbound_scid_alias; @@ -14274,7 +14294,10 @@ where let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap(); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) { + if let Some(channel) = peer_state.channel_by_id + .get_mut(&previous_channel_id) + .and_then(ChannelPhase::as_funded_mut) + { let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash)); channel.claim_htlc_while_disconnected_dropping_mon_update_legacy( claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index b4f172b4a27..726fed48b9f 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3605,9 +3605,7 @@ macro_rules! 
get_channel_value_stat { ($node: expr, $counterparty_node: expr, $channel_id: expr) => {{ let peer_state_lock = $node.node.per_peer_state.read().unwrap(); let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); - let chan = chan_lock.channel_by_id.get(&$channel_id).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap(); + let chan = chan_lock.channel_by_id.get(&$channel_id).and_then(ChannelPhase::as_funded).unwrap(); chan.get_value_stat() }} } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index ac43efe4499..53642bb2430 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -734,9 +734,7 @@ fn test_update_fee_that_funder_cannot_afford() { let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = chan_lock.channel_by_id.get(&chan.2).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); let chan_signer = local_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, @@ -745,9 +743,7 @@ fn test_update_fee_that_funder_cannot_afford() { let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -762,9 +758,7 @@ fn test_update_fee_that_funder_cannot_afford() { let res = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); let local_chan_signer = local_chan.get_signer(); let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( @@ -1470,9 +1464,7 @@ fn test_fee_spike_violation_fails_htlc() { let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = chan_lock.channel_by_id.get(&chan.2).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); let chan_signer = 
local_chan.get_signer(); // Make the signer believe we validated another commitment, so we can release the secret chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; @@ -1486,9 +1478,7 @@ fn test_fee_spike_violation_fails_htlc() { let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -1517,9 +1507,7 @@ fn test_fee_spike_violation_fails_htlc() { let res = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); let local_chan_signer = local_chan.get_signer(); let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( commitment_number, @@ -7786,9 +7774,8 @@ fn test_counterparty_raa_skip_no_crash() { { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let keys = guard.channel_by_id.get_mut(&channel_id).map( - |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).flatten().unwrap().get_signer(); + let keys = guard.channel_by_id.get(&channel_id).and_then(ChannelPhase::as_funded).unwrap() + .get_signer(); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -8523,7 +8510,7 @@ fn test_update_err_monitor_lockdown() { { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) { + if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); @@ -8625,7 +8612,7 @@ fn test_concurrent_monitor_claim() { { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) { + if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update 
assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); From 401bdb84e3aac1638bce7ebb3c24431f0cf533d0 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Sun, 5 Jan 2025 22:24:51 -0600 Subject: [PATCH 02/19] Add ChannelPhase::is_funded for filtering channels. Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, introduce ChannelPhase::is_funded for use in ChannelManager when a Channel (to be later renamed FundedChannel) needs to be tested for. --- lightning/src/ln/channel.rs | 4 ++++ lightning/src/ln/channelmanager.rs | 8 +------- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3ba2d270b4b..12472d1a502 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1153,6 +1153,10 @@ impl<'a, SP: Deref> ChannelPhase where } } + pub fn is_funded(&self) -> bool { + matches!(self, ChannelPhase::Funded(_)) + } + pub fn as_funded(&self) -> Option<&Channel> { if let ChannelPhase::Funded(channel) = self { Some(channel) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 79b2f429831..37f6f134708 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1366,13 +1366,7 @@ impl PeerState where SP::Target: SignerProvider { return false; } } - !self.channel_by_id.iter().any(|(_, phase)| - match phase { - ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true, - ChannelPhase::UnfundedInboundV1(_) => false, - ChannelPhase::UnfundedV2(chan) => chan.context.is_outbound(), - } - ) + !self.channel_by_id.iter().any(|(_, channel)| channel.is_funded() || channel.context().is_outbound()) && self.monitor_update_blocked_actions.is_empty() && self.closed_channel_monitor_update_ids.is_empty() } From 6f119eb20c66ee5a76fa69eb34537b9bb65a1714 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 6 Jan 2025 17:26:42 -0600 Subject: [PATCH 03/19] Rewrite ChannelManager::unfunded_channel_count Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, rewrite ChannelManager's unfunded_channel_count method to use ChannelPhase::as_funded and a new ChannelPhase::as_unfunded_v2 method. --- lightning/src/ln/channel.rs | 8 +++++++ lightning/src/ln/channelmanager.rs | 34 ++++++++++++++---------------- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 12472d1a502..c5ec6ce8c19 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1172,6 +1172,14 @@ impl<'a, SP: Deref> ChannelPhase where None } } + + pub fn as_unfunded_v2(&self) -> Option<&PendingV2Channel> { + if let ChannelPhase::UnfundedV2(channel) = self { + Some(channel) + } else { + None + } + } } /// Contains all state common to unfunded inbound/outbound channels. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 37f6f134708..5026c820b43 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7831,8 +7831,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ) -> usize { let mut num_unfunded_channels = 0; for (_, phase) in peer.channel_by_id.iter() { - match phase { - ChannelPhase::Funded(chan) => { + match phase.as_funded() { + Some(chan) => { // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those // which have not yet had any confirmations on-chain. if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 && @@ -7841,27 +7841,25 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ num_unfunded_channels += 1; } }, - ChannelPhase::UnfundedInboundV1(chan) => { - if chan.context.minimum_depth().unwrap_or(1) != 0 { - num_unfunded_channels += 1; - } - }, - ChannelPhase::UnfundedV2(chan) => { + None => { // Outbound channels don't contribute to the unfunded count in the DoS context. - if chan.context.is_outbound() { + if phase.context().is_outbound() { continue; } - // Only inbound V2 channels that are not 0conf and that we do not contribute to will be - // included in the unfunded count. - if chan.context.minimum_depth().unwrap_or(1) != 0 && - chan.dual_funding_context.our_funding_satoshis == 0 { - num_unfunded_channels += 1; + // 0conf channels are not considered unfunded. + if phase.context().minimum_depth().unwrap_or(1) == 0 { + continue; } - }, - ChannelPhase::UnfundedOutboundV1(_) => { - // Outbound channels don't contribute to the unfunded count in the DoS context. - continue; + + // Inbound V2 channels with contributed inputs are not considered unfunded. + if let Some(chan) = phase.as_unfunded_v2() { + if chan.dual_funding_context.our_funding_satoshis != 0 { + continue; + } + } + + num_unfunded_channels += 1; }, } } From 307e0485d2e1440b8bdbcb80ade31b679c79b785 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 6 Jan 2025 18:30:49 -0600 Subject: [PATCH 04/19] Handle tx_abort without using ChannelPhase Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update ChannelManager::internal_tx_abort to use ChannelPhase::is_funded and a new ChannelPhase::as_unfunded_v2_mut method. --- lightning/src/ln/channel.rs | 8 ++++++++ lightning/src/ln/channelmanager.rs | 9 ++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c5ec6ce8c19..bdadcb75f1b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1180,6 +1180,14 @@ impl<'a, SP: Deref> ChannelPhase where None } } + + pub fn as_unfunded_v2_mut(&mut self) -> Option<&mut PendingV2Channel> { + if let ChannelPhase::UnfundedV2(channel) = self { + Some(channel) + } else { + None + } + } } /// Contains all state common to unfunded inbound/outbound channels. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5026c820b43..8d39784ef97 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -8425,9 +8425,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { let channel_phase = chan_phase_entry.get_mut(); - let tx_constructor = match channel_phase { - ChannelPhase::UnfundedV2(chan) => &mut chan.interactive_tx_constructor, - ChannelPhase::Funded(_) => { + let tx_constructor = match channel_phase.as_unfunded_v2_mut() { + Some(chan) => &mut chan.interactive_tx_constructor, + None => if channel_phase.is_funded() { // TODO(splicing)/TODO(RBF): We'll also be doing interactive tx construction // for a "ChannelPhase::Funded" when we want to bump the fee on an interactively // constructed funding tx or during splicing. For now we send an error as we would @@ -8437,8 +8437,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ splicing and RBF attempts of interactive funding transactions are not supported yet so \ we don't have any negotiation in progress".into(), )), chan_phase_entry) - } - ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => { + } else { try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn( "Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel \ establishment".into(), From cae893af64a5adc51242402ab6dee6c5f874dde2 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Tue, 7 Jan 2025 16:24:31 -0600 Subject: [PATCH 05/19] Rewrite ChannelManager::signer_unblocked Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update ChannelManager::signer_unblocked to use ChannelPhase::as_funded and a new method on ChannelPhase dispatching to each variant's signer_maybe_unblocked method. 
--- lightning/src/ln/channel.rs | 48 ++++++++++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 24 ++++++--------- 2 files changed, 57 insertions(+), 15 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index bdadcb75f1b..6bcde8f6fe6 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -909,6 +909,9 @@ pub(super) struct MonitorRestoreUpdates { pub(super) struct SignerResumeUpdates { pub commitment_update: Option, pub revoke_and_ack: Option, + pub open_channel: Option, + pub accept_channel: Option, + pub funding_created: Option, pub funding_signed: Option, pub channel_ready: Option, pub order: RAACommitmentOrder, @@ -1188,6 +1191,48 @@ impl<'a, SP: Deref> ChannelPhase where None } } + + pub fn signer_maybe_unblocked( + &mut self, chain_hash: ChainHash, logger: &L, + ) -> Option where L::Target: Logger { + match self { + ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)), + ChannelPhase::UnfundedOutboundV1(chan) => { + let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger); + Some(SignerResumeUpdates { + commitment_update: None, + revoke_and_ack: None, + open_channel, + accept_channel: None, + funding_created, + funding_signed: None, + channel_ready: None, + order: chan.context.resend_order.clone(), + closing_signed: None, + signed_closing_tx: None, + shutdown_result: None, + }) + }, + ChannelPhase::UnfundedInboundV1(chan) => { + let logger = WithChannelContext::from(logger, &chan.context, None); + let accept_channel = chan.signer_maybe_unblocked(&&logger); + Some(SignerResumeUpdates { + commitment_update: None, + revoke_and_ack: None, + open_channel: None, + accept_channel, + funding_created: None, + funding_signed: None, + channel_ready: None, + order: chan.context.resend_order.clone(), + closing_signed: None, + signed_closing_tx: None, + shutdown_result: None, + }) + }, + ChannelPhase::UnfundedV2(_) => None, + } + } } /// Contains all state common to unfunded inbound/outbound channels. @@ -6178,6 +6223,9 @@ impl Channel where SignerResumeUpdates { commitment_update, revoke_and_ack, + open_channel: None, + accept_channel: None, + funding_created: None, funding_signed, channel_ready, order: self.context.resend_order.clone(), diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8d39784ef97..1a1a578a21d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9468,9 +9468,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Returns whether we should remove this channel as it's just been closed. let unblock_chan = |phase: &mut ChannelPhase, pending_msg_events: &mut Vec| -> Option { let node_id = phase.context().get_counterparty_node_id(); - match phase { - ChannelPhase::Funded(chan) => { - let msgs = chan.signer_maybe_unblocked(&self.logger); + match (phase.signer_maybe_unblocked(self.chain_hash, &self.logger), phase.as_funded()) { + (Some(msgs), Some(chan)) => { let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs { node_id, updates, @@ -9521,34 +9520,29 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } msgs.shutdown_result - } - ChannelPhase::UnfundedOutboundV1(chan) => { - let (open_channel, funding_created) = chan.signer_maybe_unblocked(self.chain_hash.clone(), &self.logger); - if let Some(msg) = open_channel { + }, + (Some(msgs), None) => { + if let Some(msg) = msgs.open_channel { pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id, msg, }); } - if let Some(msg) = funding_created { + if let Some(msg) = msgs.funding_created { pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id, msg, }); } - None - } - ChannelPhase::UnfundedInboundV1(chan) => { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - if let Some(msg) = chan.signer_maybe_unblocked(&&logger) { + if let Some(msg) = msgs.accept_channel { pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id, msg, }); } None - }, - ChannelPhase::UnfundedV2(_) => None, + } + (None, _) => None, } }; From a6c70eab0a3327631866cf26e4b32a1b6d30afcb Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Tue, 7 Jan 2025 16:51:40 -0600 Subject: [PATCH 06/19] Rewrite ChannelManager::peer_disconnected Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update ChannelManager::peer_disconnected to use ChannelPhase::as_funded_mut and a new ChannelPhase::is_resumable method. --- lightning/src/ln/channel.rs | 9 +++++++++ lightning/src/ln/channelmanager.rs | 18 +++++++----------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 6bcde8f6fe6..dfe05e769c7 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1233,6 +1233,15 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::UnfundedV2(_) => None, } } + + pub fn is_resumable(&self) -> bool { + match self { + ChannelPhase::Funded(_) => false, + ChannelPhase::UnfundedOutboundV1(chan) => chan.is_resumable(), + ChannelPhase::UnfundedInboundV1(_) => false, + ChannelPhase::UnfundedV2(_) => false, + } + } } /// Contains all state common to unfunded inbound/outbound channels. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1a1a578a21d..9ad1ffd47f5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11483,14 +11483,13 @@ where let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, phase| { - let context = match phase { - ChannelPhase::Funded(chan) => { + match phase.as_funded_mut() { + Some(chan) => { let logger = WithChannelContext::from(&self.logger, &chan.context, None); if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() { // We only retain funded channels that are not shutdown. return true; } - &mut chan.context }, // If we get disconnected and haven't yet committed to a funding // transaction, we can replay the `open_channel` on reconnection, so don't @@ -11498,17 +11497,14 @@ where // the funding transaction we don't yet support replaying the funding // handshake (and bailing if the peer rejects it), so we force-close in // that case. 
- ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true, - ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context, - // Unfunded inbound channels will always be removed. - ChannelPhase::UnfundedInboundV1(chan) => { - &mut chan.context - }, - ChannelPhase::UnfundedV2(chan) => { - &mut chan.context + None => { + if phase.is_resumable() { + return true; + } }, }; // Clean up for removal. + let context = phase.context_mut(); let mut close_res = context.force_shutdown(false, ClosureReason::DisconnectedPeer); locked_close_channel!(self, peer_state, &context, close_res); failed_channels.push(close_res); From fdbce7f75b36dea66aaa95059737c4f29ecefb23 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Tue, 7 Jan 2025 17:46:49 -0600 Subject: [PATCH 07/19] Rewrite ChannelManager::peer_connected Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update ChannelManager::peer_connected to use ChannelPhase::as_funded_mut and a new ChannelPhase::maybe_get_open_channel method. --- lightning/src/ln/channel.rs | 39 +++++++++++++++++++++++++++++- lightning/src/ln/channelmanager.rs | 36 +++++++++------------------ 2 files changed, 50 insertions(+), 25 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index dfe05e769c7..8c40de80136 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -39,7 +39,7 @@ use crate::ln::msgs; use crate::ln::msgs::{ClosingSigned, ClosingSignedFeeRange, DecodeError}; use crate::ln::script::{self, ShutdownScript}; use crate::ln::channel_state::{ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails}; -use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentClaimDetails, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT}; +use crate::ln::channelmanager::{self, OpenChannelMessage, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentClaimDetails, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT}; use crate::ln::chan_utils::{ CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, @@ -1242,6 +1242,43 @@ impl<'a, SP: Deref> ChannelPhase where ChannelPhase::UnfundedV2(_) => false, } } + + pub fn maybe_get_open_channel( + &mut self, chain_hash: ChainHash, logger: &L, + ) -> Option where L::Target: Logger { + match self { + ChannelPhase::Funded(_) => None, + ChannelPhase::UnfundedOutboundV1(chan) => { + let logger = WithChannelContext::from(logger, &chan.context, None); + chan.get_open_channel(chain_hash, &&logger) + .map(|msg| OpenChannelMessage::V1(msg)) + }, + ChannelPhase::UnfundedInboundV1(_) => { + // Since unfunded inbound channel maps are cleared upon disconnecting a peer, + // they are not persisted and won't be recovered after a crash. + // Therefore, they shouldn't exist at this point. 
+ debug_assert!(false); + None + }, + #[cfg(dual_funding)] + ChannelPhase::UnfundedV2(chan) => { + if chan.context.is_outbound() { + Some(OpenChannelMessage::V2(chan.get_open_channel_v2(chain_hash))) + } else { + // Since unfunded inbound channel maps are cleared upon disconnecting a peer, + // they are not persisted and won't be recovered after a crash. + // Therefore, they shouldn't exist at this point. + debug_assert!(false); + None + } + }, + #[cfg(not(dual_funding))] + ChannelPhase::UnfundedV2(_) => { + debug_assert!(false); + None + }, + } + } } /// Contains all state common to unfunded inbound/outbound channels. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9ad1ffd47f5..e5fc560a5d3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11646,41 +11646,29 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; for (_, phase) in peer_state.channel_by_id.iter_mut() { - match phase { - ChannelPhase::Funded(chan) => { + match phase.as_funded_mut() { + Some(chan) => { let logger = WithChannelContext::from(&self.logger, &chan.context, None); pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { node_id: chan.context.get_counterparty_node_id(), msg: chan.get_channel_reestablish(&&logger), }); }, - ChannelPhase::UnfundedOutboundV1(chan) => { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - if let Some(msg) = chan.get_open_channel(self.chain_hash, &&logger) { + None => match phase.maybe_get_open_channel(self.chain_hash, &self.logger) { + Some(OpenChannelMessage::V1(msg)) => { pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { - node_id: chan.context.get_counterparty_node_id(), + node_id: phase.context().get_counterparty_node_id(), msg, }); - } - }, - ChannelPhase::UnfundedV2(chan) => { - if chan.context.is_outbound() { + }, + #[cfg(dual_funding)] + Some(OpenChannelMessage::V2(msg)) => { pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 { - node_id: chan.context.get_counterparty_node_id(), - msg: chan.get_open_channel_v2(self.chain_hash), + node_id: phase.context().get_counterparty_node_id(), + msg, }); - } else { - // Since unfunded inbound channel maps are cleared upon disconnecting a peer, - // they are not persisted and won't be recovered after a crash. - // Therefore, they shouldn't exist at this point. - debug_assert!(false); - } - }, - ChannelPhase::UnfundedInboundV1(_) => { - // Since unfunded inbound channel maps are cleared upon disconnecting a peer, - // they are not persisted and won't be recovered after a crash. - // Therefore, they shouldn't exist at this point. - debug_assert!(false); + }, + None => {}, }, } } From e24082c6abed1da02b01598e889dc331c25546be Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Tue, 7 Jan 2025 18:14:17 -0600 Subject: [PATCH 08/19] Rewrite ChannelManager::handle_error Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update ChannelManager::handle_error to use a new ChannelPhase::maybe_handle_error_without_close. 
--- lightning/src/ln/channel.rs | 34 ++++++++++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 31 +++++++++++++-------------- 2 files changed, 49 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8c40de80136..ee9b6bb1a22 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1279,6 +1279,38 @@ impl<'a, SP: Deref> ChannelPhase where }, } } + + pub fn maybe_handle_error_without_close( + &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) -> Result, ()> + where + F::Target: FeeEstimator, + L::Target: Logger, + { + match self { + ChannelPhase::Funded(_) => Ok(None), + ChannelPhase::UnfundedOutboundV1(chan) => { + let logger = WithChannelContext::from(logger, &chan.context, None); + chan.maybe_handle_error_without_close(chain_hash, fee_estimator, &&logger) + .map(|msg| Some(OpenChannelMessage::V1(msg))) + }, + ChannelPhase::UnfundedInboundV1(_) => Ok(None), + #[cfg(dual_funding)] + ChannelPhase::UnfundedV2(chan) => { + if chan.context.is_outbound() { + chan.maybe_handle_error_without_close(chain_hash, fee_estimator) + .map(|msg| Some(OpenChannelMessage::V2(msg))) + } else { + Ok(None) + } + }, + #[cfg(not(dual_funding))] + ChannelPhase::UnfundedV2(_) => { + debug_assert!(false); + Ok(None) + }, + } + } } /// Contains all state common to unfunded inbound/outbound channels. @@ -8979,6 +9011,7 @@ impl PendingV2Channel where SP::Target: SignerProvider { /// If we receive an error message, it may only be a rejection of the channel type we tried, /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed. + #[cfg(dual_funding)] pub(crate) fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator ) -> Result @@ -8989,6 +9022,7 @@ impl PendingV2Channel where SP::Target: SignerProvider { Ok(self.get_open_channel_v2(chain_hash)) } + #[cfg(dual_funding)] pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 { if !self.context.is_outbound() { debug_assert!(false, "Tried to send open_channel2 for an inbound channel?"); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e5fc560a5d3..430ad885a3e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11760,28 +11760,27 @@ where let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get_mut(&msg.channel_id) { - Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator, &&logger) { + Some(chan) => match chan.maybe_handle_error_without_close( + self.chain_hash, &self.fee_estimator, &self.logger, + ) { + Ok(Some(OpenChannelMessage::V1(msg))) => { peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: counterparty_node_id, msg, }); return; - } - }, - Some(ChannelPhase::UnfundedV2(ref mut chan)) => { - if chan.context.is_outbound() { - if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 { - node_id: counterparty_node_id, - msg, - }); - return; - } - } + }, 
+ #[cfg(dual_funding)] + Ok(Some(OpenChannelMessage::V2(msg))) => { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 { + node_id: counterparty_node_id, + msg, + }); + return; + }, + Ok(None) | Err(()) => {}, }, - None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (), + None => {}, } } From 537ed88ae9cd0fc5aab0063c3a38e7ac03c85f24 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 09:18:05 -0600 Subject: [PATCH 09/19] Rewrite ChannelManager::timer_tick_occurred Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update ChannelManager::timer_tick_occurred to use ChannelPhase::as_funded_mut and a new ChannelPhase::unfunded_context_mut method. --- lightning/src/ln/channel.rs | 9 ++++ lightning/src/ln/channelmanager.rs | 66 +++++++++++++----------------- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ee9b6bb1a22..aa7ec953093 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1156,6 +1156,15 @@ impl<'a, SP: Deref> ChannelPhase where } } + pub fn unfunded_context_mut(&mut self) -> Option<&mut UnfundedChannelContext> { + match self { + ChannelPhase::Funded(_) => { debug_assert!(false); None }, + ChannelPhase::UnfundedOutboundV1(chan) => Some(&mut chan.unfunded_context), + ChannelPhase::UnfundedInboundV1(chan) => Some(&mut chan.unfunded_context), + ChannelPhase::UnfundedV2(chan) => Some(&mut chan.unfunded_context), + } + } + pub fn is_funded(&self) -> bool { matches!(self, ChannelPhase::Funded(_)) } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 430ad885a3e..61b220e5bae 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6429,34 +6429,6 @@ where let mut pending_peers_awaiting_removal = Vec::new(); let mut shutdown_channels = Vec::new(); - macro_rules! 
process_unfunded_channel_tick { - ($peer_state: expr, $chan: expr, $pending_msg_events: expr) => { { - let context = &mut $chan.context; - context.maybe_expire_prev_config(); - if $chan.unfunded_context.should_expire_unfunded_channel() { - let logger = WithChannelContext::from(&self.logger, context, None); - log_error!(logger, - "Force-closing pending channel with ID {} for not establishing in a timely manner", - context.channel_id()); - let mut close_res = context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }); - locked_close_channel!(self, $peer_state, context, close_res); - shutdown_channels.push(close_res); - $pending_msg_events.push(MessageSendEvent::HandleError { - node_id: context.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { - channel_id: context.channel_id(), - data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(), - }, - }, - }); - false - } else { - true - } - } } - } - { let per_peer_state = self.per_peer_state.read().unwrap(); for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { @@ -6465,8 +6437,8 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; let counterparty_node_id = *counterparty_node_id; peer_state.channel_by_id.retain(|chan_id, phase| { - match phase { - ChannelPhase::Funded(chan) => { + match phase.as_funded_mut() { + Some(chan) => { let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { anchor_feerate } else { @@ -6540,14 +6512,32 @@ where true }, - ChannelPhase::UnfundedInboundV1(chan) => { - process_unfunded_channel_tick!(peer_state, chan, pending_msg_events) - }, - ChannelPhase::UnfundedOutboundV1(chan) => { - process_unfunded_channel_tick!(peer_state, chan, pending_msg_events) - }, - ChannelPhase::UnfundedV2(chan) => { - process_unfunded_channel_tick!(peer_state, chan, pending_msg_events) + None => { + phase.context_mut().maybe_expire_prev_config(); + let unfunded_context = phase.unfunded_context_mut().expect("channel should be unfunded"); + if unfunded_context.should_expire_unfunded_channel() { + let context = phase.context(); + let logger = WithChannelContext::from(&self.logger, context, None); + log_error!(logger, + "Force-closing pending channel with ID {} for not establishing in a timely manner", + context.channel_id()); + let context = phase.context_mut(); + let mut close_res = context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }); + locked_close_channel!(self, peer_state, context, close_res); + shutdown_channels.push(close_res); + pending_msg_events.push(MessageSendEvent::HandleError { + node_id: context.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { + channel_id: context.channel_id(), + data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(), + }, + }, + }); + false + } else { + true + } }, } }); From 4b965e0f78222ade879f7cfe5c8fc763c0627b71 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 13:53:48 -0600 Subject: [PATCH 10/19] Remove ChannelPhase::UnfundedV2 pattern matches Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. 
As a step in this direction, update ChannelManager methods to use ChannelPhase::as_unfunded_v2_mut and ChannelPhase::into_unfunded_v2 methods. --- lightning/src/ln/channel.rs | 8 ++++++ lightning/src/ln/channelmanager.rs | 42 +++++++++++++++--------------- 2 files changed, 29 insertions(+), 21 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index aa7ec953093..1e9e450cfe3 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1201,6 +1201,14 @@ impl<'a, SP: Deref> ChannelPhase where } } + pub fn into_unfunded_v2(self) -> Option> { + if let ChannelPhase::UnfundedV2(channel) = self { + Some(channel) + } else { + None + } + } + pub fn signer_maybe_unblocked( &mut self, chain_hash: ChainHash, logger: &L, ) -> Option where L::Target: Logger { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 61b220e5bae..6a91c9e8f6d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -8241,44 +8241,44 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { - match channel_phase { - ChannelPhase::UnfundedV2(ref mut channel) => { + match channel_phase.as_unfunded_v2_mut() { + Some(channel) => { Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id)) }, - _ => Err("tx_add_input"), + None => Err("tx_add_input"), } }) } fn internal_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { - match channel_phase { - ChannelPhase::UnfundedV2(ref mut channel) => { + match channel_phase.as_unfunded_v2_mut() { + Some(channel) => { Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id)) }, - _ => Err("tx_add_output"), + None => Err("tx_add_output"), } }) } fn internal_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { - match channel_phase { - ChannelPhase::UnfundedV2(ref mut channel) => { + match channel_phase.as_unfunded_v2_mut() { + Some(channel) => { Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id)) }, - _ => Err("tx_remove_input"), + None => Err("tx_remove_input"), } }) } fn internal_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) -> Result<(), MsgHandleErrInternal> { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { - match channel_phase { - ChannelPhase::UnfundedV2(ref mut channel) => { + match channel_phase.as_unfunded_v2_mut() { + Some(channel) => { Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id)) }, - _ => Err("tx_remove_output"), + None => Err("tx_remove_output"), } }) } @@ -8297,10 +8297,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { let channel_phase = chan_phase_entry.get_mut(); - let (msg_send_event_opt, signing_session_opt) = match channel_phase { - ChannelPhase::UnfundedV2(channel) => channel.tx_complete(msg) + let (msg_send_event_opt, signing_session_opt) = match channel_phase.as_unfunded_v2_mut() { + Some(channel) => channel.tx_complete(msg) .into_msg_send_event_or_signing_session(counterparty_node_id), - _ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close( + None => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close( ( "Got a tx_complete message with no interactive transaction construction expected or in-progress".into(), ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, @@ -8310,18 +8310,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ peer_state.pending_msg_events.push(msg_send_event); }; if let Some(mut signing_session) = signing_session_opt { - let (commitment_signed, funding_ready_for_sig_event_opt) = match chan_phase_entry.get_mut() { - ChannelPhase::UnfundedV2(chan) => { + let (commitment_signed, funding_ready_for_sig_event_opt) = match chan_phase_entry.get_mut().as_unfunded_v2_mut() { + Some(chan) => { chan.funding_tx_constructed(&mut signing_session, &self.logger) }, - _ => Err(ChannelError::Warn( + None => Err(ChannelError::Warn( "Got a tx_complete message with no interactive transaction construction expected or in-progress" .into())), }.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?; let (channel_id, channel_phase) = chan_phase_entry.remove_entry(); - let channel = match channel_phase { - ChannelPhase::UnfundedV2(chan) => chan.into_channel(signing_session), - _ => { + let channel = match channel_phase.into_unfunded_v2() { + Some(chan) => chan.into_channel(signing_session), + None => { debug_assert!(false); // It cannot be another variant as we are in the `Ok` branch of the above match. Err(ChannelError::Warn( "Got a tx_complete message with no interactive transaction construction expected or in-progress" From 06a61b3b15f661f606117a858a5e536744c735e2 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 14:05:21 -0600 Subject: [PATCH 11/19] Remove ChannelPhase from convert_chan_phase_err Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update the convert_chan_phase_err macro to use ChannelPhase::as_funded_mut instead. --- lightning/src/ln/channelmanager.rs | 40 +++++++++++++----------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 6a91c9e8f6d..7fc0763f7ae 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3005,7 +3005,7 @@ macro_rules! locked_close_channel { /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error) macro_rules! 
convert_chan_phase_err { - ($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => { + ($self: ident, $peer_state: expr, $err: expr, $context: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => { match $err { ChannelError::Warn(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id)) @@ -3014,10 +3014,10 @@ macro_rules! convert_chan_phase_err { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id)) }, ChannelError::Close((msg, reason)) => { - let logger = WithChannelContext::from(&$self.logger, &$channel.context, None); + let logger = WithChannelContext::from(&$self.logger, &$context, None); log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg); - let mut shutdown_res = $channel.context.force_shutdown(true, reason); - locked_close_channel!($self, $peer_state, &$channel.context, &mut shutdown_res); + let mut shutdown_res = $context.force_shutdown(true, reason); + locked_close_channel!($self, $peer_state, $context, &mut shutdown_res); let err = MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update); (true, err) @@ -3025,24 +3025,18 @@ macro_rules! convert_chan_phase_err { } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => { - convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() }) + convert_chan_phase_err!($self, $peer_state, $err, $channel.context, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast(&$channel).ok() }) }; - ($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => { - convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None) + ($self: ident, $peer_state: expr, $err: expr, $context: expr, $channel_id: expr, UNFUNDED_CHANNEL) => { + convert_chan_phase_err!($self, $peer_state, $err, $context, $channel_id, MANUAL_CHANNEL_UPDATE, None) }; ($self: ident, $peer_state: expr, $err: expr, $channel_phase: expr, $channel_id: expr) => { - match $channel_phase { - ChannelPhase::Funded(channel) => { + match $channel_phase.as_funded_mut() { + Some(channel) => { convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, FUNDED_CHANNEL) }, - ChannelPhase::UnfundedOutboundV1(channel) => { - convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL) - }, - ChannelPhase::UnfundedInboundV1(channel) => { - convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL) - }, - ChannelPhase::UnfundedV2(channel) => { - convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL) + None => { + convert_chan_phase_err!($self, $peer_state, $err, $channel_phase.context_mut(), $channel_id, UNFUNDED_CHANNEL) }, } }; @@ -8066,7 +8060,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { Ok(res) => res, - Err((inbound_chan, err)) => { + Err((mut inbound_chan, err)) => { // We've already removed this inbound channel from the map in `PeerState` // above so at this point we just need to clean up any lingering entries // concerning this channel as it is safe to do so. @@ -8074,7 +8068,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Really we should be returning the channel_id the peer expects based // on their funding info here, but they're horribly confused anyway, so // there's not a lot we can do to save them. - return Err(convert_chan_phase_err!(self, peer_state, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1); + return Err(convert_chan_phase_err!(self, peer_state, err, inbound_chan.context, &msg.temporary_channel_id, UNFUNDED_CHANNEL).1); }, } }, @@ -8096,7 +8090,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Thus, we must first unset the funding outpoint on the channel. let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(msg.temporary_channel_id); - return Err(convert_chan_phase_err!(self, peer_state, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1); + return Err(convert_chan_phase_err!(self, peer_state, err, chan.context, &funded_channel_id, UNFUNDED_CHANNEL).1); } } } match peer_state.channel_by_id.entry(funded_channel_id) { @@ -8185,16 +8179,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // found an (unreachable) panic when the monitor update contained // within `shutdown_finish` was applied. chan.unset_funding_info(msg.channel_id); - return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1); + return Err(convert_chan_phase_err!(self, peer_state, e, chan, &msg.channel_id, FUNDED_CHANNEL).1); } }, - Err((chan, e)) => { + Err((mut chan, e)) => { debug_assert!(matches!(e, ChannelError::Close(_)), "We don't have a channel anymore, so the error better have expected close"); // We've already removed this outbound channel from the map in // `PeerState` above so at this point we just need to clean up any // lingering entries concerning this channel as it is safe to do so. - return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1); + return Err(convert_chan_phase_err!(self, peer_state, e, chan.context, &msg.channel_id, UNFUNDED_CHANNEL).1); } } } else { From 28971d4be11b96e094492927aeeae2771eb7fe32 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 16:42:59 -0600 Subject: [PATCH 12/19] Remove ChannelPhase::Unfunded*V1 pattern matches Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, update ChannelManager methods to use methods on ChannelPhase for obtaining the appropriate V1 channel types. 
--- lightning/src/ln/channel.rs | 28 ++++++++++++++++++++++++++++ lightning/src/ln/channelmanager.rs | 28 ++++++++++++++++------------ 2 files changed, 44 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1e9e450cfe3..cfb8d9e7469 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1185,6 +1185,34 @@ impl<'a, SP: Deref> ChannelPhase where } } + pub fn as_unfunded_outbound_v1_mut(&mut self) -> Option<&mut OutboundV1Channel> { + if let ChannelPhase::UnfundedOutboundV1(channel) = self { + Some(channel) + } else { + None + } + } + + pub fn is_unfunded_outbound_v1(&self) -> bool { + matches!(self, ChannelPhase::UnfundedOutboundV1(_)) + } + + pub fn into_unfunded_outbound_v1(self) -> Result, Self> { + if let ChannelPhase::UnfundedOutboundV1(channel) = self { + Ok(channel) + } else { + Err(self) + } + } + + pub fn into_unfunded_inbound_v1(self) -> Result, Self> { + if let ChannelPhase::UnfundedInboundV1(channel) = self { + Ok(channel) + } else { + Err(self) + } + } + pub fn as_unfunded_v2(&self) -> Option<&PendingV2Channel> { if let ChannelPhase::UnfundedV2(channel) = self { Some(channel) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7fc0763f7ae..a32476a3f9d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5035,13 +5035,15 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let funding_txo; - let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) { - Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => { + let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) + .map(ChannelPhase::into_unfunded_outbound_v1) + { + Some(Ok(mut chan)) => { macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { { let counterparty; let err = if let ChannelError::Close((msg, reason)) = $err { let channel_id = $chan.context.channel_id(); - counterparty = chan.context.get_counterparty_node_id(); + counterparty = $chan.context.get_counterparty_node_id(); let shutdown_res = $chan.context.force_shutdown(false, reason); MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None) } else { unreachable!(); }; @@ -5070,7 +5072,7 @@ where } } }, - Some(phase) => { + Some(Err(phase)) => { peer_state.channel_by_id.insert(temporary_channel_id, phase); return Err(APIError::APIMisuseError { err: format!( @@ -8018,12 +8020,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) { hash_map::Entry::Occupied(mut phase) => { - match phase.get_mut() { - ChannelPhase::UnfundedOutboundV1(chan) => { + match phase.get_mut().as_unfunded_outbound_v1_mut() { + Some(chan) => { try_chan_phase_entry!(self, peer_state, chan.accept_channel(msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase); (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id()) }, - _ => { + None => { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)); } } @@ -8055,8 +8057,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let (mut chan, funding_msg_opt, monitor) = - match peer_state.channel_by_id.remove(&msg.temporary_channel_id) { - Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => { + match peer_state.channel_by_id.remove(&msg.temporary_channel_id) + .map(ChannelPhase::into_unfunded_inbound_v1) + { + Some(Ok(inbound_chan)) => { let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { Ok(res) => res, @@ -8072,7 +8076,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, } }, - Some(mut phase) => { + Some(Err(mut phase)) => { let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); let err = ChannelError::close(err_msg); return Err(convert_chan_phase_err!(self, peer_state, err, &mut phase, &msg.temporary_channel_id).1); @@ -8151,8 +8155,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(chan_phase_entry) => { - if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) { - let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() }; + if chan_phase_entry.get().is_unfunded_outbound_v1() { + let chan = if let Ok(chan) = chan_phase_entry.remove().into_unfunded_outbound_v1() { chan } else { unreachable!() }; let logger = WithContext::from( &self.logger, Some(chan.context.get_counterparty_node_id()), From 12ff3614c9460155612ea9055ffba0550832bb5f Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 16:53:34 -0600 Subject: [PATCH 13/19] Implement From for ChannelPhase Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, define a conversion from Channel (to be renamed FundedChannel) to ChannelPhase (to be renamed Channel). --- lightning/src/ln/channel.rs | 10 ++++++++++ lightning/src/ln/channelmanager.rs | 8 ++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index cfb8d9e7469..971f7df612c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1358,6 +1358,16 @@ impl<'a, SP: Deref> ChannelPhase where } } +impl From> for ChannelPhase +where + SP::Target: SignerProvider, + ::EcdsaSigner: ChannelSigner, +{ + fn from(channel: Channel) -> Self { + ChannelPhase::Funded(channel) + } +} + /// Contains all state common to unfunded inbound/outbound channels. pub(super) struct UnfundedChannelContext { /// A counter tracking how many ticks have elapsed since this unfunded channel was diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a32476a3f9d..04b073a9260 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -8124,7 +8124,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }); } - if let Some(chan) = e.insert(ChannelPhase::Funded(chan)).as_funded_mut() { + if let Some(chan) = e.insert(ChannelPhase::from(chan)).as_funded_mut() { handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); } else { @@ -8171,7 +8171,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // We really should be able to insert here without doing a second // lookup, but sadly rust stdlib doesn't currently allow keeping // the original Entry around with the value removed. - let chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan)); + let chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::from(chan)); if let Some(chan) = chan.as_funded_mut() { handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); } else { unreachable!(); } @@ -8326,7 +8326,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .into())) }, }.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?; - peer_state.channel_by_id.insert(channel_id, ChannelPhase::Funded(channel)); + peer_state.channel_by_id.insert(channel_id, ChannelPhase::from(channel)); if let Some(funding_ready_for_sig_event) = funding_ready_for_sig_event_opt { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((funding_ready_for_sig_event, None)); @@ -13253,7 +13253,7 @@ where per_peer_state.entry(channel.context.get_counterparty_node_id()) .or_insert_with(|| Mutex::new(empty_peer_state())) .get_mut().unwrap() - .channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel)); + .channel_by_id.insert(channel.context.channel_id(), ChannelPhase::from(channel)); } } else if channel.is_awaiting_initial_mon_persist() { // If we were persisted and shut down while the initial ChannelMonitor persistence From 583224f4f979f3852bbb6e4b012c5684db2e50b9 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 18:21:04 -0600 Subject: [PATCH 14/19] Implement From for ChannelPhase Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, define a conversion from OutboundV1Channel to ChannelPhase (to be renamed Channel). 
--- lightning/src/ln/channel.rs | 10 ++++++++++ lightning/src/ln/channelmanager.rs | 4 ++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 971f7df612c..240d72459c5 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1358,6 +1358,16 @@ impl<'a, SP: Deref> ChannelPhase where } } +impl From> for ChannelPhase +where + SP::Target: SignerProvider, + ::EcdsaSigner: ChannelSigner, +{ + fn from(channel: OutboundV1Channel) -> Self { + ChannelPhase::UnfundedOutboundV1(channel) + } +} + impl From> for ChannelPhase where SP::Target: SignerProvider, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 04b073a9260..ce123ff35bc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3668,7 +3668,7 @@ where panic!("RNG is bad???"); } }, - hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); } + hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::from(channel)); } } if let Some(msg) = res { @@ -5116,7 +5116,7 @@ where return Err(APIError::ChannelUnavailable { err }); } } - e.insert(ChannelPhase::UnfundedOutboundV1(chan)); + e.insert(ChannelPhase::from(chan)); } } Ok(()) From e8c4849dc70277b53b8367e16735e409a3d571c0 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 18:25:00 -0600 Subject: [PATCH 15/19] Implement From for ChannelPhase Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, define a conversion from InboundV1Channel to ChannelPhase (to be renamed Channel). --- lightning/src/ln/channel.rs | 10 ++++++++++ lightning/src/ln/channelmanager.rs | 4 ++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 240d72459c5..281b55d5bc4 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1368,6 +1368,16 @@ where } } +impl From> for ChannelPhase +where + SP::Target: SignerProvider, + ::EcdsaSigner: ChannelSigner, +{ + fn from(channel: InboundV1Channel) -> Self { + ChannelPhase::UnfundedInboundV1(channel) + } +} + impl From> for ChannelPhase where SP::Target: SignerProvider, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ce123ff35bc..2f8eeb5c3be 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7689,7 +7689,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ msg, } }); - (*temporary_channel_id, ChannelPhase::UnfundedInboundV1(channel), message_send_event) + (*temporary_channel_id, ChannelPhase::from(channel), message_send_event) }) }, #[cfg(dual_funding)] @@ -7977,7 +7977,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ msg, } }); - (ChannelPhase::UnfundedInboundV1(channel), message_send_event) + (ChannelPhase::from(channel), message_send_event) }, #[cfg(dual_funding)] OpenChannelMessageRef::V2(msg) => { From 4a81e65b0ea1c1b36ad9cb417935fc6aa0de308b Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 18:27:58 -0600 Subject: [PATCH 16/19] Implement From for ChannelPhase Exposing ChannelPhase in ChannelManager has led to verbose match statements, which need to be modified each time a ChannelPhase is added. Making ChannelPhase an implementation detail of Channel would help avoid this. As a step in this direction, define a conversion from PendingV2Channel to ChannelPhase (to be renamed Channel). --- lightning/src/ln/channel.rs | 10 ++++++++++ lightning/src/ln/channelmanager.rs | 4 ++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 281b55d5bc4..aa2bedf6368 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1378,6 +1378,16 @@ where } } +impl From> for ChannelPhase +where + SP::Target: SignerProvider, + ::EcdsaSigner: ChannelSigner, +{ + fn from(channel: PendingV2Channel) -> Self { + ChannelPhase::UnfundedV2(channel) + } +} + impl From> for ChannelPhase where SP::Target: SignerProvider, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2f8eeb5c3be..c918e433b65 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7713,7 +7713,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ node_id: channel.context.get_counterparty_node_id(), msg: channel.accept_inbound_dual_funded_channel() }; - (channel.context.channel_id(), ChannelPhase::UnfundedV2(channel), Some(message_send_event)) + (channel.context.channel_id(), ChannelPhase::from(channel), Some(message_send_event)) }) }, } @@ -7991,7 +7991,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ node_id: *counterparty_node_id, msg: channel.accept_inbound_dual_funded_channel(), }; - (ChannelPhase::UnfundedV2(channel), Some(message_send_event)) + (ChannelPhase::from(channel), Some(message_send_event)) }, }; From c55508860fd9b3d9233994886c9d47d4216d1606 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 8 Jan 2025 18:42:24 -0600 Subject: [PATCH 17/19] Rename Channel to FundedChannel In preparation for hiding ChannelPhase inside a Channel type, rename Channel to FundedChannel. --- lightning/src/ln/channel.rs | 56 +++++++++++++++--------------- lightning/src/ln/channelmanager.rs | 36 +++++++++---------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index aa2bedf6368..2a277b4c9c5 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1091,8 +1091,8 @@ pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2; /// transaction (not counting the value of the HTLCs themselves). 
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4; -/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this -/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new +/// When a [`FundedChannel`] has its [`ChannelConfig`] updated, its existing one is stashed for up +/// to this number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new /// ChannelUpdate prompted by the config update. This value was determined as follows: /// /// * The expected interval between ticks (1 minute). @@ -1109,7 +1109,7 @@ pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5; pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2; /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel -/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel +/// to be promoted to a [`FundedChannel`] since the unfunded channel was created. An unfunded channel /// exceeding this age limit will be force-closed and purged from memory. pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60; @@ -1131,7 +1131,7 @@ pub(super) enum ChannelPhase where SP::Target: SignerProvider { UnfundedInboundV1(InboundV1Channel), #[allow(dead_code)] // TODO(dual_funding): Remove once creating V2 channels is enabled. UnfundedV2(PendingV2Channel), - Funded(Channel), + Funded(FundedChannel), } impl<'a, SP: Deref> ChannelPhase where @@ -1169,7 +1169,7 @@ impl<'a, SP: Deref> ChannelPhase where matches!(self, ChannelPhase::Funded(_)) } - pub fn as_funded(&self) -> Option<&Channel> { + pub fn as_funded(&self) -> Option<&FundedChannel> { if let ChannelPhase::Funded(channel) = self { Some(channel) } else { @@ -1177,7 +1177,7 @@ impl<'a, SP: Deref> ChannelPhase where } } - pub fn as_funded_mut(&mut self) -> Option<&mut Channel> { + pub fn as_funded_mut(&mut self) -> Option<&mut FundedChannel> { if let ChannelPhase::Funded(channel) = self { Some(channel) } else { @@ -1388,12 +1388,12 @@ where } } -impl From> for ChannelPhase +impl From> for ChannelPhase where SP::Target: SignerProvider, ::EcdsaSigner: ChannelSigner, { - fn from(channel: Channel) -> Self { + fn from(channel: FundedChannel) -> Self { ChannelPhase::Funded(channel) } } @@ -1514,7 +1514,7 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// the future when the signer indicates it may have a signature for us. /// /// This flag is set in such a case. Note that we don't need to persist this as we'll end up - /// setting it again as a side-effect of [`Channel::channel_reestablish`]. + /// setting it again as a side-effect of [`FundedChannel::channel_reestablish`]. 
signer_pending_commitment_update: bool, /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is @@ -1910,7 +1910,7 @@ impl InitialRemoteCommitmentReceiver for InboundV1Channel whe } } -impl InitialRemoteCommitmentReceiver for Channel where SP::Target: SignerProvider { +impl InitialRemoteCommitmentReceiver for FundedChannel where SP::Target: SignerProvider { fn context(&self) -> &ChannelContext { &self.context } @@ -2140,7 +2140,7 @@ impl ChannelContext where SP::Target: SignerProvider { if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat { return Err(ChannelError::close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat))); } - Channel::::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?; + FundedChannel::::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?; let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay { @@ -4410,7 +4410,7 @@ pub(super) struct DualFundingChannelContext { // Holder designates channel data owned for the benefit of the user client. // Counterparty designates channel data owned by the another channel participant entity. -pub(super) struct Channel where SP::Target: SignerProvider { +pub(super) struct FundedChannel where SP::Target: SignerProvider { pub context: ChannelContext, pub interactive_tx_signing_session: Option, holder_commitment_point: HolderCommitmentPoint, @@ -4425,7 +4425,7 @@ struct CommitmentTxInfoCached { feerate: u32, } -/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to +/// Contents of a wire message that fails an HTLC backwards. Useful for [`FundedChannel::fail_htlc`] to /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed. trait FailHTLCContents { type Message: FailHTLCMessageName; @@ -4481,7 +4481,7 @@ impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC { } } -impl Channel where +impl FundedChannel where SP::Target: SignerProvider, ::EcdsaSigner: EcdsaChannelSigner { @@ -5989,7 +5989,7 @@ impl Channel where /// new feerate, the update is cancelled. /// /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this - /// [`Channel`] if `force_holding_cell` is false. + /// [`FundedChannel`] if `force_holding_cell` is false. 
fn send_update_fee( &mut self, feerate_per_kw: u32, mut force_holding_cell: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L @@ -6283,7 +6283,7 @@ impl Channel where if self.context.channel_state.is_peer_disconnected() { return Err(ChannelError::close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } - Channel::::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; + FundedChannel::::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.context.update_time_counter += 1; @@ -8070,7 +8070,7 @@ impl Channel where /// regenerate them. /// /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods - /// on this [`Channel`] if `force_holding_cell` is false. + /// on this [`FundedChannel`] if `force_holding_cell` is false. /// /// `Err`s will only be [`ChannelError::Ignore`]. fn send_htlc( @@ -8694,7 +8694,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { /// If this call is successful, broadcast the funding transaction (and not before!) pub fn funding_signed( mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(Channel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError)> + ) -> Result<(FundedChannel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError)> where L::Target: Logger { @@ -8721,7 +8721,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); - let mut channel = Channel { + let mut channel = FundedChannel { context: self.context, interactive_tx_signing_session: None, holder_commitment_point, @@ -8942,7 +8942,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { pub fn funding_created( mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(Channel, Option, ChannelMonitor<::EcdsaSigner>), (Self, ChannelError)> + ) -> Result<(FundedChannel, Option, ChannelMonitor<::EcdsaSigner>), (Self, ChannelError)> where L::Target: Logger { @@ -8986,7 +8986,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { // Promote the channel to a full-fledged one now that we have updated the state and have a // `ChannelMonitor`. 
- let mut channel = Channel { + let mut channel = FundedChannel { context: self.context, interactive_tx_signing_session: None, holder_commitment_point, @@ -9343,11 +9343,11 @@ impl PendingV2Channel where SP::Target: SignerProvider { self.generate_accept_channel_v2_message() } - pub fn into_channel(self, signing_session: InteractiveTxSigningSession) -> Result, ChannelError>{ + pub fn into_channel(self, signing_session: InteractiveTxSigningSession) -> Result, ChannelError>{ let holder_commitment_point = self.unfunded_context.holder_commitment_point.ok_or(ChannelError::close( format!("Expected to have holder commitment points available upon finishing interactive tx construction for channel {}", self.context.channel_id())))?; - let channel = Channel { + let channel = FundedChannel { context: self.context, interactive_tx_signing_session: Some(signing_session), holder_commitment_point, @@ -9439,7 +9439,7 @@ impl Readable for AnnouncementSigsState { } } -impl Writeable for Channel where SP::Target: SignerProvider { +impl Writeable for FundedChannel where SP::Target: SignerProvider { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. @@ -9818,7 +9818,7 @@ impl Writeable for Channel where SP::Target: SignerProvider { } const MAX_ALLOC_SIZE: usize = 64*1024; -impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel +impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for FundedChannel where ES::Target: EntropySource, SP::Target: SignerProvider @@ -10291,7 +10291,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch }, }; - Ok(Channel { + Ok(FundedChannel { context: ChannelContext { user_id, @@ -10449,7 +10449,7 @@ mod tests { use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint}; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; use crate::ln::channel::InitFeatures; - use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_sat}; + use crate::ln::channel::{AwaitingChannelReadyFlags, ChannelState, FundedChannel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_sat}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; use crate::types::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; use crate::ln::msgs; @@ -11115,7 +11115,7 @@ mod tests { let mut s = crate::io::Cursor::new(&encoded_chan); let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap(); + let decoded_chan = FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap(); assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 
c918e433b65..d64a1603a8d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -48,7 +48,7 @@ use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFun use crate::ln::inbound_payment; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{self, Channel, ChannelPhase, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext}; +use crate::ln::channel::{self, ChannelPhase, ChannelError, ChannelUpdateStatus, FundedChannel, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext}; #[cfg(any(dual_funding, splicing))] use crate::ln::channel::PendingV2Channel; use crate::ln::channel_state::ChannelDetails; @@ -150,7 +150,7 @@ use crate::ln::script::ShutdownScript; // our payment, which we can use to decode errors or inform the user that the payment was sent. /// Information about where a received HTLC('s onion) has indicated the HTLC should go. -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(Debug, PartialEq))] pub enum PendingHTLCRouting { /// An HTLC which should be forwarded on to another node. @@ -270,7 +270,7 @@ impl PendingHTLCRouting { /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it /// should go next. -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(Debug, PartialEq))] pub struct PendingHTLCInfo { /// Further routing details based on whether the HTLC is being forwarded or received. @@ -313,14 +313,14 @@ pub struct PendingHTLCInfo { pub skimmed_fee_msat: Option, } -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum HTLCFailureMsg { Relay(msgs::UpdateFailHTLC), Malformed(msgs::UpdateFailMalformedHTLC), } /// Stores whether we can't forward an HTLC or relevant forwarding info -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug +#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum PendingHTLCStatus { Forward(PendingHTLCInfo), Fail(HTLCFailureMsg), @@ -820,7 +820,7 @@ pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should /// be sent in the order they appear in the return value, however sometimes the order needs to be -/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order +/// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order /// they were originally sent). In those cases, this enum is also returned. #[derive(Clone, PartialEq, Debug)] pub(super) enum RAACommitmentOrder { @@ -3680,7 +3680,7 @@ where Ok(temporary_channel_id) } - fn list_funded_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_funded_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { // Allocate our best estimate of the number of channels we have in the `res` // Vec. 
 // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
 // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
@@ -4204,7 +4204,7 @@ where
 }
 fn can_forward_htlc_to_outgoing_channel(
- &self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
+ &self, chan: &mut FundedChannel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
 ) -> Result<(), (&'static str, u16)> {
 if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
 // Note that the behavior here should be identical to the above block - we
@@ -4245,7 +4245,7 @@ where
 /// Executes a callback `C` that returns some value `X` on the channel found with the given
 /// `scid`. `None` is returned when the channel is not found.
- fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
+ fn do_funded_channel_callback<X, C: Fn(&mut FundedChannel<SP>) -> X>(
 &self, scid: u64, callback: C,
 ) -> Option<X> {
 let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
@@ -4268,7 +4268,7 @@ where
 fn can_forward_htlc(
 &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
 ) -> Result<(), (&'static str, u16)> {
- match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+ match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut FundedChannel<SP>| {
 self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
 }) {
 Some(Ok(())) => {},
@@ -4439,7 +4439,7 @@ where
 ///
 /// [`channel_update`]: msgs::ChannelUpdate
 /// [`internal_closing_signed`]: Self::internal_closing_signed
- fn get_channel_update_for_broadcast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update_for_broadcast(&self, chan: &FundedChannel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
 if !chan.context.should_announce() {
 return Err(LightningError {
 err: "Cannot broadcast a channel_update for a private channel".to_owned(),
@@ -4465,7 +4465,7 @@ where
 ///
 /// [`channel_update`]: msgs::ChannelUpdate
 /// [`internal_closing_signed`]: Self::internal_closing_signed
- fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update_for_unicast(&self, chan: &FundedChannel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
 let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
@@ -5603,7 +5603,7 @@ where
 };
 'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
- let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+ let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut FundedChannel<SP>| {
 let counterparty_node_id = chan.context.get_counterparty_node_id();
 let channel_id = chan.context.channel_id();
 let funding_txo = chan.context.get_funding_txo().unwrap();
@@ -5638,7 +5638,7 @@ where
 let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
 // Process the HTLC on the incoming channel.
- match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+ match self.do_funded_channel_callback(incoming_scid, |chan: &mut FundedChannel<SP>| {
 let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
 chan.can_accept_incoming_htlc(
 update_add_htlc, &self.fee_estimator, &logger,
@@ -6338,7 +6338,7 @@ where
 let _ = self.process_background_events();
 }
- fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
+ fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut FundedChannel<SP>, new_feerate: u32) -> NotifyOption {
 if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
@@ -7437,7 +7437,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 /// Handles a channel reentering a functional state, either due to reconnect or a monitor
 /// update completion.
 fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
- channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
+ channel: &mut FundedChannel<SP>, raa: Option<msgs::RevokeAndACK>,
 commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
 pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 funding_broadcastable: Option<Transaction>,
@@ -10946,7 +10946,7 @@ where
 /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
 /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
 /// the function.
- fn do_chain_event<FN: Fn(&mut Channel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+ fn do_chain_event<FN: Fn(&mut FundedChannel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
 (&self, height_opt: Option<u32>, f: FN) {
 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 // during initialization prior to the chain_monitor being fully configured in some cases.
@@ -13157,7 +13157,7 @@ where
 let mut close_background_events = Vec::new();
 let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
 for _ in 0..channel_count {
- let mut channel: Channel<SP> = Channel::read(reader, (
+ let mut channel: FundedChannel<SP> = FundedChannel::read(reader, (
 &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
 ))?;
 let logger = WithChannelContext::from(&args.logger, &channel.context, None);

From 442b03050465f3b41c78e4919415c25a340d7aef Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Wed, 8 Jan 2025 18:49:34 -0600
Subject: [PATCH 18/19] Rename ChannelPhase to Channel

In preparation for hiding ChannelPhase inside a Channel type, rename
ChannelPhase to Channel.
---
 lightning/src/ln/channel.rs | 102 +++++++++++-----
 lightning/src/ln/channelmanager.rs | 82 ++++++++---------
 lightning/src/ln/functional_test_utils.rs | 2 +-
 lightning/src/ln/functional_tests.rs | 26 +++---
 4 files changed, 106 insertions(+), 106 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 2a277b4c9c5..275dcf0fbce 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1124,9 +1124,9 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
 (0, update, required),
 });
-/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
+/// The `Channel` enum describes the current phase in life of a lightning channel with each of
 /// its variants containing an appropriate channel struct.
-pub(super) enum ChannelPhase where SP::Target: SignerProvider { +pub(super) enum Channel where SP::Target: SignerProvider { UnfundedOutboundV1(OutboundV1Channel), UnfundedInboundV1(InboundV1Channel), #[allow(dead_code)] // TODO(dual_funding): Remove once creating V2 channels is enabled. @@ -1134,43 +1134,43 @@ pub(super) enum ChannelPhase where SP::Target: SignerProvider { Funded(FundedChannel), } -impl<'a, SP: Deref> ChannelPhase where +impl<'a, SP: Deref> Channel where SP::Target: SignerProvider, ::EcdsaSigner: ChannelSigner, { pub fn context(&'a self) -> &'a ChannelContext { match self { - ChannelPhase::Funded(chan) => &chan.context, - ChannelPhase::UnfundedOutboundV1(chan) => &chan.context, - ChannelPhase::UnfundedInboundV1(chan) => &chan.context, - ChannelPhase::UnfundedV2(chan) => &chan.context, + Channel::Funded(chan) => &chan.context, + Channel::UnfundedOutboundV1(chan) => &chan.context, + Channel::UnfundedInboundV1(chan) => &chan.context, + Channel::UnfundedV2(chan) => &chan.context, } } pub fn context_mut(&'a mut self) -> &'a mut ChannelContext { match self { - ChannelPhase::Funded(ref mut chan) => &mut chan.context, - ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context, - ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context, - ChannelPhase::UnfundedV2(ref mut chan) => &mut chan.context, + Channel::Funded(ref mut chan) => &mut chan.context, + Channel::UnfundedOutboundV1(ref mut chan) => &mut chan.context, + Channel::UnfundedInboundV1(ref mut chan) => &mut chan.context, + Channel::UnfundedV2(ref mut chan) => &mut chan.context, } } pub fn unfunded_context_mut(&mut self) -> Option<&mut UnfundedChannelContext> { match self { - ChannelPhase::Funded(_) => { debug_assert!(false); None }, - ChannelPhase::UnfundedOutboundV1(chan) => Some(&mut chan.unfunded_context), - ChannelPhase::UnfundedInboundV1(chan) => Some(&mut chan.unfunded_context), - ChannelPhase::UnfundedV2(chan) => Some(&mut chan.unfunded_context), + Channel::Funded(_) => { debug_assert!(false); None }, + Channel::UnfundedOutboundV1(chan) => Some(&mut chan.unfunded_context), + Channel::UnfundedInboundV1(chan) => Some(&mut chan.unfunded_context), + Channel::UnfundedV2(chan) => Some(&mut chan.unfunded_context), } } pub fn is_funded(&self) -> bool { - matches!(self, ChannelPhase::Funded(_)) + matches!(self, Channel::Funded(_)) } pub fn as_funded(&self) -> Option<&FundedChannel> { - if let ChannelPhase::Funded(channel) = self { + if let Channel::Funded(channel) = self { Some(channel) } else { None @@ -1178,7 +1178,7 @@ impl<'a, SP: Deref> ChannelPhase where } pub fn as_funded_mut(&mut self) -> Option<&mut FundedChannel> { - if let ChannelPhase::Funded(channel) = self { + if let Channel::Funded(channel) = self { Some(channel) } else { None @@ -1186,7 +1186,7 @@ impl<'a, SP: Deref> ChannelPhase where } pub fn as_unfunded_outbound_v1_mut(&mut self) -> Option<&mut OutboundV1Channel> { - if let ChannelPhase::UnfundedOutboundV1(channel) = self { + if let Channel::UnfundedOutboundV1(channel) = self { Some(channel) } else { None @@ -1194,11 +1194,11 @@ impl<'a, SP: Deref> ChannelPhase where } pub fn is_unfunded_outbound_v1(&self) -> bool { - matches!(self, ChannelPhase::UnfundedOutboundV1(_)) + matches!(self, Channel::UnfundedOutboundV1(_)) } pub fn into_unfunded_outbound_v1(self) -> Result, Self> { - if let ChannelPhase::UnfundedOutboundV1(channel) = self { + if let Channel::UnfundedOutboundV1(channel) = self { Ok(channel) } else { Err(self) @@ -1206,7 +1206,7 @@ impl<'a, SP: Deref> ChannelPhase where } pub 
fn into_unfunded_inbound_v1(self) -> Result, Self> { - if let ChannelPhase::UnfundedInboundV1(channel) = self { + if let Channel::UnfundedInboundV1(channel) = self { Ok(channel) } else { Err(self) @@ -1214,7 +1214,7 @@ impl<'a, SP: Deref> ChannelPhase where } pub fn as_unfunded_v2(&self) -> Option<&PendingV2Channel> { - if let ChannelPhase::UnfundedV2(channel) = self { + if let Channel::UnfundedV2(channel) = self { Some(channel) } else { None @@ -1222,7 +1222,7 @@ impl<'a, SP: Deref> ChannelPhase where } pub fn as_unfunded_v2_mut(&mut self) -> Option<&mut PendingV2Channel> { - if let ChannelPhase::UnfundedV2(channel) = self { + if let Channel::UnfundedV2(channel) = self { Some(channel) } else { None @@ -1230,7 +1230,7 @@ impl<'a, SP: Deref> ChannelPhase where } pub fn into_unfunded_v2(self) -> Option> { - if let ChannelPhase::UnfundedV2(channel) = self { + if let Channel::UnfundedV2(channel) = self { Some(channel) } else { None @@ -1241,8 +1241,8 @@ impl<'a, SP: Deref> ChannelPhase where &mut self, chain_hash: ChainHash, logger: &L, ) -> Option where L::Target: Logger { match self { - ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)), - ChannelPhase::UnfundedOutboundV1(chan) => { + Channel::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)), + Channel::UnfundedOutboundV1(chan) => { let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger); Some(SignerResumeUpdates { commitment_update: None, @@ -1258,7 +1258,7 @@ impl<'a, SP: Deref> ChannelPhase where shutdown_result: None, }) }, - ChannelPhase::UnfundedInboundV1(chan) => { + Channel::UnfundedInboundV1(chan) => { let logger = WithChannelContext::from(logger, &chan.context, None); let accept_channel = chan.signer_maybe_unblocked(&&logger); Some(SignerResumeUpdates { @@ -1275,16 +1275,16 @@ impl<'a, SP: Deref> ChannelPhase where shutdown_result: None, }) }, - ChannelPhase::UnfundedV2(_) => None, + Channel::UnfundedV2(_) => None, } } pub fn is_resumable(&self) -> bool { match self { - ChannelPhase::Funded(_) => false, - ChannelPhase::UnfundedOutboundV1(chan) => chan.is_resumable(), - ChannelPhase::UnfundedInboundV1(_) => false, - ChannelPhase::UnfundedV2(_) => false, + Channel::Funded(_) => false, + Channel::UnfundedOutboundV1(chan) => chan.is_resumable(), + Channel::UnfundedInboundV1(_) => false, + Channel::UnfundedV2(_) => false, } } @@ -1292,13 +1292,13 @@ impl<'a, SP: Deref> ChannelPhase where &mut self, chain_hash: ChainHash, logger: &L, ) -> Option where L::Target: Logger { match self { - ChannelPhase::Funded(_) => None, - ChannelPhase::UnfundedOutboundV1(chan) => { + Channel::Funded(_) => None, + Channel::UnfundedOutboundV1(chan) => { let logger = WithChannelContext::from(logger, &chan.context, None); chan.get_open_channel(chain_hash, &&logger) .map(|msg| OpenChannelMessage::V1(msg)) }, - ChannelPhase::UnfundedInboundV1(_) => { + Channel::UnfundedInboundV1(_) => { // Since unfunded inbound channel maps are cleared upon disconnecting a peer, // they are not persisted and won't be recovered after a crash. // Therefore, they shouldn't exist at this point. 
@@ -1306,7 +1306,7 @@ impl<'a, SP: Deref> ChannelPhase where None }, #[cfg(dual_funding)] - ChannelPhase::UnfundedV2(chan) => { + Channel::UnfundedV2(chan) => { if chan.context.is_outbound() { Some(OpenChannelMessage::V2(chan.get_open_channel_v2(chain_hash))) } else { @@ -1318,7 +1318,7 @@ impl<'a, SP: Deref> ChannelPhase where } }, #[cfg(not(dual_funding))] - ChannelPhase::UnfundedV2(_) => { + Channel::UnfundedV2(_) => { debug_assert!(false); None }, @@ -1333,15 +1333,15 @@ impl<'a, SP: Deref> ChannelPhase where L::Target: Logger, { match self { - ChannelPhase::Funded(_) => Ok(None), - ChannelPhase::UnfundedOutboundV1(chan) => { + Channel::Funded(_) => Ok(None), + Channel::UnfundedOutboundV1(chan) => { let logger = WithChannelContext::from(logger, &chan.context, None); chan.maybe_handle_error_without_close(chain_hash, fee_estimator, &&logger) .map(|msg| Some(OpenChannelMessage::V1(msg))) }, - ChannelPhase::UnfundedInboundV1(_) => Ok(None), + Channel::UnfundedInboundV1(_) => Ok(None), #[cfg(dual_funding)] - ChannelPhase::UnfundedV2(chan) => { + Channel::UnfundedV2(chan) => { if chan.context.is_outbound() { chan.maybe_handle_error_without_close(chain_hash, fee_estimator) .map(|msg| Some(OpenChannelMessage::V2(msg))) @@ -1350,7 +1350,7 @@ impl<'a, SP: Deref> ChannelPhase where } }, #[cfg(not(dual_funding))] - ChannelPhase::UnfundedV2(_) => { + Channel::UnfundedV2(_) => { debug_assert!(false); Ok(None) }, @@ -1358,43 +1358,43 @@ impl<'a, SP: Deref> ChannelPhase where } } -impl From> for ChannelPhase +impl From> for Channel where SP::Target: SignerProvider, ::EcdsaSigner: ChannelSigner, { fn from(channel: OutboundV1Channel) -> Self { - ChannelPhase::UnfundedOutboundV1(channel) + Channel::UnfundedOutboundV1(channel) } } -impl From> for ChannelPhase +impl From> for Channel where SP::Target: SignerProvider, ::EcdsaSigner: ChannelSigner, { fn from(channel: InboundV1Channel) -> Self { - ChannelPhase::UnfundedInboundV1(channel) + Channel::UnfundedInboundV1(channel) } } -impl From> for ChannelPhase +impl From> for Channel where SP::Target: SignerProvider, ::EcdsaSigner: ChannelSigner, { fn from(channel: PendingV2Channel) -> Self { - ChannelPhase::UnfundedV2(channel) + Channel::UnfundedV2(channel) } } -impl From> for ChannelPhase +impl From> for Channel where SP::Target: SignerProvider, ::EcdsaSigner: ChannelSigner, { fn from(channel: FundedChannel) -> Self { - ChannelPhase::Funded(channel) + Channel::Funded(channel) } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d64a1603a8d..06fe0ee33a4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -48,7 +48,7 @@ use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFun use crate::ln::inbound_payment; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{self, ChannelPhase, ChannelError, ChannelUpdateStatus, FundedChannel, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext}; +use crate::ln::channel::{self, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext}; #[cfg(any(dual_funding, splicing))] use crate::ln::channel::PendingV2Channel; use crate::ln::channel_state::ChannelDetails; @@ -1289,10 +1289,10 @@ impl Readable for Option { /// State we hold per-peer. 
pub(super) struct PeerState where SP::Target: SignerProvider { - /// `channel_id` -> `ChannelPhase` + /// `channel_id` -> `Channel` /// - /// Holds all channels within corresponding `ChannelPhase`s where the peer is the counterparty. - pub(super) channel_by_id: HashMap>, + /// Holds all channels where the peer is the counterparty. + pub(super) channel_by_id: HashMap>, /// `temporary_channel_id` -> `InboundChannelRequest`. /// /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where @@ -3219,7 +3219,7 @@ macro_rules! handle_monitor_update_completion { let mut peer_state = peer_state_mutex.lock().unwrap(); if let Some(chan) = peer_state.channel_by_id .get_mut(&channel_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding()); chan.set_batch_ready(); @@ -3668,7 +3668,7 @@ where panic!("RNG is bad???"); } }, - hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::from(channel)); } + hash_map::Entry::Vacant(entry) => { entry.insert(Channel::from(channel)); } } if let Some(msg) = res { @@ -3695,7 +3695,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; res.extend(peer_state.channel_by_id.iter() - // Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded. + // Only `Channels` in the `Channel::Funded` phase can be considered funded. .filter_map(|(chan_id, phase)| phase.as_funded().map(|chan| (chan_id, chan))) .filter(f) .map(|(_channel_id, channel)| { @@ -4259,7 +4259,7 @@ where } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.get_mut(&channel_id).and_then(ChannelPhase::as_funded_mut) { + match peer_state.channel_by_id.get_mut(&channel_id).and_then(Channel::as_funded_mut) { None => None, Some(chan) => Some(callback(chan)), } @@ -5036,7 +5036,7 @@ where let peer_state = &mut *peer_state_lock; let funding_txo; let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) - .map(ChannelPhase::into_unfunded_outbound_v1) + .map(Channel::into_unfunded_outbound_v1) { Some(Ok(mut chan)) => { macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { { @@ -5116,7 +5116,7 @@ where return Err(APIError::ChannelUnavailable { err }); } } - e.insert(ChannelPhase::from(chan)); + e.insert(Channel::from(chan)); } } Ok(()) @@ -5901,7 +5901,7 @@ where // The channel with the least amount of outbound liquidity will be used to maximize the // probability of being able to successfully forward a subsequent HTLC. let maybe_optimal_channel = peer_state.channel_by_id.values_mut() - .filter_map(ChannelPhase::as_funded_mut) + .filter_map(Channel::as_funded_mut) .filter_map(|chan| { let balances = chan.context.get_available_balances(&self.fee_estimator); if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat && @@ -5919,7 +5919,7 @@ where // Fall back to the specified channel to return an appropriate error. 
if let Some(chan) = peer_state.channel_by_id .get_mut(&forward_chan_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { chan } else { @@ -5950,7 +5950,7 @@ where if let Some(chan) = peer_state.channel_by_id .get_mut(&forward_chan_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { let failure_code = 0x1000|7; let data = self.get_htlc_inbound_temp_fail_data(failure_code); @@ -5971,7 +5971,7 @@ where HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => { if let Some(chan) = peer_state.channel_by_id .get_mut(&forward_chan_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); @@ -5984,7 +5984,7 @@ where HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { if let Some(chan) = peer_state.channel_by_id .get_mut(&forward_chan_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); @@ -6003,7 +6003,7 @@ where if let ChannelError::Ignore(msg) = e { if let Some(chan) = peer_state.channel_by_id .get_mut(&forward_chan_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); @@ -6314,7 +6314,7 @@ where let peer_state = &mut *peer_state_lock; if let Some(chan) = peer_state.channel_by_id .get_mut(&channel_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); } else { @@ -7581,7 +7581,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id .get_mut(channel_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { if chan.is_awaiting_monitor_update() { log_trace!(logger, "Channel is open and awaiting update, resuming it"); @@ -7689,7 +7689,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ msg, } }); - (*temporary_channel_id, ChannelPhase::from(channel), message_send_event) + (*temporary_channel_id, Channel::from(channel), message_send_event) }) }, #[cfg(dual_funding)] @@ -7713,7 +7713,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ node_id: channel.context.get_counterparty_node_id(), msg: channel.accept_inbound_dual_funded_channel() }; - (channel.context.channel_id(), ChannelPhase::from(channel), Some(message_send_event)) + (channel.context.channel_id(), Channel::from(channel), Some(message_send_event)) }) }, } @@ -7977,7 +7977,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ msg, } }); - (ChannelPhase::from(channel), message_send_event) + (Channel::from(channel), message_send_event) }, #[cfg(dual_funding)] OpenChannelMessageRef::V2(msg) => { @@ -7991,7 +7991,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ node_id: *counterparty_node_id, msg: channel.accept_inbound_dual_funded_channel(), }; - (ChannelPhase::from(channel), Some(message_send_event)) + (Channel::from(channel), Some(message_send_event)) }, }; @@ -8058,7 +8058,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; let (mut chan, funding_msg_opt, monitor) = match peer_state.channel_by_id.remove(&msg.temporary_channel_id) - .map(ChannelPhase::into_unfunded_inbound_v1) + .map(Channel::into_unfunded_inbound_v1) { Some(Ok(inbound_chan)) => { let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); @@ -8124,7 +8124,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); } - if let Some(chan) = e.insert(ChannelPhase::from(chan)).as_funded_mut() { + if let Some(chan) = e.insert(Channel::from(chan)).as_funded_mut() { handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); } else { @@ -8171,7 +8171,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // We really should be able to insert here without doing a second // lookup, but sadly rust stdlib doesn't currently allow keeping // the original Entry around with the value removed. - let chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::from(chan)); + let chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(Channel::from(chan)); if let Some(chan) = chan.as_funded_mut() { handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); } else { unreachable!(); } @@ -8203,7 +8203,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } - fn internal_tx_msg) -> Result>( + fn internal_tx_msg) -> Result>( &self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -8238,7 +8238,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } fn internal_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) -> Result<(), MsgHandleErrInternal> { - self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { + self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut Channel| { match channel_phase.as_unfunded_v2_mut() { Some(channel) => { Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id)) @@ -8249,7 +8249,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } fn internal_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) -> Result<(), MsgHandleErrInternal> { - self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { + self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut Channel| { match channel_phase.as_unfunded_v2_mut() { Some(channel) => { Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id)) @@ -8260,7 +8260,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } fn internal_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) -> Result<(), MsgHandleErrInternal> { - self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { + self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut Channel| { match channel_phase.as_unfunded_v2_mut() { Some(channel) => { Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id)) @@ -8271,7 +8271,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } fn internal_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) -> Result<(), MsgHandleErrInternal> { - self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase| { + self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut Channel| { match channel_phase.as_unfunded_v2_mut() { Some(channel) => { Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id)) @@ -8326,7 +8326,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .into())) }, }.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?; - peer_state.channel_by_id.insert(channel_id, ChannelPhase::from(channel)); + peer_state.channel_by_id.insert(channel_id, Channel::from(channel)); if let Some(funding_ready_for_sig_event) = funding_ready_for_sig_event_opt { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((funding_ready_for_sig_event, None)); @@ -8417,7 +8417,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Some(chan) => &mut chan.interactive_tx_constructor, None => if channel_phase.is_funded() { // TODO(splicing)/TODO(RBF): We'll also be doing interactive tx construction - // for a "ChannelPhase::Funded" when we want to bump the fee on an interactively + // for a "Channel::Funded" when we want to bump the fee on an interactively // constructed funding tx or during splicing. For now we send an error as we would // never ack an RBF attempt or a splice for now: try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn( @@ -8631,7 +8631,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); } - if let Some(chan) = chan_option.as_ref().and_then(ChannelPhase::as_funded) { + if let Some(chan) = chan_option.as_ref().and_then(Channel::as_funded) { if let Ok(update) = self.get_channel_update_for_broadcast(chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { @@ -9454,7 +9454,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); // Returns whether we should remove this channel as it's just been closed. 
- let unblock_chan = |phase: &mut ChannelPhase, pending_msg_events: &mut Vec| -> Option { + let unblock_chan = |phase: &mut Channel, pending_msg_events: &mut Vec| -> Option { let node_id = phase.context().get_counterparty_node_id(); match (phase.signer_maybe_unblocked(self.chain_hash, &self.logger), phase.as_funded()) { (Some(msgs), Some(chan)) => { @@ -10530,7 +10530,7 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for chan in peer_state.channel_by_id.values().filter_map(ChannelPhase::as_funded) { + for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) { for (htlc_source, _) in chan.inflight_htlc_sources() { if let HTLCSource::OutboundRoute { path, .. } = htlc_source { inflight_htlcs.process_path(path, self.get_our_node_id()); @@ -10905,7 +10905,7 @@ where for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for chan in peer_state.channel_by_id.values().filter_map(ChannelPhase::as_funded) { + for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) { let txid_opt = chan.context.get_funding_txo(); let height_opt = chan.context.get_funding_tx_confirmation_height(); let hash_opt = chan.context.get_funding_tx_confirmed_in(); @@ -11690,7 +11690,7 @@ where let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); if let Some(chan) = peer_state.channel_by_id .get(&msg.channel_id) - .and_then(ChannelPhase::as_funded) + .and_then(Channel::as_funded) { if let Some(msg) = chan.get_outbound_shutdown() { peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { @@ -12721,7 +12721,7 @@ where number_of_funded_channels += peer_state.channel_by_id .values() - .filter_map(ChannelPhase::as_funded) + .filter_map(Channel::as_funded) .filter(|chan| chan.context.is_funding_broadcast()) .count(); } @@ -12733,7 +12733,7 @@ where let peer_state = &mut *peer_state_lock; for channel in peer_state.channel_by_id .values() - .filter_map(ChannelPhase::as_funded) + .filter_map(Channel::as_funded) .filter(|channel| channel.context.is_funding_broadcast()) { channel.write(writer)?; @@ -13253,7 +13253,7 @@ where per_peer_state.entry(channel.context.get_counterparty_node_id()) .or_insert_with(|| Mutex::new(empty_peer_state())) .get_mut().unwrap() - .channel_by_id.insert(channel.context.channel_id(), ChannelPhase::from(channel)); + .channel_by_id.insert(channel.context.channel_id(), Channel::from(channel)); } } else if channel.is_awaiting_initial_mon_persist() { // If we were persisted and shut down while the initial ChannelMonitor persistence @@ -14252,7 +14252,7 @@ where let peer_state = &mut *peer_state_lock; if let Some(channel) = peer_state.channel_by_id .get_mut(&previous_channel_id) - .and_then(ChannelPhase::as_funded_mut) + .and_then(Channel::as_funded_mut) { let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash)); channel.claim_htlc_while_disconnected_dropping_mon_update_legacy( diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 726fed48b9f..e95b3334850 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3605,7 +3605,7 @@ macro_rules! 
get_channel_value_stat { ($node: expr, $counterparty_node: expr, $channel_id: expr) => {{ let peer_state_lock = $node.node.per_peer_state.read().unwrap(); let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); - let chan = chan_lock.channel_by_id.get(&$channel_id).and_then(ChannelPhase::as_funded).unwrap(); + let chan = chan_lock.channel_by_id.get(&$channel_id).and_then(Channel::as_funded).unwrap(); chan.get_value_stat() }} } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 53642bb2430..5a7f870078a 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -21,7 +21,7 @@ use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, Signe use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; -use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase}; +use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, Channel}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; @@ -216,7 +216,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); match channel_phase { - ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => { + Channel::UnfundedInboundV1(_) | Channel::UnfundedOutboundV1(_) => { let chan_context = channel_phase.context_mut(); chan_context.holder_selected_channel_reserve_satoshis = 0; chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000; @@ -734,7 +734,7 @@ fn test_update_fee_that_funder_cannot_afford() { let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let chan_signer = local_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, @@ -743,7 +743,7 @@ fn test_update_fee_that_funder_cannot_afford() { let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); + let remote_chan = 
chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -758,7 +758,7 @@ fn test_update_fee_that_funder_cannot_afford() { let res = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let local_chan_signer = local_chan.get_signer(); let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( @@ -1464,7 +1464,7 @@ fn test_fee_spike_violation_fails_htlc() { let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let chan_signer = local_chan.get_signer(); // Make the signer believe we validated another commitment, so we can release the secret chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; @@ -1478,7 +1478,7 @@ fn test_fee_spike_violation_fails_htlc() { let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -1507,7 +1507,7 @@ fn test_fee_spike_violation_fails_htlc() { let res = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let local_chan_signer = local_chan.get_signer(); let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( commitment_number, @@ -7774,7 +7774,7 @@ fn test_counterparty_raa_skip_no_crash() { { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let keys = guard.channel_by_id.get(&channel_id).and_then(ChannelPhase::as_funded).unwrap() + let keys = guard.channel_by_id.get(&channel_id).and_then(Channel::as_funded).unwrap() .get_signer(); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -9221,11 +9221,11 @@ fn test_duplicate_chan_id() { // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). 
 match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
- ChannelPhase::UnfundedOutboundV1(mut chan) => {
+ Channel::UnfundedOutboundV1(mut chan) => {
 let logger = test_utils::TestLogger::new();
 chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
 },
- _ => panic!("Unexpected ChannelPhase variant"),
+ _ => panic!("Unexpected Channel variant"),
 }.unwrap()
 };
 check_added_monitors!(nodes[0], 0);
@@ -9927,10 +9927,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 let mut node_0_per_peer_lock;
 let mut node_0_peer_state_lock;
 match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
- ChannelPhase::UnfundedOutboundV1(chan) => {
+ Channel::UnfundedOutboundV1(chan) => {
 chan.context.holder_dust_limit_satoshis = 546;
 },
- _ => panic!("Unexpected ChannelPhase variant"),
+ _ => panic!("Unexpected Channel variant"),
 }
 }

From 4579e63e9e37d964c74f93a819d9835fb27caee2 Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Wed, 8 Jan 2025 18:53:13 -0600
Subject: [PATCH 19/19] Remove unnecessary lifetime parameter

---
 lightning/src/ln/channel.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 275dcf0fbce..ef6ba41b959 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1134,11 +1134,11 @@ pub(super) enum Channel<SP: Deref> where SP::Target: SignerProvider {
 Funded(FundedChannel<SP>),
 }
-impl<'a, SP: Deref> Channel<SP> where
+impl<SP: Deref> Channel<SP> where
 SP::Target: SignerProvider,
 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
 {
- pub fn context(&'a self) -> &'a ChannelContext<SP> {
+ pub fn context(&self) -> &ChannelContext<SP> {
 match self {
 Channel::Funded(chan) => &chan.context,
 Channel::UnfundedOutboundV1(chan) => &chan.context,
@@ -1147,7 +1147,7 @@ impl<'a, SP: Deref> Channel<SP> where
 }
 }
- pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+ pub fn context_mut(&mut self) -> &mut ChannelContext<SP> {
 match self {
 Channel::Funded(ref mut chan) => &mut chan.context,
 Channel::UnfundedOutboundV1(ref mut chan) => &mut chan.context,