From c9069743fe8f1b75f329939362dfcb40420283f9 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 21 May 2025 22:55:05 -0400 Subject: [PATCH 1/4] Add start_batch message Instead of batching commitment_signed messages using a batch TLV, the splicing spec has been updated to introduce a start_batch message. It is used to indicate that the next batch_size messages for the channel_id should be treated as one logical message. This commit simply adds the message while the following commits will implement the handling logic. --- lightning/src/ln/msgs.rs | 17 +++++++++++++++++ lightning/src/ln/peer_handler.rs | 3 +++ lightning/src/ln/wire.rs | 10 ++++++++++ 3 files changed, 30 insertions(+) diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 6f452836c15..54ae4e9c16f 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -686,6 +686,18 @@ pub struct ClosingSigned { pub fee_range: Option, } +/// A [`start_batch`] message to be sent to group together multiple channel messages as a single +/// logical message. +/// +/// [`start_batch`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#batching-channel-messages +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct StartBatch { + /// The channel ID of all messages in the batch. + pub channel_id: ChannelId, + /// The number of messages to follow. + pub batch_size: u16, +} + /// An [`update_add_htlc`] message to be sent to or received from a peer. /// /// [`update_add_htlc`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#adding-an-htlc-update_add_htlc @@ -3097,6 +3109,11 @@ impl_writeable_msg!(PeerStorage, { data }, {}); impl_writeable_msg!(PeerStorageRetrieval, { data }, {}); +impl_writeable_msg!(StartBatch, { + channel_id, + batch_size +}, {}); + // Note that this is written as a part of ChannelManager objects, and thus cannot change its // serialization format in a way which assumes we know the total serialized length/message end // position.
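[Editorial note, not part of the patch] As a minimal sketch of what this message enables on the sender side, assuming only the two fields shown above (a later patch in this series adds an optional message_type field that would also need to be populated) and the lightning::ln::msgs / lightning::ln::types paths used elsewhere in the series; announce_batch is a hypothetical helper name:

use lightning::ln::msgs::StartBatch;
use lightning::ln::types::ChannelId;

// Announce that the next `batch_size` messages for `channel_id` should be
// treated by the receiver as one logical message.
fn announce_batch(channel_id: ChannelId, batch_size: u16) -> StartBatch {
    StartBatch { channel_id, batch_size }
}

The receiver is then expected to buffer exactly batch_size subsequent messages for that channel before processing them, which the following commits implement.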
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 5c3bfd48d55..c33914d3450 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -1901,6 +1901,9 @@ impl + wire::Message::StartBatch(_msg) => { + debug_assert!(false); + }, wire::Message::OpenChannel(msg) => { self.message_handler.chan_handler.handle_open_channel(their_node_id, &msg); }, diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs index 1bb7c7448a8..2dc54f852b5 100644 --- a/lightning/src/ln/wire.rs +++ b/lightning/src/ln/wire.rs @@ -82,6 +82,7 @@ pub(crate) enum Message { Shutdown(msgs::Shutdown), ClosingSigned(msgs::ClosingSigned), OnionMessage(msgs::OnionMessage), + StartBatch(msgs::StartBatch), UpdateAddHTLC(msgs::UpdateAddHTLC), UpdateFulfillHTLC(msgs::UpdateFulfillHTLC), UpdateFailHTLC(msgs::UpdateFailHTLC), @@ -142,6 +143,7 @@ impl Writeable for Message { &Message::Shutdown(ref msg) => msg.write(writer), &Message::ClosingSigned(ref msg) => msg.write(writer), &Message::OnionMessage(ref msg) => msg.write(writer), + &Message::StartBatch(ref msg) => msg.write(writer), &Message::UpdateAddHTLC(ref msg) => msg.write(writer), &Message::UpdateFulfillHTLC(ref msg) => msg.write(writer), &Message::UpdateFailHTLC(ref msg) => msg.write(writer), @@ -202,6 +204,7 @@ impl Type for Message { &Message::Shutdown(ref msg) => msg.type_id(), &Message::ClosingSigned(ref msg) => msg.type_id(), &Message::OnionMessage(ref msg) => msg.type_id(), + &Message::StartBatch(ref msg) => msg.type_id(), &Message::UpdateAddHTLC(ref msg) => msg.type_id(), &Message::UpdateFulfillHTLC(ref msg) => msg.type_id(), &Message::UpdateFailHTLC(ref msg) => msg.type_id(), @@ -350,6 +353,9 @@ where msgs::OnionMessage::TYPE => { Ok(Message::OnionMessage(LengthReadable::read_from_fixed_length_buffer(buffer)?)) }, + msgs::StartBatch::TYPE => { + Ok(Message::StartBatch(LengthReadable::read_from_fixed_length_buffer(buffer)?)) + }, msgs::UpdateAddHTLC::TYPE => { Ok(Message::UpdateAddHTLC(LengthReadable::read_from_fixed_length_buffer(buffer)?)) }, @@ -590,6 +596,10 @@ impl Encode for msgs::OnionMessage { const TYPE: u16 = 513; } +impl Encode for msgs::StartBatch { + const TYPE: u16 = 127; +} + impl Encode for msgs::UpdateAddHTLC { const TYPE: u16 = 128; } From c8672e26ab756537e7a26cab79a4e155a415110f Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 21 May 2025 23:39:22 -0400 Subject: [PATCH 2/4] Handle batched commitment_signed messages Update the commitment_signed message to contain only the funding_txid instead of a batch TLV containing both it and a batch_size. The spec was updated to batch messages using start_batch, which contains the batch_size. This commit also updates PeerManager to batch commitment_signed messages in this manner instead of the previous custom approach.
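[Editorial note, not part of the patch] A minimal sketch of the receive-side grouping this commit relies on: each commitment_signed in a batch carries a funding_txid, so a batch can be keyed by it to match one message per pending funding transaction. The helper name key_by_funding_txid and the &'static str error type are illustrative only; LDK's actual handling lives in the diff below.

use std::collections::BTreeMap;

use bitcoin::Txid;
use lightning::ln::msgs::CommitmentSigned;

// Key each commitment_signed in a batch by its funding_txid so that one message
// per pending funding transaction (e.g. during splicing) can be matched up.
// Fails if a message lacks a funding_txid or repeats one already seen.
fn key_by_funding_txid(
    batch: Vec<CommitmentSigned>,
) -> Result<BTreeMap<Txid, CommitmentSigned>, &'static str> {
    let mut keyed = BTreeMap::new();
    for msg in batch {
        let funding_txid = msg.funding_txid.ok_or("missing funding_txid")?;
        if keyed.insert(funding_txid, msg).is_some() {
            return Err("duplicate funding_txid");
        }
    }
    Ok(keyed)
}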
--- lightning/src/ln/channel.rs | 17 +--- lightning/src/ln/dual_funding_tests.rs | 2 +- lightning/src/ln/htlc_reserve_unit_tests.rs | 2 +- lightning/src/ln/msgs.rs | 44 +++------ lightning/src/ln/peer_handler.rs | 103 ++++++++++++++++---- lightning/src/ln/update_fee_tests.rs | 4 +- 6 files changed, 103 insertions(+), 69 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 7f33937f2dd..1cc2b574c23 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -4978,7 +4978,7 @@ impl ChannelContext where SP::Target: SignerProvider { channel_id: self.channel_id, htlc_signatures: vec![], signature, - batch: None, + funding_txid: funding.get_funding_txo().map(|funding_txo| funding_txo.txid), #[cfg(taproot)] partial_signature_with_nonce: None, }) @@ -5948,10 +5948,6 @@ impl FundedChannel where ))); } - if msg.batch.is_some() { - return Err(ChannelError::close("Peer sent initial commitment_signed with a batch".to_owned())); - } - let holder_commitment_point = &mut self.holder_commitment_point.clone(); self.context.assert_no_commitment_advancement(holder_commitment_point.transaction_number(), "initial commitment_signed"); @@ -9349,20 +9345,11 @@ impl FundedChannel where } } - let batch = if self.pending_funding.is_empty() { None } else { - Some(msgs::CommitmentSignedBatch { - batch_size: self.pending_funding.len() as u16 + 1, - funding_txid: funding - .get_funding_txo() - .expect("splices should have their funding transactions negotiated before exiting quiescence while un-negotiated splices are discarded on reload") - .txid, - }) - }; Ok(msgs::CommitmentSigned { channel_id: self.context.channel_id, signature, htlc_signatures, - batch, + funding_txid: funding.get_funding_txo().map(|funding_txo| funding_txo.txid), #[cfg(taproot)] partial_signature_with_nonce: None, }) diff --git a/lightning/src/ln/dual_funding_tests.rs b/lightning/src/ln/dual_funding_tests.rs index 6a7ef317ba8..ed770d06e6d 100644 --- a/lightning/src/ln/dual_funding_tests.rs +++ b/lightning/src/ln/dual_funding_tests.rs @@ -185,7 +185,7 @@ fn do_test_v2_channel_establishment(session: V2ChannelEstablishmentTestSession) ) .unwrap(), htlc_signatures: vec![], - batch: None, + funding_txid: None, #[cfg(taproot)] partial_signature_with_nonce: None, }; diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index aee764682a2..111e0b404de 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -899,7 +899,7 @@ pub fn test_fee_spike_violation_fails_htlc() { channel_id: chan.2, signature: res.0, htlc_signatures: res.1, - batch: None, + funding_txid: None, #[cfg(taproot)] partial_signature_with_nonce: None, }; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 54ae4e9c16f..b01ca4a4d99 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -807,15 +807,6 @@ pub struct UpdateFailMalformedHTLC { pub failure_code: u16, } -/// Optional batch parameters for `commitment_signed` message. -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct CommitmentSignedBatch { - /// Batch size N: all N `commitment_signed` messages must be received before being processed - pub batch_size: u16, - /// The funding transaction, to discriminate among multiple pending funding transactions (e.g. in case of splicing) - pub funding_txid: Txid, -} - /// A [`commitment_signed`] message to be sent to or received from a peer. 
/// /// [`commitment_signed`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#committing-updates-so-far-commitment_signed @@ -827,8 +818,8 @@ pub struct CommitmentSigned { pub signature: Signature, /// Signatures on the HTLC transactions pub htlc_signatures: Vec, - /// Optional batch size and other parameters - pub batch: Option, + /// The funding transaction, to discriminate among multiple pending funding transactions (e.g. in case of splicing) + pub funding_txid: Option, #[cfg(taproot)] /// The partial Taproot signature on the commitment transaction pub partial_signature_with_nonce: Option, @@ -1986,15 +1977,14 @@ pub trait ChannelMessageHandler: BaseMessageHandler { ) { assert!(!batch.is_empty()); if batch.len() == 1 { - assert!(batch[0].batch.is_none()); self.handle_commitment_signed(their_node_id, &batch[0]); } else { let channel_id = batch[0].channel_id; let batch: BTreeMap = batch .iter() .cloned() - .map(|mut cs| { - let funding_txid = cs.batch.take().unwrap().funding_txid; + .map(|cs| { + let funding_txid = cs.funding_txid.unwrap(); (funding_txid, cs) }) .collect(); @@ -2768,18 +2758,14 @@ impl_writeable!(ClosingSignedFeeRange, { max_fee_satoshis }); -impl_writeable_msg!(CommitmentSignedBatch, { - batch_size, - funding_txid, -}, {}); - #[cfg(not(taproot))] impl_writeable_msg!(CommitmentSigned, { channel_id, signature, htlc_signatures }, { - (0, batch, option), + // TODO(splicing): Change this to 1 once the spec is finalized + (1001, funding_txid, option), }); #[cfg(taproot)] @@ -2788,8 +2774,9 @@ impl_writeable_msg!(CommitmentSigned, { signature, htlc_signatures }, { - (0, batch, option), (2, partial_signature_with_nonce, option), + // TODO(splicing): Change this to 1 and reorder once the spec is finalized + (1001, funding_txid, option), }); impl_writeable!(DecodedOnionErrorPacket, { @@ -5649,13 +5636,10 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), signature: sig_1, htlc_signatures: if htlcs { vec![sig_2, sig_3, sig_4] } else { Vec::new() }, - batch: Some(msgs::CommitmentSignedBatch { - batch_size: 3, - funding_txid: Txid::from_str( - "c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e", - ) - .unwrap(), - }), + funding_txid: Some( + Txid::from_str("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e") + .unwrap(), + ), #[cfg(taproot)] partial_signature_with_nonce: None, }; @@ -5666,7 +5650,9 @@ mod tests { } else { target_value += "0000"; } - target_value += "002200036e96fe9f8b0ddcd729ba03cfafa5a27b050b39d354dd980814268dfa9a44d4c2"; // batch + target_value += "fd03e9"; // Type (funding_txid) + target_value += "20"; // Length (funding_txid) + target_value += "6e96fe9f8b0ddcd729ba03cfafa5a27b050b39d354dd980814268dfa9a44d4c2"; // Value assert_eq!(encoded_value.as_hex().to_string(), target_value); } diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index c33914d3450..b92b75d4327 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -620,7 +620,7 @@ struct Peer { inbound_connection: bool, - commitment_signed_batch: Option<(ChannelId, BTreeMap)>, + commitment_signed_batch: Option<(ChannelId, usize, BTreeMap)>, } impl Peer { @@ -1772,41 +1772,95 @@ impl COMMITMENT_SIGNED_BATCH_LIMIT { + let error = format!("Peer {} sent start_batch for channel {} exceeding the limit", log_pubkey!(their_node_id), &msg.channel_id); + log_debug!(logger, "{}", error); + return Err(LightningError { + err: error.clone(), + action: msgs::ErrorAction::DisconnectPeerWithWarning {
msg: msgs::WarningMessage { + channel_id: msg.channel_id, + data: error, + }, + }, + }.into()); + } + + peer_lock.commitment_signed_batch = Some((msg.channel_id, batch_size, BTreeMap::new())); + + return Ok(None); + } + + if let wire::Message::CommitmentSigned(msg) = message { + if let Some((channel_id, batch_size, buffer)) = &mut peer_lock.commitment_signed_batch { if msg.channel_id != *channel_id { - log_debug!(logger, "Peer {} sent batched commitment_signed for the wrong channel (expected: {}, actual: {})", log_pubkey!(their_node_id), channel_id, &msg.channel_id); - return Err(PeerHandleError { }.into()); + let error = format!("Peer {} sent batched commitment_signed for the wrong channel (expected: {}, actual: {})", log_pubkey!(their_node_id), channel_id, &msg.channel_id); + log_debug!(logger, "{}", error); + return Err(LightningError { + err: error.clone(), + action: msgs::ErrorAction::DisconnectPeerWithWarning { + msg: msgs::WarningMessage { + channel_id: msg.channel_id, + data: error, + }, + }, + }.into()); } - const COMMITMENT_SIGNED_BATCH_LIMIT: usize = 100; - if buffer.len() == COMMITMENT_SIGNED_BATCH_LIMIT { - log_debug!(logger, "Peer {} sent batched commitment_signed for channel {} exceeding the limit", log_pubkey!(their_node_id), channel_id); - return Err(PeerHandleError { }.into()); - } + let funding_txid = match msg.funding_txid { + Some(funding_txid) => funding_txid, + None => { + log_debug!(logger, "Peer {} sent batched commitment_signed without a funding_txid for channel {}", log_pubkey!(their_node_id), channel_id); + return Err(PeerHandleError { }.into()); + }, + }; - let batch_size = batch.batch_size as usize; - match buffer.entry(batch.funding_txid) { + match buffer.entry(funding_txid) { btree_map::Entry::Vacant(entry) => { entry.insert(msg); }, btree_map::Entry::Occupied(_) => { - log_debug!(logger, "Peer {} sent batched commitment_signed with duplicate funding_txid {} for channel {}", log_pubkey!(their_node_id), channel_id, &batch.funding_txid); + log_debug!(logger, "Peer {} sent batched commitment_signed with duplicate funding_txid {} for channel {}", log_pubkey!(their_node_id), funding_txid, channel_id); return Err(PeerHandleError { }.into()); } } - if buffer.len() >= batch_size { - let (channel_id, batch) = peer_lock.commitment_signed_batch.take().expect("batch should have been inserted"); + if buffer.len() == *batch_size { + let (channel_id, _, batch) = peer_lock.commitment_signed_batch.take().expect("batch should have been inserted"); return Ok(Some(LogicalMessage::CommitmentSignedBatch(channel_id, batch))); } else { return Ok(None); } - } else if peer_lock.commitment_signed_batch.is_some() { - log_debug!(logger, "Peer {} sent non-batched commitment_signed for channel {} when expecting batched commitment_signed", log_pubkey!(their_node_id), &msg.channel_id); - return Err(PeerHandleError { }.into()); } else { return Ok(Some(LogicalMessage::FromWire(wire::Message::CommitmentSigned(msg)))); } @@ -2407,6 +2461,13 @@ impl 1 { + let msg = msgs::StartBatch { + channel_id: *channel_id, + batch_size: commitment_signed.len() as u16, + }; + self.enqueue_message(&mut *peer, &msg); + } for msg in commitment_signed { self.enqueue_message(&mut *peer, msg); } diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 1512444c830..27a1035e639 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -520,7 +520,7 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann channel_id: 
chan.2, signature: res.0, htlc_signatures: res.1, - batch: None, + funding_txid: None, #[cfg(taproot)] partial_signature_with_nonce: None, }; @@ -621,7 +621,7 @@ pub fn test_update_fee_that_saturates_subs() { channel_id: chan_id, signature: res.0, htlc_signatures: res.1, - batch: None, + funding_txid: None, #[cfg(taproot)] partial_signature_with_nonce: None, }; From d4bbd4224999dc70263a3e3f87cc5815845559ae Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 23 May 2025 12:47:00 -0400 Subject: [PATCH 3/4] Add support for batches of other types While the spec only includes commitment_signed messages in batches, there may be other types of batches in the future. Generalize the message batching code to allow for other types. --- lightning/src/ln/msgs.rs | 6 ++- lightning/src/ln/peer_handler.rs | 80 ++++++++++++++++++++++++++------ 2 files changed, 71 insertions(+), 15 deletions(-) diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index b01ca4a4d99..05368d07546 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -696,6 +696,8 @@ pub struct StartBatch { pub channel_id: ChannelId, /// The number of messages to follow. pub batch_size: u16, + /// The type of all messages expected in the batch. + pub message_type: Option, } /// An [`update_add_htlc`] message to be sent to or received from a peer. @@ -3099,7 +3101,9 @@ impl_writeable_msg!(PeerStorageRetrieval, { data }, {}); impl_writeable_msg!(StartBatch, { channel_id, batch_size -}, {}); +}, { + (1, message_type, option) +}); // Note that this is written as a part of ChannelManager objects, and thus cannot change its // serialization format in a way which assumes we know the total serialized length/message end diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index b92b75d4327..5fa542fedb5 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -539,6 +539,24 @@ enum InitSyncTracker{ NodesSyncing(NodeId), } +/// A batch of messages initiated when receiving a `start_batch` message. +struct MessageBatch { + /// The channel associated with all the messages in the batch. + channel_id: ChannelId, + + /// The number of messages expected to be in the batch. + batch_size: usize, + + /// The batch of messages, which should all be of the same type. + messages: MessageBatchImpl, +} + +/// The representation of the message batch, which may differ for each message type. +enum MessageBatchImpl { + /// A batch of `commitment_signed` messages, where each has a unique `funding_txid`. + CommitmentSigned(BTreeMap), +} + /// The ratio between buffer sizes at which we stop sending initial sync messages vs when we stop /// forwarding gossip messages to peers altogether.
const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2; @@ -620,7 +638,7 @@ struct Peer { inbound_connection: bool, - commitment_signed_batch: Option<(ChannelId, usize, BTreeMap)>, + message_batch: Option, } impl Peer { @@ -1159,7 +1177,7 @@ impl { + MessageBatchImpl::CommitmentSigned(BTreeMap::new()) + }, + _ => { + let error = format!("Peer {} sent start_batch for channel {} without a known message type", log_pubkey!(their_node_id), &msg.channel_id); + log_debug!(logger, "{}", error); + return Err(LightningError { + err: error.clone(), + action: msgs::ErrorAction::DisconnectPeerWithWarning { + msg: msgs::WarningMessage { + channel_id: msg.channel_id, + data: error, + }, + }, + }.into()); + }, + }; + + let message_batch = MessageBatch { + channel_id: msg.channel_id, + batch_size, + messages, + }; + peer_lock.message_batch = Some(message_batch); return Ok(None); } if let wire::Message::CommitmentSigned(msg) = message { - if let Some((channel_id, batch_size, buffer)) = &mut peer_lock.commitment_signed_batch { - if msg.channel_id != *channel_id { - let error = format!("Peer {} sent batched commitment_signed for the wrong channel (expected: {}, actual: {})", log_pubkey!(their_node_id), channel_id, &msg.channel_id); + if let Some(message_batch) = &mut peer_lock.message_batch { + let MessageBatchImpl::CommitmentSigned(ref mut buffer) = &mut message_batch.messages; + + if msg.channel_id != message_batch.channel_id { + let error = format!("Peer {} sent batched commitment_signed for the wrong channel (expected: {}, actual: {})", log_pubkey!(their_node_id), message_batch.channel_id, &msg.channel_id); log_debug!(logger, "{}", error); return Err(LightningError { err: error.clone(), @@ -1842,7 +1886,7 @@ impl funding_txid, None => { - log_debug!(logger, "Peer {} sent batched commitment_signed without a funding_txid for channel {}", log_pubkey!(their_node_id), channel_id); + log_debug!(logger, "Peer {} sent batched commitment_signed without a funding_txid for channel {}", log_pubkey!(their_node_id), message_batch.channel_id); return Err(PeerHandleError { }.into()); }, }; @@ -1850,13 +1894,15 @@ impl { entry.insert(msg); }, btree_map::Entry::Occupied(_) => { - log_debug!(logger, "Peer {} sent batched commitment_signed with duplicate funding_txid {} for channel {}", log_pubkey!(their_node_id), funding_txid, channel_id); + log_debug!(logger, "Peer {} sent batched commitment_signed with duplicate funding_txid {} for channel {}", log_pubkey!(their_node_id), funding_txid, message_batch.channel_id); return Err(PeerHandleError { }.into()); } } - if buffer.len() == *batch_size { - let (channel_id, _, batch) = peer_lock.commitment_signed_batch.take().expect("batch should have been inserted"); + if buffer.len() == message_batch.batch_size { + let MessageBatch { channel_id, batch_size: _, messages } = peer_lock.message_batch.take().expect("batch should have been inserted"); + let MessageBatchImpl::CommitmentSigned(batch) = messages; + return Ok(Some(LogicalMessage::CommitmentSignedBatch(channel_id, batch))); } else { return Ok(None); @@ -1864,8 +1910,13 @@ impl { + log_debug!(logger, "Peer {} sent an unexpected message for a commitment_signed batch", log_pubkey!(their_node_id)); + }, + } + return Err(PeerHandleError { }.into()); } @@ -2465,6 +2516,7 @@ impl Date: Fri, 23 May 2025 16:46:24 -0400 Subject: [PATCH 4/4] Fail channel for batched commitment_signed appropriately If a message in a commitment_signed batch does not contain a funding_txid or has duplicates, the channel should be failed. 
Move this check from PeerManager to FundedChannel such that this can be done. --- lightning-net-tokio/src/lib.rs | 6 ++--- lightning/src/ln/channel.rs | 23 +++++++++++++++++--- lightning/src/ln/channelmanager.rs | 6 ++--- lightning/src/ln/msgs.rs | 15 ++----------- lightning/src/ln/peer_handler.rs | 35 ++++++++---------------------- lightning/src/util/test_utils.rs | 4 +--- 6 files changed, 37 insertions(+), 52 deletions(-) diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 95b83b105ac..888915f43e2 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -622,7 +622,7 @@ impl Hash for SocketDescriptor { mod tests { use bitcoin::constants::ChainHash; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; - use bitcoin::{Network, Txid}; + use bitcoin::Network; use lightning::ln::msgs::*; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, PeerManager}; use lightning::ln::types::ChannelId; @@ -632,7 +632,6 @@ mod tests { use tokio::sync::mpsc; - use std::collections::BTreeMap; use std::mem; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; @@ -726,8 +725,7 @@ mod tests { } fn handle_commitment_signed(&self, _their_node_id: PublicKey, _msg: &CommitmentSigned) {} fn handle_commitment_signed_batch( - &self, _their_node_id: PublicKey, _channel_id: ChannelId, - _batch: BTreeMap, + &self, _their_node_id: PublicKey, _channel_id: ChannelId, _batch: Vec, ) { } fn handle_revoke_and_ack(&self, _their_node_id: PublicKey, _msg: &RevokeAndACK) {} diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1cc2b574c23..dbe890649e7 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -68,7 +68,7 @@ use crate::util::errors::APIError; use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure}; use crate::util::scid_utils::scid_from_parts; -use alloc::collections::BTreeMap; +use alloc::collections::{btree_map, BTreeMap}; use crate::io; use crate::prelude::*; @@ -5987,18 +5987,35 @@ impl FundedChannel where self.commitment_signed_update_monitor(updates, logger) } - pub fn commitment_signed_batch(&mut self, batch: &BTreeMap, logger: &L) -> Result, ChannelError> + pub fn commitment_signed_batch(&mut self, batch: Vec, logger: &L) -> Result, ChannelError> where L::Target: Logger { self.commitment_signed_check_state()?; + let mut messages = BTreeMap::new(); + for msg in batch { + let funding_txid = match msg.funding_txid { + Some(funding_txid) => funding_txid, + None => { + return Err(ChannelError::close("Peer sent batched commitment_signed without a funding_txid".to_string())); + }, + }; + + match messages.entry(funding_txid) { + btree_map::Entry::Vacant(entry) => { entry.insert(msg); }, + btree_map::Entry::Occupied(_) => { + return Err(ChannelError::close(format!("Peer sent batched commitment_signed with duplicate funding_txid {}", funding_txid))); + } + } + } + // Any commitment_signed not associated with a FundingScope is ignored below if a // pending splice transaction has confirmed since receiving the batch. 
let updates = core::iter::once(&self.funding) .chain(self.pending_funding.iter()) .map(|funding| { let funding_txid = funding.get_funding_txo().unwrap().txid; - let msg = batch + let msg = messages .get(&funding_txid) .ok_or_else(|| ChannelError::close(format!("Peer did not send a commitment_signed for pending splice transaction: {}", funding_txid)))?; self.context diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b018c6c74cd..bea4275ed01 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9263,7 +9263,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } #[rustfmt::skip] - fn internal_commitment_signed_batch(&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, batch: &BTreeMap) -> Result<(), MsgHandleErrInternal> { + fn internal_commitment_signed_batch(&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, batch: Vec) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -12330,9 +12330,9 @@ where } #[rustfmt::skip] - fn handle_commitment_signed_batch(&self, counterparty_node_id: PublicKey, channel_id: ChannelId, batch: BTreeMap) { + fn handle_commitment_signed_batch(&self, counterparty_node_id: PublicKey, channel_id: ChannelId, batch: Vec) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - let _ = handle_error!(self, self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, &batch), counterparty_node_id); + let _ = handle_error!(self, self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, batch), counterparty_node_id); } #[rustfmt::skip] diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 05368d07546..dcafc27d482 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -47,8 +47,6 @@ use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; #[allow(unused_imports)] use crate::prelude::*; -use alloc::collections::BTreeMap; - use crate::io::{self, Cursor, Read}; use crate::io_extras::read_to_end; use core::fmt; @@ -1967,8 +1965,7 @@ pub trait ChannelMessageHandler: BaseMessageHandler { fn handle_commitment_signed(&self, their_node_id: PublicKey, msg: &CommitmentSigned); /// Handle a batch of incoming `commitment_signed` message from the given peer. fn handle_commitment_signed_batch( - &self, their_node_id: PublicKey, channel_id: ChannelId, - batch: BTreeMap, + &self, their_node_id: PublicKey, channel_id: ChannelId, batch: Vec, ); /// Handle an incoming `revoke_and_ack` message from the given peer. fn handle_revoke_and_ack(&self, their_node_id: PublicKey, msg: &RevokeAndACK); @@ -1982,15 +1979,7 @@ pub trait ChannelMessageHandler: BaseMessageHandler { self.handle_commitment_signed(their_node_id, &batch[0]); } else { let channel_id = batch[0].channel_id; - let batch: BTreeMap = batch - .iter() - .cloned() - .map(|cs| { - let funding_txid = cs.funding_txid.unwrap(); - (funding_txid, cs) - }) - .collect(); - self.handle_commitment_signed_batch(their_node_id, channel_id, batch); + self.handle_commitment_signed_batch(their_node_id, channel_id, batch.clone()); } } diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 5fa542fedb5..92fc73d64f9 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -17,7 +17,6 @@ //! 
call into the provided message handlers (probably a ChannelManager and P2PGossipSync) with //! messages they should handle, and encoding/sending response messages. -use bitcoin::Txid; use bitcoin::constants::ChainHash; use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey}; @@ -44,8 +43,6 @@ use crate::util::string::PrintableString; #[allow(unused_imports)] use crate::prelude::*; -use alloc::collections::{btree_map, BTreeMap}; use crate::io; use crate::sync::{Mutex, MutexGuard, FairRwLock}; use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering}; @@ -337,8 +334,7 @@ impl ChannelMessageHandler for ErroringMessageHandler { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_commitment_signed_batch( - &self, their_node_id: PublicKey, channel_id: ChannelId, - _batch: BTreeMap, + &self, their_node_id: PublicKey, channel_id: ChannelId, _batch: Vec, ) { ErroringMessageHandler::push_error(self, their_node_id, channel_id); } @@ -553,8 +549,8 @@ struct MessageBatch { /// The representation of the message batch, which may differ for each message type. enum MessageBatchImpl { - /// A batch of `commitment_signed` messages, where each has a unique `funding_txid`. - CommitmentSigned(BTreeMap), + /// A batch of `commitment_signed` messages used when there are pending splices. + CommitmentSigned(Vec), } /// The ratio between buffer sizes at which we stop sending initial sync messages vs when we stop @@ -891,7 +887,7 @@ pub struct PeerManager { FromWire(wire::Message), - CommitmentSignedBatch(ChannelId, BTreeMap), + CommitmentSignedBatch(ChannelId, Vec), } enum MessageHandlingError { @@ -1838,7 +1834,8 @@ impl { - MessageBatchImpl::CommitmentSigned(BTreeMap::new()) + let messages = Vec::with_capacity(batch_size); + MessageBatchImpl::CommitmentSigned(messages) }, _ => { let error = format!("Peer {} sent start_batch for channel {} without a known message type", log_pubkey!(their_node_id), &msg.channel_id); @@ -1867,7 +1864,7 @@ impl funding_txid, - None => { - log_debug!(logger, "Peer {} sent batched commitment_signed without a funding_txid for channel {}", log_pubkey!(their_node_id), message_batch.channel_id); - return Err(PeerHandleError { }.into()); - }, - }; - - match buffer.entry(funding_txid) { - btree_map::Entry::Vacant(entry) => { entry.insert(msg); }, - btree_map::Entry::Occupied(_) => { - log_debug!(logger, "Peer {} sent batched commitment_signed with duplicate funding_txid {} for channel {}", log_pubkey!(their_node_id), funding_txid, message_batch.channel_id); - return Err(PeerHandleError { }.into()); - } - } + messages.push(msg); - if buffer.len() == message_batch.batch_size { + if messages.len() == message_batch.batch_size { let MessageBatch { channel_id, batch_size: _, messages } = peer_lock.message_batch.take().expect("batch should have been inserted"); let MessageBatchImpl::CommitmentSigned(batch) = messages; diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 00b85cc1ef8..a6f0b38a4fb 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -79,8 +79,6 @@ use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey}; use lightning_invoice::RawBolt11Invoice; -use alloc::collections::BTreeMap; - use crate::io; use crate::prelude::*; use crate::sign::{EntropySource, NodeSigner, RandomBytes, Recipient, SignerProvider}; @@ -1057,7 +1055,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler { } fn handle_commitment_signed_batch( &self, _their_node_id:
PublicKey, _channel_id: ChannelId, - _batch: BTreeMap, + _batch: Vec, ) { unreachable!() }
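[Editorial note, not part of the patches] A simplified sketch of the receive-side accumulation PeerManager performs after this series: a start_batch opens a buffer, subsequent commitment_signed messages for the same channel are collected, and the completed batch is handed off once batch_size messages have arrived. PendingBatch is a hypothetical stand-in for the MessageBatch state added above; the real code additionally enforces a batch-size limit, validates the channel_id and message type, and disconnects the peer on violations, while per-funding_txid validation now happens in FundedChannel.

use lightning::ln::msgs::CommitmentSigned;
use lightning::ln::types::ChannelId;

// Buffers commitment_signed messages announced by a start_batch until the
// expected number has been received, then yields them as one logical batch.
struct PendingBatch {
    channel_id: ChannelId,
    batch_size: usize,
    messages: Vec<CommitmentSigned>,
}

impl PendingBatch {
    fn new(channel_id: ChannelId, batch_size: u16) -> Self {
        let batch_size = batch_size as usize;
        Self { channel_id, batch_size, messages: Vec::with_capacity(batch_size) }
    }

    // Returns Some(batch) once `batch_size` messages have been buffered.
    fn push(&mut self, msg: CommitmentSigned) -> Option<Vec<CommitmentSigned>> {
        debug_assert_eq!(msg.channel_id, self.channel_id);
        self.messages.push(msg);
        if self.messages.len() == self.batch_size {
            Some(core::mem::take(&mut self.messages))
        } else {
            None
        }
    }
}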