diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 633652222d1..7a992f64f1e 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -37,6 +37,7 @@ use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use util::logger::Logger;
 use util::errors::APIError;
 use util::config::{UserConfig,ChannelConfig};
+use util::scid_utils::scid_from_parts;
 
 use std;
 use std::default::Default;
@@ -3556,14 +3557,11 @@ impl Channel {
 					}
 				}
 			}
-			if height > 0xff_ff_ff || (index_in_block) > 0xff_ff_ff {
-				panic!("Block was bogus - either height 16 million or had > 16 million transactions");
-			}
-			assert!(txo_idx <= 0xffff); // txo_idx is a (u16 as usize), so this is just listed here for completeness
 			self.funding_tx_confirmations = 1;
-			self.short_channel_id = Some(((height as u64)         << (5*8)) |
-			                             ((index_in_block as u64) << (2*8)) |
-			                             ((txo_idx as u64)        << (0*8)));
+			self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
+				Ok(scid) => Some(scid),
+				Err(_) => panic!("Block was bogus - either the height was > 16 million, the block had > 16 million transactions, or the output index was > 65k"),
+			}
 		}
 	}
 }
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b56ce5d9d05..3109b852905 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3499,6 +3499,7 @@ impl
 					true,
 				&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
 				&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+				&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
 			}
 		});
 	}
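For context (not part of the patch): the byte-offset shifts removed from channel.rs and the bit shifts used by scid_from_parts produce the same packing. A minimal standalone sketch of the equivalence, with the helper's packing inlined and arbitrary values:

// Equivalence sketch: byte-offset shifts vs. the 3-byte/3-byte/2-byte SCID layout.
fn main() {
	let (height, index_in_block, txo_idx): (u64, u64, u64) = (500_000, 1026, 1);

	// Old formulation removed above: shifts expressed in bytes.
	let old = (height << (5 * 8)) | (index_in_block << (2 * 8)) | (txo_idx << (0 * 8));

	// New formulation, matching scid_from_parts: block | tx index | vout.
	let new = (height << 40) | (index_in_block << 16) | txo_idx;

	assert_eq!(old, new);
	println!("{:#x}", new); // 0x7a1200004020001: block 0x7a120, tx 0x402, vout 0x1
}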
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 3042b4d32e0..a9c4bc3d6f5 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -1682,6 +1682,19 @@ impl Writeable for ReplyShortChannelIdsEnd {
 	}
 }
 
+impl QueryChannelRange {
+	/**
+	 * Calculates the overflow-safe ending block height for the query.
+	 * Overflow returns `0xffffffff`, otherwise returns `first_blocknum + number_of_blocks`.
+	 */
+	pub fn end_blocknum(&self) -> u32 {
+		match self.first_blocknum.checked_add(self.number_of_blocks) {
+			Some(block) => block,
+			None => u32::max_value(),
+		}
+	}
+}
+
 impl Readable for QueryChannelRange {
 	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
 		let chain_hash: BlockHash = Readable::read(r)?;
@@ -2523,6 +2536,24 @@ mod tests {
 		assert_eq!(msg.outgoing_cltv_value, 0xffffffff);
 	}
 
+	#[test]
+	fn query_channel_range_end_blocknum() {
+		let tests: Vec<(u32, u32, u32)> = vec![
+			(10000, 1500, 11500),
+			(0, 0xffffffff, 0xffffffff),
+			(1, 0xffffffff, 0xffffffff),
+		];
+
+		for (first_blocknum, number_of_blocks, expected) in tests.into_iter() {
+			let sut = msgs::QueryChannelRange {
+				chain_hash: BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap(),
+				first_blocknum,
+				number_of_blocks,
+			};
+			assert_eq!(sut.end_blocknum(), expected);
+		}
+	}
+
 	#[test]
 	fn encoding_query_channel_range() {
 		let mut query_channel_range = msgs::QueryChannelRange {
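As a quick standalone sanity check (not part of the patch), the overflow clamp end_blocknum relies on can be exercised in isolation; the vectors reuse the test values above:

// Overflow-clamp sketch mirroring QueryChannelRange::end_blocknum.
fn end_blocknum(first_blocknum: u32, number_of_blocks: u32) -> u32 {
	match first_blocknum.checked_add(number_of_blocks) {
		Some(block) => block,
		None => u32::max_value(),
	}
}

fn main() {
	assert_eq!(end_blocknum(10000, 1500), 11500);        // fits in a u32
	assert_eq!(end_blocknum(0, 0xffffffff), 0xffffffff); // exact fit, no clamp needed
	assert_eq!(end_blocknum(1, 0xffffffff), 0xffffffff); // would overflow, clamps to u32::MAX
}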
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index d37476327ba..dacae671aaa 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1281,6 +1281,17 @@ impl PeerManager {
+					MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
+						log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+							log_pubkey!(node_id),
+							msg.short_channel_ids.len(),
+							msg.first_blocknum,
+							msg.number_of_blocks,
+							msg.sync_complete);
+						let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {});
+						peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+						self.do_attempt_write_data(&mut descriptor, peer);
+					}
 				}
 			}
diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs
index 13dc662642c..04e4eb14770 100644
--- a/lightning/src/routing/network_graph.rs
+++ b/lightning/src/routing/network_graph.rs
@@ -30,6 +30,7 @@ use ln::msgs;
 use util::ser::{Writeable, Readable, Writer};
 use util::logger::Logger;
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
+use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
 
 use std::{cmp, fmt};
 use std::sync::{RwLock, RwLockReadGuard};
@@ -44,6 +45,10 @@ use bitcoin::hashes::hex::ToHex;
 /// refuse to relay the message.
 const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;
 
+/// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
+/// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations.
+const MAX_SCIDS_PER_REPLY: usize = 8000;
+
 /// Represents the network as nodes and channels between them
#[derive(Clone, PartialEq)]
 pub struct NetworkGraph {
@@ -312,12 +317,93 @@ impl RoutingMessageHandler for N
 		Ok(())
 	}
 
-	fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> {
-		// TODO
-		Err(LightningError {
-			err: String::from("Not implemented"),
-			action: ErrorAction::IgnoreError,
-		})
+	/// Processes a query from a peer by finding announced/public channels whose funding UTXOs
+	/// are in the specified block range. Due to message size limits, large range
+	/// queries may result in several reply messages. This implementation enqueues
+	/// all reply messages into pending events. Each message will allocate just under 65KiB. A full
+	/// sync of the public routing table with 128k channels will generate 16 messages and allocate ~1MB.
+	/// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts
+	/// memory-constrained systems.
+	fn handle_query_channel_range(&self, their_node_id: &PublicKey, msg: QueryChannelRange) -> Result<(), LightningError> {
+		log_debug!(self.logger, "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks);
+
+		let network_graph = self.network_graph.read().unwrap();
+
+		let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);
+
+		// We might receive valid queries with end_blocknum that would overflow SCID conversion.
+		// If so, we manually cap the ending block to avoid this overflow.
+		let exclusive_end_scid = scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);
+
+		// Per spec, we must reply to a query. Send an empty message when things are invalid.
+		if msg.chain_hash != network_graph.genesis_hash || inclusive_start_scid.is_err() || exclusive_end_scid.is_err() || msg.number_of_blocks == 0 {
+			let mut pending_events = self.pending_events.lock().unwrap();
+			pending_events.push(MessageSendEvent::SendReplyChannelRange {
+				node_id: their_node_id.clone(),
+				msg: ReplyChannelRange {
+					chain_hash: msg.chain_hash.clone(),
+					first_blocknum: msg.first_blocknum,
+					number_of_blocks: msg.number_of_blocks,
+					sync_complete: true,
+					short_channel_ids: vec![],
+				}
+			});
+			return Err(LightningError {
+				err: String::from("query_channel_range could not be processed"),
+				action: ErrorAction::IgnoreError,
+			});
+		}
+
+		// Creates channel batches. We are not checking if the channel is routable
+		// (has at least one update). A peer may still want to know the channel
+		// exists even if it's not yet routable.
+		let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
+		for (_, ref chan) in network_graph.get_channels().range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
+			if let Some(chan_announcement) = &chan.announcement_message {
+				// Construct a new batch if last one is full
+				if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
+					batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
+				}
+
+				let batch = batches.last_mut().unwrap();
+				batch.push(chan_announcement.contents.short_channel_id);
+			}
+		}
+		drop(network_graph);
+
+		let mut pending_events = self.pending_events.lock().unwrap();
+		let batch_count = batches.len();
+		for (batch_index, batch) in batches.into_iter().enumerate() {
+			// Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and
+			// subsequent replies must be >= the prior reply's. We'll simplify this by using zero,
+			// since it's still spec-compliant and sequence completion is now signaled explicitly.
+			let first_blocknum = 0;
+
+			// Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the
+			// query's value. Prior batches must use the number of blocks that fit into the message. We'll
+			// base this off the last SCID in the batch since we're somewhat abusing first_blocknum.
+			let number_of_blocks = if batch_index == batch_count-1 {
+				msg.end_blocknum()
+			} else {
+				block_from_scid(batch.last().unwrap()) + 1
+			};
+
+			// Only true for the last message in a sequence
+			let sync_complete = batch_index == batch_count - 1;
+
+			pending_events.push(MessageSendEvent::SendReplyChannelRange {
+				node_id: their_node_id.clone(),
+				msg: ReplyChannelRange {
+					chain_hash: msg.chain_hash.clone(),
+					first_blocknum,
+					number_of_blocks,
+					sync_complete,
+					short_channel_ids: batch,
+				}
+			});
+		}
+
+		Ok(())
 	}
 
 	fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> {
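To picture the batching above at a small scale, here is a self-contained sketch (not part of the patch) with MAX_SCIDS_PER_REPLY shrunk to 3 and plain integers standing in for short_channel_ids:

// Batching sketch: same fill-then-split shape as handle_query_channel_range.
fn main() {
	const MAX_SCIDS_PER_REPLY: usize = 3; // 8000 in the real code
	let scids: Vec<u64> = (0u64..7).collect();

	let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
	for scid in scids {
		// Construct a new batch if the last one is full.
		if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
			batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
		}
		batches.last_mut().unwrap().push(scid);
	}

	// Only the final reply in the sequence sets sync_complete.
	let batch_count = batches.len();
	for (batch_index, batch) in batches.into_iter().enumerate() {
		let sync_complete = batch_index == batch_count - 1;
		println!("reply {}: scids={:?} sync_complete={}", batch_index, batch, sync_complete);
	}
	// Prints: [0, 1, 2] false / [3, 4, 5] false / [6] true
}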
@@ -1028,6 +1114,7 @@ mod tests {
 	use util::logger::Logger;
 	use util::ser::{Readable, Writeable};
 	use util::events::{MessageSendEvent, MessageSendEventsProvider};
+	use util::scid_utils::scid_from_parts;
 
 	use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 	use bitcoin::hashes::Hash;
@@ -2089,17 +2176,298 @@ mod tests {
 
 	#[test]
 	fn handling_query_channel_range() {
 		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
-		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
-		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
 		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
+		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
+		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
+		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
+		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
+		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
+		let bitcoin_key_1 = PublicKey::from_secret_key(&secp_ctx, node_1_btckey);
+		let bitcoin_key_2 = PublicKey::from_secret_key(&secp_ctx, node_2_btckey);
 
-		let result = net_graph_msg_handler.handle_query_channel_range(&node_id, QueryChannelRange {
-			chain_hash,
-			first_blocknum: 0,
-			number_of_blocks: 0xffff_ffff,
-		});
-		assert!(result.is_err());
+		let mut scids: Vec<u64> = vec![
+			scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
+			scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(), // never
+		];
+
+		// used for testing multipart reply across blocks
+		for block in 100000..=108001 {
+			scids.push(scid_from_parts(block, 0, 0).unwrap());
+		}
+
+		// used for testing resumption on same block
+		scids.push(scid_from_parts(108001, 1, 0).unwrap());
+
+		for scid in scids {
+			let unsigned_announcement = UnsignedChannelAnnouncement {
+				features: ChannelFeatures::known(),
+				chain_hash: chain_hash.clone(),
+				short_channel_id: scid,
+				node_id_1,
+				node_id_2,
+				bitcoin_key_1,
+				bitcoin_key_2,
+				excess_data: Vec::new(),
+			};
+
+			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
+			let valid_announcement = ChannelAnnouncement {
+				node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
+				node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
+				bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
+				bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
+				contents: unsigned_announcement.clone(),
+			};
+			match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
+				Ok(_) => (),
+				_ => panic!()
+			};
+		}
+
+		// Error when number_of_blocks=0
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0,
+				number_of_blocks: 0,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0,
+				number_of_blocks: 0,
+				sync_complete: true,
+				short_channel_ids: vec![]
+			}]
+		);
+
+		// Error when wrong chain
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
+				first_blocknum: 0,
+				number_of_blocks: 0xffff_ffff,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: genesis_block(Network::Bitcoin).header.block_hash(),
+				first_blocknum: 0,
+				number_of_blocks: 0xffff_ffff,
+				sync_complete: true,
+				short_channel_ids: vec![],
+			}]
+		);
+
+		// Error when first_blocknum > 0xffffff
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0x01000000,
+				number_of_blocks: 0xffff_ffff,
+			},
+			false,
+			vec![ReplyChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0x01000000,
+				number_of_blocks: 0xffff_ffff,
+				sync_complete: true,
+				short_channel_ids: vec![]
+			}]
+		);
+
+		// Empty reply when max valid SCID block num
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0xffffff,
+				number_of_blocks: 1,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 0x01000000,
+					sync_complete: true,
+					short_channel_ids: vec![]
+				},
+			]
+		);
+
+		// No results in valid query range
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 1000,
+				number_of_blocks: 1000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 2000,
+					sync_complete: true,
+					short_channel_ids: vec![],
+				}
+			]
+		);
+
+		// Overflow first_blocknum + number_of_blocks
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 0xfe0000,
+				number_of_blocks: 0xffffffff,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 0xffffffff,
+					sync_complete: true,
+					short_channel_ids: vec![
+						0xfffffe_ffffff_ffff, // max
+					]
+				}
+			]
+		);
+
+		// Single block exactly full
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100000,
+				number_of_blocks: 8000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 108000,
+					sync_complete: true,
+					short_channel_ids: (100000..=107999)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+			]
+		);
+
+		// Multiple split on new block
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100000,
+				number_of_blocks: 8001,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 108000,
+					sync_complete: false,
+					short_channel_ids: (100000..=107999)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 108001,
+					sync_complete: true,
+					short_channel_ids: vec![
+						scid_from_parts(108000, 0, 0).unwrap(),
+					],
+				}
+			]
+		);
+
+		// Multiple split on same block
+		do_handling_query_channel_range(
+			&net_graph_msg_handler,
+			&node_id_2,
+			QueryChannelRange {
+				chain_hash: chain_hash.clone(),
+				first_blocknum: 100002,
+				number_of_blocks: 8000,
+			},
+			true,
+			vec![
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 108002,
+					sync_complete: false,
+					short_channel_ids: (100002..=108001)
+						.map(|block| scid_from_parts(block, 0, 0).unwrap())
+						.collect(),
+				},
+				ReplyChannelRange {
+					chain_hash: chain_hash.clone(),
+					first_blocknum: 0,
+					number_of_blocks: 108002,
+					sync_complete: true,
+					short_channel_ids: vec![
+						scid_from_parts(108001, 1, 0).unwrap(),
+					],
+				}
+			]
+		);
 	}
 
+	fn do_handling_query_channel_range(
+		net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
+		test_node_id: &PublicKey,
+		msg: QueryChannelRange,
+		expected_ok: bool,
+		expected_replies: Vec<ReplyChannelRange>
+	) {
+		let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
+
+		if expected_ok {
+			assert!(result.is_ok());
+		} else {
+			assert!(result.is_err());
+		}
+
+		let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), expected_replies.len());
+
+		for i in 0..events.len() {
+			let expected_reply = &expected_replies[i];
+			match &events[i] {
+				MessageSendEvent::SendReplyChannelRange { node_id, msg } => {
+					assert_eq!(node_id, test_node_id);
+					assert_eq!(msg.chain_hash, expected_reply.chain_hash);
+					assert_eq!(msg.first_blocknum, expected_reply.first_blocknum);
+					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
+					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
+					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+				},
+				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
+			}
+		}
+	}
+
 	#[test]
diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs
index 6f6f32daeab..271b6ddd13e 100644
--- a/lightning/src/util/events.rs
+++ b/lightning/src/util/events.rs
@@ -362,6 +362,14 @@ pub enum MessageSendEvent {
 		/// The query_short_channel_ids which should be sent.
 		msg: msgs::QueryShortChannelIds,
 	},
+	/// Sends a reply to a channel range query. This may be one of several SendReplyChannelRange events
+	/// emitted during processing of the query.
+	SendReplyChannelRange {
+		/// The node_id of this message recipient
+		node_id: PublicKey,
+		/// The reply_channel_range which should be sent.
+		msg: msgs::ReplyChannelRange,
+	}
 }
 
 /// A trait indicating an object may generate message send events
diff --git a/lightning/src/util/mod.rs b/lightning/src/util/mod.rs
index b8028ea97af..612bd9404b4 100644
--- a/lightning/src/util/mod.rs
+++ b/lightning/src/util/mod.rs
@@ -22,6 +22,7 @@ pub(crate) mod chacha20;
 pub(crate) mod poly1305;
 pub(crate) mod chacha20poly1305rfc;
 pub(crate) mod transaction_utils;
+pub(crate) mod scid_utils;
 
 #[macro_use]
 pub(crate) mod ser_macros;
diff --git a/lightning/src/util/scid_utils.rs b/lightning/src/util/scid_utils.rs
new file mode 100644
index 00000000000..7902a5271a6
--- /dev/null
+++ b/lightning/src/util/scid_utils.rs
@@ -0,0 +1,76 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+/// Maximum block height that can be used in a `short_channel_id`. This
+/// value is based on the 3 bytes available for the block height.
+pub const MAX_SCID_BLOCK: u64 = 0x00ffffff;
+
+/// Maximum transaction index that can be used in a `short_channel_id`.
+/// This value is based on the 3 bytes available for the tx index.
+pub const MAX_SCID_TX_INDEX: u64 = 0x00ffffff;
+
+/// Maximum vout index that can be used in a `short_channel_id`. This
+/// value is based on the 2 bytes available for the vout index.
+pub const MAX_SCID_VOUT_INDEX: u64 = 0xffff;
+
+/// A `short_channel_id` construction error
+#[derive(Debug, PartialEq)]
+pub enum ShortChannelIdError {
+	BlockOverflow,
+	TxIndexOverflow,
+	VoutIndexOverflow,
+}
+
+/// Extracts the block height (most significant 3 bytes) from the `short_channel_id`
+pub fn block_from_scid(short_channel_id: &u64) -> u32 {
+	return (short_channel_id >> 40) as u32;
+}
+
+/// Constructs a `short_channel_id` from its component pieces. Results in an error
+/// if the block height, tx index, or vout index overflow the maximum sizes.
+pub fn scid_from_parts(block: u64, tx_index: u64, vout_index: u64) -> Result<u64, ShortChannelIdError> {
+	if block > MAX_SCID_BLOCK {
+		return Err(ShortChannelIdError::BlockOverflow);
+	}
+
+	if tx_index > MAX_SCID_TX_INDEX {
+		return Err(ShortChannelIdError::TxIndexOverflow);
+	}
+
+	if vout_index > MAX_SCID_VOUT_INDEX {
+		return Err(ShortChannelIdError::VoutIndexOverflow);
+	}
+
+	Ok((block << 40) | (tx_index << 16) | vout_index)
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn test_block_from_scid() {
+		assert_eq!(block_from_scid(&0x000000_000000_0000), 0);
+		assert_eq!(block_from_scid(&0x000001_000000_0000), 1);
+		assert_eq!(block_from_scid(&0x000001_ffffff_ffff), 1);
+		assert_eq!(block_from_scid(&0x800000_ffffff_ffff), 0x800000);
+		assert_eq!(block_from_scid(&0xffffff_ffffff_ffff), 0xffffff);
+	}
+
+	#[test]
+	fn test_scid_from_parts() {
+		assert_eq!(scid_from_parts(0x00000000, 0x00000000, 0x0000).unwrap(), 0x000000_000000_0000);
+		assert_eq!(scid_from_parts(0x00000001, 0x00000002, 0x0003).unwrap(), 0x000001_000002_0003);
+		assert_eq!(scid_from_parts(0x00111111, 0x00222222, 0x3333).unwrap(), 0x111111_222222_3333);
+		assert_eq!(scid_from_parts(0x00ffffff, 0x00ffffff, 0xffff).unwrap(), 0xffffff_ffffff_ffff);
+		assert_eq!(scid_from_parts(0x01ffffff, 0x00000000, 0x0000).err().unwrap(), ShortChannelIdError::BlockOverflow);
+		assert_eq!(scid_from_parts(0x00000000, 0x01ffffff, 0x0000).err().unwrap(), ShortChannelIdError::TxIndexOverflow);
+		assert_eq!(scid_from_parts(0x00000000, 0x00000000, 0x010000).err().unwrap(), ShortChannelIdError::VoutIndexOverflow);
+	}
+}
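A closing illustration (not part of the patch): block_from_scid recovers the top 3 bytes, and the same layout yields the other components with analogous shifts. The tx-index and vout extractions below are hypothetical complements, not functions provided by scid_utils:

// Layout sketch: 3 bytes block | 3 bytes tx index | 2 bytes vout.
fn main() {
	let scid: u64 = 0x000001_000002_0003; // block 1, tx index 2, vout 3

	assert_eq!((scid >> 40) as u32, 1);              // block_from_scid equivalent
	assert_eq!(((scid >> 16) & 0xffffff) as u32, 2); // hypothetical tx_index_from_scid
	assert_eq!((scid & 0xffff) as u16, 3);           // hypothetical vout_from_scid
}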