From 6d714b8cbd467b2a0efe39e5973994857149a233 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 7 Dec 2023 04:12:04 +0000 Subject: [PATCH 1/8] Track a `counter` for each node in our network graph These counters are simply a unique number describing each node. They have no specific meaning, but start at 0 and count up, with counters being reused after a node has been deleted. --- lightning/src/routing/gossip.rs | 76 +++++++++++++++++++++++++++++---- 1 file changed, 67 insertions(+), 9 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index c7908d0040c..02f8a8c3290 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -42,7 +42,6 @@ use crate::sync::Mutex; use crate::sync::{LockTestExt, RwLock, RwLockReadGuard}; use core::ops::{Bound, Deref}; use core::str::FromStr; -#[cfg(feature = "std")] use core::sync::atomic::{AtomicUsize, Ordering}; use core::{cmp, fmt}; @@ -184,6 +183,8 @@ pub struct NetworkGraph where L::Target: Logger { // Lock order: channels -> nodes channels: RwLock>, nodes: RwLock>, + removed_node_counters: Mutex>, + next_node_counter: AtomicUsize, // Lock order: removed_channels -> removed_nodes // // NOTE: In the following `removed_*` maps, we use seconds since UNIX epoch to track time instead @@ -1368,7 +1369,7 @@ impl Readable for NodeAlias { } } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, Eq)] /// Details about a node in the network, known from the network announcement. pub struct NodeInfo { /// All valid channels a node has announced @@ -1376,7 +1377,19 @@ pub struct NodeInfo { /// More information about a node from node_announcement. /// Optional because we store a Node entry after learning about it from /// a channel announcement, but before receiving a node announcement. - pub announcement_info: Option + pub announcement_info: Option, + /// In memory, each node is assigned a unique ID. They are eagerly reused, ensuring they remain + /// relatively dense. + /// + /// These IDs allow the router to avoid a `HashMap` lookup by simply using this value as an + /// index in a `Vec`, skipping a big step in some of the hottest code when routing. + pub(crate) node_counter: u32, +} + +impl PartialEq for NodeInfo { + fn eq(&self, o: &NodeInfo) -> bool { + self.channels == o.channels && self.announcement_info == o.announcement_info + } } impl NodeInfo { @@ -1446,6 +1459,7 @@ impl Readable for NodeInfo { Ok(NodeInfo { announcement_info: announcement_info_wrap.map(|w| w.0), channels, + node_counter: u32::max_value(), }) } } @@ -1455,6 +1469,8 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; impl Writeable for NetworkGraph where L::Target: Logger { fn write(&self, writer: &mut W) -> Result<(), io::Error> { + self.test_node_counter_consistency(); + write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); self.chain_hash.write(writer)?; @@ -1493,11 +1509,15 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; + // There shouldn't be anywhere near `u32::MAX` nodes, and we need some headroom to insert + // new nodes during sync, so reject any graphs claiming more than `u32::MAX / 2` nodes. + if nodes_count > u32::max_value() as u64 / 2 { return Err(DecodeError::InvalidValue); } // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. 
let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500)); - for _ in 0..nodes_count { + for i in 0..nodes_count { let node_id = Readable::read(reader)?; - let node_info = Readable::read(reader)?; + let mut node_info: NodeInfo = Readable::read(reader)?; + node_info.node_counter = i as u32; nodes.insert(node_id, node_info); } @@ -1512,6 +1532,8 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { logger, channels: RwLock::new(channels), nodes: RwLock::new(nodes), + removed_node_counters: Mutex::new(Vec::new()), + next_node_counter: AtomicUsize::new(nodes_count as usize), last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp), removed_nodes: Mutex::new(new_hash_map()), removed_channels: Mutex::new(new_hash_map()), @@ -1557,6 +1579,8 @@ impl NetworkGraph where L::Target: Logger { logger, channels: RwLock::new(IndexedMap::new()), nodes: RwLock::new(IndexedMap::new()), + next_node_counter: AtomicUsize::new(0), + removed_node_counters: Mutex::new(Vec::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), removed_channels: Mutex::new(new_hash_map()), removed_nodes: Mutex::new(new_hash_map()), @@ -1564,8 +1588,33 @@ impl NetworkGraph where L::Target: Logger { } } + fn test_node_counter_consistency(&self) { + #[cfg(debug_assertions)] { + let nodes = self.nodes.read().unwrap(); + let removed_node_counters = self.removed_node_counters.lock().unwrap(); + let next_counter = self.next_node_counter.load(Ordering::Acquire); + assert!(next_counter < (u32::max_value() as usize) / 2); + let mut used_node_counters = vec![0u8; next_counter / 8 + 1]; + + for counter in removed_node_counters.iter() { + let pos = (*counter as usize) / 8; + let bit = 1 << (counter % 8); + assert_eq!(used_node_counters[pos] & bit, 0); + used_node_counters[pos] |= bit; + } + for (_, node) in nodes.unordered_iter() { + assert!((node.node_counter as usize) < next_counter); + let pos = (node.node_counter as usize) / 8; + let bit = 1 << (node.node_counter % 8); + assert_eq!(used_node_counters[pos] & bit, 0); + used_node_counters[pos] |= bit; + } + } + } + /// Returns a read-only view of the network graph. pub fn read_only(&'_ self) -> ReadOnlyNetworkGraph<'_> { + self.test_node_counter_consistency(); let channels = self.channels.read().unwrap(); let nodes = self.nodes.read().unwrap(); ReadOnlyNetworkGraph { @@ -1752,7 +1801,7 @@ impl NetworkGraph where L::Target: Logger { // b) we don't track UTXOs of channels we know about and remove them if they // get reorg'd out. // c) it's unclear how to do so without exposing ourselves to massive DoS risk. 
- Self::remove_channel_in_nodes(&mut nodes, &entry.get(), short_channel_id); + self.remove_channel_in_nodes(&mut nodes, &entry.get(), short_channel_id); *entry.get_mut() = channel_info; } else { return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); @@ -1769,9 +1818,13 @@ impl NetworkGraph where L::Target: Logger { node_entry.into_mut().channels.push(short_channel_id); }, IndexedMapEntry::Vacant(node_entry) => { + let mut removed_node_counters = self.removed_node_counters.lock().unwrap(); + let node_counter = removed_node_counters.pop() + .unwrap_or(self.next_node_counter.fetch_add(1, Ordering::Relaxed) as u32); node_entry.insert(NodeInfo { channels: vec!(short_channel_id), announcement_info: None, + node_counter, }); } }; @@ -1890,7 +1943,7 @@ impl NetworkGraph where L::Target: Logger { if let Some(chan) = channels.remove(&short_channel_id) { let mut nodes = self.nodes.write().unwrap(); self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix); - Self::remove_channel_in_nodes(&mut nodes, &chan, short_channel_id); + self.remove_channel_in_nodes(&mut nodes, &chan, short_channel_id); } } @@ -1909,6 +1962,7 @@ impl NetworkGraph where L::Target: Logger { let mut removed_nodes = self.removed_nodes.lock().unwrap(); if let Some(node) = nodes.remove(&node_id) { + let mut removed_node_counters = self.removed_node_counters.lock().unwrap(); for scid in node.channels.iter() { if let Some(chan_info) = channels.remove(scid) { let other_node_id = if node_id == chan_info.node_one { chan_info.node_two } else { chan_info.node_one }; @@ -1917,12 +1971,14 @@ impl NetworkGraph where L::Target: Logger { *scid != *chan_id }); if other_node_entry.get().channels.is_empty() { + removed_node_counters.push(other_node_entry.get().node_counter); other_node_entry.remove_entry(); } } removed_channels.insert(*scid, current_time_unix); } } + removed_node_counters.push(node.node_counter); removed_nodes.insert(node_id, current_time_unix); } } @@ -1998,7 +2054,7 @@ impl NetworkGraph where L::Target: Logger { let mut nodes = self.nodes.write().unwrap(); for scid in scids_to_remove { let info = channels.remove(&scid).expect("We just accessed this scid, it should be present"); - Self::remove_channel_in_nodes(&mut nodes, &info, scid); + self.remove_channel_in_nodes(&mut nodes, &info, scid); self.removed_channels.lock().unwrap().insert(scid, Some(current_time_unix)); } } @@ -2180,7 +2236,7 @@ impl NetworkGraph where L::Target: Logger { Ok(()) } - fn remove_channel_in_nodes(nodes: &mut IndexedMap, chan: &ChannelInfo, short_channel_id: u64) { + fn remove_channel_in_nodes(&self, nodes: &mut IndexedMap, chan: &ChannelInfo, short_channel_id: u64) { macro_rules! 
remove_from_node { ($node_id: expr) => { if let IndexedMapEntry::Occupied(mut entry) = nodes.entry($node_id) { @@ -2188,6 +2244,7 @@ impl NetworkGraph where L::Target: Logger { short_channel_id != *chan_id }); if entry.get().channels.is_empty() { + self.removed_node_counters.lock().unwrap().push(entry.get().node_counter); entry.remove_entry(); } } else { @@ -3604,6 +3661,7 @@ pub(crate) mod tests { let valid_node_info = NodeInfo { channels: Vec::new(), announcement_info: Some(valid_node_ann_info), + node_counter: 0, }; let mut encoded_valid_node_info = Vec::new(); From a17a159c937578abbd57e3e271be4fcc47b1620d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 7 Dec 2023 04:32:06 +0000 Subject: [PATCH 2/8] Store the source and destination `node_counter`s in `ChannelInfo` In the next commit, we'll use the new `node_counter`s to remove a `HashMap` from the router, using a `Vec` to store all our per-node information. In order to make finding entries in that `Vec` cheap, here we store the source and destination `node_counter`s in `ChannelInfo`, giving us the counters for both ends of a channel without doing a second `HashMap` lookup. --- lightning/src/routing/gossip.rs | 84 ++++++++++++++++++++++++++------- 1 file changed, 67 insertions(+), 17 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 02f8a8c3290..152604c5d5e 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -64,7 +64,7 @@ const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024; const MAX_SCIDS_PER_REPLY: usize = 8000; /// Represents the compressed public key of a node -#[derive(Clone, Copy)] +#[derive(Clone, Copy, PartialEq, Eq)] pub struct NodeId([u8; PUBLIC_KEY_SIZE]); impl NodeId { @@ -116,14 +116,6 @@ impl core::hash::Hash for NodeId { } } -impl Eq for NodeId {} - -impl PartialEq for NodeId { - fn eq(&self, other: &Self) -> bool { - self.0[..] == other.0[..] - } -} - impl cmp::PartialOrd for NodeId { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -885,7 +877,7 @@ impl Readable for ChannelUpdateInfo { } } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, Eq)] /// Details about a channel (both directions). /// Received within a channel announcement. pub struct ChannelInfo { @@ -910,6 +902,24 @@ pub struct ChannelInfo { /// (which we can probably assume we are - no-std environments probably won't have a full /// network graph in memory!). announcement_received_time: u64, + + /// The [`NodeInfo::node_counter`] of the node pointed to by [`Self::node_one`]. + pub(crate) node_one_counter: u32, + /// The [`NodeInfo::node_counter`] of the node pointed to by [`Self::node_two`].
+ pub(crate) node_two_counter: u32, +} + +impl PartialEq for ChannelInfo { + fn eq(&self, o: &ChannelInfo) -> bool { + self.features == o.features && + self.node_one == o.node_one && + self.one_to_two == o.one_to_two && + self.node_two == o.node_two && + self.two_to_one == o.two_to_one && + self.capacity_sats == o.capacity_sats && + self.announcement_message == o.announcement_message && + self.announcement_received_time == o.announcement_received_time + } } impl ChannelInfo { @@ -1030,6 +1040,8 @@ impl Readable for ChannelInfo { capacity_sats: _init_tlv_based_struct_field!(capacity_sats, required), announcement_message: _init_tlv_based_struct_field!(announcement_message, required), announcement_received_time: _init_tlv_based_struct_field!(announcement_received_time, (default_value, 0)), + node_one_counter: u32::max_value(), + node_two_counter: u32::max_value(), }) } } @@ -1505,7 +1517,7 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 22500)); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; - let chan_info = Readable::read(reader)?; + let chan_info: ChannelInfo = Readable::read(reader)?; channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; @@ -1521,6 +1533,13 @@ impl ReadableArgs for NetworkGraph where L::Target: Logger { nodes.insert(node_id, node_info); } + for (_, chan) in channels.unordered_iter_mut() { + chan.node_one_counter = + nodes.get(&chan.node_one).ok_or(DecodeError::InvalidValue)?.node_counter; + chan.node_two_counter = + nodes.get(&chan.node_two).ok_or(DecodeError::InvalidValue)?.node_counter; + } + let mut last_rapid_gossip_sync_timestamp: Option = None; read_tlv_fields!(reader, { (1, last_rapid_gossip_sync_timestamp, option), @@ -1590,6 +1609,7 @@ impl NetworkGraph where L::Target: Logger { fn test_node_counter_consistency(&self) { #[cfg(debug_assertions)] { + let channels = self.channels.read().unwrap(); let nodes = self.nodes.read().unwrap(); let removed_node_counters = self.removed_node_counters.lock().unwrap(); let next_counter = self.next_node_counter.load(Ordering::Acquire); @@ -1609,6 +1629,19 @@ impl NetworkGraph where L::Target: Logger { assert_eq!(used_node_counters[pos] & bit, 0); used_node_counters[pos] |= bit; } + + for (idx, used_bitset) in used_node_counters.iter().enumerate() { + if idx != next_counter / 8 { + assert_eq!(*used_bitset, 0xff); + } else { + assert_eq!(*used_bitset, (1u8 << (next_counter % 8)) - 1); + } + } + + for (_, chan) in channels.unordered_iter() { + assert_eq!(chan.node_one_counter, nodes.get(&chan.node_one).unwrap().node_counter); + assert_eq!(chan.node_two_counter, nodes.get(&chan.node_two).unwrap().node_counter); + } } } @@ -1773,6 +1806,8 @@ impl NetworkGraph where L::Target: Logger { capacity_sats: None, announcement_message: None, announcement_received_time: timestamp, + node_one_counter: u32::max_value(), + node_two_counter: u32::max_value(), }; self.add_channel_between_nodes(short_channel_id, channel_info, None) @@ -1787,7 +1822,7 @@ impl NetworkGraph where L::Target: Logger { log_gossip!(self.logger, "Adding channel {} between nodes {} and {}", short_channel_id, node_id_a, node_id_b); - match channels.entry(short_channel_id) { + let channel_info = match channels.entry(short_channel_id) { IndexedMapEntry::Occupied(mut entry) => { //TODO: because asking the blockchain if short_channel_id is valid is only optional //in the blockchain API, we need to handle it smartly here, though 
it's unclear @@ -1803,28 +1838,35 @@ impl NetworkGraph where L::Target: Logger { // c) it's unclear how to do so without exposing ourselves to massive DoS risk. self.remove_channel_in_nodes(&mut nodes, &entry.get(), short_channel_id); *entry.get_mut() = channel_info; + entry.into_mut() } else { return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip}); } }, IndexedMapEntry::Vacant(entry) => { - entry.insert(channel_info); + entry.insert(channel_info) } }; - for current_node_id in [node_id_a, node_id_b].iter() { + let mut node_counter_id = [ + (&mut channel_info.node_one_counter, node_id_a), + (&mut channel_info.node_two_counter, node_id_b) + ]; + for (node_counter, current_node_id) in node_counter_id.iter_mut() { match nodes.entry(current_node_id.clone()) { IndexedMapEntry::Occupied(node_entry) => { - node_entry.into_mut().channels.push(short_channel_id); + let node = node_entry.into_mut(); + node.channels.push(short_channel_id); + **node_counter = node.node_counter; }, IndexedMapEntry::Vacant(node_entry) => { let mut removed_node_counters = self.removed_node_counters.lock().unwrap(); - let node_counter = removed_node_counters.pop() + **node_counter = removed_node_counters.pop() .unwrap_or(self.next_node_counter.fetch_add(1, Ordering::Relaxed) as u32); node_entry.insert(NodeInfo { channels: vec!(short_channel_id), announcement_info: None, - node_counter, + node_counter: **node_counter, }); } }; @@ -1915,6 +1957,8 @@ impl NetworkGraph where L::Target: Logger { announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY { full_msg.cloned() } else { None }, announcement_received_time, + node_one_counter: u32::max_value(), + node_two_counter: u32::max_value(), }; self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)?; @@ -1976,6 +2020,8 @@ impl NetworkGraph where L::Target: Logger { } } removed_channels.insert(*scid, current_time_unix); + } else { + debug_assert!(false, "Channels in nodes must always have channel info"); } } removed_node_counters.push(node.node_counter); @@ -3595,6 +3641,8 @@ pub(crate) mod tests { capacity_sats: None, announcement_message: None, announcement_received_time: 87654, + node_one_counter: 0, + node_two_counter: 1, }; let mut encoded_chan_info: Vec = Vec::new(); @@ -3613,6 +3661,8 @@ pub(crate) mod tests { capacity_sats: None, announcement_message: None, announcement_received_time: 87654, + node_one_counter: 0, + node_two_counter: 1, }; let mut encoded_chan_info: Vec = Vec::new(); From c6cd3c817643d5648d613f45b63393e47001b530 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 19 Mar 2024 19:29:06 +0000 Subject: [PATCH 3/8] Add a new `NodeCounters` utility to track counters when routing In the next commit we'll stop using `NodeId`s to look up nodes when routing, instead using the new per-node counters. Here we take the first step, adding a local struct which tracks temporary counters for route hints/source/destination nodes. Because we must ensure we have a 1-to-1 mapping from node ids to `node_counter`s, even across first-hop and last-hop hints, we have to be careful to check the network graph first, then a new `private_node_id_to_node_counter` map to ensure we only ever end up with one counter per node id. 
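To illustrate the selection rule (a rough, standalone sketch with simplified, hypothetical types, not the actual LDK API): a node that exists in the public graph keeps its `node_counter` from the graph, while any other node (payer, payee, or route-hint hop) is handed a temporary counter just above the graph's maximum, so each node id maps to exactly one dense index.

use std::collections::HashMap;

// Hypothetical stand-in for NodeId (33-byte compressed pubkey).
type NodeKey = [u8; 33];

fn counter_for_node(
	graph_counters: &HashMap<NodeKey, u32>,        // public nodes -> their graph node_counter
	max_graph_counter: u32,                        // highest counter used by the public graph
	private_counters: &mut HashMap<NodeKey, u32>,  // hint/payer/payee nodes seen so far
	node: NodeKey,
) -> u32 {
	// Always prefer the public graph's counter so a node never gets two indexes.
	if let Some(counter) = graph_counters.get(&node) {
		return *counter;
	}
	// Otherwise hand out dense temporary counters just above the graph's range.
	let next = max_graph_counter + 1 + private_counters.len() as u32;
	*private_counters.entry(node).or_insert(next)
}

Because the graph's counter always wins, a hint that points at a public node collapses onto the same index the graph already uses, which keeps the eventual flat-array lookups consistent.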
--- lightning/src/routing/gossip.rs | 7 +++ lightning/src/routing/router.rs | 99 ++++++++++++++++++++++++++++++++- 2 files changed, 104 insertions(+), 2 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 152604c5d5e..5a5379dde91 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -204,6 +204,7 @@ pub struct NetworkGraph where L::Target: Logger { pub struct ReadOnlyNetworkGraph<'a> { channels: RwLockReadGuard<'a, IndexedMap>, nodes: RwLockReadGuard<'a, IndexedMap>, + max_node_counter: u32, } /// Update to the [`NetworkGraph`] based on payment failure information conveyed via the Onion @@ -1653,6 +1654,7 @@ impl NetworkGraph where L::Target: Logger { ReadOnlyNetworkGraph { channels, nodes, + max_node_counter: (self.next_node_counter.load(Ordering::Acquire) as u32).saturating_sub(1), } } @@ -2348,6 +2350,11 @@ impl ReadOnlyNetworkGraph<'_> { self.nodes.get(&NodeId::from_pubkey(&pubkey)) .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec())) } + + /// Gets the maximum possible node_counter for a node in this graph + pub(crate) fn max_node_counter(&self) -> u32 { + self.max_node_counter + } } #[cfg(test)] diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 5b202604e37..369131cc28d 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1499,6 +1499,83 @@ enum CandidateHopId { Blinded(usize), } +/// To avoid doing [`PublicKey`] -> [`PathBuildingHop`] hashtable lookups, we assign each +/// [`PublicKey`]/node a `usize` index and simply keep a `Vec` of values. +/// +/// While this is easy for gossip-originating nodes (the [`DirectedChannelInfo`] exposes "counters" +/// for us for this purpose) we have to have our own indexes for nodes originating from invoice +/// hints, local channels, or blinded path fake nodes. +/// +/// This wrapper handles all this for us, allowing look-up of counters from the various contexts. +/// +/// It is first built by passing all [`NodeId`]s that we'll ever care about (which are not in our +/// [`NetworkGraph`], e.g. those from first- and last-hop hints and blinded path introduction +/// points) either though [`NodeCountersBuilder::select_node_counter_for_pubkey`] or +/// [`NodeCountersBuilder::select_node_counter_for_id`], then calling [`NodeCountersBuilder::build`] +/// and using the resulting [`NodeCounters`] to look up any counters. +/// +/// [`NodeCounters::private_node_counter_from_pubkey`], specifically, will return `Some` iff +/// [`NodeCountersBuilder::select_node_counter_for_pubkey`] was called on the same key (not +/// [`NodeCountersBuilder::select_node_counter_for_id`]). It will also return a cached copy of the +/// [`PublicKey`] -> [`NodeId`] conversion. 
+struct NodeCounters<'a> { + network_graph: &'a ReadOnlyNetworkGraph<'a>, + private_node_id_to_node_counter: HashMap, + private_hop_key_cache: HashMap, +} + +struct NodeCountersBuilder<'a>(NodeCounters<'a>); + +impl<'a> NodeCountersBuilder<'a> { + fn new(network_graph: &'a ReadOnlyNetworkGraph) -> Self { + Self(NodeCounters { + network_graph, + private_node_id_to_node_counter: new_hash_map(), + private_hop_key_cache: new_hash_map(), + }) + } + + fn select_node_counter_for_pubkey(&mut self, pubkey: PublicKey) -> u32 { + let id = NodeId::from_pubkey(&pubkey); + let counter = self.select_node_counter_for_id(id); + self.0.private_hop_key_cache.insert(pubkey, (id, counter)); + counter + } + + fn select_node_counter_for_id(&mut self, node_id: NodeId) -> u32 { + // For any node_id, we first have to check if its in the existing network graph, and then + // ensure that we always look up in our internal map first. + self.0.network_graph.nodes().get(&node_id) + .map(|node| node.node_counter) + .unwrap_or_else(|| { + let next_node_counter = self.0.network_graph.max_node_counter() + 1 + + self.0.private_node_id_to_node_counter.len() as u32; + *self.0.private_node_id_to_node_counter.entry(node_id).or_insert(next_node_counter) + }) + } + + fn build(self) -> NodeCounters<'a> { self.0 } +} + +impl<'a> NodeCounters<'a> { + fn max_counter(&self) -> u32 { + self.network_graph.max_node_counter() + + self.private_node_id_to_node_counter.len() as u32 + } + + fn private_node_counter_from_pubkey(&self, pubkey: &PublicKey) -> Option<&(NodeId, u32)> { + self.private_hop_key_cache.get(pubkey) + } + + fn node_counter_from_id(&self, node_id: &NodeId) -> Option<(&NodeId, u32)> { + self.private_node_id_to_node_counter.get_key_value(node_id).map(|(a, b)| (a, *b)) + .or_else(|| { + self.network_graph.nodes().get_key_value(node_id) + .map(|(node_id, node)| (node_id, node.node_counter)) + }) + } +} + #[inline] fn max_htlc_from_capacity(capacity: EffectiveCapacity, max_channel_saturation_power_of_half: u8) -> u64 { let saturation_shift: u32 = max_channel_saturation_power_of_half as u32; @@ -2051,6 +2128,17 @@ where L::Target: Logger { } } + let mut node_counter_builder = NodeCountersBuilder::new(&network_graph); + + let payer_node_counter = node_counter_builder.select_node_counter_for_pubkey(*our_node_pubkey); + let payee_node_counter = node_counter_builder.select_node_counter_for_pubkey(maybe_dummy_payee_pk); + + for route in payment_params.payee.unblinded_route_hints().iter() { + for hop in route.0.iter() { + node_counter_builder.select_node_counter_for_pubkey(hop.src_node_id); + } + } + // Step (1). // Prepare the data we'll use for payee-to-payer search by // inserting first hops suggested by the caller as targets. 
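(Aside, not part of this diff: the payoff of assigning counters up front is that later commits can keep per-node routing state in a flat `Vec` indexed by counter instead of a `HashMap` keyed by `NodeId`. A rough sketch of that access pattern, using hypothetical simplified types:)

#[derive(Clone)]
struct ScratchEntry { best_fee_msat: u64 }

// Sized once from the largest counter handed out; indexing replaces hashing entirely.
fn new_dist(max_counter: u32) -> Vec<Option<ScratchEntry>> {
	vec![None; max_counter as usize + 1]
}

fn entry_mut(dist: &mut Vec<Option<ScratchEntry>>, node_counter: u32) -> &mut ScratchEntry {
	let slot = &mut dist[node_counter as usize];
	if slot.is_none() {
		// First visit to this node: initialize its scratch state in place.
		*slot = Some(ScratchEntry { best_fee_msat: u64::MAX });
	}
	slot.as_mut().unwrap()
}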
@@ -2065,9 +2153,14 @@ where L::Target: Logger { if chan.counterparty.node_id == *our_node_pubkey { return Err(LightningError{err: "First hop cannot have our_node_pubkey as a destination.".to_owned(), action: ErrorAction::IgnoreError}); } + let counterparty_id = NodeId::from_pubkey(&chan.counterparty.node_id); first_hop_targets - .entry(NodeId::from_pubkey(&chan.counterparty.node_id)) - .or_insert(Vec::new()) + .entry(counterparty_id) + .or_insert_with(|| { + // Make sure there's a counter assigned for the counterparty + node_counter_builder.select_node_counter_for_id(counterparty_id); + Vec::new() + }) .push(chan); } if first_hop_targets.is_empty() { return Err(LightningError{err: "Cannot route when there are no outbound routes away from us".to_owned(), action: ErrorAction::IgnoreError}); } @@ -2089,6 +2182,8 @@ where L::Target: Logger { } } + let node_counters = node_counter_builder.build(); + // The main heap containing all candidate next-hops sorted by their score (max(fee, // htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of // adding duplicate entries when we find a better path to a given node. From 04dad438d72bc75aac83cba21e9801238024bc80 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 1 Jun 2024 18:19:45 +0000 Subject: [PATCH 4/8] Drop `private_hop_key_cache` in favor of `NodeCounters` With the new `NodeCounters` we have all the `NodeId`s we'll need during routing, so there's no need to keep the `private_hop_key_cache` which existed to provide references to `NodeId`s which are needed during routing. --- lightning/src/routing/router.rs | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 369131cc28d..fdd28ccad8b 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -2168,20 +2168,6 @@ where L::Target: Logger { } } - let mut private_hop_key_cache = hash_map_with_capacity( payment_params.payee.unblinded_route_hints().iter().map(|path| path.0.len()).sum() ); - - // Because we store references to private hop node_ids in `dist`, below, we need them to exist - // (as `NodeId`, not `PublicKey`) for the lifetime of `dist`. Thus, we calculate all the keys - // we'll need here and simply fetch them when routing.
- private_hop_key_cache.insert(maybe_dummy_payee_pk, NodeId::from_pubkey(&maybe_dummy_payee_pk)); - for route in payment_params.payee.unblinded_route_hints().iter() { - for hop in route.0.iter() { - private_hop_key_cache.insert(hop.src_node_id, NodeId::from_pubkey(&hop.src_node_id)); - } - } - let node_counters = node_counter_builder.build(); // The main heap containing all candidate next-hops sorted by their score (max(fee, @@ -2770,7 +2756,9 @@ where L::Target: Logger { let mut aggregate_path_contribution_msat = path_value_msat; for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() { - let target = private_hop_key_cache.get(prev_hop_id).unwrap(); + let (target, _private_target_node_counter) = + node_counters.private_node_counter_from_pubkey(&prev_hop_id) + .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here"); if let Some(first_channels) = first_hop_targets.get(target) { if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) { From c34980c47fc4b9976a477e613e73b893bad179ee Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 1 Jun 2024 22:45:04 +0000 Subject: [PATCH 5/8] Use `NodeCounters` `NodeId`s as the blinded path intro references The router's `introduction_node_id_cache` is used to cache the `NodeId`s of blinded path introduction points so that we don't have to look them up every time we go around the main router loop. When using it, if the introduction point isn't a public node we then look up the introduction in our first-hops map. In either case, we have to end up with a reference to a `NodeId` that outlives our `dist` map. Here we consolidate both the initial cache building and the first-hops map lookup to one place, storing only a reference to a `NodeId` either in the `NetworkGraph` or in the new `NodeCounters` to get the required lifetime without needing to reference into the first-hops map. We then take this opportunity to avoid `clone`ing the first-hops map entries as we now no longer reference into it. --- lightning/src/blinded_path/mod.rs | 2 +- lightning/src/routing/router.rs | 152 ++++++++++++++++-------------- 2 files changed, 81 insertions(+), 73 deletions(-) diff --git a/lightning/src/blinded_path/mod.rs b/lightning/src/blinded_path/mod.rs index 2d3d085bddf..17ea67c5365 100644 --- a/lightning/src/blinded_path/mod.rs +++ b/lightning/src/blinded_path/mod.rs @@ -306,7 +306,7 @@ impl_writeable!(BlindedHop, { impl Direction { /// Returns the [`NodeId`] from the inputs corresponding to the direction. - pub fn select_node_id<'a>(&self, node_a: &'a NodeId, node_b: &'a NodeId) -> &'a NodeId { + pub fn select_node_id(&self, node_a: NodeId, node_b: NodeId) -> NodeId { match self { Direction::NodeOne => core::cmp::min(node_a, node_b), Direction::NodeTwo => core::cmp::max(node_a, node_b), diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index fdd28ccad8b..0e60d3e332d 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1990,39 +1990,6 @@ where L::Target: Logger { return Err(LightningError{err: "Cannot send a payment of 0 msat".to_owned(), action: ErrorAction::IgnoreError}); } - let introduction_node_id_cache = payment_params.payee.blinded_route_hints().iter() - .map(|(_, path)| path.public_introduction_node_id(network_graph)) - .collect::>(); - match &payment_params.payee { - Payee::Clear { route_hints, node_id, .. 
} => { - for route in route_hints.iter() { - for hop in &route.0 { - if hop.src_node_id == *node_id { - return Err(LightningError{err: "Route hint cannot have the payee as the source.".to_owned(), action: ErrorAction::IgnoreError}); - } - } - } - }, - Payee::Blinded { route_hints, .. } => { - if introduction_node_id_cache.iter().all(|introduction_node_id| *introduction_node_id == Some(&our_node_id)) { - return Err(LightningError{err: "Cannot generate a route to blinded paths if we are the introduction node to all of them".to_owned(), action: ErrorAction::IgnoreError}); - } - for ((_, blinded_path), introduction_node_id) in route_hints.iter().zip(introduction_node_id_cache.iter()) { - if blinded_path.blinded_hops.len() == 0 { - return Err(LightningError{err: "0-hop blinded path provided".to_owned(), action: ErrorAction::IgnoreError}); - } else if *introduction_node_id == Some(&our_node_id) { - log_info!(logger, "Got blinded path with ourselves as the introduction node, ignoring"); - } else if blinded_path.blinded_hops.len() == 1 && - route_hints - .iter().zip(introduction_node_id_cache.iter()) - .filter(|((_, p), _)| p.blinded_hops.len() == 1) - .any(|(_, p_introduction_node_id)| p_introduction_node_id != introduction_node_id) - { - return Err(LightningError{err: format!("1-hop blinded paths must all have matching introduction node ids"), action: ErrorAction::IgnoreError}); - } - } - } - } let final_cltv_expiry_delta = payment_params.payee.final_cltv_expiry_delta().unwrap_or(0); if payment_params.max_total_cltv_expiry_delta <= final_cltv_expiry_delta { return Err(LightningError{err: "Can't find a route where the maximum total CLTV expiry delta is below the final CLTV expiry.".to_owned(), action: ErrorAction::IgnoreError}); @@ -2139,10 +2106,10 @@ where L::Target: Logger { } } - // Step (1). - // Prepare the data we'll use for payee-to-payer search by - // inserting first hops suggested by the caller as targets. - // Our search will then attempt to reach them while traversing from the payee node. + // Step (1). Prepare first and last hop targets. + // + // First cache all our direct channels so that we can insert them in the heap at startup. + // Then process any blinded routes, resolving their introduction node and caching it. let mut first_hop_targets: HashMap<_, Vec<&ChannelDetails>> = hash_map_with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 }); if let Some(hops) = first_hops { @@ -2170,6 +2137,68 @@ where L::Target: Logger { let node_counters = node_counter_builder.build(); + let introduction_node_id_cache = payment_params.payee.blinded_route_hints().iter() + .map(|(_, path)| { + match &path.introduction_node { + IntroductionNode::NodeId(pubkey) => { + // Note that this will only return `Some` if the `pubkey` is somehow known to + // us (i.e. a channel counterparty or in the network graph). + node_counters.node_counter_from_id(&NodeId::from_pubkey(&pubkey)) + }, + IntroductionNode::DirectedShortChannelId(direction, scid) => { + path.public_introduction_node_id(network_graph) + .map(|node_id_ref| *node_id_ref) + .or_else(|| { + first_hop_targets.iter().find(|(_, channels)| + channels + .iter() + .any(|details| Some(*scid) == details.get_outbound_payment_scid()) + ).map(|(cp, _)| direction.select_node_id(our_node_id, *cp)) + }) + .and_then(|node_id| node_counters.node_counter_from_id(&node_id)) + }, + } + }) + .collect::>(); + match &payment_params.payee { + Payee::Clear { route_hints, node_id, .. 
} => { + for route in route_hints.iter() { + for hop in &route.0 { + if hop.src_node_id == *node_id { + return Err(LightningError { + err: "Route hint cannot have the payee as the source.".to_owned(), + action: ErrorAction::IgnoreError + }); + } + } + } + }, + Payee::Blinded { route_hints, .. } => { + if introduction_node_id_cache.iter().all(|info_opt| info_opt.map(|(a, _)| a) == Some(&our_node_id)) { + return Err(LightningError{err: "Cannot generate a route to blinded paths if we are the introduction node to all of them".to_owned(), action: ErrorAction::IgnoreError}); + } + for ((_, blinded_path), info_opt) in route_hints.iter().zip(introduction_node_id_cache.iter()) { + if blinded_path.blinded_hops.len() == 0 { + return Err(LightningError{err: "0-hop blinded path provided".to_owned(), action: ErrorAction::IgnoreError}); + } + let introduction_node_id = match info_opt { + None => continue, + Some(info) => info.0, + }; + if *introduction_node_id == our_node_id { + log_info!(logger, "Got blinded path with ourselves as the introduction node, ignoring"); + } else if blinded_path.blinded_hops.len() == 1 && + route_hints + .iter().zip(introduction_node_id_cache.iter()) + .filter(|((_, p), _)| p.blinded_hops.len() == 1) + .any(|(_, iter_info_opt)| iter_info_opt.is_some() && iter_info_opt != info_opt) + { + return Err(LightningError{err: format!("1-hop blinded paths must all have matching introduction node ids"), action: ErrorAction::IgnoreError}); + } + } + } + } + // The main heap containing all candidate next-hops sorted by their score (max(fee, // htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of // adding duplicate entries when we find a better path to a given node. @@ -2667,35 +2696,17 @@ where L::Target: Logger { // If a caller provided us with last hops, add them to routing targets. Since this happens // earlier than general path finding, they will be somewhat prioritized, although currently // it matters only if the fees are exactly the same. + debug_assert_eq!( + payment_params.payee.blinded_route_hints().len(), + introduction_node_id_cache.len(), + "introduction_node_id_cache was built by iterating the blinded_route_hints, so they should be the same len" + ); for (hint_idx, hint) in payment_params.payee.blinded_route_hints().iter().enumerate() { // Only add the hops in this route to our candidate set if either // we have a direct channel to the first hop or the first hop is // in the regular network graph. 
- let source_node_id = match introduction_node_id_cache[hint_idx] { - Some(node_id) => node_id, - None => match &hint.1.introduction_node { - IntroductionNode::NodeId(pubkey) => { - let node_id = NodeId::from_pubkey(&pubkey); - match first_hop_targets.get_key_value(&node_id).map(|(key, _)| key) { - Some(node_id) => node_id, - None => continue, - } - }, - IntroductionNode::DirectedShortChannelId(direction, scid) => { - let first_hop = first_hop_targets.iter().find(|(_, channels)| - channels - .iter() - .any(|details| Some(*scid) == details.get_outbound_payment_scid()) - ); - match first_hop { - Some((counterparty_node_id, _)) => { - direction.select_node_id(&our_node_id, counterparty_node_id) - }, - None => continue, - } - }, - }, - }; + let source_node_opt = introduction_node_id_cache[hint_idx]; + let (source_node_id, _source_node_counter) = if let Some(v) = source_node_opt { v } else { continue }; if our_node_id == *source_node_id { continue } let candidate = if hint.1.blinded_hops.len() == 1 { CandidateRouteHop::OneHopBlinded( @@ -2710,10 +2721,9 @@ where L::Target: Logger { { path_contribution_msat = hop_used_msat; } else { continue } - if let Some(first_channels) = first_hop_targets.get(source_node_id) { - let mut first_channels = first_channels.clone(); + if let Some(first_channels) = first_hop_targets.get_mut(source_node_id) { sort_first_hop_channels( - &mut first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey + first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey ); for details in first_channels { let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { @@ -2811,10 +2821,9 @@ where L::Target: Logger { .saturating_add(1); // Searching for a direct channel between last checked hop and first_hop_targets - if let Some(first_channels) = first_hop_targets.get(target) { - let mut first_channels = first_channels.clone(); + if let Some(first_channels) = first_hop_targets.get_mut(target) { sort_first_hop_channels( - &mut first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey + first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey ); for details in first_channels { let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { @@ -2860,10 +2869,9 @@ where L::Target: Logger { // Note that we *must* check if the last hop was added as `add_entry` // always assumes that the third argument is a node to which we have a // path. 
- if let Some(first_channels) = first_hop_targets.get(&NodeId::from_pubkey(&hop.src_node_id)) { - let mut first_channels = first_channels.clone(); + if let Some(first_channels) = first_hop_targets.get_mut(&NodeId::from_pubkey(&hop.src_node_id)) { sort_first_hop_channels( - &mut first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey + first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey ); for details in first_channels { let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { @@ -7726,7 +7734,7 @@ mod tests { }; let mut invalid_blinded_path_2 = invalid_blinded_path.clone(); - invalid_blinded_path_2.introduction_node = IntroductionNode::NodeId(ln_test_utils::pubkey(45)); + invalid_blinded_path_2.introduction_node = IntroductionNode::NodeId(nodes[3]); let payment_params = PaymentParameters::blinded(vec![ (blinded_payinfo.clone(), invalid_blinded_path.clone()), (blinded_payinfo.clone(), invalid_blinded_path_2)]); From 43d250dadcdad54836eacd8b447bb36d5c8e6cb5 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 24 Jun 2024 23:50:29 +0000 Subject: [PATCH 6/8] Drop the `dist` `HashMap` in routing, replacing it with a `Vec`. Now that we have unique, dense, 32-bit identifiers for all the nodes in our network graph, we can store the per-node information when routing in a simple `Vec` rather than a `HashMap`. This avoids the overhead of hashing and table scanning entirely, for a nice "simple" optimization win. --- lightning/src/routing/gossip.rs | 16 ++- lightning/src/routing/router.rs | 168 ++++++++++++++++++++++++------- lightning/src/util/test_utils.rs | 4 + 3 files changed, 145 insertions(+), 43 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 5a5379dde91..a18f4ca2efb 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1102,6 +1102,14 @@ impl<'a> DirectedChannelInfo<'a> { /// Refers to the `node_id` receiving the payment from the previous hop. 
#[inline] pub fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } } + + /// Returns the source node's counter + #[inline] + pub(super) fn source_counter(&self) -> u32 { if self.from_node_one { self.channel.node_one_counter } else { self.channel.node_two_counter } } + + /// Returns the target node's counter + #[inline] + pub(super) fn target_counter(&self) -> u32 { if self.from_node_one { self.channel.node_two_counter } else { self.channel.node_one_counter } } } impl<'a> fmt::Debug for DirectedChannelInfo<'a> { @@ -1854,21 +1862,21 @@ impl NetworkGraph where L::Target: Logger { (&mut channel_info.node_one_counter, node_id_a), (&mut channel_info.node_two_counter, node_id_b) ]; - for (node_counter, current_node_id) in node_counter_id.iter_mut() { + for (chan_info_node_counter, current_node_id) in node_counter_id.iter_mut() { match nodes.entry(current_node_id.clone()) { IndexedMapEntry::Occupied(node_entry) => { let node = node_entry.into_mut(); node.channels.push(short_channel_id); - **node_counter = node.node_counter; + **chan_info_node_counter = node.node_counter; }, IndexedMapEntry::Vacant(node_entry) => { let mut removed_node_counters = self.removed_node_counters.lock().unwrap(); - **node_counter = removed_node_counters.pop() + **chan_info_node_counter = removed_node_counters.pop() .unwrap_or(self.next_node_counter.fetch_add(1, Ordering::Relaxed) as u32); node_entry.insert(NodeInfo { channels: vec!(short_channel_id), announcement_info: None, - node_counter: **node_counter, + node_counter: **chan_info_node_counter, }); } }; diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 0e60d3e332d..4c560969eae 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1188,6 +1188,20 @@ pub struct FirstHopCandidate<'a> { /// /// This is not exported to bindings users as lifetimes are not expressible in most languages. pub payer_node_id: &'a NodeId, + /// A unique ID which describes the payer. + /// + /// It will not conflict with any [`NodeInfo::node_counter`]s, but may be equal to one if the + /// payer is a public node. + /// + /// [`NodeInfo::node_counter`]: super::gossip::NodeInfo::node_counter + pub(crate) payer_node_counter: u32, + /// A unique ID which describes the first hop counterparty. + /// + /// It will not conflict with any [`NodeInfo::node_counter`]s, but may be equal to one if the + /// counterparty is a public node. + /// + /// [`NodeInfo::node_counter`]: super::gossip::NodeInfo::node_counter + pub(crate) target_node_counter: u32, } /// A [`CandidateRouteHop::PublicHop`] entry. @@ -1213,7 +1227,21 @@ pub struct PrivateHopCandidate<'a> { /// Node id of the next hop in BOLT 11 route hint. /// /// This is not exported to bindings users as lifetimes are not expressible in most languages. - pub target_node_id: &'a NodeId + pub target_node_id: &'a NodeId, + /// A unique ID which describes the source node of the hop (further from the payment target). + /// + /// It will not conflict with any [`NodeInfo::node_counter`]s, but may be equal to one if the + /// node is a public node. + /// + /// [`NodeInfo::node_counter`]: super::gossip::NodeInfo::node_counter + pub(crate) source_node_counter: u32, + /// A unique ID which describes the destination node of the hop (towards the payment target). + /// + /// It will not conflict with any [`NodeInfo::node_counter`]s, but may be equal to one if the + /// node is a public node. 
+ /// + /// [`NodeInfo::node_counter`]: super::gossip::NodeInfo::node_counter + pub(crate) target_node_counter: u32, } /// A [`CandidateRouteHop::Blinded`] entry. @@ -1234,6 +1262,13 @@ pub struct BlindedPathCandidate<'a> { /// This is used to cheaply uniquely identify this blinded path, even though we don't have /// a short channel ID for this hop. hint_idx: usize, + /// A unique ID which describes the introduction point of the blinded path. + /// + /// It will not conflict with any [`NodeInfo::node_counter`]s, but will generally be equal to + /// one from the public network graph (assuming the introduction point is a public node). + /// + /// [`NodeInfo::node_counter`]: super::gossip::NodeInfo::node_counter + source_node_counter: u32, } /// A [`CandidateRouteHop::OneHopBlinded`] entry. @@ -1256,6 +1291,13 @@ pub struct OneHopBlindedPathCandidate<'a> { /// This is used to cheaply uniquely identify this blinded path, even though we don't have /// a short channel ID for this hop. hint_idx: usize, + /// A unique ID which describes the introduction point of the blinded path. + /// + /// It will not conflict with any [`NodeInfo::node_counter`]s, but will generally be equal to + /// one from the public network graph (assuming the introduction point is a public node). + /// + /// [`NodeInfo::node_counter`]: super::gossip::NodeInfo::node_counter + source_node_counter: u32, } /// A wrapper around the various hop representations. @@ -1378,6 +1420,28 @@ impl<'a> CandidateRouteHop<'a> { } } + #[inline] + fn src_node_counter(&self) -> u32 { + match self { + CandidateRouteHop::FirstHop(hop) => hop.payer_node_counter, + CandidateRouteHop::PublicHop(hop) => hop.info.source_counter(), + CandidateRouteHop::PrivateHop(hop) => hop.source_node_counter, + CandidateRouteHop::Blinded(hop) => hop.source_node_counter, + CandidateRouteHop::OneHopBlinded(hop) => hop.source_node_counter, + } + } + + #[inline] + fn target_node_counter(&self) -> Option { + match self { + CandidateRouteHop::FirstHop(hop) => Some(hop.target_node_counter), + CandidateRouteHop::PublicHop(hop) => Some(hop.info.target_counter()), + CandidateRouteHop::PrivateHop(hop) => Some(hop.target_node_counter), + CandidateRouteHop::Blinded(_) => None, + CandidateRouteHop::OneHopBlinded(_) => None, + } + } + /// Returns the fees that must be paid to route an HTLC over this channel. 
#[inline] pub fn fees(&self) -> RoutingFees { @@ -1667,12 +1731,13 @@ impl<'a> core::fmt::Debug for PathBuildingHop<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { let mut debug_struct = f.debug_struct("PathBuildingHop"); debug_struct - .field("node_id", &self.candidate.target()) + .field("source_node_id", &self.candidate.source()) + .field("target_node_id", &self.candidate.target()) .field("short_channel_id", &self.candidate.short_channel_id()) .field("total_fee_msat", &self.total_fee_msat) .field("next_hops_fee_msat", &self.next_hops_fee_msat) .field("hop_use_fee_msat", &self.hop_use_fee_msat) - .field("total_fee_msat - (next_hops_fee_msat + hop_use_fee_msat)", &(&self.total_fee_msat - (&self.next_hops_fee_msat + &self.hop_use_fee_msat))) + .field("total_fee_msat - (next_hops_fee_msat + hop_use_fee_msat)", &(&self.total_fee_msat.saturating_sub(self.next_hops_fee_msat).saturating_sub(self.hop_use_fee_msat))) .field("path_penalty_msat", &self.path_penalty_msat) .field("path_htlc_minimum_msat", &self.path_htlc_minimum_msat) .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta()); @@ -2110,7 +2175,7 @@ where L::Target: Logger { // // First cache all our direct channels so that we can insert them in the heap at startup. // Then process any blinded routes, resolving their introduction node and caching it. - let mut first_hop_targets: HashMap<_, Vec<&ChannelDetails>> = + let mut first_hop_targets: HashMap<_, (Vec<&ChannelDetails>, u32)> = hash_map_with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 }); if let Some(hops) = first_hops { for chan in hops { @@ -2125,10 +2190,10 @@ where L::Target: Logger { .entry(counterparty_id) .or_insert_with(|| { // Make sure there's a counter assigned for the counterparty - node_counter_builder.select_node_counter_for_id(counterparty_id); - Vec::new() + let node_counter = node_counter_builder.select_node_counter_for_id(counterparty_id); + (Vec::new(), node_counter) }) - .push(chan); + .0.push(chan); } if first_hop_targets.is_empty() { return Err(LightningError{err: "Cannot route when there are no outbound routes away from us".to_owned(), action: ErrorAction::IgnoreError}); @@ -2149,7 +2214,7 @@ where L::Target: Logger { path.public_introduction_node_id(network_graph) .map(|node_id_ref| *node_id_ref) .or_else(|| { - first_hop_targets.iter().find(|(_, channels)| + first_hop_targets.iter().find(|(_, (channels, _))| channels .iter() .any(|details| Some(*scid) == details.get_outbound_payment_scid()) @@ -2206,7 +2271,8 @@ where L::Target: Logger { // Map from node_id to information about the best current path to that node, including feerate // information. - let mut dist: HashMap = hash_map_with_capacity(network_nodes.len()); + let dist_len = node_counters.max_counter() + 1; + let mut dist: Vec> = vec![None; dist_len as usize]; // During routing, if we ignore a path due to an htlc_minimum_msat limit, we set this, // indicating that we may wish to try again with a higher value, potentially paying to meet an @@ -2253,7 +2319,7 @@ where L::Target: Logger { // when we want to stop looking for new paths. 
let mut already_collected_value_msat = 0; - for (_, channels) in first_hop_targets.iter_mut() { + for (_, (channels, _)) in first_hop_targets.iter_mut() { sort_first_hop_channels(channels, &used_liquidities, recommended_value_msat, our_node_pubkey); } @@ -2413,14 +2479,17 @@ where L::Target: Logger { ); let path_htlc_minimum_msat = compute_fees_saturating(curr_min, $candidate.fees()) .saturating_add(curr_min); - let hm_entry = dist.entry(src_node_id); - let old_entry = hm_entry.or_insert_with(|| { + + let dist_entry = &mut dist[$candidate.src_node_counter() as usize]; + let old_entry = if let Some(hop) = dist_entry { + hop + } else { // If there was previously no known way to access the source node // (recall it goes payee-to-payer) of short_channel_id, first add a // semi-dummy record just to compute the fees to reach the source node. // This will affect our decision on selecting short_channel_id // as a way to reach the $candidate.target() node. - PathBuildingHop { + *dist_entry = Some(PathBuildingHop { candidate: $candidate.clone(), fee_msat: 0, next_hops_fee_msat: u64::max_value(), @@ -2431,8 +2500,9 @@ where L::Target: Logger { was_processed: false, #[cfg(all(not(ldk_bench), any(test, fuzzing)))] value_contribution_msat, - } - }); + }); + dist_entry.as_mut().unwrap() + }; #[allow(unused_mut)] // We only use the mut in cfg(test) let mut should_process = !old_entry.was_processed; @@ -2591,7 +2661,7 @@ where L::Target: Logger { let fee_to_target_msat; let next_hops_path_htlc_minimum_msat; let next_hops_path_penalty_msat; - let skip_node = if let Some(elem) = dist.get_mut(&$node_id) { + let skip_node = if let Some(elem) = &mut dist[$node.node_counter as usize] { let was_processed = elem.was_processed; elem.was_processed = true; fee_to_target_msat = elem.total_fee_msat; @@ -2603,6 +2673,7 @@ where L::Target: Logger { // Because there are no channels from payee, it will not have a dist entry at this point. // If we're processing any other node, it is always be the result of a channel from it. debug_assert_eq!($node_id, maybe_dummy_payee_node_id); + fee_to_target_msat = 0; next_hops_path_htlc_minimum_msat = 0; next_hops_path_penalty_msat = 0; @@ -2610,10 +2681,12 @@ where L::Target: Logger { }; if !skip_node { - if let Some(first_channels) = first_hop_targets.get(&$node_id) { + if let Some((first_channels, peer_node_counter)) = first_hop_targets.get(&$node_id) { for details in first_channels { + debug_assert_eq!(*peer_node_counter, $node.node_counter); let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, + details, payer_node_id: &our_node_id, payer_node_counter, + target_node_counter: $node.node_counter, }); add_entry!(&candidate, fee_to_target_msat, $next_hops_value_contribution, @@ -2662,15 +2735,19 @@ where L::Target: Logger { // For every new path, start from scratch, except for used_liquidities, which // helps to avoid reusing previously selected paths in future iterations. targets.clear(); - dist.clear(); + for e in dist.iter_mut() { + *e = None; + } hit_minimum_limit = false; // If first hop is a private channel and the only way to reach the payee, this is the only // place where it could be added. 
- payee_node_id_opt.map(|payee| first_hop_targets.get(&payee).map(|first_channels| { + payee_node_id_opt.map(|payee| first_hop_targets.get(&payee).map(|(first_channels, peer_node_counter)| { + debug_assert_eq!(*peer_node_counter, payee_node_counter); for details in first_channels { let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, + details, payer_node_id: &our_node_id, payer_node_counter, + target_node_counter: payee_node_counter, }); let added = add_entry!(&candidate, 0, path_value_msat, 0, 0u64, 0, 0).is_some(); @@ -2706,14 +2783,14 @@ where L::Target: Logger { // we have a direct channel to the first hop or the first hop is // in the regular network graph. let source_node_opt = introduction_node_id_cache[hint_idx]; - let (source_node_id, _source_node_counter) = if let Some(v) = source_node_opt { v } else { continue }; + let (source_node_id, source_node_counter) = if let Some(v) = source_node_opt { v } else { continue }; if our_node_id == *source_node_id { continue } let candidate = if hint.1.blinded_hops.len() == 1 { CandidateRouteHop::OneHopBlinded( - OneHopBlindedPathCandidate { source_node_id, hint, hint_idx } + OneHopBlindedPathCandidate { source_node_counter, source_node_id, hint, hint_idx } ) } else { - CandidateRouteHop::Blinded(BlindedPathCandidate { source_node_id, hint, hint_idx }) + CandidateRouteHop::Blinded(BlindedPathCandidate { source_node_counter, source_node_id, hint, hint_idx }) }; let mut path_contribution_msat = path_value_msat; if let Some(hop_used_msat) = add_entry!(&candidate, @@ -2721,13 +2798,14 @@ where L::Target: Logger { { path_contribution_msat = hop_used_msat; } else { continue } - if let Some(first_channels) = first_hop_targets.get_mut(source_node_id) { + if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(source_node_id) { sort_first_hop_channels( first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey ); for details in first_channels { let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, + details, payer_node_id: &our_node_id, payer_node_counter, + target_node_counter: *peer_node_counter, }); let blinded_path_fee = match compute_fees(path_contribution_msat, candidate.fees()) { Some(fee) => fee, @@ -2766,11 +2844,14 @@ where L::Target: Logger { let mut aggregate_path_contribution_msat = path_value_msat; for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() { - let (target, _private_target_node_counter) = + let (target, private_target_node_counter) = node_counters.private_node_counter_from_pubkey(&prev_hop_id) + .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here"); + let (_src_id, private_source_node_counter) = + node_counters.private_node_counter_from_pubkey(&hop.src_node_id) .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here"); - if let Some(first_channels) = first_hop_targets.get(target) { + if let Some((first_channels, _)) = first_hop_targets.get(target) { if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) { log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.", hop.short_channel_id); @@ -2785,7 +2866,11 @@ where L::Target: Logger { info, short_channel_id: hop.short_channel_id, })) - .unwrap_or_else(|| CandidateRouteHop::PrivateHop(PrivateHopCandidate { 
hint: hop, target_node_id: target })); + .unwrap_or_else(|| CandidateRouteHop::PrivateHop(PrivateHopCandidate { + hint: hop, target_node_id: target, + source_node_counter: *private_source_node_counter, + target_node_counter: *private_target_node_counter, + })); if let Some(hop_used_msat) = add_entry!(&candidate, aggregate_next_hops_fee_msat, aggregate_path_contribution_msat, @@ -2821,13 +2906,14 @@ where L::Target: Logger { .saturating_add(1); // Searching for a direct channel between last checked hop and first_hop_targets - if let Some(first_channels) = first_hop_targets.get_mut(target) { + if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(target) { sort_first_hop_channels( first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey ); for details in first_channels { let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, + details, payer_node_id: &our_node_id, payer_node_counter, + target_node_counter: *peer_node_counter, }); add_entry!(&first_hop_candidate, aggregate_next_hops_fee_msat, aggregate_path_contribution_msat, @@ -2869,13 +2955,14 @@ where L::Target: Logger { // Note that we *must* check if the last hop was added as `add_entry` // always assumes that the third argument is a node to which we have a // path. - if let Some(first_channels) = first_hop_targets.get_mut(&NodeId::from_pubkey(&hop.src_node_id)) { + if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(&NodeId::from_pubkey(&hop.src_node_id)) { sort_first_hop_channels( first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey ); for details in first_channels { let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, + details, payer_node_id: &our_node_id, payer_node_counter, + target_node_counter: *peer_node_counter, }); add_entry!(&first_hop_candidate, aggregate_next_hops_fee_msat, @@ -2911,16 +2998,18 @@ where L::Target: Logger { // Since we're going payee-to-payer, hitting our node as a target means we should stop // traversing the graph and arrange the path out of what we found. if node_id == our_node_id { - let mut new_entry = dist.remove(&our_node_id).unwrap(); + let mut new_entry = dist[payer_node_counter as usize].take().unwrap(); let mut ordered_hops: Vec<(PathBuildingHop, NodeFeatures)> = vec!((new_entry.clone(), default_node_features.clone())); 'path_walk: loop { let mut features_set = false; - let target = ordered_hops.last().unwrap().0.candidate.target().unwrap_or(maybe_dummy_payee_node_id); - if let Some(first_channels) = first_hop_targets.get(&target) { + let candidate = &ordered_hops.last().unwrap().0.candidate; + let target = candidate.target().unwrap_or(maybe_dummy_payee_node_id); + let target_node_counter = candidate.target_node_counter(); + if let Some((first_channels, _)) = first_hop_targets.get(&target) { for details in first_channels { if let CandidateRouteHop::FirstHop(FirstHopCandidate { details: last_hop_details, .. }) - = ordered_hops.last().unwrap().0.candidate + = candidate { if details.get_outbound_payment_scid() == last_hop_details.get_outbound_payment_scid() { ordered_hops.last_mut().unwrap().1 = details.counterparty.features.to_context(); @@ -2948,11 +3037,12 @@ where L::Target: Logger { // save this path for the payment route. Also, update the liquidity // remaining on the used hops, so that we take them into account // while looking for more paths. 
-				if target == maybe_dummy_payee_node_id { break 'path_walk; }
+				if target_node_counter.is_none() { break 'path_walk; }
+				if target_node_counter == Some(payee_node_counter) { break 'path_walk; }

-				new_entry = match dist.remove(&target) {
+				new_entry = match dist[target_node_counter.unwrap() as usize].take() {
 					Some(payment_hop) => payment_hop,
 					// We can't arrive at None because, if we ever add an entry to targets,
 					// we also fill in the entry in dist (see add_entry!).
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index f6616a8e5d2..d9da01fee9f 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -177,6 +177,8 @@ impl<'a> Router for TestRouter<'a> {
 							let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
 								details: first_hops[idx],
 								payer_node_id: &node_id,
+								payer_node_counter: u32::max_value(),
+								target_node_counter: u32::max_value(),
 							});
 							scorer.channel_penalty_msat(&candidate, usage, &Default::default());
 							continue;
@@ -204,6 +206,8 @@ impl<'a> Router for TestRouter<'a> {
 						let candidate = CandidateRouteHop::PrivateHop(PrivateHopCandidate {
 							hint: &route_hint,
 							target_node_id: &target_node_id,
+							source_node_counter: u32::max_value(),
+							target_node_counter: u32::max_value(),
 						});
 						scorer.channel_penalty_msat(&candidate, usage, &Default::default());
 					}

From 5fb66377eff721d1a26be504b91f46afc689631d Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 7 Dec 2023 23:40:26 +0000
Subject: [PATCH 7/8] Align `PathBuildingHop` to 128b, now that we store them in a `Vec`

Now that `PathBuildingHop` is stored in a `Vec` (as `Option`s), rather
than `HashMap` entries, they can grow to fill a full two cache lines
without a memory access performance cost. In the next commit we'll take
advantage of this somewhat, but here we update the assertions and drop
the `repr(C)`, allowing rust to lay the memory out as it wishes.
---
 lightning/src/routing/router.rs | 25 +++----------------------
 1 file changed, 3 insertions(+), 22 deletions(-)

diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 4c560969eae..8f299d9ac0c 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -1164,12 +1164,7 @@ impl cmp::PartialOrd for RouteGraphNode {

 // While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved
 // substantially when it is laid out at exactly 64 bytes.
-//
-// Thus, we use `#[repr(C)]` on the struct to force a suboptimal layout and check that it stays 64
-// bytes here.
-#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
 const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::<RouteGraphNode>();
-#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
 const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::<RouteGraphNode>() - 64;

 /// A [`CandidateRouteHop::FirstHop`] entry.
@@ -1673,7 +1668,7 @@ fn iter_equal(mut iter_a: I1, mut iter_b: I2)
 /// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
 /// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
 #[derive(Clone)]
-#[repr(C)] // Force fields to appear in the order we define them.
+#[repr(align(128))]
 struct PathBuildingHop<'a> {
 	candidate: CandidateRouteHop<'a>,
 	/// If we've already processed a node as the best node, we shouldn't process it again. Normally
@@ -1694,11 +1689,6 @@ struct PathBuildingHop<'a> {
 	/// channel scoring.
 	path_penalty_msat: u64,

-	// The last 16 bytes are on the next cache line by default in glibc's malloc. Thus, we should
-	// only place fields which are not hot there. Luckily, the next three fields are only read if
-	// we end up on the selected path, and only in the final path layout phase, so we don't care
-	// too much if reading them is slow.
-
 	fee_msat: u64,

 	/// All the fees paid *after* this channel on the way to the destination
@@ -1715,17 +1705,8 @@ struct PathBuildingHop<'a> {
 	value_contribution_msat: u64,
 }

-// Checks that the entries in the `find_route` `dist` map fit in (exactly) two standard x86-64
-// cache lines. Sadly, they're not guaranteed to actually lie on a cache line (and in fact,
-// generally won't, because at least glibc's malloc will align to a nice, big, round
-// boundary...plus 16), but at least it will reduce the amount of data we'll need to load.
-//
-// Note that these assertions only pass on somewhat recent rustc, and thus are gated on the
-// ldk_bench flag.
-#[cfg(ldk_bench)]
-const _NODE_MAP_SIZE_TWO_CACHE_LINES: usize = 128 - core::mem::size_of::<(NodeId, PathBuildingHop)>();
-#[cfg(ldk_bench)]
-const _NODE_MAP_SIZE_EXACTLY_CACHE_LINES: usize = core::mem::size_of::<(NodeId, PathBuildingHop)>() - 128;
+const _NODE_MAP_SIZE_TWO_CACHE_LINES: usize = 128 - core::mem::size_of::<Option<PathBuildingHop>>();
+const _NODE_MAP_SIZE_EXACTLY_TWO_CACHE_LINES: usize = core::mem::size_of::<Option<PathBuildingHop>>() - 128;

 impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
 	fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {

From 4f5e17b54d9339469938cb7652f566f9f89410bb Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 10 Jul 2024 01:22:32 +0000
Subject: [PATCH 8/8] Move blinded path introduction point resolution to a helper method

This marginally reduces the size of `get_route` by moving the blinded
path introduction point resolution and blinded path checks into a
helper method.
---
 lightning/src/routing/router.rs | 137 ++++++++++++++++++--------------
 1 file changed, 76 insertions(+), 61 deletions(-)

diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 8f299d9ac0c..da724159a4e 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -1635,6 +1635,79 @@ impl<'a> NodeCounters<'a> {
 	}
 }

+/// Calculates the introduction point for each blinded path in the given [`PaymentParameters`], if
+/// they can be found.
+fn calculate_blinded_path_intro_points<'a, L: Deref>(
+	payment_params: &PaymentParameters, node_counters: &'a NodeCounters,
+	network_graph: &ReadOnlyNetworkGraph, logger: &L, our_node_id: NodeId,
+	first_hop_targets: &HashMap<NodeId, (Vec<&'a ChannelDetails>, u32)>,
+) -> Result<Vec<Option<(&'a NodeId, u32)>>, LightningError>
+where L::Target: Logger {
+	let introduction_node_id_cache = payment_params.payee.blinded_route_hints().iter()
+		.map(|(_, path)| {
+			match &path.introduction_node {
+				IntroductionNode::NodeId(pubkey) => {
+					// Note that this will only return `Some` if the `pubkey` is somehow known to
+					// us (i.e. a channel counterparty or in the network graph).
+					node_counters.node_counter_from_id(&NodeId::from_pubkey(&pubkey))
+				},
+				IntroductionNode::DirectedShortChannelId(direction, scid) => {
+					path.public_introduction_node_id(network_graph)
+						.map(|node_id_ref| *node_id_ref)
+						.or_else(|| {
+							first_hop_targets.iter().find(|(_, (channels, _))|
+								channels
+									.iter()
+									.any(|details| Some(*scid) == details.get_outbound_payment_scid())
+							).map(|(cp, _)| direction.select_node_id(our_node_id, *cp))
+						})
+						.and_then(|node_id| node_counters.node_counter_from_id(&node_id))
+				},
+			}
+		})
+		.collect::<Vec<_>>();
+	match &payment_params.payee {
+		Payee::Clear { route_hints, node_id, .. } => {
+			for route in route_hints.iter() {
+				for hop in &route.0 {
+					if hop.src_node_id == *node_id {
+						return Err(LightningError {
+							err: "Route hint cannot have the payee as the source.".to_owned(),
+							action: ErrorAction::IgnoreError
+						});
+					}
+				}
+			}
+		},
+		Payee::Blinded { route_hints, .. } => {
+			if introduction_node_id_cache.iter().all(|info_opt| info_opt.map(|(a, _)| a) == Some(&our_node_id)) {
+				return Err(LightningError{err: "Cannot generate a route to blinded paths if we are the introduction node to all of them".to_owned(), action: ErrorAction::IgnoreError});
+			}
+			for ((_, blinded_path), info_opt) in route_hints.iter().zip(introduction_node_id_cache.iter()) {
+				if blinded_path.blinded_hops.len() == 0 {
+					return Err(LightningError{err: "0-hop blinded path provided".to_owned(), action: ErrorAction::IgnoreError});
+				}
+				let introduction_node_id = match info_opt {
+					None => continue,
+					Some(info) => info.0,
+				};
+				if *introduction_node_id == our_node_id {
+					log_info!(logger, "Got blinded path with ourselves as the introduction node, ignoring");
+				} else if blinded_path.blinded_hops.len() == 1 &&
+					route_hints
+						.iter().zip(introduction_node_id_cache.iter())
+						.filter(|((_, p), _)| p.blinded_hops.len() == 1)
+						.any(|(_, iter_info_opt)| iter_info_opt.is_some() && iter_info_opt != info_opt)
+				{
+					return Err(LightningError{err: format!("1-hop blinded paths must all have matching introduction node ids"), action: ErrorAction::IgnoreError});
+				}
+			}
+		}
+	}
+
+	Ok(introduction_node_id_cache)
+}
+
 #[inline]
 fn max_htlc_from_capacity(capacity: EffectiveCapacity, max_channel_saturation_power_of_half: u8) -> u64 {
 	let saturation_shift: u32 = max_channel_saturation_power_of_half as u32;
@@ -2183,67 +2256,9 @@ where L::Target: Logger {

 	let node_counters = node_counter_builder.build();

-	let introduction_node_id_cache = payment_params.payee.blinded_route_hints().iter()
-		.map(|(_, path)| {
-			match &path.introduction_node {
-				IntroductionNode::NodeId(pubkey) => {
-					// Note that this will only return `Some` if the `pubkey` is somehow known to
-					// us (i.e. a channel counterparty or in the network graph).
-					node_counters.node_counter_from_id(&NodeId::from_pubkey(&pubkey))
-				},
-				IntroductionNode::DirectedShortChannelId(direction, scid) => {
-					path.public_introduction_node_id(network_graph)
-						.map(|node_id_ref| *node_id_ref)
-						.or_else(|| {
-							first_hop_targets.iter().find(|(_, (channels, _))|
-								channels
-									.iter()
-									.any(|details| Some(*scid) == details.get_outbound_payment_scid())
-							).map(|(cp, _)| direction.select_node_id(our_node_id, *cp))
-						})
-						.and_then(|node_id| node_counters.node_counter_from_id(&node_id))
-				},
-			}
-		})
-		.collect::<Vec<_>>();
-	match &payment_params.payee {
-		Payee::Clear { route_hints, node_id, .. } => {
-			for route in route_hints.iter() {
-				for hop in &route.0 {
-					if hop.src_node_id == *node_id {
-						return Err(LightningError {
-							err: "Route hint cannot have the payee as the source.".to_owned(),
-							action: ErrorAction::IgnoreError
-						});
-					}
-				}
-			}
-		},
-		Payee::Blinded { route_hints, .. } => {
-			if introduction_node_id_cache.iter().all(|info_opt| info_opt.map(|(a, _)| a) == Some(&our_node_id)) {
-				return Err(LightningError{err: "Cannot generate a route to blinded paths if we are the introduction node to all of them".to_owned(), action: ErrorAction::IgnoreError});
-			}
-			for ((_, blinded_path), info_opt) in route_hints.iter().zip(introduction_node_id_cache.iter()) {
-				if blinded_path.blinded_hops.len() == 0 {
-					return Err(LightningError{err: "0-hop blinded path provided".to_owned(), action: ErrorAction::IgnoreError});
-				}
-				let introduction_node_id = match info_opt {
-					None => continue,
-					Some(info) => info.0,
-				};
-				if *introduction_node_id == our_node_id {
-					log_info!(logger, "Got blinded path with ourselves as the introduction node, ignoring");
-				} else if blinded_path.blinded_hops.len() == 1 &&
-					route_hints
-						.iter().zip(introduction_node_id_cache.iter())
-						.filter(|((_, p), _)| p.blinded_hops.len() == 1)
-						.any(|(_, iter_info_opt)| iter_info_opt.is_some() && iter_info_opt != info_opt)
-				{
-					return Err(LightningError{err: format!("1-hop blinded paths must all have matching introduction node ids"), action: ErrorAction::IgnoreError});
-				}
-			}
-		}
-	}
+	let introduction_node_id_cache = calculate_blinded_path_intro_points(
+		&payment_params, &node_counters, network_graph, &logger, our_node_id, &first_hop_targets,
+	)?;

 	// The main heap containing all candidate next-hops sorted by their score (max(fee,
 	// htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of