Add ability to broadcast our own node_announcement #435
Changes from all commits: fd08529, 107da97, a8114a7, c2ca6d3, 78c48f7
@@ -29,8 +29,8 @@ use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::features::{InitFeatures, NodeFeatures};
use ln::router::Route;
-use ln::features::InitFeatures;
use ln::msgs;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
@@ -368,6 +368,10 @@ pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref,
channel_state: Mutex<ChannelHolder<ChanSigner>>,
our_network_key: SecretKey,

/// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
/// value increases strictly since we don't assume access to a time source.
last_node_announcement_serial: AtomicUsize,

/// The bulk of our storage will eventually be here (channels and message queues and the like).
/// If we are connected to a peer we always at least have an entry here, even if no channels
/// are currently open with that peer.
@@ -665,6 +669,8 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
}),
our_network_key: keys_manager.get_node_secret(),

last_node_announcement_serial: AtomicUsize::new(0),

per_peer_state: RwLock::new(HashMap::new()),

pending_events: Mutex::new(Vec::new()),
@@ -1118,7 +1124,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.genesis_hash,
short_channel_id: short_channel_id,
-timestamp: chan.get_channel_update_count(),
+timestamp: chan.get_update_time_counter(),
flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
cltv_expiry_delta: CLTV_EXPIRY_DELTA,
htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
@@ -1334,6 +1340,57 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
})
}

#[allow(dead_code)]
// Messages of up to 64KB should never end up more than half full with addresses, as that would
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
// smaller than 500:
const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;

Review comment (on lines +1344 to +1345): Hm, this isn't an actual spec rule, though, is it (mod the issue that this PR addresses)? >500 does seem extreme and I don't have an issue with enforcing it, just not sure the exact purpose of enforcing it...

Reply: IIRC (and if it doesn't it should) if we went to serialize it we'd generate something >64KB, panicking trying to send a message that overflows the max message size.
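As a rough illustration of the arithmetic behind that compile-time check, here is a standalone sketch; the constant names and the 37-byte worst-case address size are assumptions for illustration, not values quoted from this diff:

```rust
// Standalone sketch of the "static assert by const underflow" trick used above.
const MAX_MSG_LEN: u32 = u16::MAX as u32; // lightning messages top out at 65535 bytes
const MAX_ADDR_LEN: u32 = 37;             // assumed worst-case serialized address length
const HALF_MESSAGE_IS_ADDRS: u32 = MAX_MSG_LEN / (MAX_ADDR_LEN + 1) / 2;

// Underflows (and therefore fails to compile) if fewer than 500
// maximum-length addresses would fit in half of a 64KB message.
const STATIC_ASSERT: u32 = HALF_MESSAGE_IS_ADDRS - 500;

fn main() {
    let _ = STATIC_ASSERT;
    // 65535 / 38 / 2 == 862, comfortably above the 500-address panic threshold.
    println!("{} max-length addresses fit in half a message", HALF_MESSAGE_IS_ADDRS);
}
```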

/// Generates a signed node_announcement from the given arguments and creates a
/// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have
/// seen a channel_announcement from us (ie unless we have public channels open).
///
/// RGB is a node "color" and alias is a printable human-readable string to describe this node
/// to humans. They carry no in-protocol meaning.
///
/// addresses represent the set (possibly empty) of socket addresses on which this node accepts
/// incoming connections. These will be broadcast to the network, publicly tying these
/// addresses together. If you wish to preserve user privacy, addresses should likely contain
/// only Tor Onion addresses.
///
/// Panics if addresses is absurdly large (more than 500).
pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
let _ = self.total_consistency_lock.read().unwrap();

if addresses.len() > 500 {
panic!("More than half the message size was taken up by public addresses!");
}
Review comment (on lines +1371 to +1373): Good target for whoever addresses #529.
let announcement = msgs::UnsignedNodeAnnouncement {
features: NodeFeatures::supported(),
timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
node_id: self.get_our_node_id(),
rgb, alias, addresses,
excess_address_data: Vec::new(),
excess_data: Vec::new(),
};
let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);

Review comment (on excess_address_data): Since this is our node announcement, we'll never have any excess address data, right? This is just for remote peer NodeAnnouncements that may randomly have excess address data? Is this a common problem...?

Reply: That is correct. And, indeed, it is not a common thing. We have to have it as otherwise the signatures of things we relay will fail, but, in general, we anticipate almost never having anything in there, or if we do, a very small thing.

Review comment (on the msghash line): Just to be sure: the spec says the signature should be over the double hash, and this seems to be a single hash?

Reply: Right, bitcoin_hashes types are confusing. That tiny little d that's easy to miss means double :).

Reply: Ah, it is easy to miss.. 😅
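For readers tripped up by the same thing, here is a minimal sketch (assuming the bitcoin_hashes crate, which this codebase uses) showing that the sha256d type really is the double hash the spec asks the signature to cover; the message bytes are arbitrary placeholders:

```rust
// Minimal sketch: sha256d::Hash (aliased as Sha256dHash in this codebase) is
// SHA-256 applied twice.
use bitcoin_hashes::{sha256, sha256d, Hash};

fn main() {
    let msg = b"unsigned node_announcement bytes";

    let once = sha256::Hash::hash(msg);
    let twice_by_hand = sha256::Hash::hash(&once[..]);
    let double = sha256d::Hash::hash(msg);

    // The tiny "d" means double: one call hashes twice.
    assert_eq!(&double[..], &twice_by_hand[..]);
}
```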

let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
msg: msgs::NodeAnnouncement {
signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
contents: announcement
},
});
}

Review comment (on the signature line): We may wanna provide an interface for external signers later.

Reply: Right. For now the external signing support hasn't even attempted to think about moving the node_id private key out, given it's used in a ton of places. Eventually it'll need to be (well, probably after splitting it up more), but for now it is what it is.
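A hypothetical call site might look like the following sketch; the NetAddress variant and field names, the import path, and the node setup are assumptions for illustration and not part of this PR:

```rust
// Hypothetical call-site sketch: build the arguments and hand them to
// broadcast_node_announcement. NetAddress field names are assumed here.
use lightning::ln::msgs::NetAddress;

fn main() {
    // 32-byte alias, zero-padded, as the announcement format expects.
    let mut alias = [0u8; 32];
    alias[..11].copy_from_slice(b"my-ldk-node");

    // One public clearnet address; for privacy, Onion addresses may be preferable.
    let addresses = vec![NetAddress::IPv4 { addr: [203, 0, 113, 7], port: 9735 }];

    // With a ChannelManager in hand (construction elided here), you would call:
    // channel_manager.broadcast_node_announcement([0x00, 0x2b, 0xff], alias, addresses);
    // and then surface the resulting BroadcastNodeAnnouncement MessageSendEvent
    // from get_and_clear_pending_msg_events() to your gossip/peer handling.
    let _ = (alias, addresses);
}
```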

/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
@@ -2719,6 +2776,18 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
}
self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
loop {
// Update last_node_announcement_serial to be the max of its current value and the
// block timestamp. This should keep us close to the current time without relying on
// having an explicit local time source.
// Just in case we end up in a race, we loop until we either successfully update
// last_node_announcement_serial or decide we don't need to.
let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
if old_serial >= header.time as usize { break; }
if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
break;
}
}
}
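The loop above is a standard lock-free "monotonic max" update; a self-contained sketch of the same pattern, with made-up timestamps, is:

```rust
// Bump an atomic counter up to a new observation (here, a block timestamp)
// without ever letting it move backwards, even under concurrent callers.
use std::sync::atomic::{AtomicUsize, Ordering};

fn bump_to_at_least(serial: &AtomicUsize, observed: usize) {
    loop {
        let old = serial.load(Ordering::Acquire);
        if old >= observed {
            break; // already at or past the observed time, nothing to do
        }
        // Another thread may have raced us; only store if the value is still
        // the one we read, otherwise retry with the fresh value.
        if serial
            .compare_exchange(old, observed, Ordering::AcqRel, Ordering::Relaxed)
            .is_ok()
        {
            break;
        }
    }
}

fn main() {
    let serial = AtomicUsize::new(1_580_000_000);
    bump_to_at_least(&serial, 1_580_000_600); // newer block header time wins
    bump_to_at_least(&serial, 1_579_999_000); // older one is ignored
    assert_eq!(serial.load(Ordering::Acquire), 1_580_000_600);
}
```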

/// We force-close the channel without letting our counterparty participate in the shutdown
@@ -2970,6 +3039,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
&events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
@@ -3288,6 +3358,8 @@ impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref
peer_state.latest_features.write(writer)?;
}

(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;

Ok(())
}
}
@@ -3459,6 +3531,8 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
}

let last_node_announcement_serial: u32 = Readable::read(reader)?;

Review comment: Hm, in theory could this cause a user migration error?

Reply: Yeah, we currently do no version checking for serialized types and break things all the time. We'll need to fix this come 0.1, but for now, no reason to slow down to build compatibility with 0.0.X.

let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
@@ -3478,6 +3552,8 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
}),
our_network_key: args.keys_manager.get_node_secret(),

last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),

per_peer_state: RwLock::new(per_peer_state),

pending_events: Mutex::new(Vec::new()),
@@ -394,10 +394,33 @@ pub fn create_announced_chan_between_nodes<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'

pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);

nodes[a].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
let a_events = nodes[a].node.get_and_clear_pending_msg_events();
assert_eq!(a_events.len(), 1);
let a_node_announcement = match a_events[0] {
MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => {
(*msg).clone()
},
_ => panic!("Unexpected event"),
};

nodes[b].node.broadcast_node_announcement([1, 1, 1], [1; 32], Vec::new());
let b_events = nodes[b].node.get_and_clear_pending_msg_events();
assert_eq!(b_events.len(), 1);
let b_node_announcement = match b_events[0] {
MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => {
(*msg).clone()
},
_ => panic!("Unexpected event"),
};

Review comment (on lines +399 to +416): 👍

for node in nodes {
assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
node.router.handle_channel_update(&chan_announcement.1).unwrap();
node.router.handle_channel_update(&chan_announcement.2).unwrap();
node.router.handle_node_announcement(&a_node_announcement).unwrap();
node.router.handle_node_announcement(&b_node_announcement).unwrap();
}
(chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
}