Skip to content

Add ability to broadcast our own node_announcement #435

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion fuzz/src/router.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,6 @@ pub fn do_test(data: &[u8]) {
msgs::DecodeError::UnknownVersion => return,
msgs::DecodeError::UnknownRequiredFeature => return,
msgs::DecodeError::InvalidValue => return,
msgs::DecodeError::ExtraAddressesPerType => return,
msgs::DecodeError::BadLengthDescriptor => return,
msgs::DecodeError::ShortRead => panic!("We picked the length..."),
msgs::DecodeError::Io(e) => panic!(format!("{}", e)),
Expand Down
37 changes: 19 additions & 18 deletions lightning/src/ln/channel.rs
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
holding_cell_update_fee: Option<u64>,
next_local_htlc_id: u64,
next_remote_htlc_id: u64,
channel_update_count: u32,
update_time_counter: u32,
feerate_per_kw: u64,

#[cfg(debug_assertions)]
Expand Down Expand Up @@ -490,7 +490,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
holding_cell_update_fee: None,
next_local_htlc_id: 0,
next_remote_htlc_id: 0,
channel_update_count: 1,
update_time_counter: 1,

resend_order: RAACommitmentOrder::CommitmentFirst,

Expand Down Expand Up @@ -714,7 +714,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
holding_cell_update_fee: None,
next_local_htlc_id: 0,
next_remote_htlc_id: 0,
channel_update_count: 1,
update_time_counter: 1,

resend_order: RAACommitmentOrder::CommitmentFirst,

Expand Down Expand Up @@ -1586,7 +1586,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
self.channel_state |= ChannelState::TheirFundingLocked as u32;
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
self.channel_update_count += 1;
self.update_time_counter += 1;
} else if (self.channel_state & (ChannelState::ChannelFunded as u32) != 0 &&
// Note that funding_signed/funding_created will have decremented both by 1!
self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
Expand Down Expand Up @@ -2480,7 +2480,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
}
Channel::<ChanSigner>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
self.pending_update_fee = Some(msg.feerate_per_kw as u64);
self.channel_update_count += 1;
self.update_time_counter += 1;
Ok(())
}

Expand Down Expand Up @@ -2763,7 +2763,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
// From here on out, we may not fail!

self.channel_state |= ChannelState::RemoteShutdownSent as u32;
self.channel_update_count += 1;
self.update_time_counter += 1;

// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
Expand Down Expand Up @@ -2793,7 +2793,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
};

self.channel_state |= ChannelState::LocalShutdownSent as u32;
self.channel_update_count += 1;
self.update_time_counter += 1;

Ok((our_shutdown, self.maybe_propose_first_closing_signed(fee_estimator), dropped_outbound_htlcs))
}
Expand Down Expand Up @@ -2860,7 +2860,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
if last_fee == msg.fee_satoshis {
self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &our_sig);
self.channel_state = ChannelState::ShutdownComplete as u32;
self.channel_update_count += 1;
self.update_time_counter += 1;
return Ok((None, Some(closing_tx)));
}
}
Expand Down Expand Up @@ -2910,7 +2910,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &our_sig);

self.channel_state = ChannelState::ShutdownComplete as u32;
self.channel_update_count += 1;
self.update_time_counter += 1;

Ok((Some(msgs::ClosingSigned {
channel_id: self.channel_id,
Expand Down Expand Up @@ -3022,8 +3022,8 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
}

/// Allowed in any state (including after shutdown)
pub fn get_channel_update_count(&self) -> u32 {
self.channel_update_count
pub fn get_update_time_counter(&self) -> u32 {
self.update_time_counter
}

pub fn get_latest_monitor_update_id(&self) -> u64 {
Expand Down Expand Up @@ -3149,7 +3149,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
}
self.channel_state = ChannelState::ShutdownComplete as u32;
self.channel_update_count += 1;
self.update_time_counter += 1;
return Err(msgs::ErrorMessage {
channel_id: self.channel_id(),
data: "funding tx had wrong script/value".to_owned()
Expand All @@ -3175,6 +3175,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
}
if header.bitcoin_hash() != self.last_block_connected {
self.last_block_connected = header.bitcoin_hash();
self.update_time_counter = cmp::max(self.update_time_counter, header.time);
if let Some(channel_monitor) = self.channel_monitor.as_mut() {
channel_monitor.last_block_hash = self.last_block_connected;
}
Expand All @@ -3185,7 +3186,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
true
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
self.channel_update_count += 1;
self.update_time_counter += 1;
true
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
// We got a reorg but not enough to trigger a force close, just update
Expand Down Expand Up @@ -3728,7 +3729,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
} else {
self.channel_state |= ChannelState::LocalShutdownSent as u32;
}
self.channel_update_count += 1;
self.update_time_counter += 1;

// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
// our shutdown until we've committed all of the pending changes.
Expand Down Expand Up @@ -3777,7 +3778,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
}

self.channel_state = ChannelState::ShutdownComplete as u32;
self.channel_update_count += 1;
self.update_time_counter += 1;
if self.channel_monitor.is_some() {
(self.channel_monitor.as_mut().unwrap().get_latest_local_commitment_txn(), dropped_outbound_htlcs)
} else {
Expand Down Expand Up @@ -3964,7 +3965,7 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for Channel<ChanSigner> {

self.next_local_htlc_id.write(writer)?;
(self.next_remote_htlc_id - dropped_inbound_htlcs).write(writer)?;
self.channel_update_count.write(writer)?;
self.update_time_counter.write(writer)?;
self.feerate_per_kw.write(writer)?;

match self.last_sent_closing_fee {
Expand Down Expand Up @@ -4124,7 +4125,7 @@ impl<ChanSigner: ChannelKeys + Readable> ReadableArgs<Arc<Logger>> for Channel<C

let next_local_htlc_id = Readable::read(reader)?;
let next_remote_htlc_id = Readable::read(reader)?;
let channel_update_count = Readable::read(reader)?;
let update_time_counter = Readable::read(reader)?;
let feerate_per_kw = Readable::read(reader)?;

let last_sent_closing_fee = match <u8 as Readable>::read(reader)? {
Expand Down Expand Up @@ -4203,7 +4204,7 @@ impl<ChanSigner: ChannelKeys + Readable> ReadableArgs<Arc<Logger>> for Channel<C
holding_cell_update_fee,
next_local_htlc_id,
next_remote_htlc_id,
channel_update_count,
update_time_counter,
feerate_per_kw,

#[cfg(debug_assertions)]
Expand Down
80 changes: 78 additions & 2 deletions lightning/src/ln/channelmanager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@ use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use ln::features::{InitFeatures, NodeFeatures};
use ln::router::Route;
use ln::features::InitFeatures;
use ln::msgs;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
Expand Down Expand Up @@ -368,6 +368,10 @@ pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref,
channel_state: Mutex<ChannelHolder<ChanSigner>>,
our_network_key: SecretKey,

/// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
/// value increases strictly since we don't assume access to a time source.
last_node_announcement_serial: AtomicUsize,

/// The bulk of our storage will eventually be here (channels and message queues and the like).
/// If we are connected to a peer we always at least have an entry here, even if no channels
/// are currently open with that peer.
Expand Down Expand Up @@ -665,6 +669,8 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
}),
our_network_key: keys_manager.get_node_secret(),

last_node_announcement_serial: AtomicUsize::new(0),

per_peer_state: RwLock::new(HashMap::new()),

pending_events: Mutex::new(Vec::new()),
Expand Down Expand Up @@ -1118,7 +1124,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.genesis_hash,
short_channel_id: short_channel_id,
timestamp: chan.get_channel_update_count(),
timestamp: chan.get_update_time_counter(),
flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
cltv_expiry_delta: CLTV_EXPIRY_DELTA,
htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
Expand Down Expand Up @@ -1334,6 +1340,57 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
})
}

#[allow(dead_code)]
// Messages of up to 64KB should never end up more than half full with addresses, as that would
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
Comment on lines +1344 to +1345
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hm, this isn't an actual spec rule, though, is it (mod the issue that this PR addresses)? >500 does seem extreme and I don't have an issue with enforcing it, just not sure the exact purpose of enforcing it...

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

IIRC (and if it doesn't it should) if we went to serialize it we'd generate something >64KB, panicking when trying to send a message that overflows the max message size.

// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
// smaller than 500:
const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;

/// Generates a signed node_announcement from the given arguments and creates a
/// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have
/// seen a channel_announcement from us (ie unless we have public channels open).
///
/// RGB is a node "color" and alias is a printable human-readable string to describe this node
/// to humans. They carry no in-protocol meaning.
///
/// addresses represent the set (possibly empty) of socket addresses on which this node accepts
/// incoming connections. These will be broadcast to the network, publicly tying these
/// addresses together. If you wish to preserve user privacy, addresses should likely contain
/// only Tor Onion addresses.
///
/// Panics if addresses is absurdly large (more than 500).
pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
let _ = self.total_consistency_lock.read().unwrap();

if addresses.len() > 500 {
panic!("More than half the message size was taken up by public addresses!");
}
Comment on lines +1371 to +1373
Copy link
Contributor

@valentinewallace valentinewallace Mar 6, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good target for whomever addresses #529


let announcement = msgs::UnsignedNodeAnnouncement {
features: NodeFeatures::supported(),
timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

reasoning for AcqRel?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

AcqRel is a good default - it ensures we always have the latest value here, without acting as a lock (as SeqCst does). If we're not relying on any other data to be consistent, but want consistency ourselves, AcqRel it is. Note that, on x86, AcqRel compiles down to nothing, whereas SeqCst is relatively expensive.

node_id: self.get_our_node_id(),
rgb, alias, addresses,
excess_address_data: Vec::new(),
Copy link
Contributor

@valentinewallace valentinewallace Mar 4, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since this is our node announcement, we'll never have any excess address data, right? this is just for remote peer NodeAnnouncements that may randomly have excess address data? is this a common problem...?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That is correct. And, indeed, it is not a common thing. We have to have it as otherwise the signatures of things we relay will fail, but, in general, we anticipate almost never having anything in there, or if we do, a very small thing.

excess_data: Vec::new(),
};
let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

just to be sure -- spec says the signature should be over the double hash, and this seems to be a single hash?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right, bitcoin_hashes types are confusing. That tiny little d that is easy to miss means double :).

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah, it is easy to miss.. 😅


let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
msg: msgs::NodeAnnouncement {
signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we may wanna provide an interface for external signers later

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right. For now the external signing support hasn't even attempted to think about moving the node_id private key out, given it's used in a ton of places. Eventually it'll need to be (well, probably after splitting it up more), but for now it is what it is.

contents: announcement
},
});
}

/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
Expand Down Expand Up @@ -2719,6 +2776,18 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
}
self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
loop {
// Update last_node_announcement_serial to be the max of its current value and the
// block timestamp. This should keep us close to the current time without relying on
// having an explicit local time source.
// Just in case we end up in a race, we loop until we either successfully update
// last_node_announcement_serial or decide we don't need to.
let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
if old_serial >= header.time as usize { break; }
if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
break;
}
}
}

/// We force-close the channel without letting our counterparty participate in the shutdown
Expand Down Expand Up @@ -2970,6 +3039,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
&events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
Expand Down Expand Up @@ -3288,6 +3358,8 @@ impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref
peer_state.latest_features.write(writer)?;
}

(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;

Ok(())
}
}
Expand Down Expand Up @@ -3459,6 +3531,8 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
}

let last_node_announcement_serial: u32 = Readable::read(reader)?;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hm, in theory could this cause a user migrating error?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yea, we currently do no version checking for serialized types and break things all the time. We'll need to fix this come 0.1, but for now, no reason to slow down to build compatibility with 0.0.X.


let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
Expand All @@ -3478,6 +3552,8 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
}),
our_network_key: args.keys_manager.get_node_secret(),

last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),

per_peer_state: RwLock::new(per_peer_state),

pending_events: Mutex::new(Vec::new()),
Expand Down
23 changes: 23 additions & 0 deletions lightning/src/ln/functional_test_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -394,10 +394,33 @@ pub fn create_announced_chan_between_nodes<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'

pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);

nodes[a].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
let a_events = nodes[a].node.get_and_clear_pending_msg_events();
assert_eq!(a_events.len(), 1);
let a_node_announcement = match a_events[0] {
MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => {
(*msg).clone()
},
_ => panic!("Unexpected event"),
};

nodes[b].node.broadcast_node_announcement([1, 1, 1], [1; 32], Vec::new());
let b_events = nodes[b].node.get_and_clear_pending_msg_events();
assert_eq!(b_events.len(), 1);
let b_node_announcement = match b_events[0] {
MessageSendEvent::BroadcastNodeAnnouncement { ref msg } => {
(*msg).clone()
},
_ => panic!("Unexpected event"),
};
Comment on lines +399 to +416
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍


for node in nodes {
assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
node.router.handle_channel_update(&chan_announcement.1).unwrap();
node.router.handle_channel_update(&chan_announcement.2).unwrap();
node.router.handle_node_announcement(&a_node_announcement).unwrap();
node.router.handle_node_announcement(&b_node_announcement).unwrap();
}
(chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
}
Expand Down
Loading