From 422bdcf81467451a31a6052950759a86cdf1760a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 11 Jun 2021 16:03:34 +0000 Subject: [PATCH 1/4] Never generate a `BroadcastChannelUpdate` for priv channels Currently we always generate a `MessageSendEvent::BroadcastChannelUpdate` when a channel is closed even if the channel is private. Our immediate peers should ignore such messages as they haven't seen a corresponding `channel_announcement`, but we are still giving up some privacy by informing our immediate peers of which channels were ours. Here we split `ChannelManager::get_channel_update` into a `get_channel_update_for_broadcast` and `get_channel_update_for_unicast`. The first is used when we are broadcasting a `channel_update`, allowing us to refuse to do so for private channels. The second is used when failing a payment (in which case the recipient has already shown that they are aware of the channel so no such privacy concerns exist). --- lightning/src/ln/channelmanager.rs | 77 +++++++++++++++++++----------- 1 file changed, 48 insertions(+), 29 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1f16781603a..5caffe8ee12 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -780,7 +780,7 @@ macro_rules! convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, ChannelError::CloseDelayBroadcast(msg) => { log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg); @@ -788,7 +788,7 @@ macro_rules! convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(false); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) } } } @@ -844,7 +844,8 @@ macro_rules! handle_monitor_err { // splitting hairs we'd prefer to claim payments that were to us, but we haven't // given up the preimage yet, so might as well just wait until the payment is // retried, avoiding the on-chain fees. 
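Distilled to a standalone sketch, the split described in the commit message amounts to a single privacy gate in front of the existing update builder. The types below are simplified stand-ins for the real `Channel`, `ChannelUpdate` and `LightningError` definitions; only the control flow is taken from this patch (the `handle_monitor_err` hunk continues after the sketch):

    // Simplified stand-ins -- the real types live in lightning::ln::channel
    // and lightning::ln::msgs; only the control flow mirrors the patch.
    struct Channel { announce: bool, short_channel_id: Option<u64> }

    impl Channel {
        fn should_announce(&self) -> bool { self.announce }
        fn get_short_channel_id(&self) -> Option<u64> { self.short_channel_id }
    }

    struct ChannelUpdate { short_channel_id: u64 }

    // Refuses to produce an update for private channels, so a
    // BroadcastChannelUpdate event can never leak an unannounced channel.
    fn get_channel_update_for_broadcast(chan: &Channel) -> Result<ChannelUpdate, &'static str> {
        if !chan.should_announce() {
            return Err("Cannot broadcast a channel_update for a private channel");
        }
        get_channel_update_for_unicast(chan)
    }

    // Only fails if no short_channel_id has been assigned yet; safe whenever
    // the recipient has already proven they know the channel exists, e.g.
    // when failing one of their HTLCs back to them.
    fn get_channel_update_for_unicast(chan: &Channel) -> Result<ChannelUpdate, &'static str> {
        match chan.get_short_channel_id() {
            None => Err("Channel not yet established"),
            Some(short_channel_id) => Ok(ChannelUpdate { short_channel_id }),
        }
    }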
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok())); + let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, + $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() )); (res, true) }, ChannelMonitorUpdateErr::TemporaryFailure => { @@ -1224,9 +1225,7 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } let chan_update = if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update(&chan) { - Some(update) - } else { None } + self.get_channel_update_for_broadcast(&chan).ok() } else { None }; if let Some(update) = chan_update { @@ -1275,7 +1274,7 @@ impl ChannelMana }; log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(true)); - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -1535,23 +1534,23 @@ impl ChannelMana // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. if !chan.is_live() { // channel_disabled - break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum - break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap()))); + break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) }); if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient - break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry - break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } let cur_height = self.best_block.read().unwrap().height() + 1; // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be 
robust wrt to counterparty // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational) if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon - break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap()))); + break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far break Some(("CLTV expiry is too far in the future", 21, None)); @@ -1559,7 +1558,7 @@ impl ChannelMana // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS. // But, to be safe against policy reception, we use a longuer delay. if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 { - break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } break None; @@ -1587,9 +1586,27 @@ impl ChannelMana (pending_forward_info, channel_state.unwrap()) } - /// only fails if the channel does not yet have an assigned short_id + /// Gets the current channel_update for the given channel. This first checks if the channel is + /// public, and thus should be called whenever the result is going to be passed out in a + /// [`MessageSendEvent::BroadcastChannelUpdate`] event. + /// + /// May be called with channel_state already locked! + fn get_channel_update_for_broadcast(&self, chan: &Channel) -> Result { + if !chan.should_announce() { + return Err(LightningError { + err: "Cannot broadcast a channel_update for a private channel".to_owned(), + action: msgs::ErrorAction::IgnoreError + }); + } + self.get_channel_update_for_unicast(chan) + } + + /// Gets the current channel_update for the given channel. This does not check if the channel + /// is public (only returning an Err if the channel does not yet have an assigned short_id), + /// and thus MUST NOT be called unless the recipient of the resulting message has already + /// provided evidence that they know about the existence of the channel. /// May be called with channel_state already locked! 
- fn get_channel_update(&self, chan: &Channel) -> Result { + fn get_channel_update_for_unicast(&self, chan: &Channel) -> Result { let short_channel_id = match chan.get_short_channel_id() { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, @@ -1982,7 +1999,7 @@ impl ChannelMana if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) { channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg, - update_msg: match self.get_channel_update(chan) { + update_msg: match self.get_channel_update_for_broadcast(chan) { Ok(msg) => msg, Err(_) => continue, }, @@ -2074,7 +2091,7 @@ impl ChannelMana } else { panic!("Stated return value requirements in send_htlc() were not met"); } - let chan_update = self.get_channel_update(chan.get()).unwrap(); + let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap(); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() } )); @@ -2146,7 +2163,7 @@ impl ChannelMana if let Some(short_id) = channel.get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok())) + Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } }; @@ -2349,7 +2366,7 @@ impl ChannelMana ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -2358,7 +2375,7 @@ impl ChannelMana chan.set_channel_update_status(ChannelUpdateStatus::Disabled); }, ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -2408,7 +2425,7 @@ impl ChannelMana let (failure_code, onion_failure_data) = match self.channel_state.lock().unwrap().by_id.entry(channel_id) { hash_map::Entry::Occupied(chan_entry) => { - if let Ok(upd) = self.get_channel_update(&chan_entry.get()) { + if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) { (0x1000|7, upd.encode_with_len()) } else { (0x4000|10, Vec::new()) @@ -2991,7 +3008,7 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut channel_state = 
self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -3037,7 +3054,7 @@ impl ChannelMana self.tx_broadcaster.broadcast_transaction(&broadcast_tx); } if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -3075,7 +3092,7 @@ impl ChannelMana // want to reject the new HTLC and fail it backwards instead of forwarding. match pending_forward_info { PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => { - let reason = if let Ok(upd) = self.get_channel_update(chan) { + let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) { onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{ let mut res = Vec::with_capacity(8 + 128); // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791 @@ -3337,7 +3354,9 @@ impl ChannelMana channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan), - update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state + // Note that announcement_signatures fails if the channel cannot be announced, + // so get_channel_update_for_broadcast will never fail by the time we get here. + update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), }); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3495,7 +3514,7 @@ impl ChannelMana short_to_id.remove(&short_id); } failed_channels.push(chan.force_shutdown(false)); - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -3934,7 +3953,7 @@ where let res = f(channel); if let Ok((chan_res, mut timed_out_pending_htlcs)) = res { for (source, payment_hash) in timed_out_pending_htlcs.drain(..) { - let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe + let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now data: chan_update, @@ -3963,7 +3982,7 @@ where // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. 
failed_channels.push(channel.force_shutdown(true)); - if let Ok(update) = self.get_channel_update(&channel) { + if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -4147,7 +4166,7 @@ impl short_to_id.remove(&short_id); } failed_channels.push(chan.force_shutdown(true)); - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); From e3968e09939b0b48ec7b4b614a75018c0d3114b0 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 12 Jun 2021 21:58:50 +0000 Subject: [PATCH 2/4] Send channel_update messages to direct peers on private channels If we are a public node and have a private channel, our counterparty needs to know the fees which we will charge to forward payments to them. Without sending them a channel_update, they have no way to learn that information, resulting in the channel being effectively useless for outbound-from-us payments. This commit fixes our lack of channel_update messages to private channel counterparties, ensuring we always send them a channel_update after the channel funding is confirmed. --- fuzz/src/chanmon_consistency.rs | 24 ++++++- lightning-background-processor/src/lib.rs | 8 ++- lightning/src/ln/chanmon_update_fail_tests.rs | 18 +++-- lightning/src/ln/channelmanager.rs | 66 +++++++++++++++++-- lightning/src/ln/functional_test_utils.rs | 12 +++- lightning/src/ln/functional_tests.rs | 3 +- lightning/src/ln/peer_handler.rs | 6 ++ lightning/src/util/events.rs | 9 +++ 8 files changed, 130 insertions(+), 16 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 085e4cb7402..158d87149a5 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -571,7 +571,12 @@ pub fn do_test(data: &[u8], out: Out) { events::MessageSendEvent::SendFundingLocked { .. } => continue, events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue, events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => continue, - _ => panic!("Unhandled message event"), + events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set! + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + _ => panic!("Unhandled message event {:?}", event), }; if push_a { ba_events.push(event); } else { bc_events.push(event); } } @@ -692,7 +697,16 @@ pub fn do_test(data: &[u8], out: Out) { // Can be generated due to a payment forward being rejected due to a // channel having previously failed a monitor update }, - _ => panic!("Unhandled message event"), + events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { + // When we reconnect we will resend a channel_update to make sure our + // counterparty has the latest parameters for receiving payments + // through us. We do, however, check that the message does not include + // the "disabled" bit, as we should never ever have a channel which is + // disabled when we send such an update (or it may indicate channel + // force-close which we should detect as an error). 
+ assert_eq!(msg.contents.flags & 2, 0); + }, + _ => panic!("Unhandled message event {:?}", event), } if $limit_events != ProcessMessages::AllMessages { break; @@ -722,6 +736,9 @@ pub fn do_test(data: &[u8], out: Out) { events::MessageSendEvent::SendFundingLocked { .. } => {}, events::MessageSendEvent::SendAnnouncementSignatures { .. } => {}, events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, + events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { + assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set! + }, _ => panic!("Unhandled message event"), } } @@ -737,6 +754,9 @@ pub fn do_test(data: &[u8], out: Out) { events::MessageSendEvent::SendFundingLocked { .. } => {}, events::MessageSendEvent::SendAnnouncementSignatures { .. } => {}, events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, + events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { + assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set! + }, _ => panic!("Unhandled message event"), } } diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index dd13a35b46f..8c90532d1ea 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -443,9 +443,13 @@ mod tests { // Confirm the funding transaction. confirm_transaction(&mut nodes[0], &funding_tx); + let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()); confirm_transaction(&mut nodes[1], &funding_tx); - nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())); - nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())); + let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); assert!(bg_processor.stop().is_ok()); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index dd7b1906ca0..5f90b030dfb 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -1158,7 +1158,10 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!( + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); @@ -1172,10 +1175,15 @@ fn test_monitor_update_fail_reestablish() { assert!(bs_reestablish == get_event_msg!(nodes[1], 
MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id())); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); + assert_eq!( + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); check_added_monitors!(nodes[1], 0); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!( + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); @@ -1352,14 +1360,14 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1492,7 +1500,9 @@ fn monitor_failed_no_reestablish_response() { let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5caffe8ee12..8dbc17bae84 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1598,6 +1598,7 @@ impl ChannelMana action: msgs::ErrorAction::IgnoreError }); } + log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id())); self.get_channel_update_for_unicast(chan) } @@ -1607,6 +1608,7 @@ impl ChannelMana /// provided evidence that they know about the existence of the 
channel. /// May be called with channel_state already locked! fn get_channel_update_for_unicast(&self, chan: &Channel) -> Result { + log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); let short_channel_id = match chan.get_short_channel_id() { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, @@ -2789,7 +2791,8 @@ impl ChannelMana pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let (mut pending_failures, chan_restoration_res) = { + let chan_restoration_res; + let mut pending_failures = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) { @@ -2801,7 +2804,21 @@ impl ChannelMana } let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger); - (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked)) + let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() { + // We only send a channel_update in the case where we are just now sending a + // funding_locked and the channel is in a usable state. Further, we rely on the + // normal announcement_signatures process to send a channel_update for public + // channels, only generating a unicast channel_update if this is a private channel. + Some(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get().get_counterparty_node_id(), + msg: self.get_channel_update_for_unicast(channel.get()).unwrap(), + }) + } else { None }; + chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked); + if let Some(upd) = channel_update { + channel_state.pending_msg_events.push(upd); + } + pending_failures }; post_handle_chan_restoration!(self, chan_restoration_res); for failure in pending_failures.drain(..) { @@ -2964,6 +2981,11 @@ impl ChannelMana node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); + } else if chan.get().is_usable() { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: counterparty_node_id.clone(), + msg: self.get_channel_update_for_unicast(chan.get()).unwrap(), + }); } Ok(()) }, @@ -3394,7 +3416,8 @@ impl ChannelMana } fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = { + let chan_restoration_res; + let (htlcs_failed_forward, need_lnd_workaround) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -3409,15 +3432,27 @@ impl ChannelMana // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. 
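The channel_reestablish handling continues just below; together with the funding_locked, block-connection, and monitor-restoration paths above, it applies what amounts to one delivery rule for channel_update once a channel is usable. A condensed sketch follows, with plain booleans standing in for the `Channel` accessors; note that at the funding_locked and block-connection sites, an available announcement_signatures takes the place of the should_announce check:

    // Which channel_update delivery applies, per the logic this patch adds.
    enum UpdateDelivery {
        Broadcast, // public channel: the announcement_signatures flow
                   // already yields a broadcast channel_update
        Unicast,   // private channel: SendChannelUpdate to the counterparty only
        Nothing,   // not usable yet, or shutting down: send nothing for now
    }

    fn channel_update_delivery(is_usable: bool, should_announce: bool) -> UpdateDelivery {
        if !is_usable {
            UpdateDelivery::Nothing
        } else if should_announce {
            UpdateDelivery::Broadcast
        } else {
            UpdateDelivery::Unicast
        }
    }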
let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) = try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan); + let mut channel_update = None; if let Some(msg) = shutdown { channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: counterparty_node_id.clone(), msg, }); + } else if chan.get().is_usable() { + // If the channel is in a usable state (ie the channel is not being shut + // down), send a unicast channel_update to our counterparty to make sure + // they have the latest channel parameters. + channel_update = Some(events::MessageSendEvent::SendChannelUpdate { + node_id: chan.get().get_counterparty_node_id(), + msg: self.get_channel_update_for_unicast(chan.get()).unwrap(), + }); } let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); - (htlcs_failed_forward, need_lnd_workaround, - handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked)) + chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked); + if let Some(upd) = channel_update { + channel_state.pending_msg_events.push(upd); + } + (htlcs_failed_forward, need_lnd_workaround) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -3970,6 +4005,12 @@ where node_id: channel.get_counterparty_node_id(), msg: announcement_sigs, }); + } else if channel.is_usable() { + log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); + pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get_counterparty_node_id(), + msg: self.get_channel_update_for_unicast(channel).unwrap(), + }); } else { log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id())); } @@ -4209,6 +4250,7 @@ impl &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, + &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true, &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, @@ -5042,7 +5084,19 @@ pub mod bench { Listen::block_connected(&node_b, &block, 1); node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id())); - node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id())); + let msg_events = node_a.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + match msg_events[0] { + MessageSendEvent::SendFundingLocked { ref msg, .. 
} => { + node_b.handle_funding_locked(&node_a.get_our_node_id(), msg); + get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id()); + }, + _ => panic!(), + } + match msg_events[1] { + MessageSendEvent::SendChannelUpdate { .. } => {}, + _ => panic!(), + } let dummy_graph = NetworkGraph::new(genesis_hash); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 00bde294cef..e9fae460dd6 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1583,18 +1583,20 @@ macro_rules! handle_chan_reestablish_msgs { let mut revoke_and_ack = None; let mut commitment_update = None; let order = if let Some(ev) = msg_events.get(idx) { - idx += 1; match ev { &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); revoke_and_ack = Some(msg.clone()); + idx += 1; RAACommitmentOrder::RevokeAndACKFirst }, &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); commitment_update = Some(updates.clone()); + idx += 1; RAACommitmentOrder::CommitmentFirst }, + &MessageSendEvent::SendChannelUpdate { .. } => RAACommitmentOrder::CommitmentFirst, _ => panic!("Unexpected event"), } } else { @@ -1607,16 +1609,24 @@ macro_rules! handle_chan_reestablish_msgs { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); assert!(revoke_and_ack.is_none()); revoke_and_ack = Some(msg.clone()); + idx += 1; }, &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); assert!(commitment_update.is_none()); commitment_update = Some(updates.clone()); + idx += 1; }, + &MessageSendEvent::SendChannelUpdate { .. } => {}, _ => panic!("Unexpected event"), } } + if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected. + } + (funding_locked, revoke_and_ack, commitment_update, order) } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index df3c64ae8ff..d6c4716b19d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1039,7 +1039,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); node_0_2nd_shutdown } else { - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected. 
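These `flags & 2` assertions, like the ones in the fuzzer above, examine bit 1 of channel_update's channel_flags, and the final patch in this series keys off bit 0. BOLT 7 defines the low two bits as "direction" and "disable"; a pair of illustrative helpers, not part of the codebase, makes the masks explicit (the shutdown-rebroadcast test resumes after the sketch):

    // channel_update.channel_flags, per BOLT 7:
    //   bit 0 ("direction"): 0 if the update was produced by node_1 (the
    //     node with the lexicographically lesser node_id), 1 if by node_2.
    //   bit 1 ("disable"): set when the sender is disabling the channel.
    fn is_from_node_one(channel_flags: u8) -> bool { channel_flags & 1 == 0 }
    fn is_disabled(channel_flags: u8) -> bool { channel_flags & 2 != 0 }

    fn main() {
        // What the test assertions above amount to:
        let flags = 0u8; // produced by node_1, disable bit unset
        assert!(is_from_node_one(flags));
        assert!(!is_disabled(flags)); // i.e. flags & 2 == 0
    }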
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) }; diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 9e395f1107b..96ec31c98ee 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -1264,6 +1264,12 @@ impl PeerManager { + MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}", + log_pubkey!(node_id), msg.contents.short_channel_id); + let peer = get_peer_for_forwarding!(node_id); + peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + }, MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => { self.message_handler.route_handler.handle_htlc_fail_channel_update(update); }, diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index dbb4178fb50..0fc7c6b3a3d 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -389,6 +389,15 @@ pub enum MessageSendEvent { /// The channel_update which should be sent. msg: msgs::ChannelUpdate, }, + /// Used to indicate that a channel_update should be sent to a single peer. + /// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is a + /// private channel and we shouldn't be informing all of our peers of channel parameters. + SendChannelUpdate { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The channel_update which should be sent. + msg: msgs::ChannelUpdate, + }, /// Broadcast an error downstream to be handled HandleError { /// The node_id of the node which should receive this message From 11594c37a13a1b39833c5335e91ac567de147724 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 14 Jun 2021 15:14:18 +0000 Subject: [PATCH 3/4] Fix spelling in ChannelManager comment --- lightning/src/ln/channelmanager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8dbc17bae84..88f35e6667c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1555,8 +1555,8 @@ impl ChannelMana if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far break Some(("CLTV expiry is too far in the future", 21, None)); } - // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS. - // But, to be safe against policy reception, we use a longuer delay. + // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS. + // But, to be safe against policy reception, we use a longer delay. if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 { break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } From ba600db7931d8f87a738ef077db3583554af60cb Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 30 Jun 2021 00:27:24 +0000 Subject: [PATCH 4/4] Ignore our own gossip if it is sent to us from our counterparty If our channel counterparty sends us our own channel_update message, we'll erroneously use the information in that message to update our view of the forwarding parameters our counterparty requires of us, ultimately generating invoices with bogus forwarding information.
This fixes that behavior by checking the channel_update's directionality before handling it. --- lightning/src/ln/channelmanager.rs | 33 +++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 88f35e6667c..47ea3564423 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3408,7 +3408,13 @@ impl ChannelMana } return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); } - try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); + let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..]; + let msg_from_node_one = msg.contents.flags & 1 == 0; + if were_node_one == msg_from_node_one { + return Ok(NotifyOption::SkipPersist); + } else { + try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); + } }, hash_map::Entry::Vacant(_) => unreachable!() } @@ -4984,6 +4990,31 @@ mod tests { // At this point the channel info given by peers should still be the same. assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info); assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info); + + // An earlier version of handle_channel_update didn't check the directionality of the + // update message and would always update the local fee info, even if our peer was + // (spuriously) forwarding us our own channel_update. + let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..]; + let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 }; + let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 }; + + // First deliver each peer's own message, checking that the node doesn't need to be + // persisted and that its channel info remains the same. + nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update); + nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update); + assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info); + assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info); + + // Finally, deliver the other peer's message, ensuring each node needs to be persisted and + // the channel info has updated. + nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update); + nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update); + assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info); + assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info); } }
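To restate the new directionality check standalone: the sketch below operates on already-serialized (compressed, 33-byte) public keys rather than the real secp256k1 `PublicKey` type, with the ordering rule and bit semantics taken from the diff above.

    /// Returns true if a channel_update received from our counterparty should
    /// be applied, i.e. it describes *their* direction of the channel.
    fn should_apply_counterparty_update(
        our_node_id: &[u8; 33],
        their_node_id: &[u8; 33],
        channel_flags: u8,
    ) -> bool {
        // node_1, in BOLT 7 terms, is the lexicographically lesser node_id.
        let were_node_one = our_node_id[..] < their_node_id[..];
        // Bit 0 of channel_flags is 0 when the update was produced by node_1.
        let msg_from_node_one = channel_flags & 1 == 0;
        // If the direction matches us, the peer has echoed our own update
        // back; the handler above ignores it (returning
        // Ok(NotifyOption::SkipPersist)) rather than overwriting our view of
        // the forwarding parameters our counterparty requires of us.
        were_node_one != msg_from_node_one
    }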