@@ -1884,7 +1884,10 @@ macro_rules! handle_error {
}
}

- log_error!(WithContext::from(&$self.logger, Some($counterparty_node_id), chan_id.map(|(cid, _)| cid)), "{}", err.err);
+ let logger = WithContext::from(
+ &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
+ );
+ log_error!(logger, "{}", err.err);
if let msgs::ErrorAction::IgnoreError = err.action {
} else {
msg_events.push(events::MessageSendEvent::HandleError {
@@ -2836,7 +2839,7 @@ where
} else {
ClosureReason::HolderForceClosed
};
- let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(channel_id.clone()));
+ let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger, "Force-closing channel {}", channel_id);
self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
@@ -3157,7 +3160,8 @@ where
if chan.context.get_short_channel_id().is_none() {
return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
}
- log_trace!(WithChannelContext::from(&self.logger, &chan.context), "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
self.get_channel_update_for_unicast(chan)
}
@@ -3173,7 +3177,8 @@ where
/// [`channel_update`]: msgs::ChannelUpdate
/// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id().0));
let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
@@ -3183,7 +3188,8 @@ where
}

fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id().0));
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id().0));
let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];

let enabled = chan.context.is_usable() && match chan.channel_update_status() {
@@ -4716,7 +4722,8 @@ where
| {
context.maybe_expire_prev_config();
if unfunded_context.should_expire_unfunded_channel() {
- log_error!(self.logger,
+ let logger = WithChannelContext::from(&self.logger, context);
+ log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
@@ -4830,7 +4837,8 @@ where

for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
if { req.ticks_remaining -= 1; req.ticks_remaining } <= 0 {
- log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id));
+ log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
@@ -5742,7 +5750,8 @@ where
pending.retain(|upd| upd.update_id > highest_applied_update_id);
pending.len()
} else { 0 };
- log_trace!(WithChannelContext::from(&self.logger, &channel.context), "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
+ let logger = WithChannelContext::from(&self.logger, &channel.context);
+ log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
remaining_in_flight);
if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
@@ -6129,7 +6138,8 @@ where
}
Ok(())
} else {
- log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
let channel_id = match funding_msg_opt {
Some(msg) => msg.channel_id,
None => chan.context.channel_id(),
@@ -6441,7 +6451,8 @@ where
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
- log_trace!(self.logger,
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
msg.channel_id);
peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
@@ -6593,7 +6604,8 @@ where
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- log_info!(WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id())), "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
+ let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+ log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
@@ -7748,12 +7760,14 @@ where
/// operation. It will double-check that nothing *else* is also blocking the same channel from
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ let logger = WithContext::from(
+ &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+ );
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lck = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lck;
- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()));
if let Some(blocker) = completed_blocker.take() {
// Only do this on the first iteration of the loop.
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
@@ -7775,7 +7789,6 @@ where

if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
@@ -7794,7 +7807,6 @@ where
}
}
} else {
- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()));
log_debug!(logger,
"Got a release post-RAA monitor update for peer {} but the channel is gone",
log_pubkey!(counterparty_node_id));
@@ -8552,7 +8564,10 @@ where
timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
- log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
+ let logger = WithContext::from(
+ &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+ );
+ log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
} else { true }
});
@@ -8859,8 +8874,11 @@ where
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
- log_debug!(WithContext::from(&self.logger, Some(*counterparty_node_id), None), "Marking channels with {} disconnected and generating channel_updates.",
- log_pubkey!(counterparty_node_id));
+ log_debug!(
+ WithContext::from(&self.logger, Some(*counterparty_node_id), None),
+ "Marking channels with {} disconnected and generating channel_updates.",
+ log_pubkey!(counterparty_node_id)
+ );
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
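Every hunk above applies the same refactor: instead of logging through the bare `self.logger`, each call site first wraps the logger in a context decorator (`WithContext::from` for an optional peer id plus channel id, `WithChannelContext::from` for a full channel context) and logs through that binding, so every record carries the peer/channel it concerns. Below is a minimal self-contained sketch of the decorator idea; the names here (`Logger`, `ContextLogger`, `StdoutLogger`) are hypothetical stand-ins, not the actual LDK types or their signatures.

```rust
// Illustrative sketch only, not LDK's WithContext: a logger decorator that
// stamps optional peer/channel context onto every record, mirroring the
// pattern the diff applies at each call site.
trait Logger {
    fn log(&self, msg: &str);
}

struct StdoutLogger;

impl Logger for StdoutLogger {
    fn log(&self, msg: &str) {
        println!("{msg}");
    }
}

// Hypothetical analogue of `WithContext::from(&logger, peer, channel)`.
struct ContextLogger<'a, L: Logger> {
    inner: &'a L,
    peer_id: Option<&'a str>,
    channel_id: Option<&'a str>,
}

impl<'a, L: Logger> ContextLogger<'a, L> {
    fn from(inner: &'a L, peer_id: Option<&'a str>, channel_id: Option<&'a str>) -> Self {
        Self { inner, peer_id, channel_id }
    }
}

impl<'a, L: Logger> Logger for ContextLogger<'a, L> {
    fn log(&self, msg: &str) {
        // Prefix the record with whatever context the call site could supply.
        let peer = self.peer_id.unwrap_or("?");
        let chan = self.channel_id.unwrap_or("?");
        self.inner.log(&format!("[peer: {peer}] [chan: {chan}] {msg}"));
    }
}

fn main() {
    let base = StdoutLogger;
    // As in the hunks above: bind the decorated logger once, then log through it.
    let logger = ContextLogger::from(&base, Some("02ab"), Some("4242"));
    logger.log("Force-closing channel for not establishing in a timely manner");
}
```

Binding the wrapper once also explains the shape of the 7748 hunk: the `WithContext::from` construction is hoisted above the `loop`, and the per-branch constructions inside it are deleted, so the same context is reused on every iteration.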