@@ -1309,7 +1309,8 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// for broadcast messages, where ordering isn't as strict).
 	pub(super) pending_msg_events: Vec<MessageSendEvent>,
 	/// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the
-	/// user but which have not yet completed.
+	/// user but which have not yet completed. We still keep the funding outpoint around to backfill
+	/// the legacy TLV field to support downgrading.
 	///
 	/// Note that the channel may no longer exist. For example if the channel was closed but we
 	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
@@ -1321,7 +1322,7 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// where we complete one [`ChannelMonitorUpdate`] (but there are more pending as background
 	/// events) but we conclude all pending [`ChannelMonitorUpdate`]s have completed and its safe
 	/// to run post-completion actions.
-	in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
+	in_flight_monitor_updates: BTreeMap<(OutPoint, ChannelId), Vec<ChannelMonitorUpdate>>,
 	/// Map from a specific channel to some action(s) that should be taken when all pending
 	/// [`ChannelMonitorUpdate`]s for the channel complete updating.
 	///
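
(Not part of the diff: a minimal standalone sketch of the composite-key layout this hunk introduces. `OutPoint`, `ChannelId`, and `ChannelMonitorUpdate` below are simplified stand-ins for the LDK types, not the real definitions.)

```rust
use std::collections::BTreeMap;

// Simplified stand-ins for the LDK types (illustration only).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct OutPoint { txid: [u8; 32], index: u16 }
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct ChannelId([u8; 32]);
struct ChannelMonitorUpdate { update_id: u64 }

fn main() {
    // Keyed by (OutPoint, ChannelId): the channel ID becomes the primary
    // handle, while the funding outpoint rides along so the legacy
    // outpoint-keyed TLV can still be written for downgrades.
    let mut in_flight: BTreeMap<(OutPoint, ChannelId), Vec<ChannelMonitorUpdate>> =
        BTreeMap::new();

    let funding_txo = OutPoint { txid: [0; 32], index: 0 };
    let chan_id = ChannelId([1; 32]);
    in_flight.entry((funding_txo, chan_id))
        .or_insert_with(Vec::new)
        .push(ChannelMonitorUpdate { update_id: 1 });

    // Lookups now need both halves of the key.
    assert_eq!(in_flight.get(&(funding_txo, chan_id)).map(|v| v.len()), Some(1));
}
```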
@@ -3284,7 +3285,7 @@ macro_rules! handle_new_monitor_update {
 		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
 		_internal_outer, $completed: expr
 	) => { {
-		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
+		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry(($funding_txo, $chan_id))
 			.or_insert_with(Vec::new);
 		// During startup, we push monitor updates as background events through to here in
 		// order to replay updates that were in-flight when we shut down. Thus, we have to
@@ -4010,7 +4011,7 @@ where
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
 			let mut peer_state = peer_state_mtx.lock().unwrap();
-			if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) {
+			if peer_state.in_flight_monitor_updates.get(&(funding_txo, shutdown_res.channel_id)).map(|l| l.is_empty()).unwrap_or(true) {
 				let update_actions = peer_state.monitor_update_blocked_actions
 					.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
@@ -7574,7 +7575,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			let peer_state = &mut *peer_state_lock;
 
 			let remaining_in_flight =
-				if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
+				if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(&(*funding_txo, *channel_id)) {
 					pending.retain(|upd| upd.update_id > highest_applied_update_id);
 					pending.len()
 				} else { 0 };
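
(Not part of the diff: this hunk only re-keys the lookup; the retain-and-count logic is unchanged. A toy sketch of how completed updates are pruned, with plain `u64` update IDs standing in for `ChannelMonitorUpdate`s.)

```rust
fn main() {
    // In-flight update IDs for one channel, as held in the map's value Vec.
    let mut pending: Vec<u64> = vec![3, 4, 5];
    let highest_applied_update_id = 4;
    // Drop every update the ChannelMonitor has already applied; whatever
    // survives is still in flight.
    pending.retain(|upd| *upd > highest_applied_update_id);
    let remaining_in_flight = pending.len();
    assert_eq!(remaining_in_flight, 1);
}
```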
@@ -12986,12 +12987,22 @@ where
 			pending_claiming_payments = None;
 		}
 
-		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
+		let mut legacy_in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
+		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &ChannelId), &Vec<ChannelMonitorUpdate>>> = None;
 		for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
-			for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
+			for ((funding_txo, channel_id), updates) in peer_state.in_flight_monitor_updates.iter() {
 				if !updates.is_empty() {
-					if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
-					in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
+					if legacy_in_flight_monitor_updates.is_none() {
+						legacy_in_flight_monitor_updates = Some(new_hash_map());
+					}
+					legacy_in_flight_monitor_updates.as_mut().unwrap()
+						.insert((counterparty_id, funding_txo), updates);
+
+					if in_flight_monitor_updates.is_none() {
+						in_flight_monitor_updates = Some(new_hash_map());
+					}
+					in_flight_monitor_updates.as_mut().unwrap()
+						.insert((counterparty_id, channel_id), updates);
 				}
 			}
 		}
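
(Not part of the diff: the loop above fills two maps in one pass so that both the legacy outpoint-keyed TLV and the new channel-ID-keyed TLV can be written. A hedged sketch of that split, using tuple/array stand-ins for the LDK types.)

```rust
use std::collections::{BTreeMap, HashMap};

// Stand-in types for illustration only.
type OutPoint = ([u8; 32], u16);
type ChannelId = [u8; 32];
type Updates = Vec<u64>;

/// Project one (OutPoint, ChannelId)-keyed map into the two serialization
/// views: a legacy outpoint-keyed map and a channel-ID-keyed map.
fn split_for_serialization(
    in_flight: &BTreeMap<(OutPoint, ChannelId), Updates>,
) -> (HashMap<OutPoint, &Updates>, HashMap<ChannelId, &Updates>) {
    let mut legacy = HashMap::new();
    let mut by_channel_id = HashMap::new();
    for ((funding_txo, channel_id), updates) in in_flight {
        if !updates.is_empty() {
            // Same values under both key schemes; only the key differs.
            legacy.insert(*funding_txo, updates);
            by_channel_id.insert(*channel_id, updates);
        }
    }
    (legacy, by_channel_id)
}

fn main() {
    let mut in_flight = BTreeMap::new();
    in_flight.insert((([0u8; 32], 0u16), [1u8; 32]), vec![1u64, 2]);
    let (legacy, by_channel_id) = split_for_serialization(&in_flight);
    assert_eq!(legacy.len(), 1);
    assert_eq!(by_channel_id.len(), 1);
}
```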
@@ -13006,11 +13017,12 @@ where
 			(7, self.fake_scid_rand_bytes, required),
 			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
 			(9, htlc_purposes, required_vec),
-			(10, in_flight_monitor_updates, option),
+			(10, legacy_in_flight_monitor_updates, option),
 			(11, self.probing_cookie_secret, required),
 			(13, htlc_onion_fields, optional_vec),
 			(14, decode_update_add_htlcs_opt, option),
 			(15, self.inbound_payment_id_secret, required),
+			(17, in_flight_monitor_updates, required),
 		});
 
 		Ok(())
@@ -13146,8 +13158,7 @@ where
 	/// runtime settings which were stored when the ChannelManager was serialized.
 	pub default_config: UserConfig,
 
-	/// A map from channel funding outpoints to ChannelMonitors for those channels (ie
-	/// value.context.get_funding_txo() should be the key).
+	/// A map from channel IDs to ChannelMonitors for those channels.
 	///
 	/// If a monitor is inconsistent with the channel state during deserialization the channel will
 	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
@@ -13158,7 +13169,7 @@ where
 	/// this struct.
 	///
 	/// This is not exported to bindings users because we have no HashMap bindings
-	pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
+	pub channel_monitors: HashMap<ChannelId, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
 }
 
 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
@@ -13187,7 +13198,7 @@ where
 			entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
 			tx_broadcaster, router, message_router, logger, default_config,
 			channel_monitors: hash_map_from_iter(
-				channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
+				channel_monitors.drain(..).map(|monitor| { (monitor.channel_id(), monitor) })
 			),
 		}
 	}
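
(Not part of the diff: the constructor change boils down to re-keying the map built from the caller's monitors — `monitor.channel_id()` instead of `monitor.get_funding_txo().0`. A sketch with a stand-in `Monitor` type in place of `ChannelMonitor`.)

```rust
use std::collections::HashMap;

// Stand-in for `ChannelMonitor` (illustration only).
struct Monitor { channel_id: [u8; 32] }

impl Monitor {
    // Mirrors `ChannelMonitor::channel_id()` in spirit.
    fn channel_id(&self) -> [u8; 32] { self.channel_id }
}

fn main() {
    let monitors = vec![Monitor { channel_id: [7; 32] }];
    // Previously keyed by the funding outpoint; now by channel ID.
    let channel_monitors: HashMap<[u8; 32], &Monitor> =
        monitors.iter().map(|monitor| (monitor.channel_id(), monitor)).collect();
    assert!(channel_monitors.contains_key(&[7; 32]));
}
```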
@@ -13250,22 +13261,21 @@ where
 
 		let mut failed_htlcs = Vec::new();
 		let channel_count: u64 = Readable::read(reader)?;
-		let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
+		let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 		let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = VecDeque::new();
 		let mut close_background_events = Vec::new();
-		let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
 		for _ in 0..channel_count {
 			let mut channel: FundedChannel<SP> = FundedChannel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
 			))?;
 			let logger = WithChannelContext::from(&args.logger, &channel.context, None);
+			let channel_id = channel.context.channel_id();
+			channel_id_set.insert(channel_id.clone());
 			let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
-			funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
-			funding_txo_set.insert(funding_txo.clone());
-			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
+			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) {
 				if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
 						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
 						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
@@ -13348,9 +13358,7 @@ where
 				if let Some(short_channel_id) = channel.context.get_short_channel_id() {
 					short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 				}
-				if let Some(funding_txo) = channel.context.get_funding_txo() {
-					outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
-				}
+				outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
 				per_peer_state.entry(channel.context.get_counterparty_node_id())
 					.or_insert_with(|| Mutex::new(empty_peer_state()))
 					.get_mut().unwrap()
@@ -13380,8 +13388,8 @@ where
 			}
 		}
 
-		for (funding_txo, monitor) in args.channel_monitors.iter() {
-			if !funding_txo_set.contains(funding_txo) {
+		for (channel_id, monitor) in args.channel_monitors.iter() {
+			if !channel_id_set.contains(channel_id) {
 				let mut should_queue_fc_update = false;
 				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
 					// If the ChannelMonitor had any updates, we may need to update it further and
@@ -13419,10 +13427,11 @@ where
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 					channel_id: Some(monitor.channel_id()),
 				};
+				let funding_txo = monitor.get_funding_txo().0;
 				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
 					let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 						counterparty_node_id,
-						funding_txo: *funding_txo,
+						funding_txo,
 						channel_id,
 						update: monitor_update,
 					};
@@ -13435,7 +13444,7 @@ where
 					// generate a `ChannelMonitorUpdate` for it aside from this
 					// `ChannelForceClosed` one.
 					monitor_update.update_id = u64::MAX;
-					close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
+					close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
 				}
 			}
 		}
@@ -13535,7 +13544,10 @@ where
 		let mut pending_claiming_payments = Some(new_hash_map());
 		let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
 		let mut events_override = None;
-		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
+		let mut _legacy_in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
+		// We use this one over the legacy since they represent the same data, just with a different
+		// key. We still need to read the legacy one as it's an even TLV.
+		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>> = None;
 		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
 		let mut inbound_payment_id_secret = None;
 		read_tlv_fields!(reader, {
@@ -13548,11 +13560,12 @@ where
 			(7, fake_scid_rand_bytes, option),
 			(8, events_override, option),
 			(9, claimable_htlc_purposes, optional_vec),
-			(10, in_flight_monitor_updates, option),
+			(10, _legacy_in_flight_monitor_updates, option),
 			(11, probing_cookie_secret, option),
 			(13, claimable_htlc_onion_fields, optional_vec),
 			(14, decode_update_add_htlcs, option),
 			(15, inbound_payment_id_secret, option),
+			(17, in_flight_monitor_updates, required),
 		});
 		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
 		if fake_scid_rand_bytes.is_none() {
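
(Not part of the diff: on read, both TLVs are decoded but only the channel-ID-keyed one is acted on — the legacy field must still be parsed because type 10 is an even TLV, as the inline comment above notes. A hedged sketch of that "decode both, use the new one" pattern, with stand-in types.)

```rust
use std::collections::HashMap;

// Stand-in types for illustration only.
type OutPoint = ([u8; 32], u16);
type ChannelId = [u8; 32];
type Updates = Vec<u64>;

// Both fields come out of deserialization; the legacy one is deliberately
// ignored since it carries the same updates under the old key scheme.
fn pick_in_flight(
    _legacy: Option<HashMap<OutPoint, Updates>>,
    by_channel_id: Option<HashMap<ChannelId, Updates>>,
) -> HashMap<ChannelId, Updates> {
    by_channel_id.unwrap_or_default()
}

fn main() {
    let new_style = HashMap::from([([1u8; 32], vec![5u64])]);
    let in_flight = pick_in_flight(None, Some(new_style));
    assert_eq!(in_flight[&[1u8; 32]], vec![5]);
}
```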
@@ -13599,19 +13612,20 @@ where
 		// Because the actual handling of the in-flight updates is the same, it's macro'ized here:
 		let mut pending_background_events = Vec::new();
 		macro_rules! handle_in_flight_updates {
-			($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
-			 $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
+			($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr,
+			 $peer_state: expr, $logger: expr, $channel_info_log: expr
 			) => { {
 				let mut max_in_flight_update_id = 0;
 				$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
+				let funding_txo = $monitor.get_funding_txo().0;
 				for update in $chan_in_flight_upds.iter() {
 					log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
 						update.update_id, $channel_info_log, &$monitor.channel_id());
 					max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
 					pending_background_events.push(
 						BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 							counterparty_node_id: $counterparty_node_id,
-							funding_txo: $funding_txo,
+							funding_txo: funding_txo.clone(),
 							channel_id: $monitor.channel_id(),
 							update: update.clone(),
 						});
@@ -13630,7 +13644,7 @@ where
 					.and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
 					.or_insert(max_in_flight_update_id);
 			}
-			if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
+			if $peer_state.in_flight_monitor_updates.insert((funding_txo, $monitor.channel_id()), $chan_in_flight_upds).is_some() {
 				log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
 				return Err(DecodeError::InvalidValue);
 			}
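
(Not part of the diff: with the outpoint dropped from the macro's arguments, it is re-derived from the monitor at the point of use. A simplified sketch of that refactor as a plain function, with a stand-in `Monitor` type.)

```rust
// Stand-ins for the LDK types (illustration only).
#[derive(Clone, Copy, Debug, PartialEq)]
struct OutPoint { txid: [u8; 32], index: u16 }

struct Monitor { funding: OutPoint, channel_id: [u8; 32] }

impl Monitor {
    // Mirrors `ChannelMonitor::get_funding_txo().0` in spirit: the monitor
    // already knows its own funding outpoint, so callers need not pass it.
    fn get_funding_txo(&self) -> OutPoint { self.funding }
    fn channel_id(&self) -> [u8; 32] { self.channel_id }
}

// Before: replay(monitor, funding_txo, ...); after: just the monitor.
fn replay(monitor: &Monitor) -> (OutPoint, [u8; 32]) {
    let funding_txo = monitor.get_funding_txo();
    (funding_txo, monitor.channel_id())
}

fn main() {
    let monitor = Monitor {
        funding: OutPoint { txid: [0; 32], index: 1 },
        channel_id: [2; 32],
    };
    let (txo, _chan_id) = replay(&monitor);
    assert_eq!(txo, OutPoint { txid: [0; 32], index: 1 });
}
```

This removes one opportunity for the caller to pass a mismatched outpoint/monitor pair, since the outpoint is always taken from the monitor itself.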
@@ -13641,28 +13655,27 @@ where
 		for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
 			let mut peer_state_lock = peer_state_mtx.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			for phase in peer_state.channel_by_id.values() {
+			for (channel_id, phase) in &peer_state.channel_by_id {
 				if let Some(chan) = phase.as_funded() {
 					let logger = WithChannelContext::from(&args.logger, &chan.context, None);
 
 					// Channels that were persisted have to be funded, otherwise they should have been
 					// discarded.
-					let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
-					let monitor = args.channel_monitors.get(&funding_txo)
+					let monitor = args.channel_monitors.get(channel_id)
 						.expect("We already checked for monitor presence when loading channels");
 					let mut max_in_flight_update_id = monitor.get_latest_update_id();
 					if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
-						if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
+						if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, *channel_id)) {
 							max_in_flight_update_id = cmp::max(max_in_flight_update_id,
 								handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
-									funding_txo, monitor, peer_state, logger, ""));
+									monitor, peer_state, logger, ""));
 						}
 					}
 					if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
 						// If the channel is ahead of the monitor, return DangerousValue:
 						log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
 						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
-							chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
+							channel_id, monitor.get_latest_update_id(), max_in_flight_update_id);
 						log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
 						log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 						log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
@@ -13680,23 +13693,21 @@ where
 		}
 
 		if let Some(in_flight_upds) = in_flight_monitor_updates {
-			for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
-				let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
-				let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
-				if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
+			for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds {
+				let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None);
+				if let Some(monitor) = args.channel_monitors.get(&channel_id) {
 					// Now that we've removed all the in-flight monitor updates for channels that are
 					// still open, we need to replay any monitor updates that are for closed channels,
 					// creating the neccessary peer_state entries as we go.
 					let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
 						Mutex::new(empty_peer_state())
 					});
 					let mut peer_state = peer_state_mutex.lock().unwrap();
-					handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
-						funding_txo, monitor, peer_state, logger, "closed ");
+					handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, monitor,
+						peer_state, logger, "closed ");
 				} else {
 					log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
-					log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
-						channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) });
+					log_error!(logger, " The ChannelMonitor for channel {} is missing.", channel_id.to_string());
 					log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 					log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
 					log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -13748,7 +13759,7 @@ where
 						.or_insert(update.update_id);
 				}
 				let in_flight_updates = per_peer_state.in_flight_monitor_updates
-					.entry(*funding_txo)
+					.entry((*funding_txo, *channel_id))
 					.or_insert_with(Vec::new);
 				debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
 				in_flight_updates.push(update.clone());
@@ -13873,7 +13884,7 @@ where
 			.filter_map(|(htlc_source, (htlc, preimage_opt))| {
 				if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
 					if let Some(payment_preimage) = preimage_opt {
-						let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint);
+						let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id);
 						// Note that for channels which have gone to chain,
 						// `get_all_current_outbound_htlcs` is never pruned and always returns
 						// a constant set until the monitor is removed/archived. Thus, we
@@ -14361,7 +14372,7 @@ where
 				);
 			}
 		}
-		if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
+		if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) {
 			// Note that this is unsafe as we no longer require the
 			// `ChannelMonitor`s to be re-persisted prior to this
 			// `ChannelManager` being persisted after we get started running.