
Commit cc1406f

Initialize list_channels_with_filter result vec with capacity
1 parent 6cdbb48 commit cc1406f
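
The commit replaces a `Vec::new()` plus a later `reserve()` call with a single `Vec::with_capacity()` in `list_channels_with_filter`, and folds the one-off `remove_peers_awaiting_removal` helper into `timer_tick_occurred`. As a standalone sketch of the allocation half of the change (the `collect_with_estimate` and `estimated_len` names are illustrative, not from the codebase):

```rust
/// Illustration only: size the result Vec from an estimate up front.
/// If the estimate is low, `push` still grows the Vec as usual, so
/// correctness never depends on the estimate being exact.
fn collect_with_estimate(estimated_len: usize, items: impl Iterator<Item = u64>) -> Vec<u64> {
    // Before: `let mut res = Vec::new();` followed later by `res.reserve(estimated_len);`
    // After: a single call that allocates the estimated capacity immediately.
    let mut res = Vec::with_capacity(estimated_len);
    for item in items {
        res.push(item);
    }
    res
}
```

In the diff below the estimate is `self.short_to_chan_info.read().unwrap().len()`; as the in-code comment notes, that map often contains two entries for the same channel, so the estimate errs on the generous side and reallocation is unlikely.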

1 file changed: +30 -37 lines

lightning/src/ln/channelmanager.rs

Lines changed: 30 additions & 37 deletions
@@ -1636,14 +1636,13 @@ where
     }
 
     fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
-        let mut res = Vec::new();
         // Allocate our best estimate of the number of channels we have in the `res`
         // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
         // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
         // of the ChannelMonitor handling. Therefore reallocations may still occur, but is
         // unlikely as the `short_to_chan_info` map often contains 2 entries for
         // the same channel.
-        res.reserve(self.short_to_chan_info.read().unwrap().len());
+        let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
         {
             let best_block_height = self.best_block.read().unwrap().height();
             let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3421,38 +3420,6 @@ where
         true
     }
 
-    /// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
-    /// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels of
-    /// to that peer is later closed while still being disconnected (i.e. force closed), we
-    /// therefore need to remove the peer from `peer_state` separately.
-    /// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
-    /// instead remove such peers awaiting removal through this function, which is called on a
-    /// timer through `timer_tick_occurred`, passing the peers disconnected peers with no channels,
-    /// to limit the negative effects on parallelism as much as possible.
-    ///
-    /// Must be called without the `per_peer_state` lock acquired.
-    fn remove_peers_awaiting_removal(&self, pending_peers_awaiting_removal: HashSet<PublicKey>) {
-        if pending_peers_awaiting_removal.len() > 0 {
-            let mut per_peer_state = self.per_peer_state.write().unwrap();
-            for counterparty_node_id in pending_peers_awaiting_removal {
-                match per_peer_state.entry(counterparty_node_id) {
-                    hash_map::Entry::Occupied(entry) => {
-                        // Remove the entry if the peer is still disconnected and we still
-                        // have no channels to the peer.
-                        let remove_entry = {
-                            let peer_state = entry.get().lock().unwrap();
-                            !peer_state.is_connected && peer_state.channel_by_id.len() == 0
-                        };
-                        if remove_entry {
-                            entry.remove_entry();
-                        }
-                    },
-                    hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
-                }
-            }
-        }
-    }
-
     #[cfg(any(test, feature = "_test_utils"))]
     /// Process background events, for functional testing
     pub fn test_process_background_events(&self) {
@@ -3526,7 +3493,7 @@ where
 
         let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
         let mut timed_out_mpp_htlcs = Vec::new();
-        let mut pending_peers_awaiting_removal = HashSet::new();
+        let mut pending_peers_awaiting_removal = Vec::new();
         {
             let per_peer_state = self.per_peer_state.read().unwrap();
             for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
@@ -3576,11 +3543,37 @@ where
                 });
                 let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
                 if peer_should_be_removed {
-                    pending_peers_awaiting_removal.insert(counterparty_node_id);
+                    pending_peers_awaiting_removal.push(counterparty_node_id);
+                }
+            }
+        }
+
+        // When a peer disconnects but still has channels, the peer's `peer_state` entry in the
+        // `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
+        // of to that peer is later closed while still being disconnected (i.e. force closed),
+        // we therefore need to remove the peer from `peer_state` separately.
+        // To avoid having to take the `per_peer_state` `write` lock once the channels are
+        // closed, we instead remove such peers awaiting removal here on a timer, to limit the
+        // negative effects on parallelism as much as possible.
+        if pending_peers_awaiting_removal.len() > 0 {
+            let mut per_peer_state = self.per_peer_state.write().unwrap();
+            for counterparty_node_id in pending_peers_awaiting_removal {
+                match per_peer_state.entry(counterparty_node_id) {
+                    hash_map::Entry::Occupied(entry) => {
+                        // Remove the entry if the peer is still disconnected and we still
+                        // have no channels to the peer.
+                        let remove_entry = {
+                            let peer_state = entry.get().lock().unwrap();
+                            !peer_state.is_connected && peer_state.channel_by_id.len() == 0
+                        };
+                        if remove_entry {
+                            entry.remove_entry();
+                        }
+                    },
+                    hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
                 }
             }
         }
-        self.remove_peers_awaiting_removal(pending_peers_awaiting_removal);
 
         self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
             if htlcs.is_empty() {
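
Beyond the allocation change, the final hunk inlines the removed `remove_peers_awaiting_removal` helper directly into `timer_tick_occurred` and collects removal candidates in a `Vec` rather than a `HashSet` (the IDs come from a single pass over `per_peer_state`, so they are already unique). A simplified, self-contained sketch of that two-phase removal pattern, using stand-in types (`PeerStateStub`, `u64` peer IDs) rather than LDK's actual ones:

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

/// Stand-in for the per-peer state; only the fields the removal check needs.
struct PeerStateStub {
    is_connected: bool,
    channel_count: usize,
}

/// Phase two of the pattern in the hunk above: candidates were gathered under
/// the `read` lock, and here the `write` lock is taken once. Each candidate is
/// re-checked before its entry is removed.
fn remove_idle_peers(
    per_peer_state: &RwLock<HashMap<u64, Mutex<PeerStateStub>>>,
    pending_removal: Vec<u64>,
) {
    if pending_removal.is_empty() {
        return;
    }
    let mut map = per_peer_state.write().unwrap();
    for peer_id in pending_removal {
        match map.entry(peer_id) {
            Entry::Occupied(entry) => {
                // Re-check under the write lock: remove only if the peer is
                // still disconnected and still has no channels.
                let remove_entry = {
                    let peer_state = entry.get().lock().unwrap();
                    !peer_state.is_connected && peer_state.channel_count == 0
                };
                if remove_entry {
                    entry.remove_entry();
                }
            },
            Entry::Vacant(_) => { /* already removed elsewhere; nothing to do */ }
        }
    }
}
```

The re-check under the `write` lock is what makes the deferred removal safe: a candidate collected during the read-locked scan may have reconnected or opened a channel by the time the write lock is taken, in which case its entry is left alone.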
