@@ -1636,14 +1636,13 @@ where
 	}
 
 	fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
-		let mut res = Vec::new();
 		// Allocate our best estimate of the number of channels we have in the `res`
 		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
 		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
 		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
 		// unlikely as the `short_to_chan_info` map often contains 2 entries for
 		// the same channel.
-		res.reserve(self.short_to_chan_info.read().unwrap().len());
+		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
 		{
 			let best_block_height = self.best_block.read().unwrap().height();
 			let per_peer_state = self.per_peer_state.read().unwrap();
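As a side note, here is a minimal standalone sketch (not part of this PR; `collect_even` and `estimated_len` are hypothetical stand-ins for the real function and for `short_to_chan_info.read().unwrap().len()`) of the allocation pattern the hunk above switches to: `Vec::with_capacity(n)` performs the same up-front reservation as `Vec::new()` followed by `reserve(n)`, but in a single expression at the point of declaration, and the vector still grows normally if the estimate turns out to be low.

// Hypothetical example, not LDK code: pre-size the result vector from an estimate.
fn collect_even(numbers: &[u32], estimated_len: usize) -> Vec<u32> {
	// Before-style: `let mut res = Vec::new(); res.reserve(estimated_len);`
	// After-style: a single allocation sized by our best estimate.
	let mut res = Vec::with_capacity(estimated_len);
	for n in numbers {
		if n % 2 == 0 {
			res.push(*n);
		}
	}
	res
}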
@@ -3421,38 +3420,6 @@ where
 		true
 	}
 
-	/// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
-	/// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels of
-	/// to that peer is later closed while still being disconnected (i.e. force closed), we
-	/// therefore need to remove the peer from `peer_state` separately.
-	/// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
-	/// instead remove such peers awaiting removal through this function, which is called on a
-	/// timer through `timer_tick_occurred`, passing the peers disconnected peers with no channels,
-	/// to limit the negative effects on parallelism as much as possible.
-	///
-	/// Must be called without the `per_peer_state` lock acquired.
-	fn remove_peers_awaiting_removal(&self, pending_peers_awaiting_removal: HashSet<PublicKey>) {
-		if pending_peers_awaiting_removal.len() > 0 {
-			let mut per_peer_state = self.per_peer_state.write().unwrap();
-			for counterparty_node_id in pending_peers_awaiting_removal {
-				match per_peer_state.entry(counterparty_node_id) {
-					hash_map::Entry::Occupied(entry) => {
-						// Remove the entry if the peer is still disconnected and we still
-						// have no channels to the peer.
-						let remove_entry = {
-							let peer_state = entry.get().lock().unwrap();
-							!peer_state.is_connected && peer_state.channel_by_id.len() == 0
-						};
-						if remove_entry {
-							entry.remove_entry();
-						}
-					},
-					hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
-				}
-			}
-		}
-	}
-
 	#[cfg(any(test, feature = "_test_utils"))]
 	/// Process background events, for functional testing
 	pub fn test_process_background_events(&self) {
@@ -3526,7 +3493,7 @@ where
 
 		let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
 		let mut timed_out_mpp_htlcs = Vec::new();
-		let mut pending_peers_awaiting_removal = HashSet::new();
+		let mut pending_peers_awaiting_removal = Vec::new();
 		{
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
@@ -3576,11 +3543,37 @@ where
 				});
 				let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
 				if peer_should_be_removed {
-					pending_peers_awaiting_removal.insert(counterparty_node_id);
+					pending_peers_awaiting_removal.push(counterparty_node_id);
+				}
+			}
+		}
+
+		// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
+		// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
+		// to that peer are later closed while still being disconnected (i.e. force closed),
+		// we therefore need to remove the peer from `peer_state` separately.
+		// To avoid having to take the `per_peer_state` `write` lock once the channels are
+		// closed, we instead remove such peers awaiting removal here on a timer, to limit the
+		// negative effects on parallelism as much as possible.
+		if pending_peers_awaiting_removal.len() > 0 {
+			let mut per_peer_state = self.per_peer_state.write().unwrap();
+			for counterparty_node_id in pending_peers_awaiting_removal {
+				match per_peer_state.entry(counterparty_node_id) {
+					hash_map::Entry::Occupied(entry) => {
+						// Remove the entry if the peer is still disconnected and we still
+						// have no channels to the peer.
+						let remove_entry = {
+							let peer_state = entry.get().lock().unwrap();
+							!peer_state.is_connected && peer_state.channel_by_id.len() == 0
+						};
+						if remove_entry {
+							entry.remove_entry();
+						}
+					},
+					hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
 				}
 			}
 		}
-		self.remove_peers_awaiting_removal(pending_peers_awaiting_removal);
 
 		self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
 			if htlcs.is_empty() {
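Taken together, the last three hunks inline the former `remove_peers_awaiting_removal` helper into `timer_tick_occurred`. Below is a simplified, self-contained sketch of that pattern, using hypothetical `Manager` and `PeerState` types rather than LDK's real ones: candidates are collected under the `per_peer_state` read lock, and the write lock is taken only if there is something to remove, with each candidate re-checked via the map's `Entry` API before its entry is dropped, since its state may have changed between the two locks.

// Simplified sketch of the timer-driven peer removal above; `Manager` and
// `PeerState` are stand-ins, not LDK's actual types.
use std::collections::{hash_map, HashMap};
use std::sync::{Mutex, RwLock};

struct PeerState {
	is_connected: bool,
	channel_count: usize,
}

struct Manager {
	per_peer_state: RwLock<HashMap<[u8; 33], Mutex<PeerState>>>,
}

impl Manager {
	fn timer_tick(&self) {
		let mut pending_removal = Vec::new();
		{
			// Read lock only: note which peers look removable right now.
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (node_id, peer_mutex) in per_peer_state.iter() {
				let peer = peer_mutex.lock().unwrap();
				if !peer.is_connected && peer.channel_count == 0 {
					pending_removal.push(*node_id);
				}
			}
		}
		if !pending_removal.is_empty() {
			// Write lock only when there are candidates to remove.
			let mut per_peer_state = self.per_peer_state.write().unwrap();
			for node_id in pending_removal {
				if let hash_map::Entry::Occupied(entry) = per_peer_state.entry(node_id) {
					// Re-check under the write lock before dropping the entry: the peer
					// may have reconnected or opened a channel in the meantime.
					let remove = {
						let peer = entry.get().lock().unwrap();
						!peer.is_connected && peer.channel_count == 0
					};
					if remove {
						entry.remove_entry();
					}
				}
			}
		}
	}
}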