@@ -1417,7 +1417,7 @@ macro_rules! emit_channel_ready_event {
 }

 macro_rules! handle_monitor_update_completion {
-	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let mut updates = $chan.monitor_updating_restored(&$self.logger,
 			&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
 			$self.best_block.read().unwrap().height());
@@ -1450,6 +1450,7 @@ macro_rules! handle_monitor_update_completion {

 		let channel_id = $chan.channel_id();
 		core::mem::drop($peer_state_lock);
+		core::mem::drop($per_peer_state_lock);

 		$self.handle_monitor_update_completion_actions(update_actions);

@@ -1465,7 +1466,7 @@ macro_rules! handle_monitor_update_completion {
 }

 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert!($self.id_to_peer.try_lock().is_ok());
@@ -1492,14 +1493,14 @@ macro_rules! handle_new_monitor_update {
 					.update_id == $update_id) &&
 					$chan.get_latest_monitor_update_id() == $update_id
 				{
-					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
+					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
 				Ok(())
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
 	}
 }

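The macro hunks above thread the outer `per_peer_state` read guard into the completion path and drop it (after the per-peer `Mutex` guard) right before `handle_monitor_update_completion_actions` runs. The following is a minimal, standalone sketch of that guard-dropping pattern under simplified, invented types (`Manager`, `PeerState` here are illustrative, not LDK's): both guards are released before deferred actions execute, so the actions are free to take the locks again without risking a deadlock.

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState { pending_actions: Vec<String> }

struct Manager {
	per_peer_state: RwLock<HashMap<u8, Mutex<PeerState>>>,
}

impl Manager {
	// Deferred work that may need per_peer_state again, so it must only run
	// after every guard held by the update path has been dropped.
	fn handle_completion_actions(&self, actions: Vec<String>) {
		let _relock = self.per_peer_state.read().unwrap();
		for a in actions { println!("completed: {a}"); }
	}

	fn complete_update(&self, peer: u8) {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let mut peer_state_lock = per_peer_state.get(&peer).unwrap().lock().unwrap();
		let actions = std::mem::take(&mut peer_state_lock.pending_actions);
		// Mirror the macro's ordering: drop the inner peer Mutex guard, then
		// the outer per_peer_state read guard, before running the actions.
		core::mem::drop(peer_state_lock);
		core::mem::drop(per_peer_state);
		self.handle_completion_actions(actions);
	}
}

fn main() {
	let mgr = Manager {
		per_peer_state: RwLock::new(HashMap::from([(1u8, Mutex::new(PeerState {
			pending_actions: vec!["monitor update 7".to_owned()],
		}))])),
	};
	mgr.complete_update(1);
}
```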
@@ -1835,7 +1836,7 @@ where
 				if let Some(monitor_update) = monitor_update_opt.take() {
 					let update_id = monitor_update.update_id;
 					let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-					break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+					break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
 				}

 				if chan_entry.get().is_shutdown() {
@@ -2458,7 +2459,7 @@ where
 						Some(monitor_update) => {
 							let update_id = monitor_update.update_id;
 							let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
-							if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) {
+							if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
 								break Err(e);
 							}
 							if update_res == ChannelMonitorUpdateStatus::InProgress {
@@ -3984,7 +3985,8 @@ where
 			)
 		).unwrap_or(None);

-		if let Some(mut peer_state_lock) = peer_state_opt.take() {
+		if peer_state_opt.is_some() {
+			let mut peer_state_lock = peer_state_opt.unwrap();
 			let peer_state = &mut *peer_state_lock;
 			if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
 				let counterparty_node_id = chan.get().get_counterparty_node_id();
@@ -3999,7 +4001,7 @@ where
 					let update_id = monitor_update.update_id;
 					let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
 					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, chan);
+						peer_state, per_peer_state, chan);
 					if let Err(e) = res {
 						// TODO: This is a *critical* error - we probably updated the outbound edge
 						// of the HTLC's monitor with a preimage. We should retry this monitor
@@ -4200,7 +4202,7 @@ where
 		if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
 			return;
 		}
-		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
+		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
 	}

 	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
@@ -4506,7 +4508,8 @@ where
 				let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);

 				let chan = e.insert(chan);
-				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+					per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });

 				// Note that we reply with the new channel_id in error messages if we gave up on the
 				// channel, not the temporary_channel_id. This is compatible with ourselves, but the
@@ -4539,7 +4542,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -4635,7 +4638,7 @@ where
 					if let Some(monitor_update) = monitor_update_opt {
 						let update_id = monitor_update.update_id;
 						let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-						break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+						break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
 					}
 					break Ok(());
 				},
@@ -4827,7 +4830,7 @@ where
 				let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
 				let update_id = monitor_update.update_id;
 				handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-					peer_state, chan)
+					peer_state, per_peer_state, chan)
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
@@ -4933,21 +4936,20 @@ where
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let (htlcs_to_fail, res) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
-			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+			let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
 				.ok_or_else(|| {
 					debug_assert!(false);
 					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
-				})?;
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				}).map(|mtx| mtx.lock().unwrap())?;
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
 					let funding_txo = chan.get().get_funding_txo();
 					let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 					let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
 					let update_id = monitor_update.update_id;
-					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, chan);
+					let res = handle_new_monitor_update!(self, update_res, update_id,
+						peer_state_lock, peer_state, per_peer_state, chan);
 					(htlcs_to_fail, res)
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
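The `internal_revoke_and_ack` hunk above folds the peer lookup and the lock acquisition into one chain, `.ok_or_else(..).map(|mtx| mtx.lock().unwrap())?`, instead of binding the `Mutex` to an intermediate local. A standalone sketch of that combinator under invented types (`NoSuchPeer` and the `HashMap<u8, Mutex<Vec<u64>>>` are illustrative only), just to show the `Option -> Result -> MutexGuard` flow:

```rust
use std::collections::HashMap;
use std::sync::{Mutex, MutexGuard};

#[derive(Debug)]
struct NoSuchPeer(u8);

// Look up a peer's Mutex and lock it in one chain, as in the diff:
// `.ok_or_else(..).map(|mtx| mtx.lock().unwrap())?`.
fn lock_peer(peers: &HashMap<u8, Mutex<Vec<u64>>>, id: u8)
	-> Result<MutexGuard<'_, Vec<u64>>, NoSuchPeer>
{
	let guard = peers.get(&id)
		.ok_or_else(|| NoSuchPeer(id))
		.map(|mtx| mtx.lock().unwrap())?;
	Ok(guard)
}

fn main() {
	let peers = HashMap::from([(7u8, Mutex::new(vec![1, 2, 3]))]);
	match lock_peer(&peers, 7) {
		Ok(guard) => println!("peer 7 has {} entries", guard.len()),
		Err(e) => println!("lookup failed: {e:?}"),
	}
	assert!(lock_peer(&peers, 9).is_err());
}
```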
@@ -5204,38 +5206,41 @@ where
 		let mut has_monitor_update = false;
 		let mut failed_htlcs = Vec::new();
 		let mut handle_errors = Vec::new();
-		let per_peer_state = self.per_peer_state.read().unwrap();

-		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
-			'chan_loop: loop {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
-				for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-					let counterparty_node_id = chan.get_counterparty_node_id();
-					let funding_txo = chan.get_funding_txo();
-					let (monitor_opt, holding_cell_failed_htlcs) =
-						chan.maybe_free_holding_cell_htlcs(&self.logger);
-					if !holding_cell_failed_htlcs.is_empty() {
-						failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
-					}
-					if let Some(monitor_update) = monitor_opt {
-						has_monitor_update = true;
-
-						let update_res = self.chain_monitor.update_channel(
-							funding_txo.expect("channel is live"), monitor_update);
-						let update_id = monitor_update.update_id;
-						let channel_id: [u8; 32] = *channel_id;
-						let res = handle_new_monitor_update!(self, update_res, update_id,
-							peer_state_lock, peer_state, chan, MANUALLY_REMOVING,
-							peer_state.channel_by_id.remove(&channel_id));
-						if res.is_err() {
-							handle_errors.push((counterparty_node_id, res));
+		'peer_loop: loop {
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+				'chan_loop: loop {
+					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+					for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+						let counterparty_node_id = chan.get_counterparty_node_id();
+						let funding_txo = chan.get_funding_txo();
+						let (monitor_opt, holding_cell_failed_htlcs) =
+							chan.maybe_free_holding_cell_htlcs(&self.logger);
+						if !holding_cell_failed_htlcs.is_empty() {
+							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+						}
+						if let Some(monitor_update) = monitor_opt {
+							has_monitor_update = true;
+
+							let update_res = self.chain_monitor.update_channel(
+								funding_txo.expect("channel is live"), monitor_update);
+							let update_id = monitor_update.update_id;
+							let channel_id: [u8; 32] = *channel_id;
+							let res = handle_new_monitor_update!(self, update_res, update_id,
+								peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+								peer_state.channel_by_id.remove(&channel_id));
+							if res.is_err() {
+								handle_errors.push((counterparty_node_id, res));
+							}
+							continue 'peer_loop;
 						}
-						continue 'chan_loop;
 					}
+					break 'chan_loop;
 				}
-				break 'chan_loop;
 			}
+			break 'peer_loop;
 		}

 		let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
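The last hunk wraps the whole scan in a new `'peer_loop`, taking the `per_peer_state` read lock inside the loop and restarting from the top with `continue 'peer_loop` after each monitor update is handled. Below is a minimal, standalone sketch of that labeled-loop restart pattern; `drain_pending` and the `RwLock<HashMap<u8, Vec<u32>>>` state are invented for illustration and are not LDK's types.

```rust
use std::collections::HashMap;
use std::sync::RwLock;

// Illustrative only: the guard lives inside the outer loop body, so
// `continue 'peer_loop` releases it and the next pass re-acquires a fresh
// guard before iterating again.
fn drain_pending(state: &RwLock<HashMap<u8, Vec<u32>>>) -> usize {
	let mut handled = 0;
	'peer_loop: loop {
		let mut map = state.write().unwrap();
		for (_peer, pending) in map.iter_mut() {
			if let Some(_update) = pending.pop() {
				handled += 1;
				// Handling one update may invalidate the iteration (in the
				// real code a channel can be removed), so restart from the
				// top rather than continuing with a possibly stale iterator.
				continue 'peer_loop;
			}
		}
		// Nothing left to do for any entry: stop.
		break 'peer_loop;
	}
	handled
}

fn main() {
	let state = RwLock::new(HashMap::from([(1u8, vec![10, 11]), (2u8, vec![20])]));
	assert_eq!(drain_pending(&state), 3);
	println!("all pending updates handled");
}
```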