@@ -1855,7 +1855,7 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_ALREADY_APPLIED, $remove: expr) => { {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
@@ -1866,13 +1866,13 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					log_bytes!($chan.context.channel_id()[..]));
-				Ok(())
+				Ok(false)
 			},
 			ChannelMonitorUpdateStatus::PermanentFailure => {
 				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 					log_bytes!($chan.context.channel_id()[..]));
 				update_maps_on_chan_removal!($self, &$chan.context);
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+				let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 					"ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 					$chan.context.get_user_id(), $chan.context.force_shutdown(false),
 					$self.get_channel_update_for_broadcast(&$chan).ok()));
@@ -1884,16 +1884,16 @@ macro_rules! handle_new_monitor_update {
 				if $chan.no_monitor_updates_pending() {
 					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
-				Ok(())
+				Ok(true)
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, ALREADY_APPLIED) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_ALREADY_APPLIED, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		let update_res = $self.chain_monitor.update_channel($funding_txo, &$update);
-		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_ALREADY_APPLIED, $remove)
+		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_INITIAL_MONITOR, $remove)
 	} };
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
 		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
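
[Review note] With this change, every arm of `handle_new_monitor_update!` evaluates to `Result<bool, MsgHandleErrInternal>` rather than `Result<(), _>`: `Ok(true)` when the monitor update has completed, `Ok(false)` when persistence is still in flight, and `Err(_)` when the channel must be force-closed. A minimal sketch of that contract, using a stand-in error type instead of the real `MsgHandleErrInternal`:

	// Simplified model of the macro's new return contract (types are stand-ins).
	enum ChannelMonitorUpdateStatus { Completed, InProgress, PermanentFailure }
	struct ForceCloseErr; // stand-in for MsgHandleErrInternal

	fn handle_update_status(status: ChannelMonitorUpdateStatus) -> Result<bool, ForceCloseErr> {
		match status {
			// Update durably persisted: callers may act on it immediately.
			ChannelMonitorUpdateStatus::Completed => Ok(true),
			// Update still persisting: hold outbound messages until it completes.
			ChannelMonitorUpdateStatus::InProgress => Ok(false),
			// Storage failed: the channel has to be force-closed.
			ChannelMonitorUpdateStatus::PermanentFailure => Err(ForceCloseErr),
		}
	}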
@@ -2311,7 +2311,8 @@ where
 
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt.take() {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 				}
 
 				if chan_entry.get().is_shutdown() {
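
[Review note] This handler still returns `Result<(), _>`, so callsites that `break` with the macro's value now discard the new completion flag via `.map(|_| ())`; the same adapter recurs in the later hunks. A standalone illustration (error type generic/hypothetical):

	// Result<bool, E> -> Result<(), E>: keep the error, drop the completion flag.
	fn adapt<E>(res: Result<bool, E>) -> Result<(), E> {
		res.map(|_| ())
	}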
@@ -2992,19 +2993,18 @@ where
 					}, onion_packet, &self.logger);
 				match break_chan_entry!(self, send_res, chan) {
 					Some(monitor_update) => {
-						let update_id = monitor_update.update_id;
-						let update_res = self.chain_monitor.update_channel(funding_txo, &monitor_update);
-						if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED) {
-							break Err(e);
-						}
-						if update_res == ChannelMonitorUpdateStatus::InProgress {
-							// Note that MonitorUpdateInProgress here indicates (per function
-							// docs) that we will resend the commitment update once monitor
-							// updating completes. Therefore, we must return an error
-							// indicating that it is unsafe to retry the payment wholesale,
-							// which we do in the send_payment check for
-							// MonitorUpdateInProgress, below.
-							return Err(APIError::MonitorUpdateInProgress);
+						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+							Err(e) => break Err(e),
+							Ok(false) => {
+								// Note that MonitorUpdateInProgress here indicates (per function
+								// docs) that we will resend the commitment update once monitor
+								// updating completes. Therefore, we must return an error
+								// indicating that it is unsafe to retry the payment wholesale,
+								// which we do in the send_payment check for
+								// MonitorUpdateInProgress, below.
+								return Err(APIError::MonitorUpdateInProgress);
+							},
+							Ok(true) => {},
 						}
 					},
 					None => {},
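
[Review note] The payment path is the one caller that cares about the flag: `Ok(false)` (update in flight) is surfaced to the API user as `MonitorUpdateInProgress` so the payment is not blindly retried. A rough sketch of that mapping, with stand-in types in place of the real `APIError`/`MsgHandleErrInternal` plumbing:

	// Stand-ins; the real code distinguishes internal vs. API errors.
	enum ApiError { MonitorUpdateInProgress, ChannelClosed }

	fn check_send_result(res: Result<bool, ApiError>) -> Result<(), ApiError> {
		match res {
			Err(e) => Err(e), // channel is being force-closed
			// In flight: the commitment update will be resent once the monitor
			// update completes, so a wholesale retry would be unsafe.
			Ok(false) => Err(ApiError::MonitorUpdateInProgress),
			Ok(true) => Ok(()), // durable; proceed normally
		}
	}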
@@ -4023,21 +4023,26 @@ where
 						let _ = self.chain_monitor.update_channel(funding_txo, &update);
 					},
 					BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
-						let update_res = self.chain_monitor.update_channel(funding_txo, &update);
-
+						let mut updated_chan = false;
 						let res = {
 							let per_peer_state = self.per_peer_state.read().unwrap();
 							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 								let peer_state = &mut *peer_state_lock;
 								match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
 									hash_map::Entry::Occupied(mut chan) => {
-										handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED)
+										updated_chan = true;
+										handle_new_monitor_update!(self, funding_txo, update,
+											peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 									},
 									hash_map::Entry::Vacant(_) => Ok(()),
 								}
 							} else { Ok(()) }
 						};
+						if !updated_chan {
+							// TODO: Track this as in-flight even though the channel is closed.
+							let _ = self.chain_monitor.update_channel(funding_txo, &update);
+						}
 						// TODO: If this channel has since closed, we're likely providing a payment
 						// preimage update, which we must ensure is durable! We currently don't,
 						// however, ensure that.
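
[Review note] The ordering changes here: previously the update was applied unconditionally before the channel lookup; now the macro applies and tracks it while the channel is still alive, and only if the channel has already gone away is it handed to the chain monitor directly, untracked, per the new TODO. A rough sketch of that control flow, with hypothetical stand-ins for the monitor types:

	struct MonitorUpdate; // stand-in for ChannelMonitorUpdate

	fn apply_directly(_update: &MonitorUpdate) { /* chain_monitor.update_channel(...) */ }

	fn replay_startup_update(channel_still_open: bool, update: MonitorUpdate) {
		let mut updated_chan = false;
		if channel_still_open {
			updated_chan = true;
			// In the real code, handle_new_monitor_update! calls
			// chain_monitor.update_channel internally and tracks completion.
		}
		if !updated_chan {
			// Channel closed since the update was queued: persist it anyway,
			// currently without in-flight tracking.
			apply_directly(&update);
		}
	}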
@@ -5155,7 +5160,7 @@ where
 
 				let chan = e.insert(chan);
 				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
-					per_peer_state, chan, MANUALLY_REMOVING_ALREADY_APPLIED,
+					per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 					{ peer_state.channel_by_id.remove(&new_channel_id) });
 
 				// Note that we reply with the new channel_id in error messages if we gave up on the
@@ -5168,7 +5173,7 @@ where
 				if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
 					res.0 = None;
 				}
-				res
+				res.map(|_| ())
 			}
 		}
 	}
@@ -5189,7 +5194,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -5198,7 +5203,7 @@ where
 						shutdown_finish.0.take();
 					}
 				}
-				res
+				res.map(|_| ())
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 		}
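
[Review note] `INITIAL_MONITOR` (and `MANUALLY_REMOVING_INITIAL_MONITOR` above) now names the arm for registering the very first monitor: the status comes from `watch_channel` rather than `update_channel`, so these callsites pass `update_res` plus a sentinel `update_id` of 0 directly. The markers are matched as literal tokens, which is how a single `macro_rules!` definition dispatches between variants; a toy model of that pattern (names hypothetical):

	// The bare ident INITIAL_MONITOR is matched literally by the first arm,
	// while anything else falls through to the general arm.
	macro_rules! dispatch {
		($status: expr, INITIAL_MONITOR) => { dispatch!($status, 0u64) };
		($status: expr, $update_id: expr) => { ($status, $update_id) };
	}

	fn main() {
		// Expands via the first arm, then the second: ("in_progress", 0).
		let _ = dispatch!("in_progress", INITIAL_MONITOR);
	}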
@@ -5286,7 +5291,8 @@ where
 
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 				}
 				break Ok(());
 			},
@@ -5477,7 +5483,7 @@ where
 				let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
 				if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-						peer_state, per_peer_state, chan)
+						peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) }
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5614,7 +5620,7 @@ where
 				let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 				let res = if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
-						peer_state_lock, peer_state, per_peer_state, chan)
+						peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) };
 				(htlcs_to_fail, res)
 			},