@@ -4654,7 +4654,7 @@ where
 		for htlc in sources.drain(..) {
 			if let Err((pk, err)) = self.claim_funds_from_hop(
 				htlc.prev_hop, payment_preimage,
-				|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+				|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }), false)
 			{
 				if let msgs::ErrorAction::IgnoreError = err.err.action {
 					// We got a temporary failure updating monitor, but will claim the
@@ -4684,7 +4684,7 @@ where
 	}
 
 	fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
-		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc, during_init: bool)
 	-> Result<(), (PublicKey, MsgHandleErrInternal)> {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
@@ -4714,14 +4714,26 @@ where
 							log_bytes!(chan_id), action);
 						peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 					}
-					let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-						peer_state, per_peer_state, chan);
-					if let Err(e) = res {
-						// TODO: This is a *critical* error - we probably updated the outbound edge
-						// of the HTLC's monitor with a preimage. We should retry this monitor
-						// update over and over again until morale improves.
-						log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-						return Err((counterparty_node_id, e));
+					if !during_init {
+						let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+							peer_state, per_peer_state, chan);
+						if let Err(e) = res {
+							// TODO: This is a *critical* error - we probably updated the outbound edge
+							// of the HTLC's monitor with a preimage. We should retry this monitor
+							// update over and over again until morale improves.
+							log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+							return Err((counterparty_node_id, e));
+						}
+					} else {
+						// If we're running during init we cannot update a monitor directly -
+						// they probably haven't actually been loaded yet. Instead, push the
+						// monitor update as a background event.
+						self.pending_background_events.lock().unwrap().push(
+							BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+								counterparty_node_id,
+								funding_txo: prev_hop.outpoint,
+								update: monitor_update.clone(),
+							});
 					}
 				}
 				return Ok(());
@@ -4734,16 +4746,34 @@ where
 				payment_preimage,
 			}],
 		};
-		// We update the ChannelMonitor on the backward link, after
-		// receiving an `update_fulfill_htlc` from the forward link.
-		let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
-		if update_res != ChannelMonitorUpdateStatus::Completed {
-			// TODO: This needs to be handled somehow - if we receive a monitor update
-			// with a preimage we *must* somehow manage to propagate it to the upstream
-			// channel, or we must have an ability to receive the same event and try
-			// again on restart.
-			log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-				payment_preimage, update_res);
+
+		if !during_init {
+			// We update the ChannelMonitor on the backward link, after
+			// receiving an `update_fulfill_htlc` from the forward link.
+			let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+			if update_res != ChannelMonitorUpdateStatus::Completed {
+				// TODO: This needs to be handled somehow - if we receive a monitor update
+				// with a preimage we *must* somehow manage to propagate it to the upstream
+				// channel, or we must have an ability to receive the same event and try
+				// again on restart.
+				log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+					payment_preimage, update_res);
+			}
+		} else {
+			// If we're running during init we cannot update a monitor directly - they probably
+			// haven't actually been loaded yet. Instead, push the monitor update as a background
+			// event.
+			// Note that while its safe to use `ClosingMonitorUpdateRegeneratedOnStartup` here (the
+			// channel is already closed) we need to ultimately handle the monitor update
+			// completion action only after we've completed the monitor update. This is the only
+			// way to guarantee this update *will* be regenerated on startup (otherwise if this was
+			// from a forwarded HTLC the downstream preimage may be deleted before we claim
+			// upstream). Thus, we need to transition to some new `BackgroundEvent` type which will
+			// complete the monitor update completion action from `completion_action`.
+			self.pending_background_events.lock().unwrap().push(
+				BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((
+					prev_hop.outpoint, preimage_update,
+				)));
 		}
 		// Note that we do process the completion action here. This totally could be a
 		// duplicate claim, but we have no way of knowing without interrogating the
@@ -4758,9 +4788,10 @@ where
 		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
 	}
 
-	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32], during_init: bool) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+				debug_assert!(!during_init);
 				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
@@ -4783,7 +4814,7 @@ where
 						downstream_counterparty_and_funding_outpoint: None,
 					})
 				} else { None }
-			});
+			}, during_init);
 			if let Err((pk, err)) = res {
 				let result: Result<(), _> = Err(err);
 				let _ = handle_error!(self, result, pk);
@@ -5531,7 +5562,7 @@ where
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
+		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id, false);
 		Ok(())
 	}
 
@@ -5901,7 +5932,7 @@ where
 				MonitorEvent::HTLCEvent(htlc_update) => {
 					if let Some(preimage) = htlc_update.payment_preimage {
 						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id(), false);
 					} else {
 						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 						let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -8485,6 +8516,11 @@ where
 		// Note that we have to do the above replays before we push new monitor updates.
 		pending_background_events.append(&mut close_background_events);
 
+		// If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
+		// should ensure we try them again on the inbound edge. We put them here and do so after we
+		// have a fully-constructed `ChannelManager` at the end.
+		let mut pending_claims_to_replay = Vec::new();
+
 		{
 			// If we're tracking pending payments, ensure we haven't lost any by looking at the
 			// ChannelMonitor data for any channels for which we do not have authorative state
@@ -8495,7 +8531,8 @@ where
 			// We only rebuild the pending payments map if we were most recently serialized by
 			// 0.0.102+
 			for (_, monitor) in args.channel_monitors.iter() {
-				if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+				let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+				if counterparty_opt.is_none() {
 					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
 						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
 							if path.hops.is_empty() {
@@ -8589,6 +8626,30 @@ where
 						}
 					}
 				}
+
+				// Whether the downstream channel was closed or not, try to re-apply any payment
+				// preimages from it which may be needed in upstream channels for forwarded
+				// payments.
+				let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
+					.into_iter()
+					.filter_map(|(htlc_source, (htlc, preimage_opt))| {
+						if let HTLCSource::PreviousHopData(_) = htlc_source {
+							if let Some(payment_preimage) = preimage_opt {
+								Some((htlc_source, payment_preimage, htlc.amount_msat,
+									counterparty_opt.is_none(), // i.e. the downstream chan is closed
+									monitor.get_funding_txo().0.to_channel_id()))
+							} else { None }
+						} else {
+							// If it was an outbound payment, we've handled it above - if a preimage
+							// came in and we persisted the `ChannelManager` we either handled it and
+							// are good to go or the channel force-closed - we don't have to handle the
+							// channel still live case here.
+							None
+						}
+					});
+				for tuple in outbound_claimed_htlcs_iter {
+					pending_claims_to_replay.push(tuple);
+				}
 			}
 		}
 
@@ -8840,6 +8901,11 @@ where
 			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
 
+		for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
+			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
+				downstream_closed, downstream_chan_id, true);
+		}
+
 		//TODO: Broadcast channel update for closed channels, but only after we've made a
 		//connection or two.
 