//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.

-use chain::Watch;
-use chain::channelmonitor::ChannelMonitor;
+use chain::{ChannelMonitorUpdateErr, Listen, Watch};
+use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
+use chain::transaction::OutPoint;
 use ln::{PaymentPreimage, PaymentHash};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ErrorAction};
@@ -24,10 +25,11 @@ use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProv
 use util::errors::APIError;
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::ser::{ReadableArgs, Writeable};
+use io;

 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
-use bitcoin::BlockHash;
+use bitcoin::{Block, BlockHeader, BlockHash};

 use prelude::*;

@@ -394,3 +396,170 @@ fn retry_with_no_persist() {
 	pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 100_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
 }
+
+fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
+	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
+	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
+	// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
+	// the ChannelMonitor tells it to.
+	//
+	// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
+	// ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
+	// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
+	// nodes[0].
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
+	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+	check_closed_broadcast!(nodes[0], true);
+	check_added_monitors!(nodes[0], 1);
+	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
+	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
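+	// node_txn should be the commitment transaction (broadcast twice) followed by the
+	// HTLC-Timeout transaction spending it, as the checks below confirm.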
+	assert_eq!(node_txn.len(), 3);
+	assert_eq!(node_txn[0], node_txn[1]);
+	check_spends!(node_txn[1], funding_tx);
+	check_spends!(node_txn[2], node_txn[1]);
+	let timeout_txn = vec![node_txn[2].clone()];
+
+	assert!(nodes[1].node.claim_funds(payment_preimage));
+	check_added_monitors!(nodes[1], 1);
+
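+	// Mine nodes[0]'s commitment transaction on nodes[1], which now has the preimage and
+	// responds by broadcasting its HTLC-claim transaction(s).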
+	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()] });
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+	let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+	header.prev_blockhash = nodes[0].best_block_hash();
+	connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()] });
+
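+	// Optionally give the commitment transaction further confirmations before the HTLC
+	// claim/timeout transaction is mined.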
+	if confirm_commitment_tx {
+		connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
+	}
+
+	header.prev_blockhash = nodes[0].best_block_hash();
+	let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { claim_txn } };
+
+	if payment_timeout {
+		assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
+		connect_block(&nodes[0], &claim_block);
+		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+	}
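+	// In the payment_timeout case, the single block connected below brings the HTLC-Timeout
+	// transaction to ANTI_REORG_DELAY confirmations, generating the failure event.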
+
+	// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
+	// returning TemporaryFailure. This should cause the claim event to never make its way to the
+	// ChannelManager.
+	chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap().clear();
+	chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+	if payment_timeout {
+		connect_blocks(&nodes[0], 1);
+	} else {
+		connect_block(&nodes[0], &claim_block);
+	}
+
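+	// The resulting ChannelMonitor persistence is now pending; until it completes, neither
+	// monitor events nor payment events should be released.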
+	let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+	let mon_updates: Vec<_> = chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap()
+		.get_mut(&funding_txo).unwrap().drain().collect();
+	assert_eq!(mon_updates.len(), 1);
+	assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+	// If we persist the ChannelManager here, we should get the PaymentSent event after
+	// deserialization.
+	let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
+	if !persist_manager_post_event {
+		nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	}
+
+	// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
+	// payment sent event.
+	chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+	nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]);
+	if payment_timeout {
+		expect_payment_failed!(nodes[0], payment_hash, true);
+	} else {
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+
+	// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
+	// twice.
+	if persist_manager_post_event {
+		nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	}
+
+	// Now reload nodes[0]...
+	persister = test_utils::TestPersister::new();
+	let keys_manager = &chanmon_cfgs[0].keys_manager;
+	new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+	nodes[0].chain_monitor = &new_chain_monitor;
+	let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+	let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+		&mut chan_0_monitor_read, keys_manager).unwrap();
+	assert!(chan_0_monitor_read.is_empty());
+
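+	// The ChannelManager is deserialized via ChannelManagerReadArgs, which must be given all
+	// of the node's ChannelMonitors.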
+	let (_, nodes_0_deserialized_tmp) = {
+		let mut channel_monitors = HashMap::new();
+		channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+		<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
+			::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
+				default_config: Default::default(),
+				keys_manager,
+				fee_estimator: node_cfgs[0].fee_estimator,
+				chain_monitor: nodes[0].chain_monitor,
+				tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+				logger: nodes[0].logger,
+				channel_monitors,
+			}).unwrap()
+	};
+	nodes_0_deserialized = nodes_0_deserialized_tmp;
+
+	assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+	check_added_monitors!(nodes[0], 1);
+	nodes[0].node = &nodes_0_deserialized;
+
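+	// If the ChannelManager was serialized before the payment event was handled, the event is
+	// regenerated on reload; if it was serialized afterwards, it must not appear again.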
+	if persist_manager_post_event {
+		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+	} else {
+		if payment_timeout {
+			expect_payment_failed!(nodes[0], payment_hash, true);
+		} else {
+			expect_payment_sent!(nodes[0], payment_preimage);
+		}
+	}
+
+	// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
+	// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
+	// payment events should kick in, leaving us with no pending events here.
+	let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
+	nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
+	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+}
+
+#[test]
+fn test_dup_htlc_onchain_fails_on_reload() {
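+	// Parameters are (persist_manager_post_event, confirm_commitment_tx, payment_timeout);
+	// payment_timeout requires confirm_commitment_tx (asserted above).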
+	do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
+	do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
+	do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
+	do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
+	do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
+	do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
+}