@@ -15,9 +15,10 @@ use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, ChannelMonitor};
 use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
-use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, Retry, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
+use crate::routing::router::{RouteParameters, PaymentParameters};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
@@ -494,7 +495,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 }
 
 #[cfg(feature = "std")]
-fn do_test_data_loss_protect(reconnect_panicing: bool) {
+fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) {
 	// When we get a data_loss_protect proving we're behind, we immediately panic as the
 	// chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
 	// panic message informs the user they should force-close without broadcasting, which is tested
@@ -518,8 +519,38 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 	let previous_node_state = nodes[0].node.encode();
 	let previous_chain_monitor_state = get_monitor!(nodes[0], chan.2).encode();
 
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+	assert!(!substantially_old || !not_stale, "substantially_old and not_stale doesn't make sense");
+	if not_stale || !substantially_old {
+		// Previously, we'd only hit the data_loss_protect assertion if we had a state which
+		// revoked at least two revocations ago, not the latest revocation. Here, we use
+		// `not_stale` to test the boundary condition.
+		let pay_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(), 100, false);
+		let route_params = RouteParameters::from_payment_params_and_value(pay_params, 40000);
+		nodes[0].node.send_spontaneous_payment_with_retry(None, RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), route_params, Retry::Attempts(0));
+		check_added_monitors(&nodes[0], 1);
+		let update_add_commit = SendEvent::from_node(&nodes[0]);
+
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_commit.msgs[0]);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_add_commit.commitment_msg);
+		check_added_monitors(&nodes[1], 1);
+		let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+		check_added_monitors(&nodes[0], 1);
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+		if !not_stale {
+			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+			check_added_monitors(&nodes[0], 1);
+			// A now revokes their original state, at which point reconnect should panic
+			let raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+			check_added_monitors(&nodes[1], 1);
+			expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+		}
+	} else {
+		send_payment(&nodes[0], &[&nodes[1]], 8000000);
+		send_payment(&nodes[0], &[&nodes[1]], 8000000);
+	}
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -536,99 +567,131 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 
 		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
-		// Check we close channel detecting A is fallen-behind
-		// Check that we sent the warning message when we detected that A has fallen behind,
-		// and give the possibility for A to recover from the warning.
+		// If A has fallen behind substantially, B should send it a message letting it know
+		// that.
 		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-		let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction: 0 (received) vs 4 (expected)".to_owned();
-
-		let warn_reestablish = nodes[1].node.get_and_clear_pending_msg_events();
-		assert_eq!(warn_reestablish.len(), 2);
-		match warn_reestablish[1] {
-			MessageSendEvent::HandleError { action: ErrorAction::SendWarningMessage { ref msg, .. }, .. } => {
-				assert_eq!(msg.data, warn_msg);
-			},
-			_ => panic!("Unexpected event"),
+		let reestablish_msg;
+		if substantially_old {
+			let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction: 0 (received) vs 4 (expected)".to_owned();
+
+			let warn_reestablish = nodes[1].node.get_and_clear_pending_msg_events();
+			assert_eq!(warn_reestablish.len(), 2);
+			match warn_reestablish[1] {
+				MessageSendEvent::HandleError { action: ErrorAction::SendWarningMessage { ref msg, .. }, .. } => {
+					assert_eq!(msg.data, warn_msg);
+				},
+				_ => panic!("Unexpected events: {:?}", warn_reestablish),
+			}
+			reestablish_msg = match &warn_reestablish[0] {
+				MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+				_ => panic!("Unexpected events: {:?}", warn_reestablish),
+			};
+		} else {
+			let msgs = nodes[1].node.get_and_clear_pending_msg_events();
+			assert!(msgs.len() >= 4);
+			match msgs.last() {
+				Some(MessageSendEvent::SendChannelUpdate { .. }) => {},
+				_ => panic!("Unexpected events: {:?}", msgs),
+			}
+			assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::SendRevokeAndACK { .. })));
+			assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::UpdateHTLCs { .. })));
+			reestablish_msg = match &msgs[0] {
+				MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+				_ => panic!("Unexpected events: {:?}", msgs),
+			};
 		}
-		let reestablish_msg = match &warn_reestablish[0] {
-			MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
-			_ => panic!("Unexpected event"),
-		};
 
 		{
-			let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-			// The node B should not broadcast the transaction to force close the channel!
+			let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			// The node B should never force-close the channel.
 			assert!(node_txn.is_empty());
 		}
 
 		// Check A panics upon seeing proof it has fallen behind.
-		assert!(std::panic::catch_unwind(|| {
+		let reconnect_res = std::panic::catch_unwind(|| {
 			nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_msg);
-		}).is_err());
-		std::mem::forget(nodes); // Skip the `Drop` handler for `Node`
-		return; // By this point we should have panic'ed!
-	}
+		});
+		if not_stale {
+			assert!(reconnect_res.is_ok());
+			// At this point A gets confused because B expects a commitment state newer than A
+			// has sent, but not a newer revocation secret, so A just (correctly) closes.
+			check_closed_broadcast(&nodes[0], 1, true);
+			check_added_monitors(&nodes[0], 1);
+			check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError {
+				err: "Peer attempted to reestablish channel with a future remote commitment transaction: 2 (received) vs 1 (expected)".to_owned()
+			}, [nodes[1].node.get_our_node_id()], 1000000);
+		} else {
+			assert!(reconnect_res.is_err());
+			// Skip the `Drop` handler for `Node`s as some may be in an invalid (panicked) state.
+			std::mem::forget(nodes);
+		}
+	} else {
+		assert!(!not_stale, "We only care about the stale case when not testing panicking");
 
-	nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
-	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
-	{
-		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 0);
-	}
+		nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
+		{
+			let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			assert_eq!(node_txn.len(), 0);
+		}
 
-	for msg in nodes[0].node.get_and_clear_pending_msg_events() {
-		if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
-		} else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+		for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+			if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+			} else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+				match action {
+					&ErrorAction::DisconnectPeer { ref msg } => {
+						assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+					},
+					_ => panic!("Unexpected event!"),
+				}
+			} else {
+				panic!("Unexpected event {:?}", msg)
+			}
+		}
+
+		// after the warning message sent by B, we should not able to
+		// use the channel, or reconnect with success to the channel.
+		assert!(nodes[0].node.list_usable_channels().is_empty());
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
+		let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+		let mut err_msgs_0 = Vec::with_capacity(1);
+		if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
 			match action {
-				&ErrorAction::DisconnectPeer { ref msg } => {
-					assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+				&ErrorAction::SendErrorMessage { ref msg } => {
+					assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
+					err_msgs_0.push(msg.clone());
 				},
 				_ => panic!("Unexpected event!"),
 			}
 		} else {
-			panic!("Unexpected event {:?}", msg)
-		}
-	}
-
-	// after the warning message sent by B, we should not able to
-	// use the channel, or reconnect with success to the channel.
-	assert!(nodes[0].node.list_usable_channels().is_empty());
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
-		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
-	}, true).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
-	}, false).unwrap();
-	let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
-
-	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
-	let mut err_msgs_0 = Vec::with_capacity(1);
-	if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
-		match action {
-			&ErrorAction::SendErrorMessage { ref msg } => {
-				assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
-				err_msgs_0.push(msg.clone());
-			},
-			_ => panic!("Unexpected event!"),
+			panic!("Unexpected event!");
 		}
-	} else {
-		panic!("Unexpected event!");
+		assert_eq!(err_msgs_0.len(), 1);
+		nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+		assert!(nodes[1].node.list_usable_channels().is_empty());
+		check_added_monitors!(nodes[1], 1);
+		check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
+			, [nodes[0].node.get_our_node_id()], 1000000);
+		check_closed_broadcast!(nodes[1], false);
 	}
-	assert_eq!(err_msgs_0.len(), 1);
-	nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
-	assert!(nodes[1].node.list_usable_channels().is_empty());
-	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
-		, [nodes[0].node.get_our_node_id()], 1000000);
-	check_closed_broadcast!(nodes[1], false);
 }
 
 #[test]
 #[cfg(feature = "std")]
 fn test_data_loss_protect() {
-	do_test_data_loss_protect(true);
-	do_test_data_loss_protect(false);
+	do_test_data_loss_protect(true, false, true);
+	do_test_data_loss_protect(true, true, false);
+	do_test_data_loss_protect(true, false, false);
+	do_test_data_loss_protect(false, true, false);
+	do_test_data_loss_protect(false, false, false);
 }
 
 #[test]