@@ -3860,3 +3860,145 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
}
+
+#[test]
+#[cfg(feature = "std")]
+fn test_single_channel_multiple_mpp() {
+	// Test what happens when we attempt to claim an MPP with many parts that came to us through
+	// the same channel with a synchronous persistence interface which has very high latency.
+	//
+	// Previously, if a `revoke_and_ack` came in while we were still running in
+	// `ChannelManager::claim_funds` we'd end up hanging waiting to apply a
+	// `ChannelMonitorUpdate` until after it completed. See the commit which introduced this test
+	// for more info.
+	let chanmon_cfgs = create_chanmon_cfgs(7);
+	let node_cfgs = create_node_cfgs(7, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(7, &node_cfgs, &[None; 7]);
+	let mut nodes = create_network(7, &node_cfgs, &node_chanmgrs);
+
+	let node_5_id = nodes[5].node.get_our_node_id();
+	let node_6_id = nodes[6].node.get_our_node_id();
+
+	// Send an MPP payment in four parts along the path shown from top to bottom
+	//       0
+	//   1 2 3 4
+	//       5
+	//       6
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 3, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 4, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 3, 5, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 4, 5, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 5, 6, 1_000_000, 0);
+
+	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[6], 30_000_000);
+
+	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[5], &nodes[6]], &[&nodes[2], &nodes[5], &nodes[6]], &[&nodes[3], &nodes[5], &nodes[6]], &[&nodes[4], &nodes[5], &nodes[6]]], 30_000_000, payment_hash, payment_secret);
+
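+	// Make node 6's `ChannelMonitor` persistence block until we release each write with a
+	// `send` on `do_a_write` (this is what the `write_blocker` installed below is used for
+	// throughout the rest of the test).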
+	let (do_a_write, blocker) = std::sync::mpsc::sync_channel(0);
+	*nodes[6].chain_monitor.write_blocker.lock().unwrap() = Some(blocker);
+
+	// Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }.
+	let claim_node: &'static TestChannelManager<'static, 'static> =
+		unsafe { std::mem::transmute(nodes[6].node as &TestChannelManager) };
+	let thrd = std::thread::spawn(move || {
+		// Initiate the claim in a background thread as it will immediately block waiting on the
+		// `write_blocker` we set above.
+		claim_node.claim_funds(payment_preimage);
+	});
+
+	// First unlock one monitor so that we have a pending
+	// `update_fulfill_htlc`/`commitment_signed` pair to pass to our counterparty.
+	do_a_write.send(()).unwrap();
+
+	// Then fetch the `update_fulfill_htlc`/`commitment_signed`. Note that the
+	// `get_and_clear_pending_msg_events` will immediately hang trying to take a peer lock which
+	// `claim_funds` is holding. Thus, we release a second write after a small sleep in the
+	// background to give `claim_funds` a chance to step forward, unblocking
+	// `get_and_clear_pending_msg_events`.
+	const MAX_THREAD_INIT_TIME: std::time::Duration = std::time::Duration::from_millis(10);
+	let do_a_write_background = do_a_write.clone();
+	let thrd2 = std::thread::spawn(move || {
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+	});
+	let first_updates = get_htlc_update_msgs(&nodes[6], &nodes[5].node.get_our_node_id());
+	thrd2.join().unwrap();
+
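+	// Disconnect node 5 from its upstream peers so that handling the claims below doesn't
+	// generate messages for nodes 1 through 4 which this test would otherwise have to drain.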
+	nodes[5].node.peer_disconnected(nodes[1].node.get_our_node_id());
+	nodes[5].node.peer_disconnected(nodes[2].node.get_our_node_id());
+	nodes[5].node.peer_disconnected(nodes[3].node.get_our_node_id());
+	nodes[5].node.peer_disconnected(nodes[4].node.get_our_node_id());
+
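+	// Deliver the first claim to node 5 and grab the `revoke_and_ack`/`commitment_signed` pair
+	// it responds with.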
+	nodes[5].node.handle_update_fulfill_htlc(node_6_id, &first_updates.update_fulfill_htlcs[0]);
+	check_added_monitors(&nodes[5], 1);
+	expect_payment_forwarded!(nodes[5], nodes[1], nodes[6], Some(1000), false, false);
+	nodes[5].node.handle_commitment_signed(node_6_id, &first_updates.commitment_signed);
+	check_added_monitors(&nodes[5], 1);
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[5], &node_6_id);
+
+	// Now, handle the `revoke_and_ack` from node 5. Note that `claim_funds` is still blocked on
+	// our peer lock, so we have to release a write to let it process.
+	let do_a_write_background = do_a_write.clone();
+	let thrd3 = std::thread::spawn(move || {
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+	});
+	nodes[6].node.handle_revoke_and_ack(node_5_id, &raa);
+	thrd3.join().unwrap();
+
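+	// Release the remaining two blocked monitor writes in the background, then wait for the
+	// background `claim_funds` call to finish.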
+	let thrd4 = std::thread::spawn(move || {
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write.send(()).unwrap();
+		do_a_write.send(()).unwrap();
+	});
+
+	thrd4.join().unwrap();
+	thrd.join().unwrap();
+
+	expect_payment_claimed!(nodes[6], payment_hash, 30_000_000);
+
+	// At the end, we should have 5 ChannelMonitorUpdates - 4 for HTLC claims, and one for the
+	// above `revoke_and_ack`.
+	check_added_monitors(&nodes[6], 5);
+
+	// Now drive everything to the end, at least as far as node 6 is concerned...
+	*nodes[6].chain_monitor.write_blocker.lock().unwrap() = None;
+	nodes[6].node.handle_commitment_signed(node_5_id, &cs);
+	check_added_monitors(&nodes[6], 1);
+
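+	// The next commitment dance delivers node 6's claims for the second and third MPP parts
+	// to node 5.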
+	let (updates, raa) = get_updates_and_revoke(&nodes[6], &nodes[5].node.get_our_node_id());
+	nodes[5].node.handle_update_fulfill_htlc(node_6_id, &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[5], nodes[2], nodes[6], Some(1000), false, false);
+	nodes[5].node.handle_update_fulfill_htlc(node_6_id, &updates.update_fulfill_htlcs[1]);
+	expect_payment_forwarded!(nodes[5], nodes[3], nodes[6], Some(1000), false, false);
+	nodes[5].node.handle_commitment_signed(node_6_id, &updates.commitment_signed);
+	nodes[5].node.handle_revoke_and_ack(node_6_id, &raa);
+	check_added_monitors(&nodes[5], 4);
+
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[5], &node_6_id);
+
+	nodes[6].node.handle_revoke_and_ack(node_5_id, &raa);
+	nodes[6].node.handle_commitment_signed(node_5_id, &cs);
+	check_added_monitors(&nodes[6], 2);
+
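+	// One more round delivers the claim for the fourth and final MPP part.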
+	let (updates, raa) = get_updates_and_revoke(&nodes[6], &nodes[5].node.get_our_node_id());
+	nodes[5].node.handle_update_fulfill_htlc(node_6_id, &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[5], nodes[4], nodes[6], Some(1000), false, false);
+	nodes[5].node.handle_commitment_signed(node_6_id, &updates.commitment_signed);
+	nodes[5].node.handle_revoke_and_ack(node_6_id, &raa);
+	check_added_monitors(&nodes[5], 3);
+
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[5], &node_6_id);
+	nodes[6].node.handle_revoke_and_ack(node_5_id, &raa);
+	nodes[6].node.handle_commitment_signed(node_5_id, &cs);
+	check_added_monitors(&nodes[6], 2);
+
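+	// A final `revoke_and_ack` from node 6 completes the last commitment update on the
+	// node 5 <-> node 6 channel.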
+	let raa = get_event_msg!(nodes[6], MessageSendEvent::SendRevokeAndACK, node_5_id);
+	nodes[5].node.handle_revoke_and_ack(node_6_id, &raa);
+	check_added_monitors(&nodes[5], 1);
+}