Commit a7cea3d (parent: 8239045)

Move test_dup_htlc_onchain_fails_on_reload to payment_tests

test_dup_htlc_onchain_fails_on_reload is now more of a payment test than a functional test, as it exercises the handling of pending payments.

2 files changed: +174 -172 lines

lightning/src/ln/functional_tests.rs (+1 -168)
@@ -12,7 +12,7 @@
 //! claim outputs on-chain.
 
 use chain;
-use chain::{Confirm, Listen, Watch, ChannelMonitorUpdateErr};
+use chain::{Confirm, Listen, Watch};
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use chain::transaction::OutPoint;
@@ -4151,173 +4151,6 @@ fn mpp_failure() {
     fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
 }
 
-fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
-    // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
-    // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
-    // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
-    // the ChannelMonitor tells it to.
-    //
-    // If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
-    // ChannelManaver the HTLC event until after the monitor is re-persisted. This should prevent a
-    // duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
-    let chanmon_cfgs = create_chanmon_cfgs(2);
-    let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-    let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-    let persister: test_utils::TestPersister;
-    let new_chain_monitor: test_utils::TestChainMonitor;
-    let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
-    let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-    let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-
-    // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
-    // nodes[0].
-    let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
-    nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
-    check_closed_broadcast!(nodes[0], true);
-    check_added_monitors!(nodes[0], 1);
-    check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
-
-    nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-    nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-
-    // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
-    connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
-    let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-    assert_eq!(node_txn.len(), 3);
-    assert_eq!(node_txn[0], node_txn[1]);
-    check_spends!(node_txn[1], funding_tx);
-    check_spends!(node_txn[2], node_txn[1]);
-    let timeout_txn = vec![node_txn[2].clone()];
-
-    assert!(nodes[1].node.claim_funds(payment_preimage));
-    check_added_monitors!(nodes[1], 1);
-
-    let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-    connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
-    check_closed_broadcast!(nodes[1], true);
-    check_added_monitors!(nodes[1], 1);
-    check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
-    let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-
-    header.prev_blockhash = nodes[0].best_block_hash();
-    connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
-
-    if confirm_commitment_tx {
-        connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
-    }
-
-    header.prev_blockhash = nodes[0].best_block_hash();
-    let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { claim_txn } };
-
-    if payment_timeout {
-        assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
-        connect_block(&nodes[0], &claim_block);
-        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
-    }
-
-    // Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
-    // returning TemporaryFailure. This should cause the claim event to never make its way to the
-    // ChannelManager.
-    chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap().clear();
-    chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-
-    if payment_timeout {
-        connect_blocks(&nodes[0], 1);
-    } else {
-        connect_block(&nodes[0], &claim_block);
-    }
-
-    let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
-    let mon_updates: Vec<_> = chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap()
-        .get_mut(&funding_txo).unwrap().drain().collect();
-    assert_eq!(mon_updates.len(), 1);
-    assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
-    assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-
-    // If we persist the ChannelManager here, we should get the PaymentSent event after
-    // deserialization.
-    let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
-    if !persist_manager_post_event {
-        nodes[0].node.write(&mut chan_manager_serialized).unwrap();
-    }
-
-    // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
-    // payment sent event.
-    chanmon_cfgs[0].persister.set_update_ret(Ok(()));
-    let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-    get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-    nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]);
-    if payment_timeout {
-        expect_payment_failed!(nodes[0], payment_hash, true);
-    } else {
-        expect_payment_sent!(nodes[0], payment_preimage);
-    }
-
-    // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
-    // twice.
-    if persist_manager_post_event {
-        nodes[0].node.write(&mut chan_manager_serialized).unwrap();
-    }
-
-    // Now reload nodes[0]...
-    persister = test_utils::TestPersister::new();
-    let keys_manager = &chanmon_cfgs[0].keys_manager;
-    new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
-    nodes[0].chain_monitor = &new_chain_monitor;
-    let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
-    let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
-        &mut chan_0_monitor_read, keys_manager).unwrap();
-    assert!(chan_0_monitor_read.is_empty());
-
-    let (_, nodes_0_deserialized_tmp) = {
-        let mut channel_monitors = HashMap::new();
-        channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-        <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
-            ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
-            default_config: Default::default(),
-            keys_manager,
-            fee_estimator: node_cfgs[0].fee_estimator,
-            chain_monitor: nodes[0].chain_monitor,
-            tx_broadcaster: nodes[0].tx_broadcaster.clone(),
-            logger: nodes[0].logger,
-            channel_monitors,
-        }).unwrap()
-    };
-    nodes_0_deserialized = nodes_0_deserialized_tmp;
-
-    assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
-    check_added_monitors!(nodes[0], 1);
-    nodes[0].node = &nodes_0_deserialized;
-
-    if persist_manager_post_event {
-        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-    } else {
-        if payment_timeout {
-            expect_payment_failed!(nodes[0], payment_hash, true);
-        } else {
-            expect_payment_sent!(nodes[0], payment_preimage);
-        }
-    }
-
-    // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
-    // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
-    // payment events should kick in, leaving us with no pending events here.
-    let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
-    nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
-    assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-}
-
-#[test]
-fn test_dup_htlc_onchain_fails_on_reload() {
-    do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
-    do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
-    do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
-    do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
-    do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
-    do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
-}
-
 #[test]
 fn test_manager_serialize_deserialize_events() {
     // This test makes sure the events field in ChannelManager survives de/serialization

lightning/src/ln/payment_tests.rs (+173 -4)
@@ -11,10 +11,11 @@
 //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
 //! payments thereafter.
 
-use chain::Watch;
-use chain::channelmonitor::ChannelMonitor;
+use chain::{ChannelMonitorUpdateErr, Listen, Watch};
+use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
+use chain::transaction::OutPoint;
 use ln::{PaymentPreimage, PaymentHash};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ErrorAction};
@@ -24,10 +25,11 @@ use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProv
 use util::errors::APIError;
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::ser::{ReadableArgs, Writeable};
+use io;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
-use bitcoin::BlockHash;
+use bitcoin::{Block, BlockHeader, BlockHash};
 
 use prelude::*;
 
@@ -394,3 +396,170 @@ fn retry_with_no_persist() {
     pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 100_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
     claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
 }
+
+fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
+    // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
+    // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
+    // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
+    // the ChannelMonitor tells it to.
+    //
+    // If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
+    // ChannelManaver the HTLC event until after the monitor is re-persisted. This should prevent a
+    // duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
+    let chanmon_cfgs = create_chanmon_cfgs(2);
+    let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+    let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+    let persister: test_utils::TestPersister;
+    let new_chain_monitor: test_utils::TestChainMonitor;
+    let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+    let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+    let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+    // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
+    // nodes[0].
+    let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
+    nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+    check_closed_broadcast!(nodes[0], true);
+    check_added_monitors!(nodes[0], 1);
+    check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+
+    nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+    nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+    // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
+    connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+    let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+    assert_eq!(node_txn.len(), 3);
+    assert_eq!(node_txn[0], node_txn[1]);
+    check_spends!(node_txn[1], funding_tx);
+    check_spends!(node_txn[2], node_txn[1]);
+    let timeout_txn = vec![node_txn[2].clone()];
+
+    assert!(nodes[1].node.claim_funds(payment_preimage));
+    check_added_monitors!(nodes[1], 1);
+
+    let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+    connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
+    check_closed_broadcast!(nodes[1], true);
+    check_added_monitors!(nodes[1], 1);
+    check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+    let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+    header.prev_blockhash = nodes[0].best_block_hash();
+    connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
+
+    if confirm_commitment_tx {
+        connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
+    }
+
+    header.prev_blockhash = nodes[0].best_block_hash();
+    let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { claim_txn } };
+
+    if payment_timeout {
+        assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
+        connect_block(&nodes[0], &claim_block);
+        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+    }
+
+    // Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
+    // returning TemporaryFailure. This should cause the claim event to never make its way to the
+    // ChannelManager.
+    chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap().clear();
+    chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+    if payment_timeout {
+        connect_blocks(&nodes[0], 1);
+    } else {
+        connect_block(&nodes[0], &claim_block);
+    }
+
+    let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+    let mon_updates: Vec<_> = chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap()
+        .get_mut(&funding_txo).unwrap().drain().collect();
+    assert_eq!(mon_updates.len(), 1);
+    assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+    assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+    // If we persist the ChannelManager here, we should get the PaymentSent event after
+    // deserialization.
+    let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
+    if !persist_manager_post_event {
+        nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+    }
+
+    // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
+    // payment sent event.
+    chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+    let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+    get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+    nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]);
+    if payment_timeout {
+        expect_payment_failed!(nodes[0], payment_hash, true);
+    } else {
+        expect_payment_sent!(nodes[0], payment_preimage);
+    }
+
+    // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
+    // twice.
+    if persist_manager_post_event {
+        nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+    }
+
+    // Now reload nodes[0]...
+    persister = test_utils::TestPersister::new();
+    let keys_manager = &chanmon_cfgs[0].keys_manager;
+    new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+    nodes[0].chain_monitor = &new_chain_monitor;
+    let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+    let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+        &mut chan_0_monitor_read, keys_manager).unwrap();
+    assert!(chan_0_monitor_read.is_empty());
+
+    let (_, nodes_0_deserialized_tmp) = {
+        let mut channel_monitors = HashMap::new();
+        channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+        <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
+            ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
+            default_config: Default::default(),
+            keys_manager,
+            fee_estimator: node_cfgs[0].fee_estimator,
+            chain_monitor: nodes[0].chain_monitor,
+            tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+            logger: nodes[0].logger,
+            channel_monitors,
+        }).unwrap()
+    };
+    nodes_0_deserialized = nodes_0_deserialized_tmp;
+
+    assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+    check_added_monitors!(nodes[0], 1);
+    nodes[0].node = &nodes_0_deserialized;
+
+    if persist_manager_post_event {
+        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+    } else {
+        if payment_timeout {
+            expect_payment_failed!(nodes[0], payment_hash, true);
+        } else {
+            expect_payment_sent!(nodes[0], payment_preimage);
+        }
+    }
+
+    // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
+    // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
+    // payment events should kick in, leaving us with no pending events here.
+    let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
+    nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
+    assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+}
+
+#[test]
+fn test_dup_htlc_onchain_fails_on_reload() {
+    do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
+    do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
+    do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
+    do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
+    do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
+    do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
+}
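The moved test pins down an ordering invariant: when an HTLC resolves on-chain, the resolution event is withheld from the ChannelManager until the ChannelMonitor update has been persisted, and replaying the resolving block after a reload must not surface the event a second time. As a minimal, self-contained sketch of that de-duplication idea (the `Manager` type and its methods below are hypothetical stand-ins for illustration, not rust-lightning's API), tracking pending payments in a persisted set means only the first resolution yields an event:

```rust
use std::collections::HashSet;

// Hypothetical stand-in; rust-lightning's ChannelManager and Event types
// are far richer than this sketch.
#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
struct PaymentHash([u8; 32]);

#[derive(Default)]
struct Manager {
    // Pending outbound payments, serialized as part of the manager's state.
    pending: HashSet<PaymentHash>,
}

impl Manager {
    fn send(&mut self, hash: PaymentHash) {
        self.pending.insert(hash);
    }

    // Called when a monitor reports that an HTLC resolved on-chain. Only the
    // first resolution for a given payment yields an event; a replayed block
    // (e.g. re-connected after a reload) finds nothing pending and is a no-op.
    fn htlc_resolved(&mut self, hash: PaymentHash) -> Option<&'static str> {
        if self.pending.remove(&hash) { Some("PaymentSent") } else { None }
    }
}

fn main() {
    let hash = PaymentHash([0x42; 32]);
    let mut mgr = Manager::default();
    mgr.send(hash);
    assert_eq!(mgr.htlc_resolved(hash), Some("PaymentSent")); // first claim -> event
    assert_eq!(mgr.htlc_resolved(hash), None); // replayed block -> deduplicated
    println!("duplicate resolution produced no second event");
}
```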

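A note on the parameter matrix: the `#[test]` wrapper covers six of the eight flag combinations because `payment_timeout` requires `confirm_commitment_tx` (the helper's `assert!(confirm_commitment_tx)` guards against spending the HTLC-timeout output before the CSV delay has passed), ruling out the two `(_, false, true)` cases. A small sketch, assuming only that constraint, reproduces the same six cases:

```rust
// Enumerate (persist_manager_post_event, confirm_commitment_tx, payment_timeout)
// combinations, skipping payment_timeout without confirm_commitment_tx -- the
// same six cases test_dup_htlc_onchain_fails_on_reload spells out by hand.
fn main() {
    let mut cases = Vec::new();
    for persist in [true, false] {
        for confirm in [true, false] {
            for timeout in [true, false] {
                if timeout && !confirm {
                    continue; // would spend the HTLC-timeout below the CSV delay
                }
                cases.push((persist, confirm, timeout));
            }
        }
    }
    assert_eq!(cases.len(), 6);
    for (persist, confirm, timeout) in cases {
        println!("do_test_dup_htlc_onchain_fails_on_reload({}, {}, {});", persist, confirm, timeout);
    }
}
```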