Skip to content

Commit 0eaed3b

Browse files
committed
Add a test for MonitorEvents being held when they complete out-of-order
1 parent 6ec0b06 commit 0eaed3b

File tree

2 files changed

+82
-3
lines changed

2 files changed

+82
-3
lines changed

lightning/src/chain/chainmonitor.rs

+76-3
Original file line numberDiff line numberDiff line change
@@ -728,15 +728,16 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
728728
#[cfg(test)]
729729
mod tests {
730730
use bitcoin::BlockHeader;
731-
use ::{check_added_monitors, check_closed_broadcast, check_closed_event, expect_payment_sent};
732-
use ::{get_local_commitment_txn, get_route_and_payment_hash, unwrap_send_err};
731+
use ::{check_added_monitors, check_closed_broadcast, check_closed_event, expect_payment_sent, get_event_msg};
732+
use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
733733
use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
734734
use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
735735
use ln::channelmanager::PaymentSendFailure;
736736
use ln::features::InitFeatures;
737737
use ln::functional_test_utils::*;
738+
use ln::msgs::ChannelMessageHandler;
738739
use util::errors::APIError;
739-
use util::events::{ClosureReason, MessageSendEventsProvider};
740+
use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
740741
use util::test_utils::{OnRegisterOutput, TxOutReference};
741742

742743
/// Tests that in-block dependent transactions are processed by `block_connected` when not
@@ -782,6 +783,78 @@ mod tests {
782783
nodes[1].node.get_and_clear_pending_events();
783784
}
784785

786+
#[test]
787+
fn test_async_ooo_offchain_updates() {
788+
// Test that if we have multiple offchain updates being persisted and they complete
789+
// out-of-order, the ChainMonitor waits until all have completed before informing the
790+
// ChannelManager.
791+
let chanmon_cfgs = create_chanmon_cfgs(2);
792+
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
793+
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
794+
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
795+
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
796+
797+
// Route two payments to be claimed at the same time.
798+
let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
799+
let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
800+
801+
chanmon_cfgs[1].persister.offchain_sync_monitor_persistences.lock().unwrap().clear();
802+
chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
803+
804+
nodes[1].node.claim_funds(payment_preimage_1);
805+
check_added_monitors!(nodes[1], 1);
806+
nodes[1].node.claim_funds(payment_preimage_2);
807+
check_added_monitors!(nodes[1], 1);
808+
809+
chanmon_cfgs[1].persister.set_update_ret(Ok(()));
810+
811+
let persistences = chanmon_cfgs[1].persister.offchain_sync_monitor_persistences.lock().unwrap().clone();
812+
assert_eq!(persistences.len(), 1);
813+
let (funding_txo, updates) = persistences.iter().next().unwrap();
814+
assert_eq!(updates.len(), 2);
815+
816+
// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
817+
// fail either way, but if it fails intermittently it depends on the ordering of updates.
818+
let mut update_iter = updates.iter();
819+
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
820+
assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
821+
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
822+
823+
// Now manually walk the commitment signed dance - because we claimed two payments
824+
// back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
825+
826+
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
827+
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
828+
expect_payment_sent!(nodes[0], payment_preimage_1);
829+
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
830+
check_added_monitors!(nodes[0], 1);
831+
let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
832+
833+
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
834+
check_added_monitors!(nodes[1], 1);
835+
let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
836+
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
837+
check_added_monitors!(nodes[1], 1);
838+
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
839+
840+
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
841+
expect_payment_sent!(nodes[0], payment_preimage_2);
842+
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
843+
check_added_monitors!(nodes[0], 1);
844+
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
845+
check_added_monitors!(nodes[0], 1);
846+
let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
847+
848+
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
849+
check_added_monitors!(nodes[1], 1);
850+
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
851+
check_added_monitors!(nodes[1], 1);
852+
let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
853+
854+
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
855+
check_added_monitors!(nodes[0], 1);
856+
}
857+
785858
fn do_chainsync_pauses_events(block_timeout: bool) {
786859
// When a chainsync monitor update occurs, any MonitorUpdates should be held before being
787860
// passed upstream. This tests that behavior, as well as some ways it might go wrong.

lightning/src/util/test_utils.rs

+6
Original file line numberDiff line numberDiff line change
@@ -170,13 +170,17 @@ pub struct TestPersister {
170170
/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
171171
/// MonitorUpdateId here.
172172
pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
173+
/// When we get an update_persisted_channel call *with* a ChannelMonitorUpdate, we insert the
174+
/// MonitorUpdateId here.
175+
pub offchain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
173176
}
174177
impl TestPersister {
175178
pub fn new() -> Self {
176179
Self {
177180
update_ret: Mutex::new(Ok(())),
178181
next_update_ret: Mutex::new(None),
179182
chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
183+
offchain_sync_monitor_persistences: Mutex::new(HashMap::new()),
180184
}
181185
}
182186

@@ -204,6 +208,8 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
204208
}
205209
if update.is_none() {
206210
self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
211+
} else {
212+
self.offchain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
207213
}
208214
ret
209215
}

0 commit comments

Comments
 (0)