diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 7a32434b86f..09a935c646b 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -49,7 +49,7 @@ use lightning::ln::functional_test_utils::*; use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice}; use lightning::offers::invoice_request::UnsignedInvoiceRequest; use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath}; -use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState}; +use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState, ops}; use lightning::util::errors::APIError; use lightning::util::logger::Logger; use lightning::util::config::UserConfig; @@ -72,6 +72,8 @@ use std::sync::atomic; use std::io::Cursor; use bitcoin::bech32::u5; +#[allow(unused)] +const ASYNC_OPS: u32 = ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT; const MAX_FEE: u32 = 10_000; struct FuzzEstimator { ret_val: atomic::AtomicU32, @@ -297,7 +299,6 @@ impl SignerProvider for KeyProvider { inner, state, disable_revocation_policy_check: false, - available: Arc::new(Mutex::new(true)), }) } @@ -829,7 +830,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == node_id { for update_add in update_add_htlcs.iter() { - out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_add_htlc to node {} from node {}.\n", idx, $node).as_bytes()); if !$corrupt_forward { dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add); } else { @@ -844,19 +845,19 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } for update_fulfill in update_fulfill_htlcs.iter() { - out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fulfill_htlc to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill); } for update_fail in update_fail_htlcs.iter() { - out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fail_htlc to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail); } for update_fail_malformed in update_fail_malformed_htlcs.iter() { - out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fail_malformed_htlc to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed); } if let Some(msg) = update_fee { - out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fee to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg); } let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || @@ -873,7 +874,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }); break; } - out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering commitment_signed to node {} from node {}.\n", idx, 
$node).as_bytes()); dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed); break; } @@ -882,7 +883,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering revoke_and_ack to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg); } } @@ -890,7 +891,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering channel_reestablish to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg); } } @@ -913,7 +914,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { _ => if out.may_fail.load(atomic::Ordering::Acquire) { return; } else { - panic!("Unhandled message event {:?}", event) + panic!("Unhandled message event on node {}, {:?}", $node, event) }, } if $limit_events != ProcessMessages::AllMessages { @@ -1289,15 +1290,124 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); }, + #[cfg(async_signing)] + 0xa0 => { + let states = keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xa1 => { + let states = keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[0].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa2 => { + let states = keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[0].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa3 => { + let states = keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[0].signer_unblocked(None); + } + + #[cfg(async_signing)] + 0xa4 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xa5 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa6 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa7 => { + let 
states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[1].signer_unblocked(None); + } + + #[cfg(async_signing)] + 0xa8 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xa9 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xaa => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xab => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[1].signer_unblocked(None); + } + + #[cfg(async_signing)] + 0xac => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xad => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[2].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xae => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[2].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xaf => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[2].signer_unblocked(None); + } + 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. // First make sure there are no pending monitor updates, resetting the error state // and calling force_channel_monitor_updated for each monitor. 
-				*monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
-				*monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
-				*monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
-
+				out.locked_write(b"Restoring monitors...\n");
 				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
 					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
 					nodes[0].process_monitor_events();
@@ -1316,7 +1426,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 				}

 				// Next, make sure peers are all connected to each other
+				out.locked_write(b"Reconnecting peers...\n");
+
 				if chan_a_disconnected {
+					out.locked_write(b"Reconnecting node 0 and node 1...\n");
 					nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init {
 						features: nodes[1].init_features(), networks: None, remote_network_address: None
 					}, true).unwrap();
@@ -1326,6 +1439,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 					chan_a_disconnected = false;
 				}
 				if chan_b_disconnected {
+					out.locked_write(b"Reconnecting node 1 and node 2...\n");
 					nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init {
 						features: nodes[2].init_features(), networks: None, remote_network_address: None
 					}, true).unwrap();
@@ -1335,8 +1449,33 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 					chan_b_disconnected = false;
 				}

+				out.locked_write(b"Restoring signers...\n");
+
+				*monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+				*monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+				*monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+
+				#[cfg(async_signing)]
+				{
+					for state in keys_manager_a.enforcement_states.lock().unwrap().values() {
+						state.lock().unwrap().set_signer_available(!0);
+					}
+					for state in keys_manager_b.enforcement_states.lock().unwrap().values() {
+						state.lock().unwrap().set_signer_available(!0);
+					}
+					for state in keys_manager_c.enforcement_states.lock().unwrap().values() {
+						state.lock().unwrap().set_signer_available(!0);
+					}
+					nodes[0].signer_unblocked(None);
+					nodes[1].signer_unblocked(None);
+					nodes[2].signer_unblocked(None);
+				}
+
+				out.locked_write(b"Running event queues to quiescence...\n");
+
 				for i in 0..std::usize::MAX {
 					if i == 100 { panic!("It may take many iterations to settle the state, but it should not take forever"); }
+
 					// Then, make sure any current forwards make their way to their destination
 					if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; }
 					if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; }
@@ -1349,13 +1488,34 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 					break;
 				}

+				out.locked_write(b"All channels restored to normal operation.\n");
+
 				// Finally, make sure that at least one end of each channel can make a substantial payment
 				assert!(
 					send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id, &mut payment_idx) ||
 					send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx));
+				out.locked_write(b"Successfully sent a payment between node 0 and node 1.\n");
+
 				assert!(
 					send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx) ||
 					send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id, &mut payment_idx));
+				out.locked_write(b"Successfully sent a payment between node 1 and node 2.\n");
+
out.locked_write(b"Flushing pending messages.\n"); + for i in 0..std::usize::MAX { + if i == 100 { panic!("It may take may iterations to settle the state, but it should not take forever"); } + + // Then, make sure any current forwards make their way to their destination + if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; } + if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; } + if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; } + // ...making sure any pending PendingHTLCsForwardable events are handled and + // payments claimed. + if process_events!(0, false) { continue; } + if process_events!(1, false) { continue; } + if process_events!(2, false) { continue; } + break; + } last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index bcc324a5582..6fc5d5ca43c 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2944,9 +2944,7 @@ impl ChannelMonitorImpl { }, commitment_txid: htlc.commitment_txid, per_commitment_number: htlc.per_commitment_number, - per_commitment_point: self.onchain_tx_handler.signer.get_per_commitment_point( - htlc.per_commitment_number, &self.onchain_tx_handler.secp_ctx, - ), + per_commitment_point: htlc.per_commitment_point, feerate_per_kw: 0, htlc: htlc.htlc, preimage: htlc.preimage, diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 59c98f05ebc..2933b5f6e27 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -178,6 +178,7 @@ pub(crate) struct ExternalHTLCClaim { pub(crate) htlc: HTLCOutputInCommitment, pub(crate) preimage: Option, pub(crate) counterparty_sig: Signature, + pub(crate) per_commitment_point: bitcoin::secp256k1::PublicKey, } // Represents the different types of claims for which events are yielded externally to satisfy said @@ -1177,9 +1178,11 @@ impl OnchainTxHandler }) .map(|(htlc_idx, htlc)| { let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[htlc_idx]; + ExternalHTLCClaim { commitment_txid: trusted_tx.txid(), per_commitment_number: trusted_tx.commitment_number(), + per_commitment_point: trusted_tx.per_commitment_point(), htlc: htlc.clone(), preimage: *preimage, counterparty_sig: counterparty_htlc_sig, diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 0f51bea0d36..30d8f3c7a64 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -10,13 +10,47 @@ //! Tests for asynchronous signing. These tests verify that the channel state machine behaves //! properly with a signer implementation that asynchronously derives signatures. +use bitcoin::secp256k1::PublicKey; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::ChannelId; use crate::ln::functional_test_utils::*; +use crate::ln::msgs; use crate::ln::msgs::ChannelMessageHandler; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; +use crate::util::ser::Writeable; +use crate::util::test_channel_signer::ops; +use crate::util::test_utils; -#[test] -fn test_async_commitment_signature_for_funding_created() { +/// Helper to run operations with a simulated asynchronous signer. 
+///
+/// Disables the signer for the specified channel, runs `do_fn`, then re-enables the signer and
+/// calls `signer_unblocked`.
+#[cfg(test)]
+pub fn with_async_signer<'a, DoFn, T>(node: &Node<'_, '_, '_>, peer_id: &PublicKey, channel_id: &ChannelId, masks: &Vec<u32>, do_fn: &'a DoFn) -> T
+	where DoFn: Fn() -> T
+{
+	let mask = masks.iter().fold(0, |acc, m| (acc | m));
+	eprintln!("disabling {}", ops::string_from(mask));
+	node.set_channel_signer_ops_available(peer_id, channel_id, mask, false);
+	let res = do_fn();
+
+	// Recompute the channel ID just in case the original ID was temporary.
+	let new_channel_id = {
+		let channels = node.node.list_channels();
+		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
+		channels[0].channel_id
+	};
+
+	for mask in masks {
+		eprintln!("enabling {} and calling signer_unblocked", ops::string_from(*mask));
+		node.set_channel_signer_ops_available(peer_id, &new_channel_id, *mask, true);
+		node.node.signer_unblocked(Some((*peer_id, new_channel_id)));
+	}
+	res
+}
+
+#[cfg(test)]
+fn do_test_funding_created(masks: &Vec<u32>) {
 	// Simulate acquiring the signature for `funding_created` asynchronously.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -37,22 +71,11 @@
 	// But! Let's make node[0]'s signer be unavailable: we should *not* broadcast a funding_created
 	// message...
 	let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
-	nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, false);
-	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
-	check_added_monitors(&nodes[0], 0);
-
-	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-
-	// Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so
-	// we have to dig out the real channel ID.
-	let chan_id = {
-		let channels = nodes[0].node.list_channels();
-		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
-		channels[0].channel_id
-	};
-
-	nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
-	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
+	with_async_signer(&nodes[0], &nodes[1].node.get_our_node_id(), &temporary_channel_id, masks, &|| {
+		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+		check_added_monitors(&nodes[0], 0);
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	});

 	let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
@@ -67,7 +90,38 @@
 }

 #[test]
-fn test_async_commitment_signature_for_funding_signed() {
+fn test_funding_created_grs() {
+	do_test_funding_created(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_created_gsr() {
+	do_test_funding_created(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_funding_created_rsg() {
+	do_test_funding_created(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_created_rgs() {
+	do_test_funding_created(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_created_srg() {
+	do_test_funding_created(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_created_sgr() {
+	do_test_funding_created(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
+#[cfg(test)]
+fn do_test_funding_signed(masks: &Vec<u32>) {
 	// Simulate acquiring the signature for `funding_signed` asynchronously.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -92,21 +146,11 @@
 	// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
 	// *not* broadcast a `funding_signed`...
-	nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
-	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
-	check_added_monitors(&nodes[1], 1);
-
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
-	// Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so
-	// we have to dig out the real channel ID.
-	let chan_id = {
-		let channels = nodes[0].node.list_channels();
-		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
-		channels[0].channel_id
-	};
-	nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
-	nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
+	with_async_signer(&nodes[1], &nodes[0].node.get_our_node_id(), &temporary_channel_id, masks, &|| {
+		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+		check_added_monitors(&nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	});

 	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
@@ -118,7 +162,38 @@
 }

 #[test]
-fn test_async_commitment_signature_for_commitment_signed() {
+fn test_funding_signed_grs() {
+	do_test_funding_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_gsr() {
+	do_test_funding_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_funding_signed_rsg() {
+	do_test_funding_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_signed_rgs() {
+	do_test_funding_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_srg() {
+	do_test_funding_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_signed_sgr() {
+	do_test_funding_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
+#[cfg(test)]
+fn do_test_commitment_signed(masks: &Vec<u32>) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -144,29 +219,50 @@
 	dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]);

-	// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
-	// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
-	dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false);
-	dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
-	check_added_monitors(dst, 1);
+	// Mark dst's signer as unavailable and handle src's commitment_signed. If dst's signer is
+	// offline, it should not yet respond with any updates.
+	with_async_signer(dst, &src.node.get_our_node_id(), &chan_id, masks, &|| {
+		dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
+		check_added_monitors(dst, 1);
+		assert!(dst.node.get_and_clear_pending_msg_events().is_empty());
+	});
+
+	get_revoke_commit_msgs(&dst, &src.node.get_our_node_id());
+}
+
+#[test]
+fn test_commitment_signed_grs() {
+	do_test_commitment_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}

-	get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
+#[test]
+fn test_commitment_signed_gsr() {
+	do_test_commitment_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}

-	// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
-	dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
-	dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
+#[test]
+fn test_commitment_signed_rsg() {
+	do_test_commitment_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}

-	let events = dst.node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 1, "expected one message, got {}", events.len());
-	if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] {
-		assert_eq!(node_id, &src.node.get_our_node_id());
-	} else {
-		panic!("expected UpdateHTLCs message, not {:?}", events[0]);
-	};
+#[test]
+fn test_commitment_signed_rgs() {
+	do_test_commitment_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_commitment_signed_srg() {
+	do_test_commitment_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
 }

 #[test]
-fn test_async_commitment_signature_for_funding_signed_0conf() {
+fn test_commitment_signed_sgr() {
+	do_test_commitment_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
+#[cfg(test)]
+fn do_test_funding_signed_0conf(masks: &Vec<u32>) {
 	// Simulate acquiring the signature for `funding_signed` asynchronously for a zero-conf channel.
 	let mut manually_accept_config = test_default_channel_config();
 	manually_accept_config.manually_accept_inbound_channels = true;
@@ -184,7 +280,6 @@
 	{
 		let events = nodes[1].node.get_and_clear_pending_events();
-		assert_eq!(events.len(), 1, "Expected one event, got {}", events.len());
 		match &events[0] {
 			Event::OpenChannelRequest { temporary_channel_id, .. } => {
 				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(
@@ -193,6 +288,7 @@
 			},
 			ev => panic!("Expected OpenChannelRequest, not {:?}", ev)
 		}
+		assert_eq!(events.len(), 1, "Expected one event, got {}", events.len());
 	}

 	// nodes[0] <-- accept_channel --- nodes[1]
@@ -209,27 +305,15 @@
 	// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
 	// *not* broadcast a `funding_signed`...
-	nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
-	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
-	check_added_monitors(&nodes[1], 1);
-
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
-	// Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so
-	// we have to dig out the real channel ID.
-	let chan_id = {
-		let channels = nodes[0].node.list_channels();
-		assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
-		channels[0].channel_id
-	};
+	with_async_signer(&nodes[1], &nodes[0].node.get_our_node_id(), &temporary_channel_id, masks, &|| {
+		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+		check_added_monitors(&nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	});

 	// At this point, we basically expect the channel to open like a normal zero-conf channel.
-	nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
-	nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
-
 	let (funding_signed, channel_ready_1) = {
 		let events = nodes[1].node.get_and_clear_pending_msg_events();
-		assert_eq!(events.len(), 2);
 		let funding_signed = match &events[0] {
 			MessageSendEvent::SendFundingSigned { msg, .. } => msg.clone(),
 			ev => panic!("Expected SendFundingSigned, not {:?}", ev)
@@ -265,7 +349,229 @@
 }

 #[test]
-fn test_async_commitment_signature_for_peer_disconnect() {
+fn test_funding_signed_0conf_grs() {
+	do_test_funding_signed_0conf(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_0conf_gsr() {
+	do_test_funding_signed_0conf(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_funding_signed_0conf_rsg() {
+	do_test_funding_signed_0conf(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_signed_0conf_rgs() {
+	do_test_funding_signed_0conf(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_funding_signed_0conf_srg() {
+	do_test_funding_signed_0conf(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_funding_signed_0conf_sgr() {
+	do_test_funding_signed_0conf(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+
+#[cfg(test)]
+fn do_test_payment(masks: &Vec<u32>) {
+	// This runs through a one-hop payment from start to finish, simulating an asynchronous signer at
+	// each step.
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_up1, _up2, channel_id, _tx) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let alice = &nodes[0]; + let bob = &nodes[1]; + + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000); + + with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| { + alice.node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors!(alice, 1); + let events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0, "expected 0 events, got {}", events.len()); + }); + + let payment_event = { + let mut events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + assert_eq!(payment_event.node_id, bob.node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // alice --[update_add_htlc]--> bob + // alice --[commitment_signed]--> bob + with_async_signer(&bob, &alice.node.get_our_node_id(), &channel_id, masks, &|| { + bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]); + bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(bob, 1); + }); + + // alice <--[revoke_and_ack]-- bob + // alice <--[commitment_signed]-- bob + { + let (raa, cu) = { + let events = bob.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2, "expected 2 messages, got {}", events.len()); + match (&events[0], &events[1]) { + (MessageSendEvent::SendRevokeAndACK { msg: raa, .. }, MessageSendEvent::UpdateHTLCs { updates: cu, .. }) => { + assert_eq!(cu.update_add_htlcs.len(), 0, "expected 0 update_add_htlcs, got {}", cu.update_add_htlcs.len()); + (raa.clone(), cu.clone()) + } + (a, b) => panic!("expected SendRevokeAndAck and UpdateHTLCs, not {:?} and {:?}", a, b) + } + }; + + // TODO: run this with_async_signer once validate_counterparty_revocation supports it. + alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa); + check_added_monitors(alice, 1); + + with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| { + alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed); + check_added_monitors(alice, 1); + }); + } + + // alice --[revoke_and_ack]--> bob + // TODO: run this with_async_signer once validate_counterparty_revocation supports it. + let raa = get_event_msg!(alice, MessageSendEvent::SendRevokeAndACK, bob.node.get_our_node_id()); + bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa); + check_added_monitors(bob, 1); + + expect_pending_htlcs_forwardable!(bob); + + // Bob generates a PaymentClaimable to user code. + { + let events = bob.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len()); + match &events[0] { + Event::PaymentClaimable { .. } => { + bob.node.claim_funds(payment_preimage); + } + ev => panic!("Expected PaymentClaimable, got {:?}", ev) + } + check_added_monitors(bob, 1); + } + + // Bob generates a PaymentClaimed event to user code. 
+	{
+		let events = bob.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len());
+		match &events[0] {
+			Event::PaymentClaimed { .. } => (),
+			ev => panic!("Expected PaymentClaimed, got {:?}", ev),
+		}
+	}
+
+	// alice <--[update_fulfill_htlcs]-- bob
+	// alice <--[commitment_signed]-- bob
+	{
+		let cu = {
+			let events = bob.node.get_and_clear_pending_msg_events();
+			assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len());
+			match &events[0] {
+				MessageSendEvent::UpdateHTLCs { updates, .. } => {
+					assert_eq!(updates.update_fulfill_htlcs.len(), 1, "expected 1 update_fulfill_htlcs, got {}", updates.update_fulfill_htlcs.len());
+					updates.clone()
+				}
+				ev => panic!("Expected UpdateHTLCs, got {:?}", ev)
+			}
+		};
+
+		with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| {
+			alice.node.handle_update_fulfill_htlc(&bob.node.get_our_node_id(), &cu.update_fulfill_htlcs[0]);
+			alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed);
+			check_added_monitors(alice, 1);
+		});
+	}
+
+	// alice --[revoke_and_ack]--> bob
+	// alice --[commitment_signed]--> bob
+	{
+		let (raa, cu) = {
+			let events = alice.node.get_and_clear_pending_msg_events();
+			assert_eq!(events.len(), 2, "expected 2 messages, got {}", events.len());
+			match (&events[0], &events[1]) {
+				(MessageSendEvent::SendRevokeAndACK { msg: raa, .. }, MessageSendEvent::UpdateHTLCs { updates: cu, .. }) => {
+					assert_eq!(cu.update_fulfill_htlcs.len(), 0, "expected 0 update_fulfill_htlcs, got {}", cu.update_fulfill_htlcs.len());
+					(raa.clone(), cu.clone())
+				}
+				(a, b) => panic!("expected SendRevokeAndAck and UpdateHTLCs, not {:?} and {:?}", a, b)
+			}
+		};
+
+		// TODO: run with async once validate_counterparty_revocation supports it.
+		bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa);
+		check_added_monitors(bob, 1);
+
+		with_async_signer(&bob, &alice.node.get_our_node_id(), &channel_id, masks, &|| {
+			bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &cu.commitment_signed);
+			check_added_monitors(bob, 1);
+		});
+	}
+
+	// alice <--[revoke_and_ack]-- bob
+	// TODO: run with async once validate_counterparty_revocation supports it.
+	let raa = get_event_msg!(bob, MessageSendEvent::SendRevokeAndACK, alice.node.get_our_node_id());
+	alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa);
+	check_added_monitors(alice, 0);
+
+	// Alice generates PaymentSent and PaymentPathSuccessful events to user code.
+	{
+		let events = alice.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 2, "expected 2 events, got {}", events.len());
+		match (&events[0], &events[1]) {
+			(Event::PaymentSent { .. }, Event::PaymentPathSuccessful { .. }) => (),
+			(a, b) => panic!("Expected PaymentSent and PaymentPathSuccessful, got {:?} and {:?}", a, b)
+		}

+		check_added_monitors(alice, 1); // why? would have expected this after handling RAA...
+	}
+}
+
+#[test]
+fn test_payment_grs() {
+	do_test_payment(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_payment_gsr() {
+	do_test_payment(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_payment_rsg() {
+	do_test_payment(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_payment_rgs() {
+	do_test_payment(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_payment_srg() {
+	do_test_payment(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_payment_sgr() {
+	do_test_payment(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[cfg(test)]
+fn do_test_peer_reconnect(masks: &Vec<u32>) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -273,51 +579,1027 @@
 	let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);

 	// Send a payment.
-	let src = &nodes[0];
-	let dst = &nodes[1];
-	let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(src, dst, 8000000);
-	src.node.send_payment_with_route(&route, our_payment_hash,
-		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
-	check_added_monitors!(src, 1);
+	let alice = &nodes[0];
+	let bob = &nodes[1];
+	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000);
+
+	with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| {
+		alice.node.send_payment_with_route(&route, payment_hash,
+			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+		check_added_monitors!(alice, 1);
+		let events = alice.node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 0, "expected 0 events, got {}", events.len());
+
+		alice.node.peer_disconnected(&bob.node.get_our_node_id());
+		bob.node.peer_disconnected(&alice.node.get_our_node_id());
+	});
+
+	with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| {
+		let mut reconnect_args = ReconnectArgs::new(alice, bob);
+		reconnect_args.send_channel_ready = (true, true); // ...since this will be state 1.
+		reconnect_nodes(reconnect_args);
+	});

-	// Pass the payment along the route.
let payment_event = { - let mut events = src.node.get_and_clear_pending_msg_events(); + let mut events = alice.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(payment_event.node_id, dst.node.get_our_node_id()); + assert_eq!(payment_event.node_id, bob.node.get_our_node_id()); assert_eq!(payment_event.msgs.len(), 1); - dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]); + // alice --[update_add_htlc]--> bob + // alice --[commitment_signed]--> bob + with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| { + bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]); + bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(bob, 1); + + alice.node.peer_disconnected(&bob.node.get_our_node_id()); + bob.node.peer_disconnected(&alice.node.get_our_node_id()); + }); + + let (alice_reestablish, bob_reestablish) = with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.peer_connected(&bob.node.get_our_node_id(), &msgs::Init { + features: bob.node.init_features(), networks: None, remote_network_address: None + }, true).expect("peer_connected failed for alice"); + let alice_msgs = get_chan_reestablish_msgs!(alice, bob); + assert_eq!(alice_msgs.len(), 1, "expected 1 message, got {}", alice_msgs.len()); + bob.node.peer_connected(&alice.node.get_our_node_id(), &msgs::Init { + features: alice.node.init_features(), networks: None, remote_network_address: None + }, false).expect("peer_connected failed for bob"); + let bob_msgs = get_chan_reestablish_msgs!(bob, alice); + assert_eq!(bob_msgs.len(), 1, "expected 1 message, got {}", bob_msgs.len()); + (alice_msgs[0].clone(), bob_msgs[0].clone()) + }); - // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a - // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. - dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false); - dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); - check_added_monitors(dst, 1); + with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| { + bob.node.handle_channel_reestablish(&alice.node.get_our_node_id(), &alice_reestablish); + }); - get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); + let (raa, cu) = match handle_chan_reestablish_msgs!(bob, alice) { + (None, Some(raa), Some(cu), RAACommitmentOrder::RevokeAndACKFirst) => (raa, cu), + (channel_ready, raa, cu, order) => { + panic!("bob: channel_ready={:?} raa={:?} cu={:?} order={:?}", channel_ready, raa, cu, order); + } + }; + + with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.handle_channel_reestablish(&bob.node.get_our_node_id(), &bob_reestablish); + }); + + match handle_chan_reestablish_msgs!(alice, bob) { + (None, None, None, _) => (), + (channel_ready, raa, cu, order) => { + panic!("alice: channel_ready={:?} raa={:?} cu={:?} order={:?}", channel_ready, raa, cu, order); + } + }; + + with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa); + check_added_monitors(alice, 1); + }); + + // Disconnect? - // Now disconnect and reconnect the peers. 
-	src.node.peer_disconnected(&dst.node.get_our_node_id());
-	dst.node.peer_disconnected(&src.node.get_our_node_id());
-	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-	reconnect_args.send_channel_ready = (false, false);
-	reconnect_args.pending_raa = (true, false);
-	reconnect_nodes(reconnect_args);
+	with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| {
+		alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed);
+		check_added_monitors(alice, 1);
+	});

-	// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
-	dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
-	dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
+	// Disconnect?
+
+	let raa = get_event_msg!(alice, MessageSendEvent::SendRevokeAndACK, bob.node.get_our_node_id());
+	with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| {
+		bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa);
+		check_added_monitors(bob, 1);
+	});
+
+	expect_pending_htlcs_forwardable!(bob);

 	{
-		let events = dst.node.get_and_clear_pending_msg_events();
-		assert_eq!(events.len(), 1, "expected one message, got {}", events.len());
-		if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] {
-			assert_eq!(node_id, &src.node.get_our_node_id());
-		} else {
-			panic!("expected UpdateHTLCs message, not {:?}", events[0]);
-		};
+		let events = bob.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len());
+		match &events[0] {
+			Event::PaymentClaimable { .. } => (),
+			ev => panic!("Expected PaymentClaimable, got {:?}", ev),
+		}
+	}
+
+	with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| {
+		bob.node.claim_funds(payment_preimage);
+		check_added_monitors(bob, 1);
+	});
+
+	let _cu = {
+		let events = bob.node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1, "expected 1 message, got {}", events.len());
+		match &events[0] {
+			MessageSendEvent::UpdateHTLCs { ref updates, .. } => updates.clone(),
+			ev => panic!("expected UpdateHTLCs, got {:?}", ev),
+		}
+	};
+
+	{
+		let events = bob.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len());
+		match &events[0] {
+			Event::PaymentClaimed { .. } => (),
+			ev => panic!("Expected PaymentClaimed, got {:?}", ev),
+		}
+	}
+
+	// TODO: deliver `_cu` to alice to finish the flow, and probably sprinkle some reconnects above.
+}
+
+#[test]
+fn test_peer_reconnect_grs() {
+	do_test_peer_reconnect(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_peer_reconnect_gsr() {
+	do_test_peer_reconnect(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_peer_reconnect_rsg() {
+	do_test_peer_reconnect(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_peer_reconnect_rgs() {
+	do_test_peer_reconnect(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_peer_reconnect_srg() {
+	do_test_peer_reconnect(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_peer_reconnect_sgr() {
+	do_test_peer_reconnect(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn channel_update_fee_test() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let alice = &nodes[0];
+	let bob = &nodes[1];
+
+	// Balance the channel.
+	send_payment(alice, &vec!(bob)[..], 8_000_000);
+
+	// Send a payment from Bob to Alice: this requires Alice to acquire a new commitment point from
+	// the signer. Make the signer unavailable, then trigger a situation that requires Alice to
+	// generate a fee update. Alice should be able to produce the update_fee without crashing: she
+	// needs the statistics about the new transaction, but doesn't actually need the signed
+	// transaction itself.
+	let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(bob, alice, 2_000_000);
+	bob.node.send_payment_with_route(
+		&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	check_added_monitors!(bob, 1);
+
+	let payment_event = {
+		let mut events = bob.node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+
+	alice.set_channel_signer_ops_available(
+		&bob.node.get_our_node_id(), &chan_id, ops::GET_PER_COMMITMENT_POINT, false);
+
+	alice.node.handle_update_add_htlc(&bob.node.get_our_node_id(), &payment_event.msgs[0]);
+	alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &payment_event.commitment_msg);
+	check_added_monitors!(alice, 1);
+
+	// Force alice to generate an update_fee
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock += 20;
+	}
+
+	alice.node.timer_tick_occurred();
+
+	assert!(alice.node.get_and_clear_pending_msg_events().is_empty());
+
+	alice.set_channel_signer_ops_available(
+		&bob.node.get_our_node_id(), &chan_id, ops::GET_PER_COMMITMENT_POINT, true);
+	alice.node.signer_unblocked(None);
+
+	let events = alice.node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 2);
+	match (&events[0], &events[1]) {
+		(MessageSendEvent::SendRevokeAndACK { .. }, MessageSendEvent::UpdateHTLCs { ..
}) => {
+			// TODO(waterson) we'd kind of expect to see an update_fee here, but we actually don't because
+			// signer_maybe_unblocked doesn't create that. It probably should.
+		}
+		(a, b) => {
+			panic!("Expected SendRevokeAndACK and UpdateHTLCs, got {:?} and {:?}", a, b);
+		}
+	}
+}
+
+#[test]
+fn monitor_honors_commitment_raa_order() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let alice = &nodes[0];
+	let bob = &nodes[1];
+
+	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000);
+
+	alice.node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	check_added_monitors!(alice, 1);
+
+	let payment_event = {
+		let mut events = alice.node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+
+	// Make the commitment secret unavailable. The expectation here is that Bob should send the
+	// revoke-and-ack first. So even though he can generate a commitment update, he should hold onto
+	// that until he's ready to revoke.
+	bob.set_channel_signer_ops_available(&alice.node.get_our_node_id(), &chan_id, ops::RELEASE_COMMITMENT_SECRET, false);
+
+	bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]);
+	bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg);
+	check_added_monitors(bob, 1);
+
+	assert!(bob.node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now make the commitment secret available again and unblock the signer.
+	bob.set_channel_signer_ops_available(&alice.node.get_our_node_id(), &chan_id, ops::RELEASE_COMMITMENT_SECRET, true);
+	bob.node.signer_unblocked(None);
+
+	get_revoke_commit_msgs(bob, &alice.node.get_our_node_id());
+}
+
+#[test]
+fn reconnect_while_awaiting_commitment_point() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let alice = &nodes[0];
+	let bob = &nodes[1];
+
+	let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000);
+	alice.node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	check_added_monitors!(alice, 1);
+	let payment_event = {
+		let mut events = alice.node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		SendEvent::from_event(events.remove(0))
+	};
+	assert_eq!(payment_event.node_id, bob.node.get_our_node_id());
+	assert_eq!(payment_event.msgs.len(), 1);
+
+	// Don't let Bob fetch the commitment point right now.
+ bob.set_channel_signer_ops_available( + &alice.node.get_our_node_id(), &channel_id, ops::GET_PER_COMMITMENT_POINT, false); + + // alice --[update_add_htlc]--> bob + // alice --[commitment_signed]--> bob + bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]); + bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(bob, 1); + + // Bob should not have responded with any messages since he's blocked on the signer yielding the + // commitment point. + assert!(bob.node.get_and_clear_pending_msg_events().is_empty()); + + // Disconnect Alice and Bob... + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + // ...and now reconnect them. Bob still should not have the commitment point, and so we'll unpack + // the sequence by hand here. + alice.node.peer_connected(&bob.node.get_our_node_id(), &msgs::Init { + features: bob.node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(alice, bob); + + // Alice should have sent Bob a channel_reestablish. + assert_eq!(reestablish_1.len(), 1); + + bob.node.peer_connected(&alice.node.get_our_node_id(), &msgs::Init { + features: alice.node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // But Bob should _not_ have sent Alice one, since he's waiting on the signer to provide the + // commitment point. + assert!(get_chan_reestablish_msgs!(bob, alice).is_empty()); + + // Make Bob handle Alice's channel_reestablish. This should cause Bob to generate a + // channel_announcement and a channel_update, but nothing more. + { + bob.node.handle_channel_reestablish(&alice.node.get_our_node_id(), &reestablish_1[0]); + match handle_chan_reestablish_msgs!(bob, alice) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + } + + // Provide the commitment points and unblock Bob's signer. This should result in Bob sending a + // channel_reestablish, an RAA and a new commitment_update. + bob.set_channel_signer_ops_available( + &alice.node.get_our_node_id(), &channel_id, ops::GET_PER_COMMITMENT_POINT, true); + bob.node.signer_unblocked(None); + + let events = bob.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 3, "Expected 3 events, got {:?}", events); + let reestablish_2 = match (&events[0], &events[1], &events[2]) { + (MessageSendEvent::SendChannelReestablish { msg: channel_reestablish, .. }, + MessageSendEvent::SendRevokeAndACK { msg: revoke_and_ack, .. }, + MessageSendEvent::UpdateHTLCs { updates, .. 
}) => { + (channel_reestablish, revoke_and_ack, updates) + } + (a, b, c) => panic!("Expected SendChannelReestablish, SendRevokeAndACK, UpdateHTLCs; got {:?} {:?} {:?}", a, b, c) + }; + + { + alice.node.handle_channel_reestablish(&bob.node.get_our_node_id(), &reestablish_2.0); + match handle_chan_reestablish_msgs!(alice, bob) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + } +} + +#[test] +fn peer_restart_with_blocked_signer() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let alice_deserialized; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + // Disconnect Bob and restart Alice + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + { + let alice_serialized = nodes[0].node.encode(); + let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode(); + reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized); + } + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Turn off Alice's signer. + nodes[0].set_channel_signer_ops_available( + &nodes[1].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + false); + + let node1_id = nodes[1].node.get_our_node_id(); + nodes[0].forget_signer_material(&node1_id, &channel_id); + + // Reconnect Alice and Bob. + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Bob should have sent a channel_reestablish, but Alice should not have since her signer is + // offline. + assert!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).is_empty()); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); + match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + }; + + // Re-enable and unblock Alice's signer. + nodes[0].set_channel_signer_ops_available( + &nodes[1].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + true); + + nodes[0].node.signer_unblocked(None); + + // N.B. 
that we can't just use `get_chan_reestablish_msgs` here, because we'll be expecting _both_ + // the channel_reestablish and the channel_ready that will have been generated from being sent the + // channel_reestablish from our counterparty. + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2, "Expected two events, got {:?}", events); + match (&events[0], &events[1]) { + (MessageSendEvent::SendChannelReestablish { .. }, MessageSendEvent::SendChannelReady { .. }) => (), + (a, b) => panic!("Expected channel_reestablish and channel_ready, got {:?} and {:?}", a, b), + }; +} + +#[test] +fn peer_restart_with_blocked_signer_and_pending_payment() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let alice_deserialized; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + // Deliver the update_add_htlc and commitment_signed to Bob. + { + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.remove(0)); + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + } + + // Disconnect Bob and restart Alice + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + { + let alice_serialized = nodes[0].node.encode(); + let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode(); + reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized); + } + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Turn off Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + false); + + // Reconnect Alice and Bob. + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Alice should have sent Bob a channel_reestablish and vice versa. 
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + { + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); + match handle_chan_reestablish_msgs!(nodes[1], nodes[0]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + } + + { + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); + match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + }; + + // Re-enable and unblock Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + true); + + nodes[1].node.signer_unblocked(None); + + // At this point we should provide Alice with the revoke_and_ack and commitment_signed. + get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + check_added_monitors!(nodes[1], 1); +} + +#[test] +fn peer_restart_with_blocked_signer_before_pending_payment() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let alice_deserialized; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + // Turn off Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + false); + + // Deliver the update_add_htlc and commitment_signed to Bob. 
+ { + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.remove(0)); + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + } + + // Disconnect Bob and restart Alice + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + { + let alice_serialized = nodes[0].node.encode(); + let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode(); + reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized); + } + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Reconnect Alice and Bob. + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Re-enable and unblock Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + true); + + nodes[1].node.signer_unblocked(None); + + // Alice should have sent Bob a channel_reestablish and vice versa. We explicitly do _not_ expect + // to see a RevokeAndACK and CommitmentUpdate yet! + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + let (raa, cu) = { + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); + match handle_chan_reestablish_msgs!(nodes[1], nodes[0]) { + (None, Some(raa), Some(cu), RAACommitmentOrder::RevokeAndACKFirst) => (raa, cu), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + }; + + { + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); + match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + }; + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cu.commitment_signed); + check_added_monitors!(nodes[0], 2); + + // At this point Alice should provide Bob with the revoke_and_ack. 
+ let raa = {
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1, "Expected 1 event, got {}: {:?}", events.len(), events);
+ match &events[0] {
+ MessageSendEvent::SendRevokeAndACK { msg: raa, .. } => raa.clone(),
+ ev => panic!("Expected SendRevokeAndACK, got {:?}", ev)
+ }
+ };
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+ check_added_monitors!(nodes[1], 2);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 1_000_000);
+}
+
+#[test]
+fn no_stray_channel_reestablish() {
+ // Original fuzz trace.
+ // a0 Disable A's signer.
+ // 2c Disconnect A and B, then restart A.
+ // 0e Reconnect A and B.
+ // 2d Disconnect A and B (and C), then restart B.
+ // a1 Unblock A's signer get_per_commitment_point.
+ // ff Reset.
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let alice_persister;
+ let bob_persister;
+ let alice_new_chain_monitor;
+ let bob_new_chain_monitor;
+
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let alice_deserialized;
+ let bob_deserialized;
+
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Turn off Alice's signer.
+ eprintln!("disabling alice's signer");
+ nodes[0].set_channel_signer_ops_available(
+ &nodes[1].node.get_our_node_id(), &channel_id,
+ ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT,
+ false);
+
+ // Disconnect Bob and restart Alice
+ eprintln!("disconnecting bob");
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ eprintln!("restarting alice");
+ {
+ let alice_serialized = nodes[0].node.encode();
+ let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode();
+ reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], alice_persister, alice_new_chain_monitor, alice_deserialized);
+ }
+
+ // Reconnect Alice and Bob.
+ eprintln!("reconnecting alice and bob");
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+
+ // Disconnect Alice and restart Bob
+ eprintln!("disconnecting alice");
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+ eprintln!("restarting bob");
+ {
+ let bob_serialized = nodes[1].node.encode();
+ let bob_monitor_serialized = get_monitor!(nodes[1], channel_id).encode();
+ reload_node!(nodes[1], *nodes[1].node.get_current_default_configuration(), &bob_serialized, &[&bob_monitor_serialized], bob_persister, bob_new_chain_monitor, bob_deserialized);
+ }
+
+ eprintln!("unblocking alice's signer for get_per_commitment_point");
+ nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &channel_id, ops::GET_PER_COMMITMENT_POINT, true);
+ nodes[0].node.signer_unblocked(None);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert!(events.is_empty(), "Expected no events from Alice, got {:?}", events);
+}
+
+#[test]
+fn dont_elide_channel_ready_from_state_1() {
+ // 1. Disable Alice's signer.
+ // 2. Send a payment from Alice to Bob.
+ // 3. Disconnect Alice and Bob. Reload Alice.
+ // 4. Reconnect Alice and Bob.
+ // 5. Process messages on Bob, which should generate a `channel_reestablish`.
+ // 6. Process messages on Alice, which should *not* send anything; in particular she must not
+ // send an `update_add_htlc`, because she first owes Bob a `channel_ready` (which she can't
+ // create without her signer).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let persister;
+ let new_chain_monitor;
+
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let alice_deserialized;
+
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Turn off Alice's signer.
+ eprintln!("disabling alice's signer");
+ nodes[0].set_channel_signer_ops_available(
+ &nodes[1].node.get_our_node_id(), &channel_id,
+ ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT,
+ false);
+
+ eprintln!("sending payment from alice to bob");
+ let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Disconnect Bob and restart Alice
+ eprintln!("disconnecting bob");
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ eprintln!("restarting alice");
+ {
+ let alice_serialized = nodes[0].node.encode();
+ let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode();
+ reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized);
+ }
+
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ eprintln!("unblocking alice's signer for sign_counterparty_commitment");
+ nodes[0].set_channel_signer_ops_available(
+ &nodes[1].node.get_our_node_id(), &channel_id,
+ ops::SIGN_COUNTERPARTY_COMMITMENT,
+ true);
+ nodes[0].node.signer_unblocked(None);
+
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Reconnect Alice and Bob.
+ eprintln!("reconnecting alice and bob");
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+
+ // Bob should have sent Alice a channel_reestablish. Alice should not have done anything.
+ assert!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).is_empty());
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ assert_eq!(reestablish_2.len(), 1);
+
+ // When Alice handles the channel_reestablish, she should _still_ do nothing, because she
+ // doesn't have the channel_ready available.
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+ match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) {
+ (None, None, None, _) => (),
+ (channel_ready, revoke_and_ack, commitment_update, order) => {
+ panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}",
+ channel_ready, revoke_and_ack, commitment_update, order);
+ }
+ };
+
+ // Now provide the commitment point and Alice should send her channel_reestablish.
+ nodes[0].set_channel_signer_ops_available(
+ &nodes[1].node.get_our_node_id(), &channel_id,
+ ops::GET_PER_COMMITMENT_POINT,
+ true);
+ nodes[0].node.signer_unblocked(None);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 3, "Expected 3 events, got {}: {:?}", events.len(), events);
+ match (&events[0], &events[1], &events[2]) {
+ (MessageSendEvent::SendChannelReestablish { .. },
+ MessageSendEvent::SendChannelReady { .. },
+ MessageSendEvent::UpdateHTLCs { .. }) => (),
+ (a, b, c) => panic!("Expected SendChannelReestablish, SendChannelReady, UpdateHTLCs; got {:?} {:?} {:?}", a, b, c)
+ };
+}
+
+#[test]
+fn dont_lose_commitment_update() {
+ // 1. Send a payment from Alice to Bob.
+ // 2. Disable signing on Alice.
+ // 3. Disconnect Alice and Bob. Reload Alice.
+ // 4. Reconnect Alice and Bob.
+ // 5. Process messages on Bob, which should generate a `channel_reestablish`.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let persister;
+ let new_chain_monitor;
+
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let alice_deserialized;
+
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ eprintln!("sending payment from alice to bob");
+ let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+
+ // Turn off Alice's signer.
+ eprintln!("disabling alice's signer");
+ nodes[0].set_channel_signer_ops_available(
+ &nodes[1].node.get_our_node_id(), &channel_id,
+ ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT,
+ false);
+
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Disconnect Bob and restart Alice
+ eprintln!("disconnecting bob");
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ eprintln!("restarting alice");
+ {
+ let alice_serialized = nodes[0].node.encode();
+ let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode();
+ reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized);
+ }
+
+ // Reconnect Alice and Bob.
+ eprintln!("reconnecting alice and bob"); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Bob should have sent Alice a channel_reestablish. Alice should not have done anything. + assert!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).is_empty()); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + // When Alice handle's the channel_reestablish, she should _still_ do nothing, in particular, + // because she doesn't have the channel ready available. + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); + match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + }; + + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert!(events.is_empty(), "expected no events, got {:?}", events); + } + + { + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert!(events.is_empty(), "expected no events, got {:?}", events); + } + + eprintln!("unblocking alice's signer"); + nodes[0].set_channel_signer_ops_available( + &nodes[1].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + true); + nodes[0].node.signer_unblocked(None); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 3, "Expected 3 events, got {}: {:?}", events.len(), events); + match (&events[0], &events[1], &events[2]) { + (MessageSendEvent::SendChannelReestablish { .. }, + MessageSendEvent::SendChannelReady { .. }, + MessageSendEvent::UpdateHTLCs { .. }) => (), + (a, b, c) => panic!("Expected SendChannelReestablish SendChannelReady UpdateHTLCs; not {:?} {:?} {:?}", a, b, c) + } +} + +#[test] +fn dont_lose_commitment_update_redux() { + // - ~a0~ Disable A's signer. + // - ~60~ Send a payment from A to B for 1,000 msats. + // - ~2c~ Disconnect A and B, then restart A. + // - ~0e~ Reconnect A and B. + // - ~a3~ Unblock A's signer ~sign_counterparty_commitment~. + // - ~19~ Process all messages on B. + // - ~ff~ Reset. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let alice_deserialized; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + // Turn off Alice's signer. 
+ eprintln!("disabling alice's signer"); + nodes[0].set_channel_signer_ops_available( + &nodes[1].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + false); + + eprintln!("sending payment from alice to bob"); + let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Disconnect Bob and restart Alice + eprintln!("disconnecting bob"); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + eprintln!("restarting alice"); + { + let alice_serialized = nodes[0].node.encode(); + let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode(); + reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized); + } + + // Reconnect Alice and Bob. + eprintln!("reconnecting alice and bob"); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Bob should have sent Alice a channel_reestablish. Alice should not have done anything. + assert!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).is_empty()); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + // Unblock alice for sign_counterparty_commitment. + eprintln!("unblocking alice's signer for sign_counterparty_commitment"); + nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &channel_id, ops::SIGN_COUNTERPARTY_COMMITMENT, true); + nodes[0].node.signer_unblocked(None); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); + match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + }; + + // Unblock alice for get_per_commitment_point and release_commitment_secret + eprintln!("unblocking alice's signer for get_per_commitment_point and release_commitment_secret"); + nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &channel_id, ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET, true); + nodes[0].node.signer_unblocked(None); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 3, "Expected 3 events, got {}: {:?}", events.len(), events); + match (&events[0], &events[1], &events[2]) { + (MessageSendEvent::SendChannelReestablish { .. }, + MessageSendEvent::SendChannelReady { .. }, + MessageSendEvent::UpdateHTLCs { .. 
}) => (),
+ (a, b, c) => panic!("Expected SendChannelReestablish, SendChannelReady, UpdateHTLCs; got {:?} {:?} {:?}", a, b, c)
+ }
+}
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 588995656ec..e4b2bec08b4 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -727,7 +727,7 @@ struct HTLCStats {
 /// An enum gathering stats on commitment transaction, either local or remote.
 struct CommitmentStats<'a> {
- tx: CommitmentTransaction, // the transaction info
+ tx: Option<CommitmentTransaction>, // the transaction info
 feerate_per_kw: u32, // the feerate included to build the transaction
 total_fee_sat: u64, // the total fee included in the transaction
 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
@@ -793,12 +793,57 @@ pub(super) struct MonitorRestoreUpdates {
 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 }
-/// The return value of `signer_maybe_unblocked`
+/// The return value of `signer_maybe_unblocked`.
+///
+/// When the signer becomes unblocked, any non-`None` message accumulated here should be sent to
+/// the peer by the caller.
 #[allow(unused)]
 pub(super) struct SignerResumeUpdates {
+ /// A `commitment_signed` message, possibly with additional HTLC-related messages (e.g.,
+ /// `update_add_htlc`) that should be placed in the commitment.
+ ///
+ /// When both this and `raa` contain values, they should be sent to the peer using an ordering
+ /// consistent with `order`.
 pub commitment_update: Option<msgs::CommitmentUpdate>,
+ /// A `revoke_and_ack` message that should be sent to the peer.
+ ///
+ /// When both this and `commitment_update` contain values, they should be sent to the peer using
+ /// an ordering consistent with `order`.
+ pub raa: Option<msgs::RevokeAndACK>,
+ /// The order in which the `commitment_signed` and `revoke_and_ack` messages should be provided to
+ /// the peer. Only meaningful if both of these messages are present.
+ pub order: RAACommitmentOrder,
+ /// A `funding_signed` message that should be sent to the peer.
 pub funding_signed: Option<msgs::FundingSigned>,
+ /// A `channel_ready` message that should be sent to the peer. If present, it should be sent last.
 pub channel_ready: Option<msgs::ChannelReady>,
+ /// A `channel_reestablish` message that should be sent to the peer. If present, it should be sent
+ /// first.
+ pub channel_reestablish: Option<msgs::ChannelReestablish>,
+}
+
+impl Default for SignerResumeUpdates {
+ fn default() -> Self {
+ Self {
+ commitment_update: None,
+ raa: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ funding_signed: None,
+ channel_ready: None,
+ channel_reestablish: None,
+ }
+ }
+}
+
+#[allow(unused)]
+pub(super) struct UnfundedInboundV1SignerResumeUpdates {
+ pub accept_channel: Option<msgs::AcceptChannel>,
+}
+
+#[allow(unused)]
+pub(super) struct UnfundedOutboundV1SignerResumeUpdates {
+ pub open_channel: Option<msgs::OpenChannel>,
+ pub funding_created: Option<msgs::FundingCreated>,
 }
 /// The return value of `channel_reestablish`
@@ -992,6 +1037,14 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 // cost of others, but should really just be changed.
 cur_holder_commitment_transaction_number: u64,
+
+ // The commitment point corresponding to `cur_holder_commitment_transaction_number`, which is the
+ // *next* state. On initial channel construction this may be None, meaning the first commitment
+ // point wasn't ready at the time the channel needed to be created.
+ cur_holder_commitment_point: Option<PublicKey>,
+ // The commitment secret corresponding to `cur_holder_commitment_transaction_number + 2`, which is
+ // the *previous* state.
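+ // Like `cur_holder_commitment_point`, this may be `None` while we are waiting on the signer;
+ // it is needed to fill in the `per_commitment_secret` of an outbound `revoke_and_ack`.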
+ prev_holder_commitment_secret: Option<[u8; 32]>,
 cur_counterparty_commitment_transaction_number: u64,
 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
@@ -1026,10 +1079,24 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
 signer_pending_commitment_update: bool,
+ /// Similar to [`Self::signer_pending_commitment_update`]: indicates that we've deferred sending a
+ /// `revoke_and_ack`, and should do so once the signer has become unblocked.
+ signer_pending_revoke_and_ack: bool,
 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
 /// outbound or inbound.
 signer_pending_funding: bool,
+ /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send a
+ /// [`msgs::ChannelReady`].
+ signer_pending_channel_ready: bool,
+ /// If we attempted to retrieve the per-commitment point for the next transaction but the signer
+ /// wasn't ready, then this will be set to `true`.
+ signer_pending_commitment_point: bool,
+ /// If we attempted to release the per-commitment secret for the previous transaction but the
+ /// signer wasn't ready, then this will be set to `true`.
+ signer_pending_released_secret: bool,
+ /// If we need to send a `channel_reestablish` then this will be set to `true`.
+ signer_pending_channel_reestablish: bool,
 // pending_update_fee is filled when sending and receiving update_fee.
 //
@@ -1302,14 +1369,19 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() &&
 self.pending_update_fee.is_none() &&
- is_ready_to_close
+ is_ready_to_close &&
+ !self.signer_pending_channel_reestablish
 }
 /// Returns true if this channel is currently available for use. This is a superset of
 /// is_usable() and considers things like the channel being temporarily disabled.
 /// Allowed in any state (including after shutdown)
 pub fn is_live(&self) -> bool {
- self.is_usable() && !self.channel_state.is_peer_disconnected()
+ self.is_usable() && self.is_peer_connected()
+ }
+
+ pub fn is_peer_connected(&self) -> bool {
+ !self.channel_state.is_peer_disconnected() && !self.signer_pending_channel_reestablish
 }
 // Public utilities:
@@ -1522,6 +1594,70 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 self.channel_ready_event_emitted = true;
 }
+ /// Retrieves the per-commitment point for the next holder commitment from the signer.
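+ ///
+ /// If the signer is not immediately ready, this leaves the cached point as `None` and records
+ /// the outstanding request in `signer_pending_commitment_point` so that the retrieval can be
+ /// retried once the signer is unblocked.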
+ pub fn update_holder_per_commitment_point<L: Deref>(&mut self, logger: &L) where L::Target: Logger
+ {
+ let transaction_number = self.cur_holder_commitment_transaction_number;
+ let signer = self.holder_signer.as_ref();
+
+ log_trace!(logger, "Retrieving commitment point for {} transaction number {}", self.channel_id(), transaction_number);
+ self.cur_holder_commitment_point = match signer.get_per_commitment_point(transaction_number, &self.secp_ctx) {
+ Ok(point) => {
+ if self.signer_pending_commitment_point {
+ log_trace!(logger, "Commitment point for {} transaction number {} retrieved; clearing signer_pending_commitment_point",
+ self.channel_id(), transaction_number);
+ self.signer_pending_commitment_point = false;
+ }
+ Some(point)
+ }
+
+ Err(_) => {
+ if !self.signer_pending_commitment_point {
+ log_trace!(logger, "Commitment point for {} transaction number {} is not available; setting signer_pending_commitment_point",
+ self.channel_id(), transaction_number);
+ self.signer_pending_commitment_point = true;
+ }
+ None
+ }
+ };
+ }
+
+ pub fn update_holder_commitment_secret<L: Deref>(&mut self, logger: &L) where L::Target: Logger
+ {
+ let transaction_number = self.cur_holder_commitment_transaction_number;
+ let signer = self.holder_signer.as_ref();
+
+ let releasing_transaction_number = transaction_number + 2;
+ if releasing_transaction_number <= INITIAL_COMMITMENT_NUMBER {
+ log_trace!(logger, "Retrieving commitment secret for {} transaction number {}", self.channel_id(), releasing_transaction_number);
+ self.prev_holder_commitment_secret = match signer.release_commitment_secret(releasing_transaction_number) {
+ Ok(secret) => {
+ if self.signer_pending_released_secret {
+ log_trace!(logger, "Commitment secret for {} transaction number {} retrieved; clearing signer_pending_released_secret",
+ self.channel_id(), releasing_transaction_number);
+ self.signer_pending_released_secret = false;
+ }
+ Some(secret)
+ }
+
+ Err(_) => {
+ if !self.signer_pending_released_secret {
+ log_trace!(logger, "Commitment secret for {} transaction number {} is not available; setting signer_pending_released_secret",
+ self.channel_id(), releasing_transaction_number);
+ self.signer_pending_released_secret = true;
+ }
+ None
+ }
+ }
+ };
+ }
+
+ #[cfg(test)]
+ pub fn forget_signer_material(&mut self) {
+ self.cur_holder_commitment_point = None;
+ self.prev_holder_commitment_secret = None;
+ }
+
 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
 /// no longer be considered when forwarding HTLCs.
@@ -1579,7 +1715,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
 /// which peer generated this transaction and "to whom" this transaction flows.
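+ ///
+ /// Note that with an asynchronous signer the holder's per-commitment point may not yet be
+ /// available; in that case the returned `CommitmentStats::tx` will be `None` and the included
+ /// HTLCs are left unsorted.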
 #[inline]
- fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
+ fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
 where L::Target: Logger
 {
 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
@@ -1785,19 +1921,23 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 let channel_parameters = if local { self.channel_transaction_parameters.as_holder_broadcastable() }
 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
- let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
- value_to_a as u64,
- value_to_b as u64,
- funding_pubkey_a,
- funding_pubkey_b,
- keys.clone(),
- feerate_per_kw,
- &mut included_non_dust_htlcs,
- &channel_parameters
+ let keys_opt = if local { self.build_next_holder_transaction_keys() } else { Some(self.build_remote_transaction_keys()) };
+ let tx = keys_opt.map(|keys|
+ CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
+ value_to_a as u64,
+ value_to_b as u64,
+ funding_pubkey_a,
+ funding_pubkey_b,
+ keys.clone(),
+ feerate_per_kw,
+ &mut included_non_dust_htlcs,
+ &channel_parameters)
 );
 let mut htlcs_included = included_non_dust_htlcs;
- // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
- htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
+
+ // If the transaction was built, the sort key is well-defined, because all non-dust HTLCs have
+ // been assigned an output index. Otherwise we'll just leave them unsorted.
+ htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap_or(999));
 htlcs_included.append(&mut included_dust_htlcs);
 // For the stats, trimmed-to-0 the value in msats accordingly
@@ -1819,17 +1959,20 @@
 #[inline]
 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
- /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
- /// our counterparty!)
+ /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to our
+ /// counterparty!) The keys are specifically generated for the _next_ state to which the channel
+ /// is about to advance.
 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
 /// TODO Some magic rust shit to compile-time check this?
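+ /// Returns `None` if the holder's per-commitment point for the next state is not yet available
+ /// from the signer, in which case the next holder commitment transaction cannot be built yet.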
- fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
- let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
+ fn build_next_holder_transaction_keys(&self) -> Option<TxCreationKeys> {
 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
 let counterparty_pubkeys = self.get_counterparty_pubkeys();
-
- TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
+ self.cur_holder_commitment_point.map(|cur_holder_commitment_point|
+ TxCreationKeys::derive_new(
+ &self.secp_ctx, &cur_holder_commitment_point, delayed_payment_base, htlc_basepoint,
+ &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
+ )
 }
 #[inline]
@@ -2414,8 +2557,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
- let counterparty_keys = self.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
+ let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, false, false, logger).tx.expect("Holder per-commitment point is not ready");
 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
@@ -2704,6 +2846,7 @@ impl<SP: Deref> Channel<SP> where
 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
 // (see equivalent if condition there).
 assert!(self.context.channel_state.should_force_holding_cell());
+ assert!(!self.context.signer_pending_channel_reestablish);
 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
 self.context.latest_monitor_update_id = mon_update_id;
@@ -2773,7 +2916,7 @@
 }],
 };
- if self.context.channel_state.should_force_holding_cell() {
+ if self.context.channel_state.should_force_holding_cell() || !self.context.is_peer_connected() {
 // Note that this condition is the same as the assertion in
 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
@@ -2947,7 +3090,7 @@ impl<SP: Deref> Channel<SP> where
 return Ok(None);
 }
- if self.context.channel_state.should_force_holding_cell() {
+ if self.context.channel_state.should_force_holding_cell() || !self.context.is_peer_connected() {
 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
 force_holding_cell = true;
 }
@@ -3348,11 +3491,13 @@ impl<SP: Deref> Channel<SP> where
 let funding_script = self.context.get_funding_redeemscript();
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+ // TODO(waterson): if we get an errant commitment_signed while we're still pending a commitment
+ // point from the signer, this will panic.
+ let keys = self.context.build_next_holder_transaction_keys().expect("Holder per-commitment point is not ready");
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, true, false, logger);
+ let commitment_tx = commitment_stats.tx.expect("Holder per-commitment point is not ready");
 let commitment_txid = {
- let trusted_tx = commitment_stats.tx.trust();
+ let trusted_tx = commitment_tx.trust();
 let bitcoin_tx = trusted_tx.built_transaction();
 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
@@ -3448,7 +3593,7 @@
 }
 let holder_commitment_tx = HolderCommitmentTransaction::new(
- commitment_stats.tx,
+ commitment_tx,
 msg.signature,
 msg.htlc_signatures.clone(),
 &self.context.get_holder_pubkeys().funding_pubkey,
@@ -3514,8 +3659,11 @@ impl<SP: Deref> Channel<SP> where
 self.context.cur_holder_commitment_transaction_number -= 1;
 self.context.expecting_peer_commitment_signed = false;
+ self.context.update_holder_per_commitment_point(logger);
+
 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
 // build_commitment_no_status_check() next which will reset this to RAAFirst.
+ log_debug!(logger, "setting resend_order to CommitmentFirst"); self.context.resend_order = RAACommitmentOrder::CommitmentFirst; if self.context.channel_state.is_monitor_update_in_progress() { @@ -3564,7 +3712,7 @@ impl Channel where ) -> (Option, Vec<(HTLCSource, PaymentHash)>) where F::Target: FeeEstimator, L::Target: Logger { - if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() { + if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() && self.context.is_peer_connected() { self.free_holding_cell_htlcs(fee_estimator, logger) } else { (None, Vec::new()) } } @@ -4016,8 +4164,7 @@ impl Channel where // Before proposing a feerate update, check that we can actually afford the new fee. let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, true, true, logger); let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { @@ -4069,6 +4216,26 @@ impl Channel where return Err(()) } + // If we have requests pending to the signer, then clear out any of them that will be recomputed + // on `channel_reestablish`. This ensures we don't send them twice if the signer unblocks before + // we receive the counterparty's reestablish message. + if self.context.signer_pending_channel_reestablish { + log_trace!(logger, "Clearing signer_pending_channel_reestablish"); + self.context.signer_pending_channel_reestablish = false; + } + if self.context.signer_pending_channel_ready { + log_trace!(logger, "Clearing signer_pending_channel_ready"); + self.context.signer_pending_channel_ready = false; + } + if self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Clearing signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = false; + } + if self.context.signer_pending_commitment_update { + log_trace!(logger, "Clearing signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = false; + } + if self.context.channel_state.is_peer_disconnected() { // While the below code should be idempotent, it's simpler to just return early, as // redundant disconnect events can fire, though they should be rare. 
@@ -4201,11 +4368,10 @@ impl<SP: Deref> Channel<SP> where
 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
 self.context.monitor_pending_channel_ready = false;
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ self.get_channel_ready().or_else(|| {
+ log_trace!(logger, "Monitor was pending channel_ready with no commitment point available; setting signer_pending_channel_ready");
+ self.context.signer_pending_channel_ready = true;
+ None
 })
 } else { None };
@@ -4218,7 +4384,7 @@ impl<SP: Deref> Channel<SP> where
 let mut finalized_claimed_htlcs = Vec::new();
 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
- if self.context.channel_state.is_peer_disconnected() {
+ if !self.context.is_peer_connected() {
 self.context.monitor_pending_revoke_and_ack = false;
 self.context.monitor_pending_commitment_signed = false;
 return MonitorRestoreUpdates {
@@ -4228,7 +4394,12 @@
 }
 let raa = if self.context.monitor_pending_revoke_and_ack {
- Some(self.get_last_revoke_and_ack())
+ self.context.update_holder_commitment_secret(logger);
+ self.get_last_revoke_and_ack(logger).or_else(|| {
+ log_trace!(logger, "Monitor was pending RAA, but RAA is not available; setting signer_pending_revoke_and_ack");
+ self.context.signer_pending_revoke_and_ack = true;
+ None
+ })
 } else { None };
 let commitment_update = if self.context.monitor_pending_commitment_signed {
 self.get_last_commitment_update_for_send(logger).ok()
@@ -4237,13 +4408,61 @@
 self.mark_awaiting_response();
 }
+ if self.context.monitor_pending_commitment_signed && commitment_update.is_none() {
+ log_trace!(logger, "Monitor was pending_commitment_signed with no commitment update available; setting signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = true;
+ } else {
+ // If the signer was pending a commitment update, but we happened to get one just now because
+ // the monitor retrieved it, then we can mark the signer as "not pending anymore".
+ if self.context.signer_pending_commitment_update && commitment_update.is_some() {
+ log_trace!(logger, "Signer was pending commitment update, monitor retrieved it: clearing signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = false;
+ }
+ }
+ if self.context.monitor_pending_revoke_and_ack && raa.is_none() {
+ log_trace!(logger, "Monitor was pending_revoke_and_ack with no RAA available; setting signer_pending_revoke_and_ack");
+ self.context.signer_pending_revoke_and_ack = true;
+ } else {
+ // If the signer was pending a RAA, but we happened to get one just now because the monitor
+ // retrieved it, then we can mark the signer as "not pending anymore".
+ if self.context.signer_pending_revoke_and_ack && raa.is_some() {
+ log_trace!(logger, "Signer was pending RAA, monitor retrieved it: clearing signer_pending_revoke_and_ack");
+ self.context.signer_pending_revoke_and_ack = false;
+ }
+ }
+
 self.context.monitor_pending_revoke_and_ack = false;
 self.context.monitor_pending_commitment_signed = false;
+
+ // Enforce the ordering between commitment update and revoke-and-ack: with an asynchronous
+ // signer, they may have become available in an order that violates the ordering that our
+ // counterparty expects. If that happens, suppress the out-of-order message and mark it as
+ // pending. When the signer becomes unblocked we can provide the messages in the proper order.
 let order = self.context.resend_order.clone();
- log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ let (commitment_update, raa) = match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ if self.context.signer_pending_commitment_update && raa.is_some() {
+ log_trace!(logger, "RAA is available, but we can't deliver it because ordering requires CommitmentFirst; setting signer_pending_revoke_and_ack = true");
+ self.context.signer_pending_revoke_and_ack = true;
+ }
+ (commitment_update, if !self.context.signer_pending_commitment_update { raa } else { None })
+ }
+
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ if self.context.signer_pending_revoke_and_ack && commitment_update.is_some() {
+ log_trace!(logger, "commitment_update is available, but we can't deliver it because ordering requires RevokeAndACKFirst; setting signer_pending_commitment_update = true");
+ self.context.signer_pending_commitment_update = true;
+ }
+ (if !self.context.signer_pending_revoke_and_ack { commitment_update } else { None }, raa)
+ }
+ };
+
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA{}",
 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
 if commitment_update.is_some() { "a" } else { "no" },
 if raa.is_some() { "an" } else { "no" },
- match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ if commitment_update.is_some() && raa.is_some() {
+ match order { RAACommitmentOrder::CommitmentFirst => ", with commitment first", RAACommitmentOrder::RevokeAndACKFirst => ", with RAA first"}
+ } else { "" });
 MonitorRestoreUpdates {
 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
 funding_broadcastable, channel_ready, announcement_sigs
 }
@@ -4285,40 +4504,173 @@ impl<SP: Deref> Channel<SP> where
 /// blocked.
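+ ///
+ /// When both a commitment update and a revoke-and-ack are pending, they are regenerated in an
+ /// order consistent with `resend_order`: the trailing message remains deferred until the signer
+ /// has produced the leading one.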
 #[cfg(async_signing)]
 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
- let commitment_update = if self.context.signer_pending_commitment_update {
- self.get_last_commitment_update_for_send(logger).ok()
+ log_trace!(logger, "Signing unblocked in channel {} at sequence {}",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number);
+
+ if self.context.signer_pending_commitment_point {
+ log_trace!(logger, "Attempting to update holder per-commitment point...");
+ self.context.update_holder_per_commitment_point(logger);
+ }
+
+ if self.context.signer_pending_released_secret {
+ log_trace!(logger, "Attempting to update holder commitment secret...");
+ self.context.update_holder_commitment_secret(logger);
+ }
+
+ let channel_reestablish = if self.context.signer_pending_channel_reestablish {
+ log_trace!(logger, "Attempting to generate channel_reestablish...");
+ self.get_channel_reestablish(logger).map(|msg| {
+ log_trace!(logger, "Generated channel_reestablish; clearing signer_pending_channel_reestablish");
+ self.context.signer_pending_channel_reestablish = false;
+ msg
+ })
 } else { None };
+
+ if self.context.channel_state.is_peer_disconnected() && channel_reestablish.is_none() {
+ log_trace!(logger, "Peer is disconnected; no unblocked messages to send.");
+ return SignerResumeUpdates::default()
+ }
+
 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
+ log_trace!(logger, "Attempting to generate pending funding signed...");
 self.context.get_funding_signed_msg(logger).1
 } else { None };
- let channel_ready = if funding_signed.is_some() {
- self.check_get_channel_ready(0)
+
+ // Don't yield up a `channel_ready` message if we're still pending funding.
+ let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding {
+ log_trace!(logger, "Attempting to generate pending channel ready...");
+ self.get_channel_ready().map(|msg| {
+ log_trace!(logger, "Generated channel_ready; clearing signer_pending_channel_ready");
+ self.context.signer_pending_channel_ready = false;
+ msg
+ })
 } else { None };
- log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
+ // If we're pending a channel_ready to the counterparty, then we can't be sending commitment
+ // updates. If we're not, then make sure that we honor any ordering requirements between the
+ // commitment update and revoke-and-ack.
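+ // (While a `channel_ready` is still owed to the counterparty, we send neither message and
+ // leave both marked as pending.)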
+ let (commitment_update, raa) = if self.context.signer_pending_channel_ready {
+ (None, None)
+ } else {
+ match &self.context.resend_order {
+ RAACommitmentOrder::CommitmentFirst => {
+ let cu = if self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Attempting to generate pending commitment update...");
+ self.get_last_commitment_update_for_send(logger).map(|cu| {
+ log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = false;
+ cu
+ }).ok()
+ } else { None };
+
+ let raa = if self.context.signer_pending_revoke_and_ack && !self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Attempting to generate pending RAA...");
+ self.get_last_revoke_and_ack(logger).map(|raa| {
+ log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack");
+ self.context.signer_pending_revoke_and_ack = false;
+ raa
+ })
+ } else { None };
+
+ (cu, raa)
+ }
+
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ let raa = if self.context.signer_pending_revoke_and_ack {
+ log_trace!(logger, "Attempting to generate pending RAA...");
+ self.get_last_revoke_and_ack(logger).map(|raa| {
+ log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack");
+ self.context.signer_pending_revoke_and_ack = false;
+ raa
+ })
+ } else { None };
+
+ let cu = if self.context.signer_pending_commitment_update && !self.context.signer_pending_revoke_and_ack {
+ log_trace!(logger, "Attempting to generate pending commitment update...");
+ self.get_last_commitment_update_for_send(logger).map(|cu| {
+ log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = false;
+ cu
+ }).ok()
+ } else { None };
+
+ (cu, raa)
+ }
+ }
+ };
+
+ let order = self.context.resend_order.clone();
+
+ log_debug!(logger, "Signing unblocked in channel {} at sequence {} resulted in {} channel reestablish, {} commitment update, {} RAA{}, {} funding signed, {} channel ready",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+ if channel_reestablish.is_some() { "a" } else { "no" },
 if commitment_update.is_some() { "a" } else { "no" },
+ if raa.is_some() { "an" } else { "no" },
+ if commitment_update.is_some() && raa.is_some() {
+ if order == RAACommitmentOrder::CommitmentFirst { " (commitment first)" } else { " (RAA first)" }
+ } else { "" },
 if funding_signed.is_some() { "a" } else { "no" }, if channel_ready.is_some() { "a" } else { "no" });
 SignerResumeUpdates {
 commitment_update,
+ raa,
+ order,
 funding_signed,
 channel_ready,
+ channel_reestablish,
 }
 }
- fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
- msgs::RevokeAndACK {
- channel_id: self.context.channel_id,
- per_commitment_secret,
- next_per_commitment_point,
- #[cfg(taproot)]
- next_local_nonce: None,
+ fn get_last_revoke_and_ack<L: Deref>(&self, logger: &L) -> Option<msgs::RevokeAndACK> where L::Target: Logger {
+ assert!(self.context.cur_holder_commitment_transaction_number + 2 <= INITIAL_COMMITMENT_NUMBER);
+ match (self.context.cur_holder_commitment_point, self.context.prev_holder_commitment_secret) {
+ (Some(next_per_commitment_point),
+			(Some(next_per_commitment_point), Some(per_commitment_secret)) => {
+				log_debug!(logger, "Regenerated last revoke-and-ack in channel {} for next per-commitment point sequence number {}, releasing secret for {}",
+					&self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+					self.context.cur_holder_commitment_transaction_number + 2);
+
+				Some(msgs::RevokeAndACK {
+					channel_id: self.context.channel_id,
+					per_commitment_secret,
+					next_per_commitment_point,
+					#[cfg(taproot)]
+					next_local_nonce: None,
+				})
+			},
+
+			(Some(_), None) => {
+				log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the secret for {} is not available",
+					&self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+					self.context.cur_holder_commitment_transaction_number + 2);
+				None
+			},
+
+			(None, Some(_)) => {
+				log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
+					&self.context.channel_id(), self.context.cur_holder_commitment_transaction_number);
+				None
+			},
+
+			(None, None) => {
+				log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because neither the next per-commitment point nor the secret for {} is available",
+					&self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+					self.context.cur_holder_commitment_transaction_number + 2);
+				None
+			},
 		}
 	}

+	fn get_channel_ready(&self) -> Option<msgs::ChannelReady> {
+		self.context.cur_holder_commitment_point.map(|next_per_commitment_point| {
+			msgs::ChannelReady {
+				channel_id: self.context.channel_id(),
+				next_per_commitment_point,
+				short_channel_id_alias: Some(self.context.outbound_scid_alias),
+			}
+		})
+	}
+
 	/// Gets the last commitment update for immediate sending to our peer.
 	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
 		let mut update_add_htlcs = Vec::new();
@@ -4398,6 +4750,9 @@ impl<SP: Deref> Channel<SP> where
 				return Err(());
 			}
 		};
+		log_debug!(logger, "Regenerated latest commitment update in channel {} at {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+			&self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, if update_fee.is_some() { " update_fee," } else { "" },
+			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
 		Ok(msgs::CommitmentUpdate {
 			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
 			commitment_signed,
@@ -4444,11 +4799,19 @@ impl<SP: Deref> Channel<SP> where
 		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;

 		if msg.next_remote_commitment_number > 0 {
-			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
-			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
-				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
-			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
-				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+			// TODO(waterson): figure out how to do this verification when an async signer is provided
+			// with a (more or less) arbitrary state index.
+			// Should we require that an async signer cache old points? Or should we make it so that
+			// we can restart the re-establish after the signer becomes unblocked? Or something else?
+			if false {
+				let state_index = INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1;
+				let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(state_index, &self.context.secp_ctx)
+					.map_err(|_| ChannelError::Close(format!("Unable to retrieve per-commitment point for state {}", state_index)))?;
+				let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+					.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+				if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
+					return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+				}
+			}
 		}
 		if msg.next_remote_commitment_number > our_commitment_transaction {
 			macro_rules! log_and_panic {
@@ -4504,29 +4867,46 @@ impl<SP: Deref> Channel<SP> where
 			}

 			// We have OurChannelReady set!
-			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+			let channel_ready = self.get_channel_ready();
+			if channel_ready.is_none() {
+				log_trace!(logger, "Could not generate channel_ready during channel_reestablish; setting signer_pending_channel_ready");
+				self.context.signer_pending_channel_ready = true;
+			}
+
 			return Ok(ReestablishResponses {
-				channel_ready: Some(msgs::ChannelReady {
-					channel_id: self.context.channel_id(),
-					next_per_commitment_point,
-					short_channel_id_alias: Some(self.context.outbound_scid_alias),
-				}),
+				channel_ready,
 				raa: None, commitment_update: None,
 				order: RAACommitmentOrder::CommitmentFirst,
 				shutdown_msg, announcement_sigs,
 			});
 		}

-		let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
+		// If they think we're behind by one state, then we owe them an RAA. We may or may not have that
+		// RAA handy depending on the status of the remote signer and the monitor.
+		let steps_behind = (INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number) - (msg.next_remote_commitment_number + 1);
+		let mut raa = if steps_behind == 0 {
 			// Remote isn't waiting on any RevokeAndACK from us!
 			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
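+			// (steps_behind counts how many commitment states we have advanced past the one the peer
+			// says it will revoke next; zero means the peer is not waiting on any RAA from us.)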
None - } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction { + } else if steps_behind == 1 { // msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number { if self.context.channel_state.is_monitor_update_in_progress() { self.context.monitor_pending_revoke_and_ack = true; None } else { - Some(self.get_last_revoke_and_ack()) + self.context.update_holder_commitment_secret(logger); + self.get_last_revoke_and_ack(logger).map(|raa| { + if self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Generated RAA for channel_reestablish; clearing signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = false; + } + raa + }).or_else(|| { + if !self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Unable to generate RAA for channel_reestablish; setting signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = true; + } + None + }) } } else { debug_assert!(false, "All values should have been handled in the four cases above"); @@ -4547,31 +4927,42 @@ impl Channel where } let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { + let mut channel_ready = None; + if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) - } else { None }; + log_debug!(logger, "Reconnecting channel at state 1, (re?)sending channel_ready"); + channel_ready = self.get_channel_ready(); + if channel_ready.is_none() { + if !self.context.signer_pending_channel_ready { + log_trace!(logger, "Unable to generate channel_ready for channel_reestablish; setting signer_pending_channel_ready"); + self.context.signer_pending_channel_ready = true; + } + } + } if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() { + if raa.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); } else { log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); } + // If we need to provide a channel_ready, but cannot create it yet (e.g. because we do not + // have the next commitment point), then we must not send a revoke-and-ack: suppress it if + // necessary. 
+			if self.context.signer_pending_channel_ready && raa.is_some() {
+				log_trace!(logger, "Suppressing RAA because signer_pending_channel_ready; marking signer_pending_revoke_and_ack");
+				self.context.signer_pending_revoke_and_ack = true;
+				raa = None;
+			}
+
 			Ok(ReestablishResponses {
-				channel_ready, shutdown_msg, announcement_sigs,
-				raa: required_revoke,
+				channel_ready, shutdown_msg, announcement_sigs, raa,
 				commitment_update: None,
 				order: self.context.resend_order.clone(),
 			})
 		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
-			if required_revoke.is_some() {
+			if raa.is_some() || self.context.signer_pending_revoke_and_ack {
 				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
 			} else {
 				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
@@ -4585,10 +4976,32 @@ impl<SP: Deref> Channel<SP> where
 				order: self.context.resend_order.clone(),
 			})
 		} else {
+			// Get the commitment update. This may fail if we're pending the signer.
+			let mut commitment_update = if raa.is_some() || steps_behind == 0 {
+				self.get_last_commitment_update_for_send(logger).ok()
+			} else { None };
+			self.context.signer_pending_commitment_update = commitment_update.is_none();
+
+			// If we need to provide a channel_ready, but cannot create it yet (e.g. because we do not
+			// have the next commitment point), then we must not send the commitment update: suppress it
+			// if necessary.
+			if commitment_update.is_some() && self.context.signer_pending_channel_ready {
+				log_trace!(logger, "Suppressing commitment update because signer_pending_channel_ready; marking signer_pending_commitment_update");
+				self.context.signer_pending_commitment_update = true;
+				commitment_update = None;
+			}
+
+			// Handle the revoke-and-ack similarly.
+			if self.context.signer_pending_channel_ready && raa.is_some() {
+				log_trace!(logger, "Suppressing RAA because signer_pending_channel_ready; marking signer_pending_revoke_and_ack");
+				self.context.signer_pending_revoke_and_ack = true;
+				raa = None;
+			}
+
 			Ok(ReestablishResponses {
 				channel_ready, shutdown_msg, announcement_sigs,
-				raa: required_revoke,
-				commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
+				raa: if !self.context.signer_pending_channel_ready { raa } else { None },
+				commitment_update: if !self.context.signer_pending_channel_ready { commitment_update } else { None },
 				order: self.context.resend_order.clone(),
 			})
 		}
@@ -5242,7 +5655,9 @@ impl<SP: Deref> Channel<SP> where
 		self.context.channel_update_status = status;
 	}

-	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+	fn check_get_channel_ready<L: Deref>(&mut self, height: u32, logger: &L) -> Option<msgs::ChannelReady>
+		where L::Target: Logger
+	{
 		// Called:
 		//  * always when a new block/transactions are confirmed with the new height
 		//  * when funding is signed with a height of 0
@@ -5259,12 +5674,6 @@ impl<SP: Deref> Channel<SP> where
 			return None;
 		}

-		// If we're still pending the signature on a funding transaction, then we're not ready to send a
-		// channel_ready yet.
-		if self.context.signer_pending_funding {
-			return None;
-		}
-
 		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
 		// channel_ready until the entire batch is ready.
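+		// First decide whether a channel_ready is warranted at all; the checks below then decide
+		// whether we can actually produce one yet (monitor update in progress, peer disconnected,
+		// funding signature outstanding, or next per-commitment point unavailable).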
 		let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
@@ -5294,22 +5703,50 @@ impl<SP: Deref> Channel<SP> where
 			false
 		};

-		if need_commitment_update {
-			if !self.context.channel_state.is_monitor_update_in_progress() {
-				if !self.context.channel_state.is_peer_disconnected() {
-					let next_per_commitment_point =
-						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
-					return Some(msgs::ChannelReady {
-						channel_id: self.context.channel_id,
-						next_per_commitment_point,
-						short_channel_id_alias: Some(self.context.outbound_scid_alias),
-					});
-				}
-			} else {
-				self.context.monitor_pending_channel_ready = true;
-			}
+		// If we don't need a commitment update, then we don't need a channel_ready.
+		if !need_commitment_update {
+			return None
+		}
+
+		// If a monitor update is in progress, flag that we're pending a channel ready from the monitor.
+		if self.context.channel_state.is_monitor_update_in_progress() {
+			log_trace!(logger, "Monitor update in progress; setting monitor_pending_channel_ready");
+			self.context.monitor_pending_channel_ready = true;
+			return None;
+		}
+
+		// If the peer is disconnected, then we'll worry about sending channel_ready as part of the
+		// reconnection process.
+		if !self.context.is_peer_connected() {
+			log_trace!(logger, "Peer is disconnected; we'll deal with channel_ready on reconnect");
+			return None
+		}
+
+		// If we're still pending the signature on a funding transaction, then we're not ready to send a
+		// channel_ready yet.
+		if self.context.signer_pending_funding {
+			log_trace!(logger, "Awaiting signer funding; setting signer_pending_channel_ready");
+			self.context.signer_pending_channel_ready = true;
+			return None;
+		}
+
+		// If we're able to get the next per-commitment point from the signer, then return a
+		// channel_ready.
+		let res = self.context.holder_signer.as_ref().get_per_commitment_point(
+			INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
+
+		if let Ok(next_per_commitment_point) = res {
+			Some(msgs::ChannelReady {
+				channel_id: self.context.channel_id,
+				next_per_commitment_point,
+				short_channel_id_alias: Some(self.context.outbound_scid_alias),
+			})
+		} else {
+			// Otherwise, mark us as awaiting the signer to send the channel ready.
+			log_trace!(logger, "Awaiting signer to generate next per_commitment_point; setting signer_pending_channel_ready");
+			self.context.signer_pending_channel_ready = true;
+			None
 		}
-		None
 	}

 	/// When a transaction is confirmed, we check whether it is or spends the funding transaction
@@ -5376,7 +5813,7 @@ impl<SP: Deref> Channel<SP> where
 			// If we allow 1-conf funding, we may need to check for channel_ready here and
 			// send it immediately instead of waiting for a best_block_updated call (which
 			// may have already happened for this block).
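+			// check_get_channel_ready records any signer-pending state itself now, so this call
+			// simply yields `None` until the signer has produced the next per-commitment point.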
-			if let Some(channel_ready) = self.check_get_channel_ready(height) {
+			if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
 				log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
 				let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
 				msgs = (Some(channel_ready), announcement_sigs);
@@ -5442,7 +5879,7 @@ impl<SP: Deref> Channel<SP> where
 			self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

-			if let Some(channel_ready) = self.check_get_channel_ready(height) {
+			if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
 				let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
 					self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
 				} else { None };
@@ -5577,7 +6014,7 @@ impl<SP: Deref> Channel<SP> where
 			return None;
 		}

-		if self.context.channel_state.is_peer_disconnected() {
+		if !self.context.is_peer_connected() {
 			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
 			return None;
 		}
@@ -5714,9 +6151,21 @@ impl<SP: Deref> Channel<SP> where

 	/// May panic if called on a channel that wasn't immediately-previously
 	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
-	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
-		assert!(self.context.channel_state.is_peer_disconnected());
+	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> Option<msgs::ChannelReestablish> where L::Target: Logger {
+		assert!(!self.context.is_peer_connected());
 		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
+		if self.context.cur_holder_commitment_point.is_none() {
+			log_trace!(logger, "Cannot produce channel_reestablish for {} until holder commitment point is available", self.context.channel_id());
+			if !self.context.signer_pending_channel_reestablish {
+				log_debug!(logger, "Marking {} as signer_pending_channel_reestablish", self.context.channel_id());
+				self.context.signer_pending_channel_reestablish = true;
+			}
+			if !self.context.signer_pending_commitment_point {
+				log_debug!(logger, "Marking {} as signer_pending_commitment_point", self.context.channel_id());
+				self.context.signer_pending_commitment_point = true;
+			}
+			return None;
+		}
 		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
 		// current to_remote balances. However, it no longer has any use, and thus is now simply
 		// set to a dummy (but valid, as required by the spec) public key.
@@ -5734,7 +6183,7 @@ impl<SP: Deref> Channel<SP> where
 			[0;32]
 		};
 		self.mark_awaiting_response();
-		msgs::ChannelReestablish {
+		Some(msgs::ChannelReestablish {
 			channel_id: self.context.channel_id(),
 			// The protocol has two different commitment number concepts - the "commitment
 			// transaction number", which starts from 0 and counts up, and the "revocation key
@@ -5760,7 +6209,7 @@ impl<SP: Deref> Channel<SP> where
 			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
 			// txid of that interactive transaction, else we MUST NOT set it.
 			next_funding_txid: None,
-		}
+		})
 	}

@@ -5839,7 +6288,7 @@ impl<SP: Deref> Channel<SP> where
 				available_balances.next_outbound_htlc_limit_msat)));
 		}

-		if self.context.channel_state.is_peer_disconnected() {
+		if !self.context.is_peer_connected() {
 			// Note that this should never really happen, if we're !is_live() on receipt of an
 			// incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
 			// the user to send directly into a !is_live() channel. However, if we
@@ -5931,6 +6380,7 @@ impl<SP: Deref> Channel<SP> where
 				self.context.pending_update_fee = None;
 			}
 		}
+		log_debug!(logger, "setting resend_order to RevokeAndACKFirst");
 		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

 		let (mut htlcs_ref, counterparty_commitment_tx) =
@@ -5965,9 +6415,8 @@ impl<SP: Deref> Channel<SP> where
 		-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
 		where L::Target: Logger
 	{
-		let counterparty_keys = self.context.build_remote_transaction_keys();
-		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
-		let counterparty_commitment_tx = commitment_stats.tx;
+		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, false, true, logger);
+		let counterparty_commitment_tx = commitment_stats.tx.expect("Holder per-commitment point is not ready");

 		#[cfg(any(test, fuzzing))]
 		{
@@ -5998,8 +6447,9 @@ impl<SP: Deref> Channel<SP> where
 		self.build_commitment_no_state_update(logger);

 		let counterparty_keys = self.context.build_remote_transaction_keys();
-		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
-		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, false, true, logger);
+		let counterparty_commitment_tx = commitment_stats.tx.expect("Holder per-commitment point is not ready");
+		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();

 		match &self.context.holder_signer {
 			ChannelSignerType::Ecdsa(ecdsa) => {
@@ -6012,7 +6462,7 @@ impl<SP: Deref> Channel<SP> where
 				}

 				let res = ecdsa.sign_counterparty_commitment(
-						&commitment_stats.tx,
+						&counterparty_commitment_tx,
 						commitment_stats.inbound_htlc_preimages,
 						commitment_stats.outbound_htlc_preimages,
 						&self.context.secp_ctx,
@@ -6021,7 +6471,7 @@ impl<SP: Deref> Channel<SP> where
 				htlc_signatures = res.1;

 				log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
-					encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
+					encode::serialize_hex(&counterparty_commitment_tx.trust().built_transaction().transaction),
 					&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
 					log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
@@ -6195,6 +6645,7 @@ impl<SP: Deref> Channel<SP> where
 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
 	pub context: ChannelContext<SP>,
 	pub unfunded_context: UnfundedChannelContext,
+	pub signer_pending_open_channel: bool,
 }

 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
@@ -6270,6 +6721,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
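+		// Eagerly request the first per-commitment point; with an async signer this may be `None`
+		// for now, in which case the open_channel message is deferred until the signer unblocks.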
+		let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).ok();
+
 		Ok(Self {
 			context: ChannelContext {
 				user_id,
@@ -6298,6 +6751,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 				destination_script,

 				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+				cur_holder_commitment_point,
+				prev_holder_commitment_secret: None,
 				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 				value_to_self_msat,

@@ -6320,7 +6775,12 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 				monitor_pending_finalized_fulfills: Vec::new(),

 				signer_pending_commitment_update: false,
+				signer_pending_revoke_and_ack: false,
 				signer_pending_funding: false,
+				signer_pending_channel_ready: false,
+				signer_pending_commitment_point: false,
+				signer_pending_released_secret: false,
+				signer_pending_channel_reestablish: false,

 				#[cfg(debug_assertions)]
 				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
@@ -6399,14 +6859,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 				blocked_monitor_updates: Vec::new(),
 			},
-			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
+			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+			signer_pending_open_channel: false,
 		})
 	}

 	/// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
 	fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
-		let counterparty_keys = self.context.build_remote_transaction_keys();
-		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, false, false, logger).tx.expect("Holder per-commitment point is not ready");
 		let signature = match &self.context.holder_signer {
 			// TODO (taproot|arik): move match into calling method for Taproot
 			ChannelSignerType::Ecdsa(ecdsa) => {
@@ -6522,7 +6982,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
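+	/// With an async signer this can now return `Ok(None)`: if the first per-commitment point is
+	/// not yet available we set `signer_pending_open_channel` and surface the retried
+	/// `open_channel` later via `signer_maybe_unblocked`.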
 	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
 		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
-	) -> Result<msgs::OpenChannel, ()>
+	) -> Result<Option<msgs::OpenChannel>, ()>
 	where
 		F::Target: FeeEstimator
 	{
@@ -6557,10 +7017,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
 		}
 		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
-		Ok(self.get_open_channel(chain_hash))
+		let opt_msg = self.get_open_channel(chain_hash);
+		if opt_msg.is_none() {
+			self.signer_pending_open_channel = true;
+		}
+		Ok(opt_msg)
 	}

-	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
+	pub fn get_open_channel(&self, chain_hash: ChainHash) -> Option<msgs::OpenChannel> {
 		if !self.context.is_outbound() {
 			panic!("Tried to open a channel for an inbound channel?");
 		}
@@ -6572,34 +7036,35 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 			panic!("Tried to send an open_channel for a channel that has already advanced");
 		}

-		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
 		let keys = self.context.get_holder_pubkeys();

-		msgs::OpenChannel {
-			chain_hash,
-			temporary_channel_id: self.context.channel_id,
-			funding_satoshis: self.context.channel_value_satoshis,
-			push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
-			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
-			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
-			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
-			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
-			feerate_per_kw: self.context.feerate_per_kw as u32,
-			to_self_delay: self.context.get_holder_selected_contest_delay(),
-			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
-			funding_pubkey: keys.funding_pubkey,
-			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
-			payment_point: keys.payment_point,
-			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
-			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
-			first_per_commitment_point,
-			channel_flags: if self.context.config.announced_channel {1} else {0},
-			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
-				Some(script) => script.clone().into_inner(),
-				None => Builder::new().into_script(),
-			}),
-			channel_type: Some(self.context.channel_type.clone()),
-		}
+		self.context.cur_holder_commitment_point.map(|first_per_commitment_point| {
+			msgs::OpenChannel {
+				chain_hash,
+				temporary_channel_id: self.context.channel_id,
+				funding_satoshis: self.context.channel_value_satoshis,
+				push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
+				dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+				max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+				channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+				htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+				feerate_per_kw: self.context.feerate_per_kw as u32,
+				to_self_delay: self.context.get_holder_selected_contest_delay(),
+				max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+				funding_pubkey: keys.funding_pubkey,
+				revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+				payment_point: keys.payment_point,
+				delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+				htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+				first_per_commitment_point,
+				channel_flags: if self.context.config.announced_channel {1} else {0},
+				shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+					Some(script) => script.clone().into_inner(),
+					None => Builder::new().into_script(),
+				}),
+				channel_type: Some(self.context.channel_type.clone()),
+			}
+		})
 	}

 	// Message handlers
@@ -6757,16 +7222,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 		let funding_script = self.context.get_funding_redeemscript();

-		let counterparty_keys = self.context.build_remote_transaction_keys();
-		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, false, false, logger).tx.expect("Holder per-commitment point is not ready");
 		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
 		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

 		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
 			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

-		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, true, false, logger).tx.expect("Holder per-commitment point is not ready");
 		{
 			let trusted_tx = initial_commitment_tx.trust();
 			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
@@ -6820,13 +7283,15 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 		}
 		self.context.cur_holder_commitment_transaction_number -= 1;
+		self.context.update_holder_per_commitment_point(logger);
 		self.context.cur_counterparty_commitment_transaction_number -= 1;

 		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

 		let mut channel = Channel { context: self.context };

-		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+		let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
+		log_trace!(logger, "funding_signed {} channel_ready", if need_channel_ready { "needs" } else { "does not need" });
 		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 		Ok((channel, channel_monitor))
 	}
@@ -6834,11 +7299,26 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 	/// Indicates that the signer may have some signatures for us, so we should retry if we're
 	/// blocked.
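+	/// Returns an `UnfundedOutboundV1SignerResumeUpdates` describing any `open_channel` and/or
+	/// `funding_created` message that became available.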
 	#[cfg(async_signing)]
-	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
-		if self.context.signer_pending_funding && self.context.is_outbound() {
+	pub fn signer_maybe_unblocked<L: Deref>(&mut self, chain_hash: &ChainHash, logger: &L) -> UnfundedOutboundV1SignerResumeUpdates where L::Target: Logger {
+		let open_channel = if self.signer_pending_open_channel {
+			self.context.update_holder_per_commitment_point(logger);
+
+			self.get_open_channel(chain_hash.clone()).map(|msg| {
+				log_trace!(logger, "Signer unblocked an open_channel; clearing signer_pending_open_channel");
+				self.signer_pending_open_channel = false;
+				msg
+			})
+		} else { None };
+
+		let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
 			log_trace!(logger, "Signer unblocked a funding_created");
 			self.get_funding_created_msg(logger)
-		} else { None }
+		} else { None };
+
+		UnfundedOutboundV1SignerResumeUpdates {
+			open_channel,
+			funding_created,
+		}
 	}
 }

@@ -6846,6 +7326,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
 	pub context: ChannelContext<SP>,
 	pub unfunded_context: UnfundedChannelContext,
+	pub signer_pending_accept_channel: bool,
 }

 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
@@ -7055,6 +7536,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		} else {
 			Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
 		};
+		let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).ok();

 		let chan = Self {
 			context: ChannelContext {
@@ -7085,6 +7567,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 				destination_script,

 				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+				cur_holder_commitment_point,
+				prev_holder_commitment_secret: None,
 				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 				value_to_self_msat: msg.push_msat,

@@ -7107,7 +7591,12 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 				monitor_pending_finalized_fulfills: Vec::new(),

 				signer_pending_commitment_update: false,
+				signer_pending_revoke_and_ack: false,
 				signer_pending_funding: false,
+				signer_pending_channel_ready: false,
+				signer_pending_commitment_point: false,
+				signer_pending_released_secret: false,
+				signer_pending_channel_reestablish: false,

 				#[cfg(debug_assertions)]
 				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
@@ -7190,7 +7679,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 				blocked_monitor_updates: Vec::new(),
 			},
-			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
+			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+			signer_pending_accept_channel: false,
 		};

 		Ok(chan)
@@ -7200,7 +7690,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 	/// should be sent back to the counterparty node.
 	///
 	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
-	pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
+	pub fn accept_inbound_channel(&mut self) -> Option<msgs::AcceptChannel> {
 		if self.context.is_outbound() {
 			panic!("Tried to send accept_channel for an outbound channel?");
 		}
@@ -7222,33 +7712,33 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 	/// [`InboundV1Channel::accept_inbound_channel`] instead.
 	///
 	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
-	fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
-		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-		let keys = self.context.get_holder_pubkeys();
-
-		msgs::AcceptChannel {
-			temporary_channel_id: self.context.channel_id,
-			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
-			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
-			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
-			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
-			minimum_depth: self.context.minimum_depth.unwrap(),
-			to_self_delay: self.context.get_holder_selected_contest_delay(),
-			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
-			funding_pubkey: keys.funding_pubkey,
-			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
-			payment_point: keys.payment_point,
-			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
-			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
-			first_per_commitment_point,
-			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
-				Some(script) => script.clone().into_inner(),
-				None => Builder::new().into_script(),
-			}),
-			channel_type: Some(self.context.channel_type.clone()),
-			#[cfg(taproot)]
-			next_local_nonce: None,
-		}
+	fn generate_accept_channel_message(&self) -> Option<msgs::AcceptChannel> {
+		self.context.cur_holder_commitment_point.map(|first_per_commitment_point| {
+			let keys = self.context.get_holder_pubkeys();
+			msgs::AcceptChannel {
+				temporary_channel_id: self.context.channel_id,
+				dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+				max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+				channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+				htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+				minimum_depth: self.context.minimum_depth.unwrap(),
+				to_self_delay: self.context.get_holder_selected_contest_delay(),
+				max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+				funding_pubkey: keys.funding_pubkey,
+				revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+				payment_point: keys.payment_point,
+				delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+				htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+				first_per_commitment_point,
+				shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+					Some(script) => script.clone().into_inner(),
+					None => Builder::new().into_script(),
+				}),
+				channel_type: Some(self.context.channel_type.clone()),
+				#[cfg(taproot)]
+				next_local_nonce: None,
+			}
+		})
 	}

 	/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
@@ -7256,15 +7746,14 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 	///
 	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
 	#[cfg(test)]
-	pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
+	pub fn get_accept_channel_message(&self) -> Option<msgs::AcceptChannel> {
 		self.generate_accept_channel_message()
 	}

 	fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
 		let funding_script = self.context.get_funding_redeemscript();

-		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, true, false, logger).tx.expect("Holder per-commitment point is not ready");
 		let trusted_tx = initial_commitment_tx.trust();
 		let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
 		let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
@@ -7339,6 +7828,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		self.context.channel_id = funding_txo.to_channel_id();
 		self.context.cur_counterparty_commitment_transaction_number -= 1;
 		self.context.cur_holder_commitment_transaction_number -= 1;
+		self.context.update_holder_per_commitment_point(logger);

 		let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);

@@ -7370,11 +7860,33 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		let mut channel = Channel {
 			context: self.context,
 		};
-		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+
+		let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
+		log_trace!(logger, "funding_created {} channel_ready", if need_channel_ready { "needs" } else { "does not need" });
 		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 		Ok((channel, funding_signed, channel_monitor))
 	}
+
+	/// Indicates that the signer may have some signatures for us, so we should retry if we're
+	/// blocked.
+	#[allow(unused)]
+	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> UnfundedInboundV1SignerResumeUpdates
+		where L::Target: Logger
+	{
+		let accept_channel = if self.signer_pending_accept_channel {
+			self.context.update_holder_per_commitment_point(logger);
+			self.generate_accept_channel_message().map(|msg| {
+				log_trace!(logger, "Clearing signer_pending_accept_channel");
+				self.signer_pending_accept_channel = false;
+				msg
+			})
+		} else { None };
+
+		UnfundedInboundV1SignerResumeUpdates {
+			accept_channel,
+		}
+	}
 }

 const SERIALIZATION_VERSION: u8 = 3;
@@ -8217,6 +8729,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			destination_script,

 			cur_holder_commitment_transaction_number,
+			cur_holder_commitment_point: None,
+			prev_holder_commitment_secret: None,
 			cur_counterparty_commitment_transaction_number,
 			value_to_self_msat,

@@ -8235,7 +8749,12 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),

 			signer_pending_commitment_update: false,
+			signer_pending_revoke_and_ack: false,
 			signer_pending_funding: false,
+			signer_pending_channel_ready: false,
+			signer_pending_commitment_point: true,
+			signer_pending_released_secret: true,
+			signer_pending_channel_reestablish: false,

 			pending_update_fee,
 			holding_cell_update_fee,
@@ -8461,7 +8980,7 @@ mod tests {
 		// Now change the fee so we can check that the fee in the open_channel message is the
 		// same as the old fee.
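+		// (The Option-returning accessors are unwrap()ed throughout these tests: the test signer
+		// resolves per-commitment points synchronously, so `None` is never expected here.)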
 		fee_est.fee_est = 500;
-		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
 	}

@@ -8487,12 +9006,12 @@ mod tests {

 		// Create Node B's channel by receiving Node A's open_channel message
 		// Make sure A's dust limit is as we expect.
-		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

 		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
-		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+		let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
 		accept_channel_msg.dust_limit_satoshis = 546;
 		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
 		node_a_chan.context.holder_dust_limit_satoshis = 1560;
@@ -8618,12 +9137,12 @@ mod tests {
 		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

 		// Create Node B's channel by receiving Node A's open_channel message
-		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
+		let open_channel_msg = node_a_chan.get_open_channel(chain_hash).unwrap();
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

 		// Node B --> Node A: accept channel
-		let accept_channel_msg = node_b_chan.accept_inbound_channel();
+		let accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
 		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

 		// Node A --> Node B: funding created
@@ -8642,7 +9161,7 @@ mod tests {
 		// Now disconnect the two nodes and check that the commitment point in
 		// Node B's channel_reestablish message is sane.
 		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
-		let msg = node_b_chan.get_channel_reestablish(&&logger);
+		let msg = node_b_chan.get_channel_reestablish(&&logger).unwrap();
 		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
 		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
 		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
@@ -8650,7 +9169,7 @@ mod tests {
 		// Check that the commitment point in Node A's channel_reestablish message
 		// is sane.
 		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
-		let msg = node_a_chan.get_channel_reestablish(&&logger);
+		let msg = node_a_chan.get_channel_reestablish(&&logger).unwrap();
 		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
 		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
 		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
@@ -8688,7 +9207,7 @@ mod tests {
 		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

-		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
+		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();

 		// Test that `InboundV1Channel::new` creates a channel with the correct value for
 		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
@@ -8769,7 +9288,7 @@ mod tests {
 		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
 		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

-		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let mut inbound_node_config = UserConfig::default();
 		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
@@ -8805,12 +9324,12 @@ mod tests {

 		// Create Node B's channel by receiving Node A's open_channel message
 		// Make sure A's dust limit is as we expect.
-		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

 		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
-		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+		let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
 		accept_channel_msg.dust_limit_satoshis = 546;
 		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
 		node_a_chan.context.holder_dust_limit_satoshis = 1560;
@@ -9044,12 +9563,13 @@ mod tests {
 		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

-		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
+		// We can't just use build_next_holder_transaction_keys here as the per_commitment_secret is not
 		// derived from a commitment_seed, so instead we copy it here and call
 		// build_commitment_transaction.
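+		// The per-commitment point derived from this fixed secret is also handed to the context
+		// directly (cur_holder_commitment_point below), standing in for what an async signer
+		// would eventually provide.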
 		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
 		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
 		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
+		chan.context.cur_holder_commitment_point = Some(per_commitment_point);
 		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
 		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
@@ -9072,12 +9592,12 @@ mod tests {
 			$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
 		} ) => { {
 			let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
-				let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
+				let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, true, false, &logger);

 				let htlcs = commitment_stats.htlcs_included.drain(..)
 					.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
 					.collect();
-				(commitment_stats.tx, htlcs)
+				(commitment_stats.tx.expect("transaction was not created"), htlcs)
 			};
 			let trusted_tx = commitment_tx.trust();
 			let unsigned_tx = trusted_tx.built_transaction();
@@ -9766,7 +10286,7 @@ mod tests {
 		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 		channel_type_features.set_zero_conf_required();

-		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		open_channel_msg.channel_type = Some(channel_type_features);
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
@@ -9810,7 +10330,7 @@ mod tests {
 			None
 		).unwrap();

-		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
 			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
@@ -9849,7 +10369,7 @@ mod tests {
 		).unwrap();

 		// Set `channel_type` to `None` to force the implicit feature negotiation.
-		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		open_channel_msg.channel_type = None;

 		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
@@ -9895,7 +10415,7 @@ mod tests {
 			None
 		).unwrap();

-		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

 		let res = InboundV1Channel::<&TestKeysInterface>::new(
@@ -9914,7 +10434,7 @@ mod tests {
 			10000000, 100000, 42, &config, 0, 42, None
 		).unwrap();

-		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();

 		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
@@ -9922,7 +10442,7 @@ mod tests {
 			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
 		).unwrap();

-		let mut accept_channel_msg = channel_b.get_accept_channel_message();
+		let mut accept_channel_msg = channel_b.get_accept_channel_message().unwrap();
 		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

 		let res = channel_a.accept_channel(
@@ -9973,7 +10493,7 @@ mod tests {
 			node_b_node_id,
 			&channelmanager::provided_channel_type_features(&config),
 			&channelmanager::provided_init_features(&config),
-			&open_channel_msg,
+			&open_channel_msg.unwrap(),
 			7,
 			&config,
 			0,
@@ -9983,7 +10503,7 @@ mod tests {
 		let accept_channel_msg = node_b_chan.accept_inbound_channel();
 		node_a_chan.accept_channel(
-			&accept_channel_msg,
+			&accept_channel_msg.unwrap(),
 			&config.channel_handshake_limits,
 			&channelmanager::provided_init_features(&config),
 		).unwrap();
@@ -10056,6 +10576,6 @@ mod tests {
 		// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
 		node_a_chan.set_batch_ready();
 		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
-		assert!(node_a_chan.check_get_channel_ready(0).is_some());
+		assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some());
 	}
 }
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b2b28cdf365..2f29872cc3c 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -638,7 +638,7 @@ pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
 /// be sent in the order they appear in the return value, however sometimes the order needs to be
 /// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
 /// they were originally sent). In those cases, this enum is also returned.
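+// `Debug` is derived so that the resend order can be included in debug log output.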
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
 pub(super) enum RAACommitmentOrder {
 	/// Send the CommitmentUpdate messages first
 	CommitmentFirst,
@@ -2538,7 +2538,7 @@ where
 			}
 		}

-		let channel = {
+		let mut channel = {
 			let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 			let their_features = &peer_state.latest_features;
 			let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
@@ -2553,7 +2553,11 @@ where
 				},
 			}
 		};
-		let res = channel.get_open_channel(self.chain_hash);
+		let opt_msg = channel.get_open_channel(self.chain_hash);
+		if opt_msg.is_none() {
+			log_trace!(self.logger, "Awaiting signer for open_channel, setting signer_pending_open_channel");
+			channel.signer_pending_open_channel = true;
+		}

 		let temporary_channel_id = channel.context.channel_id();
 		match peer_state.channel_by_id.entry(temporary_channel_id) {
@@ -2567,10 +2571,13 @@ where
 			hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
 		}

-		peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-			node_id: their_network_key,
-			msg: res,
-		});
+		if let Some(msg) = opt_msg {
+			peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+				node_id: their_network_key,
+				msg,
+			});
+		};
+
 		Ok(temporary_channel_id)
 	}

@@ -5904,6 +5911,12 @@ where
 			emit_channel_ready_event!(pending_events, channel);
 		}

+		log_debug!(self.logger, "Outgoing message queue is{}", if pending_msg_events.is_empty() { " empty" } else { "..." });
+		for msg in pending_msg_events.iter() {
+			log_debug!(self.logger, "  {:?}", msg);
+		}
+
 		htlc_forwards
 	}

@@ -6055,10 +6068,17 @@ where
 		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 		channel.context.set_outbound_scid_alias(outbound_scid_alias);

-		peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-			node_id: channel.context.get_counterparty_node_id(),
-			msg: channel.accept_inbound_channel(),
-		});
+		match channel.accept_inbound_channel() {
+			Some(msg) =>
+				peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+					node_id: channel.context.get_counterparty_node_id(),
+					msg
+				}),
+			None => {
+				log_trace!(self.logger, "Awaiting signer for accept_channel; setting signer_pending_accept_channel");
+				channel.signer_pending_accept_channel = true;
+			},
+		};

 		peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));

@@ -6210,10 +6230,18 @@ where
 		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 		channel.context.set_outbound_scid_alias(outbound_scid_alias);

-		peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-			node_id: counterparty_node_id.clone(),
-			msg: channel.accept_inbound_channel(),
-		});
+		match channel.accept_inbound_channel() {
+			Some(msg) =>
+				peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+					node_id: channel.context.get_counterparty_node_id(),
+					msg
+				}),
+			None => {
+				log_trace!(self.logger, "Awaiting signer for accept_channel; setting signer_pending_accept_channel");
+				channel.signer_pending_accept_channel = true;
+			},
+		};
+
 		peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));
 		Ok(())
 	}

@@ -6274,6 +6302,12 @@ where
 		let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
 		match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
 			Ok(res) => res,
@@ -6274,6 +6302,12 @@ where
 				let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
 				match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
 					Ok(res) => res,
+					Err((inbound_chan, ChannelError::Ignore(_))) => {
+						// If we get an `Ignore` error then something transient went wrong. Put the channel
+						// back into the table and bail.
+						peer_state.channel_by_id.insert(msg.temporary_channel_id, ChannelPhase::UnfundedInboundV1(inbound_chan));
+						return Ok(());
+					},
 					Err((inbound_chan, err)) => {
 						// We've already removed this inbound channel from the map in `PeerState`
 						// above so at this point we just need to clean up any lingering entries
@@ -6423,6 +6457,7 @@ where
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context);
+					log_debug!(logger, "<== channel_ready");
 					let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
 						self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
 					if let Some(announcement_sigs) = announcement_sigs_opt {
@@ -6660,6 +6695,7 @@ where
 				}
 			};
 			let logger = WithChannelContext::from(&self.logger, &chan.context);
+			log_debug!(logger, "<== update_add_htlc: htlc_id={} amount_msat={}", msg.htlc_id, msg.amount_msat);
 			try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
 		} else {
 			return try_chan_phase_entry!(self, Err(ChannelError::Close(
@@ -6685,6 +6721,7 @@ where
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+					log_debug!(self.logger, "<== update_fulfill_htlc: htlc_id={}", msg.htlc_id);
 					let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
 					if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
 						let logger = WithChannelContext::from(&self.logger, &chan.context);
@@ -6783,6 +6820,7 @@ where
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context);
 					let funding_txo = chan.context.get_funding_txo();
+					log_debug!(logger, "<== commitment_signed: {} htlcs", msg.htlc_signatures.len());
 					let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
 					if let Some(monitor_update) = monitor_update_opt {
 						handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
@@ -6957,6 +6995,7 @@ where
 						&peer_state.actions_blocking_raa_monitor_updates, funding_txo,
 						*counterparty_node_id)
 				} else { false };
+				log_debug!(self.logger, "<== revoke_and_ack");
 				let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
 					chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
 				if let Some(monitor_update) = monitor_update_opt {
@@ -7107,6 +7146,7 @@ where
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
+				log_debug!(logger, "<== channel_reestablish");
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 					// Currently, we expect all holding cell update_adds to be dropped on peer
 					// disconnect, so Channel's reestablish will never hand us any holding cell
@@ -7338,32 +7378,71 @@ where
 				let node_id = phase.context().get_counterparty_node_id();
 				match phase {
 					ChannelPhase::Funded(chan) => {
-						let msgs = chan.signer_maybe_unblocked(&self.logger);
-						if let Some(updates) = msgs.commitment_update {
-							pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-								node_id,
-								updates,
-							});
+						let logger = WithChannelContext::from(&self.logger, &chan.context);
+						let msgs = chan.signer_maybe_unblocked(&&logger);
+
+						// Note that the order in which we enqueue the messages is significant!
+						if let Some(msg) = msgs.channel_reestablish {
+							log_trace!(logger, "Queuing channel_reestablish to {}", node_id);
+							pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { node_id, msg });
 						}
 						if let Some(msg) = msgs.funding_signed {
+							log_trace!(logger, "Queuing funding_signed to {}", node_id);
 							pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 								node_id,
 								msg,
 							});
 						}
 						if let Some(msg) = msgs.channel_ready {
+							log_trace!(logger, "Queuing channel_ready to {}", node_id);
 							send_channel_ready!(self, pending_msg_events, chan, msg);
 						}
+						match (msgs.commitment_update, msgs.raa) {
+							(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
+								log_trace!(logger, "Queuing update_htlcs to {}", node_id);
+								pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id, updates: cu });
+								log_trace!(logger, "Queuing revoke_and_ack to {}", node_id);
+								pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id, msg: raa });
+							},
+							(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
+								log_trace!(logger, "Queuing revoke_and_ack to {}", node_id);
+								pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id, msg: raa });
+								log_trace!(logger, "Queuing update_htlcs to {}", node_id);
+								pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id, updates: cu });
+							},
+							(Some(cu), _) => {
+								log_trace!(logger, "Queuing update_htlcs to {}", node_id);
+								pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id, updates: cu })
+							},
+							(_, Some(raa)) => {
+								log_trace!(logger, "Queuing revoke_and_ack to {}", node_id);
+								pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id, msg: raa })
+							},
+							(_, _) => (),
+						};
+					}
+					ChannelPhase::UnfundedInboundV1(chan) => {
+						let logger = WithChannelContext::from(&self.logger, &chan.context);
+						let msgs = chan.signer_maybe_unblocked(&&logger);
+						if let Some(msg) = msgs.accept_channel {
+							log_trace!(logger, "Queuing accept_channel to {}", node_id);
+							pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id, msg });
+						}
 					}
 					ChannelPhase::UnfundedOutboundV1(chan) => {
-						if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
-							pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-								node_id,
-								msg,
-							});
+						let logger = WithChannelContext::from(&self.logger, &chan.context);
+						let msgs = chan.signer_maybe_unblocked(&self.chain_hash, &&logger);
+						if let Some(msg) = msgs.open_channel {
+							log_trace!(logger, "Queuing open_channel to {}", node_id);
+							pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id, msg });
+						}
+						if let Some(msg) = msgs.funding_created {
+							log_trace!(logger, "Queuing funding_created to {}", node_id);
+							pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id, msg });
 						}
 					}
-					ChannelPhase::UnfundedInboundV1(_) => {},
 				}
 			};
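
Because `signer_maybe_unblocked` can release a commitment update and a revoke-and-ack in the same call, the tuple match above must respect the channel's recorded delivery order. A distilled, self-contained sketch of that dispatch (simplified stand-in types, illustrative only):

    #[derive(PartialEq)]
    enum Order { CommitmentFirst, RevokeAndACKFirst }

    struct CommitmentUpdate; // stand-in for msgs::CommitmentUpdate
    struct RevokeAndACK;     // stand-in for msgs::RevokeAndACK
    enum Msg { CU(CommitmentUpdate), RAA(RevokeAndACK) }

    fn enqueue(queue: &mut Vec<Msg>, cu: Option<CommitmentUpdate>, raa: Option<RevokeAndACK>, order: Order) {
        match (cu, raa) {
            // Both ready: the order recorded by the channel decides.
            (Some(cu), Some(raa)) if order == Order::CommitmentFirst => {
                queue.push(Msg::CU(cu));
                queue.push(Msg::RAA(raa));
            },
            (Some(cu), Some(raa)) => {
                queue.push(Msg::RAA(raa));
                queue.push(Msg::CU(cu));
            },
            // Only one ready: send it as soon as it is available.
            (Some(cu), None) => queue.push(Msg::CU(cu)),
            (None, Some(raa)) => queue.push(Msg::RAA(raa)),
            (None, None) => {},
        }
    }
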
@@ -8988,10 +9067,13 @@ where
 				}
 			).for_each(|chan| {
 				let logger = WithChannelContext::from(&self.logger, &chan.context);
-				pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-					node_id: chan.context.get_counterparty_node_id(),
-					msg: chan.get_channel_reestablish(&&logger),
-				});
+				if let Some(msg) = chan.get_channel_reestablish(&&logger) {
+					log_trace!(logger, "Queuing channel_reestablish to {}", chan.context.get_counterparty_node_id());
+					pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+						node_id: chan.context.get_counterparty_node_id(),
+						msg,
+					});
+				}
 			});
 		}
 
@@ -9069,11 +9151,13 @@ where
 			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
-				if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
-					peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-						node_id: *counterparty_node_id,
-						msg,
-					});
+				if let Ok(opt_msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+					if let Some(msg) = opt_msg {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+							node_id: *counterparty_node_id,
+							msg,
+						});
+					}
 					return;
 				}
 			}
@@ -10332,6 +10416,8 @@ where
 				log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
 					&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
 					monitor.get_latest_update_id());
+				channel.context.update_holder_per_commitment_point(&args.logger);
+				// channel.context.update_holder_commitment_secret(&args.logger);
 				if let Some(short_channel_id) = channel.context.get_short_channel_id() {
 					short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 				}
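
The read-path hook above refreshes the holder's current per-commitment point from the signer, since it is not persisted; the commented-out line suggests the commitment secret is meant to get the same treatment later. The method body is not part of this diff, but the refresh presumably reduces to something like the following helper, where the `Option<PublicKey>` cache is an assumption about the context's layout rather than a confirmed field:

    use bitcoin::secp256k1::{All, PublicKey, Secp256k1};
    use lightning::sign::ChannelSigner;

    // Ask the signer for the point at `idx` and cache it if available. On
    // Err(()) the cache stays empty and the signer-unblocked path fills it in
    // once the (possibly remote) signer responds.
    fn refresh_holder_point<S: ChannelSigner>(
        signer: &S, idx: u64, secp_ctx: &Secp256k1<All>, cache: &mut Option<PublicKey>,
    ) {
        if let Ok(point) = signer.get_per_commitment_point(idx, secp_ctx) {
            *cache = Some(point);
        }
    }
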
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index a2d9631716c..b83b53072e1 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -32,6 +32,8 @@ use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 use crate::util::ser::{ReadableArgs, Writeable};
 #[cfg(test)]
 use crate::util::logger::Logger;
+#[cfg(test)]
+use crate::util::test_channel_signer::ops;
 
 use bitcoin::blockdata::block::{Block, Header, Version};
 use bitcoin::blockdata::locktime::absolute::LockTime;
@@ -446,14 +448,14 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
 	pub fn get_block_header(&self, height: u32) -> Header {
 		self.blocks.lock().unwrap()[height as usize].0.header
 	}
+
 	/// Changes the channel signer's availability for the specified peer and channel.
 	///
 	/// When `available` is set to `true`, the channel signer will behave normally. When set to
 	/// `false`, the channel signer will act like an off-line remote signer and will return `Err` for
-	/// several of the signing methods. Currently, only `get_per_commitment_point` and
-	/// `release_commitment_secret` are affected by this setting.
+	/// the signing methods selected by `mask`.
 	#[cfg(test)]
-	pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) {
+	pub fn set_channel_signer_ops_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, mask: u32, available: bool) {
 		let per_peer_state = self.node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap();
 		let signer = (|| {
@@ -462,8 +464,21 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
 				None => panic!("Couldn't find a channel with id {}", chan_id),
 			}
 		})();
-		log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available);
-		signer.as_ecdsa().unwrap().set_available(available);
+		log_debug!(self.logger, "Setting channel signer for {} as {}available for {} (mask={})",
+			chan_id, if available { "" } else { "un" }, ops::string_from(mask), mask);
+		signer.as_ecdsa().unwrap().set_ops_available(mask, available);
+	}
+
+	/// Removes signature material from the channel context.
+	#[cfg(test)]
+	pub fn forget_signer_material(&mut self, peer_id: &PublicKey, chan_id: &ChannelId) {
+		let mut per_peer_state = self.node.per_peer_state.write().unwrap();
+		let mut chan_lock = per_peer_state.get_mut(peer_id).unwrap().lock().unwrap();
+		match chan_lock.channel_by_id.get_mut(chan_id) {
+			Some(ref mut phase) => phase.context_mut().forget_signer_material(),
+			None => panic!("Couldn't find a channel with id {}", chan_id),
+		}
+		log_debug!(self.logger, "Forgetting signing material for {}", chan_id);
 	}
 }
 
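A test would drive these helpers roughly as follows; the channel setup is elided and `chan_id` is a placeholder, but the calls match the signatures above (`signer_unblocked` being the manager's retry hook introduced elsewhere in this series):

    // Make node 0's signer unable to sign counterparty commitments...
    let peer_id = nodes[1].node.get_our_node_id();
    nodes[0].set_channel_signer_ops_available(
        &peer_id, &chan_id, ops::SIGN_COUNTERPARTY_COMMITMENT, false);

    // ...drive the protocol; anything needing that signature now returns Err...

    // ...then restore the op and let the manager re-send what it deferred.
    nodes[0].set_channel_signer_ops_available(
        &peer_id, &chan_id, ops::SIGN_COUNTERPARTY_COMMITMENT, true);
    nodes[0].node.signer_unblocked(None);
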
{:?}", announcement_event[0]); } } } else { - assert!(chan_msgs.0.is_none()); + assert!(chan_msgs.0.is_none(), "did not expect to have a ChannelReady for node 1"); } if pending_raa.0 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); @@ -3378,7 +3400,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_a, 1); } else { - assert!(chan_msgs.1.is_none()); + assert!(chan_msgs.1.is_none(), "did not expect to have a RevokeAndACK for node 1"); } if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 || @@ -3411,7 +3433,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { check_added_monitors!(node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }); } } else { - assert!(chan_msgs.2.is_none()); + assert!(chan_msgs.2.is_none(), "did not expect to have commitment updates for node 1"); } } @@ -3428,7 +3450,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } } } else { - assert!(chan_msgs.0.is_none()); + assert!(chan_msgs.0.is_none(), "did not expect to have a ChannelReady for node 2"); } if pending_raa.1 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); @@ -3436,7 +3458,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); } else { - assert!(chan_msgs.1.is_none()); + assert!(chan_msgs.1.is_none(), "did not expect to have a RevokeAndACK for node 2"); } if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 || @@ -3469,7 +3491,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { check_added_monitors!(node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }); } } else { - assert!(chan_msgs.2.is_none()); + assert!(chan_msgs.2.is_none(), "did not expect to have commitment updates for node 2"); } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2ad53faa8b2..998f96ad4f4 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -720,7 +720,7 @@ fn test_update_fee_that_funder_cannot_afford() { let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap(), pubkeys.funding_pubkey) }; @@ -1442,8 +1442,8 @@ fn test_fee_spike_violation_fails_htlc() { let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, - chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER), - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx), + chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap(), chan_signer.as_ref().pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { @@ 
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 2ad53faa8b2..998f96ad4f4 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -720,7 +720,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 		let chan_signer = remote_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
-		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
+		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap(),
 		 pubkeys.funding_pubkey)
 	};
 
@@ -1442,8 +1442,8 @@ fn test_fee_spike_violation_fails_htlc() {
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
-		 chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
-		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
+		 chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(),
+		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap(),
 		 chan_signer.as_ref().pubkeys().funding_pubkey)
 	};
 	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
@@ -1455,7 +1455,7 @@ fn test_fee_spike_violation_fails_htlc() {
 		let chan_signer = remote_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
-		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
+		 chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap(),
 		 chan_signer.as_ref().pubkeys().funding_pubkey)
 	};
 
@@ -7795,15 +7795,15 @@ fn test_counterparty_raa_skip_no_crash() {
 
 		// Make signer believe we got a counterparty signature, so that it allows the revocation
 		keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
-		per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+		per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap();
 
 		// Must revoke without gaps
 		keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
-		keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+		keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("unable to release commitment secret");
 
 		keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
 		next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
-			&SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+			&SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap()).unwrap());
 	}
 
 	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 4e418f049bb..192da4d6998 100644
--- a/lightning/src/sign/mod.rs
+++ b/lightning/src/sign/mod.rs
@@ -574,7 +574,10 @@ pub trait ChannelSigner {
 	/// Gets the per-commitment point for a specific commitment number
 	///
 	/// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards.
-	fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey;
+	///
+	/// If the signer returns `Err`, then the user is responsible for either force-closing the channel
+	/// or retrying once the signature is ready.
+	fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<PublicKey, ()>;
 
 	/// Gets the commitment secret for a specific commitment number as part of the revocation process
 	///
@@ -585,7 +588,7 @@ pub trait ChannelSigner {
 	///
 	/// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards.
-	// TODO: return a Result so we can signal a validation error
-	fn release_commitment_secret(&self, idx: u64) -> [u8; 32];
+	fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()>;
 
 	/// Validate the counterparty's signatures on the holder commitment transaction and HTLCs.
 	///
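
With fallible signatures, an implementation backed by a remote signer can answer `Err(())` to mean "not yet" instead of blocking the message pipeline. A minimal sketch of the idea, assuming a hypothetical prefetched cache (this is not a full ChannelSigner impl, which has several more required methods):

    use std::collections::HashMap;
    use std::sync::Mutex;
    use bitcoin::secp256k1::{All, PublicKey, Secp256k1};

    // Hypothetical front-end for a remote signer: per-commitment points are
    // prefetched into a cache; a miss reports Err(()) and the caller retries
    // via the signer-unblocked path once the point arrives.
    struct RemoteSignerFrontend {
        cached_points: Mutex<HashMap<u64, PublicKey>>,
    }

    impl RemoteSignerFrontend {
        fn get_per_commitment_point(&self, idx: u64, _secp_ctx: &Secp256k1<All>)
            -> Result<PublicKey, ()>
        {
            self.cached_points.lock().unwrap().get(&idx).copied().ok_or(())
        }
    }
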
@@ -1077,13 +1080,13 @@ impl EntropySource for InMemorySigner {
 }
 
 impl ChannelSigner for InMemorySigner {
-	fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
+	fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<PublicKey, ()> {
 		let commitment_secret = SecretKey::from_slice(&chan_utils::build_commitment_secret(&self.commitment_seed, idx)).unwrap();
-		PublicKey::from_secret_key(secp_ctx, &commitment_secret)
+		Ok(PublicKey::from_secret_key(secp_ctx, &commitment_secret))
 	}
 
-	fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
-		chan_utils::build_commitment_secret(&self.commitment_seed, idx)
+	fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()> {
+		Ok(chan_utils::build_commitment_secret(&self.commitment_seed, idx))
 	}
 
 	fn validate_holder_commitment(&self, _holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs
index a2cbf78b700..e64d67c00a8 100644
--- a/lightning/src/util/test_channel_signer.rs
+++ b/lightning/src/util/test_channel_signer.rs
@@ -68,9 +68,51 @@ pub struct TestChannelSigner {
 	/// Channel state used for policy enforcement
 	pub state: Arc<Mutex<EnforcementState>>,
 	pub disable_revocation_policy_check: bool,
-	/// When `true` (the default), the signer will respond immediately with signatures. When `false`,
-	/// the signer will return an error indicating that it is unavailable.
-	pub available: Arc<Mutex<bool>>,
 }
 
+/// Channel signer operations that can be individually enabled and disabled. If a particular value
+/// is set in the `EnforcementState::unavailable_signer_ops` bitmask, then that operation will
+/// return an error.
+pub mod ops {
+	pub const GET_PER_COMMITMENT_POINT: u32 = 1 << 0;
+	pub const RELEASE_COMMITMENT_SECRET: u32 = 1 << 1;
+	pub const VALIDATE_HOLDER_COMMITMENT: u32 = 1 << 2;
+	pub const SIGN_COUNTERPARTY_COMMITMENT: u32 = 1 << 3;
+	pub const VALIDATE_COUNTERPARTY_REVOCATION: u32 = 1 << 4;
+	pub const SIGN_HOLDER_COMMITMENT_AND_HTLCS: u32 = 1 << 5;
+	pub const SIGN_JUSTICE_REVOKED_OUTPUT: u32 = 1 << 6;
+	pub const SIGN_JUSTICE_REVOKED_HTLC: u32 = 1 << 7;
+	pub const SIGN_HOLDER_HTLC_TRANSACTION: u32 = 1 << 8;
+	pub const SIGN_COUNTERPARTY_HTLC_TRANSACTION: u32 = 1 << 9;
+	pub const SIGN_CLOSING_TRANSACTION: u32 = 1 << 10;
+	pub const SIGN_HOLDER_ANCHOR_INPUT: u32 = 1 << 11;
+	pub const SIGN_CHANNEL_ANNOUNCEMENT_WITH_FUNDING_KEY: u32 = 1 << 12;
+
+	#[cfg(test)]
+	pub fn string_from(mask: u32) -> String {
+		if mask == 0 {
+			return "nothing".to_owned();
+		}
+		if mask == !0u32 {
+			return "everything".to_owned();
+		}
+
+		vec![
+			if (mask & GET_PER_COMMITMENT_POINT) != 0 { Some("get_per_commitment_point") } else { None },
+			if (mask & RELEASE_COMMITMENT_SECRET) != 0 { Some("release_commitment_secret") } else { None },
+			if (mask & VALIDATE_HOLDER_COMMITMENT) != 0 { Some("validate_holder_commitment") } else { None },
+			if (mask & SIGN_COUNTERPARTY_COMMITMENT) != 0 { Some("sign_counterparty_commitment") } else { None },
+			if (mask & VALIDATE_COUNTERPARTY_REVOCATION) != 0 { Some("validate_counterparty_revocation") } else { None },
+			if (mask & SIGN_HOLDER_COMMITMENT_AND_HTLCS) != 0 { Some("sign_holder_commitment_and_htlcs") } else { None },
+			if (mask & SIGN_JUSTICE_REVOKED_OUTPUT) != 0 { Some("sign_justice_revoked_output") } else { None },
+			if (mask & SIGN_JUSTICE_REVOKED_HTLC) != 0 { Some("sign_justice_revoked_htlc") } else { None },
+			if (mask & SIGN_HOLDER_HTLC_TRANSACTION) != 0 { Some("sign_holder_htlc_transaction") } else { None },
+			if (mask & SIGN_COUNTERPARTY_HTLC_TRANSACTION) != 0 { Some("sign_counterparty_htlc_transaction") } else { None },
+			if (mask & SIGN_CLOSING_TRANSACTION) != 0 { Some("sign_closing_transaction") } else { None },
+			if (mask & SIGN_HOLDER_ANCHOR_INPUT) != 0 { Some("sign_holder_anchor_input") } else { None },
+			if (mask & SIGN_CHANNEL_ANNOUNCEMENT_WITH_FUNDING_KEY) != 0 { Some("sign_channel_announcement_with_funding_key") } else { None },
+		].iter().flatten().map(|s| s.to_string()).collect::<Vec<_>>().join(", ")
+	}
+}
 
 impl PartialEq for TestChannelSigner {
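
The masks compose with ordinary bit operations, and `string_from` keeps log lines readable. A quick check of the intended behavior (test sketch, assuming the corrected constant spellings above):

    #[test]
    fn ops_mask_roundtrip() {
        let mask = ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET;
        assert_eq!(mask, 0b11);
        assert_eq!(ops::string_from(mask), "get_per_commitment_point, release_commitment_secret");
        assert_eq!(ops::string_from(0), "nothing");
        assert_eq!(ops::string_from(!0u32), "everything");
    }
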
Some("sign_holder_htlc_transaction") } else { None }, + if (mask & SIGN_COUNTERPARTY_HTLC_TRANSATION) != 0 { Some("sign_counterparty_htlc_transation") } else { None }, + if (mask & SIGN_CLOSING_TRANSACTION) != 0 { Some("sign_closing_transaction") } else { None }, + if (mask & SIGN_HOLDER_ANCHOR_INPUT) != 0 { Some("sign_holder_anchor_input") } else { None }, + if (mask & SIGN_CHANNEL_ANNOUNCMENT_WITH_FUNDING_KEY) != 0 { Some("sign_channel_announcment_with_funding_key") } else { None }, + ].iter().flatten().map(|s| s.to_string()).collect::>().join(", ") + } } impl PartialEq for TestChannelSigner { @@ -87,7 +129,6 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check: false, - available: Arc::new(Mutex::new(true)), } } @@ -101,7 +142,6 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check, - available: Arc::new(Mutex::new(true)), } } @@ -113,22 +153,33 @@ impl TestChannelSigner { } /// Marks the signer's availability. - /// - /// When `true`, methods are forwarded to the underlying signer as normal. When `false`, some - /// methods will return `Err` indicating that the signer is unavailable. Intended to be used for - /// testing asynchronous signing. #[cfg(test)] - pub fn set_available(&self, available: bool) { - *self.available.lock().unwrap() = available; + pub fn set_ops_available(&self, mask: u32, available: bool) { + let mut state = self.get_enforcement_state(); + if available { + state.unavailable_signer_ops &= !mask; // clear the bits that are now available + } else { + state.unavailable_signer_ops |= mask; // set the bits that are now unavailable + } + } + + fn is_signer_available(&self, ops_mask: u32) -> bool { + self.state.lock().unwrap().is_signer_available(ops_mask) } } impl ChannelSigner for TestChannelSigner { - fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) -> PublicKey { + fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) -> Result { + if !self.is_signer_available(ops::GET_PER_COMMITMENT_POINT) { + return Err(()); + } self.inner.get_per_commitment_point(idx, secp_ctx) } - fn release_commitment_secret(&self, idx: u64) -> [u8; 32] { + fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()> { + if !self.is_signer_available(ops::RELEASE_COMMITMENT_SECRET) { + return Err(()); + } { let mut state = self.state.lock().unwrap(); assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment); @@ -139,6 +190,9 @@ impl ChannelSigner for TestChannelSigner { } fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec) -> Result<(), ()> { + if !self.is_signer_available(ops::VALIDATE_HOLDER_COMMITMENT) { + return Err(()); + } let mut state = self.state.lock().unwrap(); let idx = holder_tx.commitment_number(); assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment); @@ -147,7 +201,7 @@ impl ChannelSigner for TestChannelSigner { } fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(ops::VALIDATE_COUNTERPARTY_REVOCATION) { return Err(()); } let mut state = self.state.lock().unwrap(); @@ -170,7 +224,7 @@ impl 
@@ -170,7 +224,7 @@ impl EcdsaChannelSigner for TestChannelSigner {
 		self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
 		{
-			if !*self.available.lock().unwrap() {
+			if !self.is_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT) {
 				return Err(());
 			}
 			let mut state = self.state.lock().unwrap();
@@ -189,7 +243,7 @@ impl EcdsaChannelSigner for TestChannelSigner {
 	}
 
 	fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-		if !*self.available.lock().unwrap() {
+		if !self.is_signer_available(ops::SIGN_HOLDER_COMMITMENT_AND_HTLCS) {
 			return Err(());
 		}
 		let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
@@ -362,6 +416,10 @@ pub struct EnforcementState {
 	pub last_holder_revoked_commitment: u64,
 	/// The last validated holder commitment number, backwards counting
 	pub last_holder_commitment: u64,
+	/// A bitmask of signing operations that are currently *not* available in the channel. When an
+	/// operation's bit is set, the signer will act as if the signature is unavailable and return
+	/// an error result.
+	pub unavailable_signer_ops: u32,
 }
 
 impl EnforcementState {
@@ -372,6 +430,19 @@ impl EnforcementState {
 			last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
 			last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
 			last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+			unavailable_signer_ops: 0,
 		}
 	}
+
+	pub fn set_signer_available(&mut self, ops_mask: u32) {
+		self.unavailable_signer_ops &= !ops_mask; // clear the bits that are now available
+	}
+
+	pub fn set_signer_unavailable(&mut self, ops_mask: u32) {
+		self.unavailable_signer_ops |= ops_mask; // set the bits that are now unavailable
+	}
+
+	pub fn is_signer_available(&self, ops_mask: u32) -> bool {
+		(self.unavailable_signer_ops & ops_mask) == 0
+	}
 }
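
These accessors are plain set/clear/test bit operations, so disabling one op never disturbs another, and a mask query only succeeds when every op in the mask is available. A short test sketch of those semantics:

    #[test]
    fn signer_ops_availability() {
        let mut state = EnforcementState::new();
        // Everything is available by default.
        assert!(state.is_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT));

        // Disabling one op must not affect the others.
        state.set_signer_unavailable(ops::SIGN_COUNTERPARTY_COMMITMENT);
        assert!(!state.is_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT));
        assert!(state.is_signer_available(ops::GET_PER_COMMITMENT_POINT));

        // A mask query succeeds only if *every* op in the mask is available.
        assert!(!state.is_signer_available(
            ops::SIGN_COUNTERPARTY_COMMITMENT | ops::GET_PER_COMMITMENT_POINT));

        state.set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT);
        assert!(state.is_signer_available(
            ops::SIGN_COUNTERPARTY_COMMITMENT | ops::GET_PER_COMMITMENT_POINT));
    }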