diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index 53ff1cbd7a1..21f3bac22ce 100644
--- a/fuzz/Cargo.toml
+++ b/fuzz/Cargo.toml
@@ -44,6 +44,10 @@ path = "fuzz_targets/peer_crypt_target.rs"
 name = "full_stack_target"
 path = "fuzz_targets/full_stack_target.rs"
 
+[[bin]]
+name = "chanmon_fail_consistency"
+path = "fuzz_targets/chanmon_fail_consistency.rs"
+
 [[bin]]
 name = "router_target"
 path = "fuzz_targets/router_target.rs"
diff --git a/fuzz/fuzz_targets/chanmon_deser_target.rs b/fuzz/fuzz_targets/chanmon_deser_target.rs
index 9ddf52c662e..fb8f0bf6f12 100644
--- a/fuzz/fuzz_targets/chanmon_deser_target.rs
+++ b/fuzz/fuzz_targets/chanmon_deser_target.rs
@@ -30,7 +30,7 @@ impl Writer for VecWriter {
 #[inline]
 pub fn do_test(data: &[u8]) {
 	reset_rng_state();
-	let logger = Arc::new(test_logger::TestLogger{});
+	let logger = Arc::new(test_logger::TestLogger::new("".to_owned()));
 	if let Ok((latest_block_hash, monitor)) = <(Sha256dHash, channelmonitor::ChannelMonitor)>::read(&mut Cursor::new(data), logger.clone()) {
 		let mut w = VecWriter(Vec::new());
 		monitor.write_for_disk(&mut w).unwrap();
diff --git a/fuzz/fuzz_targets/chanmon_fail_consistency.rs b/fuzz/fuzz_targets/chanmon_fail_consistency.rs
new file mode 100644
index 00000000000..4db158f28e1
--- /dev/null
+++ b/fuzz/fuzz_targets/chanmon_fail_consistency.rs
@@ -0,0 +1,553 @@
+//! Test that monitor update failures don't get our channel state out of sync.
+//! One of the biggest concerns with the monitor update failure handling code is that messages
+//! resent after monitor updating is restored are delivered out-of-order, resulting in
+//! commitment_signed messages having "invalid signatures".
+//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
+//! actions such as sending payments, handling events, or changing monitor update return values on
+//! a per-node basis. This should allow it to find any cases where the ordering of actions results
+//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
+//! send-side handling is correct, other peers. We consider it a failure if any action results in a
+//! channel being force-closed.
+
+//Uncomment this for libfuzzer builds:
+//#![no_main]
+
+extern crate bitcoin;
+extern crate bitcoin_hashes;
+extern crate lightning;
+extern crate secp256k1;
+
+use bitcoin::BitcoinHash;
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::blockdata::opcodes;
+use bitcoin::network::constants::Network;
+
+use bitcoin_hashes::Hash as TraitImport;
+use bitcoin_hashes::hash160::Hash as Hash160;
+use bitcoin_hashes::sha256::Hash as Sha256;
+
+use lightning::chain::chaininterface;
+use lightning::chain::transaction::OutPoint;
+use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil};
+use lightning::chain::keysinterface::{ChannelKeys, KeysInterface};
+use lightning::ln::channelmonitor;
+use lightning::ln::channelmonitor::{ChannelMonitorUpdateErr, HTLCUpdate};
+use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage};
+use lightning::ln::router::{Route, RouteHop};
+use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, HandleError, UpdateAddHTLC};
+use lightning::util::{reset_rng_state, fill_bytes, events};
+use lightning::util::logger::Logger;
+use lightning::util::config::UserConfig;
+use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
+use lightning::util::ser::{Readable, Writeable};
+
+mod utils;
+use utils::test_logger;
+
+use secp256k1::key::{PublicKey,SecretKey};
+use secp256k1::Secp256k1;
+
+use std::sync::{Arc,Mutex};
+use std::io::Cursor;
+
+struct FuzzEstimator {}
+impl FeeEstimator for FuzzEstimator {
+	fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
+		253
+	}
+}
+
+pub struct TestBroadcaster {}
+impl BroadcasterInterface for TestBroadcaster {
+	fn broadcast_transaction(&self, _tx: &Transaction) { }
+}
+
+pub struct TestChannelMonitor {
+	pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
+	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
+}
+impl TestChannelMonitor {
+	pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>) -> Self {
+		Self {
+			simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger),
+			update_ret: Mutex::new(Ok(())),
+		}
+	}
+}
+impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
+	fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+		assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
+		self.update_ret.lock().unwrap().clone()
+	}
+
+	fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
+		return self.simple_monitor.fetch_pending_htlc_updated();
+	}
+}
+
+struct KeyProvider {
+	node_id: u8,
+}
+impl KeysInterface for KeyProvider {
+	fn get_node_secret(&self) -> SecretKey {
+		SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
+	}
+
+	fn get_destination_script(&self) -> Script {
+		let secp_ctx = Secp256k1::signing_only();
+		let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
+		let our_channel_monitor_claim_key_hash = Hash160::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
+		Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
+	}
+
+	fn get_shutdown_pubkey(&self) -> PublicKey {
+		let secp_ctx = Secp256k1::signing_only();
+		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
+	}
+
+	fn get_channel_keys(&self, _inbound: bool) -> ChannelKeys {
+		ChannelKeys {
+			funding_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
+			revocation_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
+			payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
+			delayed_payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
+			htlc_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
+			commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
+		}
+	}
+
+	fn get_session_key(&self) -> SecretKey {
+		let mut session_key = [0; 32];
+		fill_bytes(&mut session_key);
+		SecretKey::from_slice(&session_key).unwrap()
+	}
+
+	fn get_channel_id(&self) -> [u8; 32] {
+		let mut channel_id = [0; 32];
+		fill_bytes(&mut channel_id);
+		channel_id
+	}
+}
+
+#[inline]
+pub fn do_test(data: &[u8]) {
+	reset_rng_state();
+
+	let fee_est = Arc::new(FuzzEstimator{});
+	let broadcast = Arc::new(TestBroadcaster{});
+
+	macro_rules! make_node {
+		($node_id: expr) => { {
+			let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
+			let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
+			let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone()));
+
+			let keys_manager = Arc::new(KeyProvider { node_id: $node_id });
+			let mut config = UserConfig::new();
+			config.channel_options.fee_proportional_millionths = 0;
+			config.channel_options.announced_channel = true;
+			config.channel_limits.min_dust_limit_satoshis = 0;
+			(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), watch.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config).unwrap(),
+			monitor)
+		} }
+	}
+
+	let mut channel_txn = Vec::new();
+	macro_rules! make_channel {
+		($source: expr, $dest: expr, $chan_id: expr) => { {
+			$source.create_channel($dest.get_our_node_id(), 10000000, 42, 0).unwrap();
+			let open_channel = {
+				let events = $source.get_and_clear_pending_msg_events();
+				assert_eq!(events.len(), 1);
+				if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
+					msg.clone()
+				} else { panic!("Wrong event type"); }
+			};
+
+			$dest.handle_open_channel(&$source.get_our_node_id(), &open_channel).unwrap();
+			let accept_channel = {
+				let events = $dest.get_and_clear_pending_msg_events();
+				assert_eq!(events.len(), 1);
+				if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
+					msg.clone()
+				} else { panic!("Wrong event type"); }
+			};
+
+			$source.handle_accept_channel(&$dest.get_our_node_id(), &accept_channel).unwrap();
+			{
+				let events = $source.get_and_clear_pending_events();
+				assert_eq!(events.len(), 1);
+				if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
+					let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+						value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+					}]};
+					let funding_output = OutPoint::new(tx.txid(), 0);
+					$source.funding_transaction_generated(&temporary_channel_id, funding_output);
+					channel_txn.push(tx);
+				} else { panic!("Wrong event type"); }
+			}
+
+			let funding_created = {
+				let events = $source.get_and_clear_pending_msg_events();
+				assert_eq!(events.len(), 1);
+				if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
+					msg.clone()
+				} else { panic!("Wrong event type"); }
+			};
+			$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created).unwrap();
+
+			let funding_signed = {
+				let events = $dest.get_and_clear_pending_msg_events();
+				assert_eq!(events.len(), 1);
+				if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
+					msg.clone()
+				} else { panic!("Wrong event type"); }
+			};
+			$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed).unwrap();
+
+			{
+				let events = $source.get_and_clear_pending_events();
+				assert_eq!(events.len(), 1);
+				if let events::Event::FundingBroadcastSafe { .. } = events[0] {
+				} else { panic!("Wrong event type"); }
+			}
+		} }
+	}
+
+	macro_rules! confirm_txn {
+		($node: expr) => { {
+			let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+			let mut txn = Vec::with_capacity(channel_txn.len());
+			let mut posn = Vec::with_capacity(channel_txn.len());
+			for i in 0..channel_txn.len() {
+				txn.push(&channel_txn[i]);
+				posn.push(i as u32 + 1);
+			}
+			$node.block_connected(&header, 1, &txn, &posn);
+			for i in 2..100 {
+				header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+				$node.block_connected(&header, i, &Vec::new(), &[0; 0]);
+			}
+		} }
+	}
+
+	macro_rules! lock_fundings {
+		($nodes: expr) => { {
+			let mut node_events = Vec::new();
+			for node in $nodes.iter() {
+				node_events.push(node.get_and_clear_pending_msg_events());
+			}
+			for (idx, node_event) in node_events.iter().enumerate() {
+				for event in node_event {
+					if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
+						for node in $nodes.iter() {
+							if node.get_our_node_id() == *node_id {
+								node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg).unwrap();
+							}
+						}
+					} else { panic!("Wrong event type"); }
+				}
+			}
+
+			for node in $nodes.iter() {
+				let events = node.get_and_clear_pending_msg_events();
+				for event in events {
+					if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
+					} else { panic!("Wrong event type"); }
+				}
+			}
+		} }
+	}
+
+	// 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
+	// forwarding.
+	let (node_a, monitor_a) = make_node!(0);
+	let (node_b, monitor_b) = make_node!(1);
+	let (node_c, monitor_c) = make_node!(2);
+
+	let nodes = [node_a, node_b, node_c];
+
+	make_channel!(nodes[0], nodes[1], 0);
+	make_channel!(nodes[1], nodes[2], 1);
+
+	for node in nodes.iter() {
+		confirm_txn!(node);
+	}
+
+	lock_fundings!(nodes);
+
+	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
+	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();
+
+	let mut payment_id = 0;
+
+	let mut chan_a_disconnected = false;
+	let mut chan_b_disconnected = false;
+	let mut chan_a_reconnecting = false;
+	let mut chan_b_reconnecting = false;
+
+	macro_rules! test_err {
+		($res: expr) => {
+			match $res {
+				Ok(()) => {},
+				Err(HandleError { action: Some(ErrorAction::IgnoreError), .. }) => { },
+				_ => { $res.unwrap() },
+			}
+		}
+	}
+
+	macro_rules! test_return {
+		() => { {
+			assert_eq!(nodes[0].list_channels().len(), 1);
+			assert_eq!(nodes[1].list_channels().len(), 2);
+			assert_eq!(nodes[2].list_channels().len(), 1);
+			return;
+		} }
+	}
+
+	let mut read_pos = 0;
+	macro_rules! get_slice {
+		($len: expr) => {
+			{
+				let slice_len = $len as usize;
+				if data.len() < read_pos + slice_len {
+					test_return!();
+				}
+				read_pos += slice_len;
+				&data[read_pos - slice_len..read_pos]
+			}
+		}
+	}
+
+	loop {
+		macro_rules! send_payment {
+			($source: expr, $dest: expr) => { {
+				let payment_hash = Sha256::hash(&[payment_id; 1]);
+				payment_id = payment_id.wrapping_add(1);
+				if let Err(_) = $source.send_payment(Route {
+					hops: vec![RouteHop {
+						pubkey: $dest.0.get_our_node_id(),
+						short_channel_id: $dest.1,
+						fee_msat: 5000000,
+						cltv_expiry_delta: 200,
+					}],
+				}, PaymentHash(payment_hash.into_inner())) {
+					// Probably ran out of funds
+					test_return!();
+				}
+			} };
+			($source: expr, $middle: expr, $dest: expr) => { {
+				let payment_hash = Sha256::hash(&[payment_id; 1]);
+				payment_id = payment_id.wrapping_add(1);
+				if let Err(_) = $source.send_payment(Route {
+					hops: vec![RouteHop {
+						pubkey: $middle.0.get_our_node_id(),
+						short_channel_id: $middle.1,
+						fee_msat: 50000,
+						cltv_expiry_delta: 100,
+					},RouteHop {
+						pubkey: $dest.0.get_our_node_id(),
+						short_channel_id: $dest.1,
+						fee_msat: 5000000,
+						cltv_expiry_delta: 200,
+					}],
+				}, PaymentHash(payment_hash.into_inner())) {
+					// Probably ran out of funds
+					test_return!();
+				}
+			} }
+		}
+
+		macro_rules! process_msg_events {
+			($node: expr, $corrupt_forward: expr) => { {
+				for event in nodes[$node].get_and_clear_pending_msg_events() {
+					match event {
+						events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+							for (idx, dest) in nodes.iter().enumerate() {
+								if dest.get_our_node_id() == *node_id &&
+										(($node != 0 && idx != 0) || !chan_a_disconnected) &&
+										(($node != 2 && idx != 2) || !chan_b_disconnected) {
+									assert!(update_fee.is_none());
+									for update_add in update_add_htlcs {
+										if !$corrupt_forward {
+											test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add));
+										} else {
+											// Corrupt the update_add_htlc message so that its HMAC
+											// check will fail and we generate a
+											// update_fail_malformed_htlc instead of an
+											// update_fail_htlc as we do when we reject a payment.
+											let mut msg_ser = update_add.encode();
+											msg_ser[1000] ^= 0xff;
+											let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
+											test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg));
+										}
+									}
+									for update_fulfill in update_fulfill_htlcs {
+										test_err!(dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill));
+									}
+									for update_fail in update_fail_htlcs {
+										test_err!(dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail));
+									}
+									for update_fail_malformed in update_fail_malformed_htlcs {
+										test_err!(dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed));
+									}
+									test_err!(dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed));
+								}
+							}
+						},
+						events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+							for (idx, dest) in nodes.iter().enumerate() {
+								if dest.get_our_node_id() == *node_id &&
+										(($node != 0 && idx != 0) || !chan_a_disconnected) &&
+										(($node != 2 && idx != 2) || !chan_b_disconnected) {
+									test_err!(dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg));
+								}
+							}
+						},
+						events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
+							for (idx, dest) in nodes.iter().enumerate() {
+								if dest.get_our_node_id() == *node_id {
+									test_err!(dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg));
+									if $node == 0 || idx == 0 {
+										chan_a_reconnecting = false;
+										chan_a_disconnected = false;
+									} else {
+										chan_b_reconnecting = false;
+										chan_b_disconnected = false;
+									}
+								}
+							}
+						},
+						events::MessageSendEvent::SendFundingLocked { .. } => {
+							// Can be generated as a reestablish response
+						},
+						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
+							// Can be generated due to a payment forward being rejected due to a
+							// channel having previously failed a monitor update
+						},
+						_ => panic!("Unhandled message event"),
+					}
+				}
+			} }
+		}
+
+		macro_rules! process_events {
+			($node: expr, $fail: expr) => { {
+				for event in nodes[$node].get_and_clear_pending_events() {
+					match event {
+						events::Event::PaymentReceived { payment_hash, .. } => {
+							if $fail {
+								assert!(nodes[$node].fail_htlc_backwards(&payment_hash, 0));
+							} else {
+								assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
+							}
+						},
+						events::Event::PaymentSent { .. } => {},
+						events::Event::PaymentFailed { .. } => {},
+						events::Event::PendingHTLCsForwardable { .. } => {
+							nodes[$node].process_pending_htlc_forwards();
+						},
+						_ => panic!("Unhandled event"),
+					}
+				}
+			} }
+		}
+
+		match get_slice!(1)[0] {
+			0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+			0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+			0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+			0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
+			0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
+			0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
+			0x06 => nodes[0].test_restore_channel_monitor(),
+			0x07 => nodes[1].test_restore_channel_monitor(),
+			0x08 => nodes[2].test_restore_channel_monitor(),
+			0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
+			0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
+			0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
+			0x0c => send_payment!(nodes[2], (&nodes[1], chan_b)),
+			0x0d => send_payment!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
+			0x0e => send_payment!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
+			0x0f => {
+				if !chan_a_disconnected {
+					nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+					chan_a_disconnected = true;
+				}
+			},
+			0x10 => {
+				if !chan_b_disconnected {
+					nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
+					nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+					chan_b_disconnected = true;
+				}
+			},
+			0x11 => {
+				if chan_a_disconnected && !chan_a_reconnecting {
+					nodes[0].peer_connected(&nodes[1].get_our_node_id());
+					nodes[1].peer_connected(&nodes[0].get_our_node_id());
+					chan_a_reconnecting = true;
+				}
+			},
+			0x12 => {
+				if chan_b_disconnected && !chan_b_reconnecting {
+					nodes[1].peer_connected(&nodes[2].get_our_node_id());
+					nodes[2].peer_connected(&nodes[1].get_our_node_id());
+					chan_b_reconnecting = true;
+				}
+			},
+			0x13 => process_msg_events!(0, true),
+			0x14 => process_msg_events!(0, false),
+			0x15 => process_events!(0, true),
+			0x16 => process_events!(0, false),
+			0x17 => process_msg_events!(1, true),
+			0x18 => process_msg_events!(1, false),
+			0x19 => process_events!(1, true),
+			0x1a => process_events!(1, false),
+			0x1b => process_msg_events!(2, true),
+			0x1c => process_msg_events!(2, false),
+			0x1d => process_events!(2, true),
+			0x1e => process_events!(2, false),
+			_ => test_return!(),
+		}
+	}
+}
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+	fuzz!(|data| {
+		do_test(data);
+	});
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+	loop {
+		fuzz!(|data| {
+			do_test(data);
+		});
+	}
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+	do_test(data);
+});
+
+extern crate hex;
+#[cfg(test)]
+mod tests {
+	#[test]
+	fn duplicate_crash() {
+		super::do_test(&::hex::decode("00").unwrap());
+	}
+}
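A note on the shape of the target above: everything is driven by a byte-dispatch loop. get_slice! pulls bytes off the fuzz input, the match maps each byte to one harness action, and exhausted input falls through to test_return!'s sanity checks. A distilled, standalone sketch of that pattern (all names here are illustrative, none are code from the patch):

```rust
// Distilled sketch of the fuzz-input dispatch idiom used above.
pub fn do_test_sketch(data: &[u8]) {
	let mut read_pos = 0;
	// Like get_slice!: stop cleanly once the input is exhausted, so every
	// prefix of an interesting input is itself a valid test case.
	let mut next_byte = || -> Option<u8> {
		let b = *data.get(read_pos)?;
		read_pos += 1;
		Some(b)
	};
	let mut monitor_failing = false;
	let mut payments_sent = 0u32;
	loop {
		match next_byte() {
			Some(0x00) => monitor_failing = true,  // like staging Err(TemporaryFailure)
			Some(0x01) => monitor_failing = false, // like restoring Ok(())
			Some(0x02) => {
				// like send_payment!: only counts while updates succeed
				if !monitor_failing { payments_sent += 1; }
			},
			// Unknown bytes and end-of-input both end the run, where the real
			// target asserts no channel was force-closed (test_return!).
			_ => break,
		}
	}
	let _ = payments_sent;
}

fn main() {
	do_test_sketch(&[0x00, 0x02, 0x01, 0x02, 0xff]);
}
```

Keeping each action a single byte means the fuzzer's byte-level mutations translate directly into inserting, dropping, or reordering actions, which is presumably why travis-fuzz.sh below caps this target's inputs at 64 bytes via -F 64.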
diff --git a/fuzz/fuzz_targets/full_stack_target.rs b/fuzz/fuzz_targets/full_stack_target.rs
index 9930a44d255..a4697fd939d 100644
--- a/fuzz/fuzz_targets/full_stack_target.rs
+++ b/fuzz/fuzz_targets/full_stack_target.rs
@@ -554,7 +554,7 @@ pub fn do_test(data: &[u8], logger: &Arc<Logger>) {
 #[cfg(feature = "afl")]
 fn main() {
 	fuzz!(|data| {
-		let logger: Arc<Logger> = Arc::new(test_logger::TestLogger{});
+		let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 		do_test(data, &logger);
 	});
 }
@@ -565,7 +565,7 @@ fn main() {
 fn main() {
 	loop {
 		fuzz!(|data| {
-			let logger: Arc<Logger> = Arc::new(test_logger::TestLogger{});
+			let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 			do_test(data, &logger);
 		});
 	}
@@ -575,7 +575,7 @@ fn main() {
 #[macro_use] extern crate libfuzzer_sys;
 #[cfg(feature = "libfuzzer_fuzz")]
 fuzz_target!(|data: &[u8]| {
-	let logger: Arc<Logger> = Arc::new(test_logger::TestLogger{});
+	let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 	do_test(data, &logger);
 });
@@ -589,7 +589,7 @@ mod tests {
 
 	#[test]
 	fn duplicate_crash() {
-		let logger: Arc<Logger> = Arc::new(test_logger::TestLogger{});
+		let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 		super::do_test(&::hex::decode("00").unwrap(), &logger);
 	}
diff --git a/fuzz/fuzz_targets/router_target.rs b/fuzz/fuzz_targets/router_target.rs
index 8938deefe00..3a40d39855e 100644
--- a/fuzz/fuzz_targets/router_target.rs
+++ b/fuzz/fuzz_targets/router_target.rs
@@ -154,7 +154,7 @@ pub fn do_test(data: &[u8]) {
 		}
 	}
 
-	let logger: Arc<Logger> = Arc::new(test_logger::TestLogger{});
+	let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
 	let chain_monitor = Arc::new(DummyChainWatcher {
 		input: Arc::clone(&input),
 	});
diff --git a/fuzz/fuzz_targets/utils/test_logger.rs b/fuzz/fuzz_targets/utils/test_logger.rs
index f828d0638f1..097d001dbb2 100644
--- a/fuzz/fuzz_targets/utils/test_logger.rs
+++ b/fuzz/fuzz_targets/utils/test_logger.rs
@@ -1,11 +1,22 @@
 use lightning::util::logger::{Logger, Record};
+pub struct TestLogger {
+	#[cfg(test)]
+	id: String,
+}
 
-pub struct TestLogger {}
+impl TestLogger {
+	pub fn new(_id: String) -> TestLogger {
+		TestLogger {
+			#[cfg(test)]
+			id: _id
+		}
+	}
+}
 
 impl Logger for TestLogger {
 	fn log(&self, record: &Record) {
 		#[cfg(test)]
-		println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
+		println!("{:<5} {} [{} : {}, {}] {}", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args);
 		#[cfg(not(test))]
 		let _ = format!("{}", record.args);
 	}
diff --git a/fuzz/travis-fuzz.sh b/fuzz/travis-fuzz.sh
index 86b57fa0b04..e602e9518f7 100755
--- a/fuzz/travis-fuzz.sh
+++ b/fuzz/travis-fuzz.sh
@@ -11,7 +11,14 @@ cargo install --force honggfuzz
 for TARGET in fuzz_targets/*.rs fuzz_targets/msg_targets/*_target.rs; do
 	FILENAME=$(basename $TARGET)
 	FILE="${FILENAME%.*}"
-	HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz" HFUZZ_RUN_ARGS="-N1000000 --exit_upon_crash -v" cargo hfuzz run $FILE
+	HFUZZ_RUN_ARGS="--exit_upon_crash -v -n2"
+	if [ "$FILE" = "chanmon_fail_consistency" ]; then
+		HFUZZ_RUN_ARGS="$HFUZZ_RUN_ARGS -F 64 -N100000"
+	else
+		HFUZZ_RUN_ARGS="$HFUZZ_RUN_ARGS -N1000000"
+	fi
+	export HFUZZ_RUN_ARGS
+	HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz" cargo hfuzz run $FILE
 	if [ -f hfuzz_workspace/$FILE/HONGGFUZZ.REPORT.TXT ]; then
 		cat hfuzz_workspace/$FILE/HONGGFUZZ.REPORT.TXT
 		for CASE in hfuzz_workspace/$FILE/SIG*; do
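The failure-injection plumbing shared by the fuzz target above and the functional tests below is worth calling out: the test monitor wraps the real monitor bookkeeping and returns a caller-controlled Result from add_update_monitor, so a test can flip persistence between failing and succeeding at will. A condensed sketch of just that mechanism, where MonitorErr and FailableMonitor are stand-ins for the real ChannelMonitorUpdateErr and TestChannelMonitor and the wrapped inner monitor is elided:

```rust
use std::sync::Mutex;

// Stand-in for lightning::ln::channelmonitor::ChannelMonitorUpdateErr.
#[derive(Clone, Debug, PartialEq)]
enum MonitorErr { TemporaryFailure }

// Wraps a real monitor (elided here) and injects failures on demand.
struct FailableMonitor {
	update_ret: Mutex<Result<(), MonitorErr>>,
}

impl FailableMonitor {
	fn new() -> Self {
		FailableMonitor { update_ret: Mutex::new(Ok(())) }
	}
	// The real TestChannelMonitor first applies the update to its inner
	// SimpleManyChannelMonitor (asserting success), then returns whatever the
	// test has staged, so internal state stays consistent even while the
	// caller believes persistence failed.
	fn add_update_monitor(&self) -> Result<(), MonitorErr> {
		self.update_ret.lock().unwrap().clone()
	}
}

fn main() {
	let mon = FailableMonitor::new();
	*mon.update_ret.lock().unwrap() = Err(MonitorErr::TemporaryFailure);
	assert_eq!(mon.add_update_monitor(), Err(MonitorErr::TemporaryFailure));
	*mon.update_ret.lock().unwrap() = Ok(());
	assert!(mon.add_update_monitor().is_ok());
}
```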
diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs
index c915307385f..fc481f8aac7 100644
--- a/src/ln/chanmon_update_fail_tests.rs
+++ b/src/ln/chanmon_update_fail_tests.rs
@@ -1365,3 +1365,194 @@ fn first_message_on_recv_ordering() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }
+
+#[test]
+fn test_monitor_update_fail_claim() {
+	// Basic test for monitor update failures when processing claim_funds calls.
+	// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
+	// update to claim the payment. We then send a payment C->B->A, making the forward of this
+	// payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
+	// updating and claim the payment on B.
+	let mut nodes = create_network(3);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Rebalance a bit so that we can send backwards from 3 to 2.
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+
+	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[2], 1);
+
+	// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
+	// paused, so forward shouldn't succeed until we call test_restore_channel_monitor().
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+
+	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+
+	let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+	nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
+
+	let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match msg_events[0] {
+		MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+			assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+			assert_eq!(msg.contents.flags & 2, 2); // temp disabled
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	let events = nodes[2].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+		assert_eq!(payment_hash, payment_hash_2);
+		assert!(!rejected_by_dest);
+	} else { panic!("Unexpected event!"); }
+
+	// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+		assert_eq!(payment_preimage, payment_preimage_1);
+	} else { panic!("Unexpected event!"); }
+}
+
+#[test]
+fn test_monitor_update_on_pending_forwards() {
+	// Basic test for monitor update failures when processing pending HTLC fail/add forwards.
+	// We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
+	// The payment from A to C will be failed by C and pending a back-fail to A, while the payment
+	// from C to A will be pending a forward to A.
+	let mut nodes = create_network(3);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Rebalance a bit so that we can send backwards from 3 to 1.
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 1000000));
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	check_added_monitors!(nodes[2], 1);
+
+	let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[2], 1);
+
+	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+		assert_eq!(payment_hash, payment_hash_1);
+		assert!(rejected_by_dest);
+	} else { panic!("Unexpected event!"); }
+	match events[1] {
+		Event::PendingHTLCsForwardable { .. } => { },
+		_ => panic!("Unexpected event"),
+	};
+	nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
+	nodes[0].node.process_pending_htlc_forwards();
+	expect_payment_received!(nodes[0], payment_hash_2, 1000000);
+
+	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+}
+
+#[test]
+fn monitor_update_claim_fail_no_response() {
+	// Test for claim_funds resulting in both a monitor update failure and no message response (due
+	// to channel being AwaitingRAA).
+	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+	// code was broken.
+	let mut nodes = create_network(2);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	// Forward a payment for B to claim
+	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
+	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+	let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentSent { ref payment_preimage } => {
+			assert_eq!(*payment_preimage, payment_preimage_1);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
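One recurring Rust idiom in the channelmanager.rs hunks below is worth spelling out: failure paths deep inside the channel-state lock `break` out of a `loop { ... }` with a `(node_id, error)` tuple, and the error is only converted into peer messages after the lock scope ends (see the reworked claim_funds_internal). A toy sketch of that control-flow shape, under entirely illustrative names:

```rust
use std::sync::Mutex;

#[derive(Debug)]
struct UpdateFailed(&'static str);

// Toy stand-in for ChannelManager and its channel_state lock.
struct Manager { state: Mutex<u32> }

impl Manager {
	fn claim(&self, succeed: bool) {
		// `loop` here is not iteration: it lets nested failure paths `break`
		// out with an error value while the happy path `return`s early, so the
		// error handling below runs exactly once, outside the lock scope.
		let (node_id, err) = loop {
			let mut state = self.state.lock().unwrap();
			if succeed {
				*state += 1;
				return; // happy path: done, guard dropped on return
			}
			break (42u8, UpdateFailed("monitor update failed")); // funnel error out
		};
		// The MutexGuard was dropped when we left the loop body, so messages
		// can be queued here without holding the channel-state lock.
		println!("reporting {:?} to node {}", err, node_id);
	}
}

fn main() {
	let m = Manager { state: Mutex::new(0) };
	m.claim(true);
	m.claim(false);
}
```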
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 912e22abb63..ae5842463b9 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -448,9 +448,9 @@ macro_rules! try_chan_entry {
 	}
 }
 
-macro_rules! return_monitor_err {
+macro_rules! handle_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
 	};
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
 		match $err {
@@ -468,7 +468,8 @@ macro_rules! return_monitor_err {
 				// splitting hairs we'd prefer to claim payments that were to us, but we haven't
 				// given up the preimage yet, so might as well just wait until the payment is
 				// retried, avoiding the on-chain fees.
-				return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+				res
 			},
 			ChannelMonitorUpdateErr::TemporaryFailure => {
 				if !$resend_commitment {
@@ -478,26 +479,29 @@ macro_rules! return_monitor_err {
 					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
 				}
 				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-				return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
+				Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
 			},
 		}
 	}
 }
 
+macro_rules! return_monitor_err {
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+		return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
+	};
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+		return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+	}
+}
+
 // Does not break in case of TemporaryFailure!
 macro_rules! maybe_break_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		match $err {
-			ChannelMonitorUpdateErr::PermanentFailure => {
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
-				}
-				break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
-			},
-			ChannelMonitorUpdateErr::TemporaryFailure => {
-				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new());
+		match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
+			(e, ChannelMonitorUpdateErr::PermanentFailure) => {
+				break e;
 			},
+			(_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
 		}
 	}
 }
@@ -1159,6 +1163,7 @@ impl ChannelManager {
 
 		let mut new_events = Vec::new();
 		let mut failed_forwards = Vec::new();
+		let mut handle_errors = Vec::new();
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_state_lock.borrow_parts();
@@ -1194,101 +1199,104 @@ impl ChannelManager {
 						continue;
 					}
 				};
-				let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
-
-				let mut add_htlc_msgs = Vec::new();
-				let mut fail_htlc_msgs = Vec::new();
-				for forward_info in pending_forwards.drain(..) {
-					match forward_info {
-						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
-							log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
-							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-								short_channel_id: prev_short_channel_id,
-								htlc_id: prev_htlc_id,
-								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-							});
-							match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
-								Err(e) => {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
-									} else {
-										panic!("Stated return value requirements in send_htlc() were not met");
-									}
-									let chan_update = self.get_channel_update(forward_chan).unwrap();
-									failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
-									continue;
-								},
-								Ok(update_add) => {
-									match update_add {
-										Some(msg) => { add_htlc_msgs.push(msg); },
-										None => {
-											// Nothing to do here...we're waiting on a remote
-											// revoke_and_ack before we can add anymore HTLCs. The Channel
-											// will automatically handle building the update_add_htlc and
-											// commitment_signed messages when we can.
-											// TODO: Do some kind of timer to set the channel as !is_live()
-											// as we don't really want others relying on us relaying through
-											// this channel currently :/.
+				if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
+					let mut add_htlc_msgs = Vec::new();
+					let mut fail_htlc_msgs = Vec::new();
+					for forward_info in pending_forwards.drain(..) {
+						match forward_info {
+							HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+								log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+									short_channel_id: prev_short_channel_id,
+									htlc_id: prev_htlc_id,
+									incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+								});
+								match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+									Err(e) => {
+										if let ChannelError::Ignore(msg) = e {
+											log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+										} else {
+											panic!("Stated return value requirements in send_htlc() were not met");
+										}
+										let chan_update = self.get_channel_update(chan.get()).unwrap();
+										failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+										continue;
+									},
+									Ok(update_add) => {
+										match update_add {
+											Some(msg) => { add_htlc_msgs.push(msg); },
+											None => {
+												// Nothing to do here...we're waiting on a remote
+												// revoke_and_ack before we can add anymore HTLCs. The Channel
+												// will automatically handle building the update_add_htlc and
+												// commitment_signed messages when we can.
+												// TODO: Do some kind of timer to set the channel as !is_live()
+												// as we don't really want others relying on us relaying through
+												// this channel currently :/.
+											}
 										}
 									}
-							}
-						},
-						HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-							log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
-							match forward_chan.get_update_fail_htlc(htlc_id, err_packet) {
-								Err(e) => {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
-									} else {
-										panic!("Stated return value requirements in get_update_fail_htlc() were not met");
-									}
-									// fail-backs are best-effort, we probably already have one
-									// pending, and if not that's OK, if not, the channel is on
-									// the chain and sending the HTLC-Timeout is their problem.
+								},
+								HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+									log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+									match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+										Err(e) => {
+											if let ChannelError::Ignore(msg) = e {
+												log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+											} else {
+												panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+											}
+											// fail-backs are best-effort, we probably already have one
+											// pending, and if not that's OK, if not, the channel is on
+											// the chain and sending the HTLC-Timeout is their problem.
+											continue;
+										},
+										Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+										Ok(None) => {
+											// Nothing to do here...we're waiting on a remote
+											// revoke_and_ack before we can update the commitment
+											// transaction. The Channel will automatically handle
+											// building the update_fail_htlc and commitment_signed
+											// messages when we can.
+											// We don't need any kind of timer here as they should fail
+											// the channel onto the chain if they can't get our
+											// update_fail_htlc in time, it's not our problem.
-									continue;
-								},
-								Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
-								Ok(None) => {
-									// Nothing to do here...we're waiting on a remote
-									// revoke_and_ack before we can update the commitment
-									// transaction. The Channel will automatically handle
-									// building the update_fail_htlc and commitment_signed
-									// messages when we can.
-									// We don't need any kind of timer here as they should fail
-									// the channel onto the chain if they can't get our
-									// update_fail_htlc in time, it's not our problem.
 										}
-									}
-								},
+									},
+								}
 							}
 						}
-					}
-					if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-						let (commitment_msg, monitor) = match forward_chan.send_commitment() {
-							Ok(res) => res,
-							Err(e) => {
-								if let ChannelError::Ignore(_) = e {
-									panic!("Stated return value requirements in send_commitment() were not met");
-								}
-								//TODO: Handle...this is bad!
+						if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+							let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
+								Ok(res) => res,
+								Err(e) => {
+									if let ChannelError::Ignore(_) = e {
+										panic!("Stated return value requirements in send_commitment() were not met");
+									}
+									//TODO: Handle...this is bad!
+									continue;
+								},
+							};
+							if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+								handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
 								continue;
-							},
-						};
-						if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-							unimplemented!();
+							}
+							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+								node_id: chan.get().get_their_node_id(),
+								updates: msgs::CommitmentUpdate {
+									update_add_htlcs: add_htlc_msgs,
+									update_fulfill_htlcs: Vec::new(),
+									update_fail_htlcs: fail_htlc_msgs,
+									update_fail_malformed_htlcs: Vec::new(),
+									update_fee: None,
+									commitment_signed: commitment_msg,
+								},
+							});
 						}
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: forward_chan.get_their_node_id(),
-							updates: msgs::CommitmentUpdate {
-								update_add_htlcs: add_htlc_msgs,
-								update_fulfill_htlcs: Vec::new(),
-								update_fail_htlcs: fail_htlc_msgs,
-								update_fail_malformed_htlcs: Vec::new(),
-								update_fee: None,
-								commitment_signed: commitment_msg,
-							},
-						});
+				} else {
+					unreachable!();
 				}
 			} else {
 				for forward_info in pending_forwards.drain(..) {
@@ -1324,6 +1332,22 @@ impl ChannelManager {
 			};
 		}
 
+		for (their_node_id, err) in handle_errors.drain(..) {
+			match handle_error!(self, err) {
+				Ok(_) => {},
+				Err(e) => {
+					if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+					} else {
+						let mut channel_state = self.channel_state.lock().unwrap();
+						channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+							node_id: their_node_id,
+							action: e.action,
+						});
+					}
+				},
+			}
+		}
+
 		if new_events.is_empty() { return }
 		let mut events = self.pending_events.lock().unwrap();
 		events.append(&mut new_events);
@@ -1469,56 +1493,79 @@ impl ChannelManager {
 		} else { false }
 	}
 
 	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage) {
-		match source {
-			HTLCSource::OutboundRoute { .. } => {
-				mem::drop(channel_state_lock);
-				let mut pending_events = self.pending_events.lock().unwrap();
-				pending_events.push(events::Event::PaymentSent {
-					payment_preimage
-				});
-			},
-			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
-				//TODO: Delay the claimed_funds relaying just like we do outbound relay!
-				let channel_state = channel_state_lock.borrow_parts();
-
-				let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
-					Some(chan_id) => chan_id.clone(),
-					None => {
-						// TODO: There is probably a channel manager somewhere that needs to
-						// learn the preimage as the channel already hit the chain and that's
-						// why it's missing.
-						return
-					}
-				};
+		let (their_node_id, err) = loop {
+			match source {
+				HTLCSource::OutboundRoute { .. } => {
+					mem::drop(channel_state_lock);
+					let mut pending_events = self.pending_events.lock().unwrap();
+					pending_events.push(events::Event::PaymentSent {
+						payment_preimage
+					});
+				},
+				HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
+					//TODO: Delay the claimed_funds relaying just like we do outbound relay!
+					let channel_state = channel_state_lock.borrow_parts();
 
-				let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-				match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
-					Ok((msgs, monitor_option)) => {
-						if let Some(chan_monitor) = monitor_option {
-							if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-								unimplemented!();// but def don't push the event...
-							}
+					let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
+						Some(chan_id) => chan_id.clone(),
+						None => {
+							// TODO: There is probably a channel manager somewhere that needs to
+							// learn the preimage as the channel already hit the chain and that's
+							// why it's missing.
+							return
 						}
-						if let Some((msg, commitment_signed)) = msgs {
-							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-								node_id: chan.get_their_node_id(),
-								updates: msgs::CommitmentUpdate {
-									update_add_htlcs: Vec::new(),
-									update_fulfill_htlcs: vec![msg],
-									update_fail_htlcs: Vec::new(),
-									update_fail_malformed_htlcs: Vec::new(),
-									update_fee: None,
-									commitment_signed,
+					};
+
+					if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+						let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+						match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
+							Ok((msgs, monitor_option)) => {
+								if let Some(chan_monitor) = monitor_option {
+									if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+										if was_frozen_for_monitor {
+											assert!(msgs.is_none());
+										} else {
+											break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
+										}
+									}
 								}
-							});
+								if let Some((msg, commitment_signed)) = msgs {
+									channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+										node_id: chan.get().get_their_node_id(),
+										updates: msgs::CommitmentUpdate {
+											update_add_htlcs: Vec::new(),
+											update_fulfill_htlcs: vec![msg],
+											update_fail_htlcs: Vec::new(),
+											update_fail_malformed_htlcs: Vec::new(),
+											update_fee: None,
+											commitment_signed,
+										}
+									});
+								}
+							},
+							Err(_e) => {
+								// TODO: There is probably a channel manager somewhere that needs to
+								// learn the preimage as the channel may be about to hit the chain.
+								//TODO: Do something with e?
+								return
+							},
 						}
-					},
-					Err(_e) => {
-						// TODO: There is probably a channel manager somewhere that needs to
-						// learn the preimage as the channel may be about to hit the chain.
-						//TODO: Do something with e?
-						return
-					},
+					} else { unreachable!(); }
+				},
+			}
+			return;
+		};
+
+		match handle_error!(self, err) {
+			Ok(_) => {},
+			Err(e) => {
+				if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+				} else {
+					let mut channel_state = self.channel_state.lock().unwrap();
+					channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+						node_id: their_node_id,
+						action: e.action,
+					});
+				}
+			},
 		}
 	}
@@ -2569,6 +2616,25 @@ impl ChannelMessageHandler for ChannelManager {
 					true
 				})
 			}
+			pending_msg_events.retain(|msg| {
+				match msg {
+					&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+					&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+					&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
+					&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
+				}
+			});
 		}
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
diff --git a/src/ln/msgs.rs b/src/ln/msgs.rs
index f6e89524a6d..f00dc34a500 100644
--- a/src/ln/msgs.rs
+++ b/src/ln/msgs.rs
@@ -555,12 +555,18 @@ pub struct HandleError { //TODO: rename me
 /// transaction updates if they were pending.
 #[derive(PartialEq, Clone)]
 pub struct CommitmentUpdate {
-	pub(crate) update_add_htlcs: Vec<UpdateAddHTLC>,
-	pub(crate) update_fulfill_htlcs: Vec<UpdateFulfillHTLC>,
-	pub(crate) update_fail_htlcs: Vec<UpdateFailHTLC>,
-	pub(crate) update_fail_malformed_htlcs: Vec<UpdateFailMalformedHTLC>,
-	pub(crate) update_fee: Option<UpdateFee>,
-	pub(crate) commitment_signed: CommitmentSigned,
+	/// update_add_htlc messages which should be sent
+	pub update_add_htlcs: Vec<UpdateAddHTLC>,
+	/// update_fulfill_htlc messages which should be sent
+	pub update_fulfill_htlcs: Vec<UpdateFulfillHTLC>,
+	/// update_fail_htlc messages which should be sent
+	pub update_fail_htlcs: Vec<UpdateFailHTLC>,
+	/// update_fail_malformed_htlc messages which should be sent
+	pub update_fail_malformed_htlcs: Vec<UpdateFailMalformedHTLC>,
+	/// An update_fee message which should be sent
+	pub update_fee: Option<UpdateFee>,
+	/// Finally, the commitment_signed message which should be sent
+	pub commitment_signed: CommitmentSigned,
 }
 
 /// The information we received from a peer along the route of a payment we originated. This is
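With CommitmentUpdate's fields now public — the fuzz target destructures them directly in process_msg_events! — an external harness can deliver a pending update without helper accessors. A rough sketch of the shape of such a consumer, written against a simplified stand-in rather than the real lightning::ln::msgs types:

```rust
// Simplified mirror of msgs::CommitmentUpdate for illustration only; the real
// struct carries actual UpdateAddHTLC, UpdateFulfillHTLC, etc. messages.
struct UpdateAddHTLC;
struct UpdateFulfillHTLC;
struct UpdateFailHTLC;
struct CommitmentSigned;

struct CommitmentUpdate {
	update_add_htlcs: Vec<UpdateAddHTLC>,
	update_fulfill_htlcs: Vec<UpdateFulfillHTLC>,
	update_fail_htlcs: Vec<UpdateFailHTLC>,
	commitment_signed: CommitmentSigned,
}

// With public fields, a harness can deliver each message kind in the
// protocol-required order: all update_* messages first, commitment_signed last.
fn deliver(update: &CommitmentUpdate) {
	for _add in &update.update_add_htlcs { /* handle_update_add_htlc(...) */ }
	for _fulfill in &update.update_fulfill_htlcs { /* handle_update_fulfill_htlc(...) */ }
	for _fail in &update.update_fail_htlcs { /* handle_update_fail_htlc(...) */ }
	let _cs = &update.commitment_signed; // handle_commitment_signed(...)
}

fn main() {
	let upd = CommitmentUpdate {
		update_add_htlcs: vec![UpdateAddHTLC],
		update_fulfill_htlcs: vec![],
		update_fail_htlcs: vec![],
		commitment_signed: CommitmentSigned,
	};
	deliver(&upd);
}
```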