diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index b2ec5e18ea4..b0b6910ae2f 100644
--- a/fuzz/Cargo.toml
+++ b/fuzz/Cargo.toml
@@ -18,11 +18,13 @@ libfuzzer_fuzz = ["libfuzzer-sys"]
 stdin_fuzz = []
 
 [dependencies]
-afl = { version = "0.4", optional = true }
-lightning = { path = "../lightning", features = ["regex"] }
+lightning = { path = "../lightning", features = ["regex", "hashbrown"] }
 lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
 bitcoin = { version = "0.29.0", features = ["secp-lowmemory"] }
 hex = "0.3"
+hashbrown = "0.8"
+
+afl = { version = "0.12", optional = true }
 honggfuzz = { version = "0.5", optional = true, default-features = false }
 libfuzzer-sys = { version = "0.4", optional = true }
 
@@ -36,6 +38,8 @@ members = ["."]
 [profile.release]
 lto = true
 codegen-units = 1
+debug-assertions = true
+overflow-checks = true
 
 # When testing a large fuzz corpus, -O1 offers a nice speedup
 [profile.dev]
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index fda0046828a..c9147cd83ec 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -61,7 +61,7 @@ use bitcoin::secp256k1::Secp256k1;
 
 use std::mem;
 use std::cmp::{self, Ordering};
-use std::collections::{HashSet, hash_map, HashMap};
+use hashbrown::{HashSet, hash_map, HashMap};
 use std::sync::{Arc,Mutex};
 use std::sync::atomic;
 use std::io::Cursor;
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index ae9e6295c07..6d952b42d3c 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -58,7 +58,7 @@ use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
 use std::cell::RefCell;
-use std::collections::{HashMap, hash_map};
+use hashbrown::{HashMap, hash_map};
 use std::convert::TryInto;
 use std::cmp;
 use std::sync::{Arc, Mutex};
@@ -632,7 +632,9 @@ pub fn do_test(data: &[u8], logger: &Arc) {
 					// It's possible the channel has been closed in the mean time, but any other
 					// failure may be a bug.
 					if let APIError::ChannelUnavailable { err } = e {
-						assert_eq!(err, "No such channel");
+						if !err.starts_with("Can't find a peer matching the passed counterparty node_id ") {
+							assert_eq!(err, "No such channel");
+						}
 					} else { panic!(); }
 				}
 				pending_funding_signatures.insert(funding_output, tx);
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index cb397bdec80..3e51d0c6a6d 100644
--- a/fuzz/src/router.rs
+++ b/fuzz/src/router.rs
@@ -29,7 +29,7 @@ use bitcoin::blockdata::constants::genesis_block;
 
 use crate::utils::test_logger;
 
 use std::convert::TryInto;
-use std::collections::HashSet;
+use hashbrown::HashSet;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index d6884428ed9..f526cc8aaa4 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -476,8 +476,8 @@ impl OnchainTxHandler {
 		// remove it once it reaches the confirmation threshold, or to generate a new claim if the
 		// transaction is reorged out.
 		let mut all_inputs_have_confirmed_spend = true;
-		for outpoint in &request_outpoints {
-			if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
+		for outpoint in request_outpoints.iter() {
+			if let Some(first_claim_txid_height) = self.claimable_outpoints.get(*outpoint) {
 				// We check for outpoint spends within claims individually rather than as a set
 				// since requests can have outpoints split off.
 				if !self.onchain_events_awaiting_threshold_conf.iter()
@@ -811,7 +811,7 @@ impl OnchainTxHandler {
 							for outpoint in request.outpoints() {
 								log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.",
 									outpoint, log_bytes!(package_id));
-								self.claimable_outpoints.remove(&outpoint);
+								self.claimable_outpoints.remove(outpoint);
 								#[cfg(anchors)]
 								self.pending_claim_events.remove(&package_id);
 							}
@@ -820,7 +820,7 @@ impl OnchainTxHandler {
 					OnchainEvent::ContentiousOutpoint { package } => {
 						log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:");
 						log_debug!(logger, " {:?}", package.outpoints());
-						self.claimable_outpoints.remove(&package.outpoints()[0]);
+						self.claimable_outpoints.remove(package.outpoints()[0]);
 					}
 				}
 			} else {
@@ -898,7 +898,7 @@ impl OnchainTxHandler {
 				//- resurect outpoint back in its claimable set and regenerate tx
 				match entry.event {
 					OnchainEvent::ContentiousOutpoint { package } => {
-						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&package.outpoints()[0]) {
+						if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(package.outpoints()[0]) {
 							if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
 								request.merge_package(package);
 								// Using a HashMap guarantee us than if we have multiple outpoints getting
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 1364e96a9fd..43d1806f90b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -2102,7 +2102,7 @@ where
 				// short_channel_id is non-0 in any ::Forward.
 				if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
 					if let Some((err, mut code, chan_update)) = loop {
-						let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
+						let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
 						let forwarding_chan_info_opt = match id_option {
 							None => { // unknown_next_peer
 								// Note that this is likely a timing oracle for detecting whether an scid is a
@@ -2552,7 +2552,7 @@ where
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 		if let None = peer_state_mutex_opt {
-			return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })
+			return Err(APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })
 		}
 
 		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
@@ -7116,7 +7116,7 @@ where
 			}
 		}
 
-		for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
+		for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
 			if !funding_txo_set.contains(funding_txo) {
 				log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
 				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
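
Note on the `APIError` change in `channelmanager.rs`: a request targeting an unknown `counterparty_node_id` now surfaces as `APIError::ChannelUnavailable` rather than `APIError::APIMisuseError`, presumably so callers can treat an unknown peer the same way as an already-closed channel instead of as API misuse; this is why the `full_stack` fuzzer now accepts either "No such channel" or the unknown-peer message under that variant. Below is a minimal, self-contained sketch of that acceptance logic: the `APIError` enum here is a local stand-in for `lightning::util::errors::APIError`, and `is_expected_unavailable` is a hypothetical helper, not part of this diff.

```rust
/// Local stand-in for lightning::util::errors::APIError so the sketch compiles on its own.
#[allow(dead_code)]
#[derive(Debug)]
enum APIError {
    APIMisuseError { err: String },
    ChannelUnavailable { err: String },
}

/// Hypothetical helper mirroring the fuzzer's updated check: after this change the
/// unknown-peer case arrives as ChannelUnavailable, alongside the existing
/// "No such channel" message for a channel that was closed in the meantime.
fn is_expected_unavailable(e: &APIError) -> bool {
    match e {
        APIError::ChannelUnavailable { err } => {
            err == "No such channel"
                || err.starts_with("Can't find a peer matching the passed counterparty node_id ")
        }
        // Previously the unknown-peer case surfaced as APIMisuseError; it no longer should.
        APIError::APIMisuseError { .. } => false,
    }
}

fn main() {
    let unknown_peer = APIError::ChannelUnavailable {
        err: "Can't find a peer matching the passed counterparty node_id 02deadbeef".to_string(),
    };
    assert!(is_expected_unavailable(&unknown_peer));
}
```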