0.1.1 Backports #3567

Merged · 16 commits · Jan 28, 2025

31 changes: 10 additions & 21 deletions fuzz/src/chanmon_consistency.rs
@@ -48,7 +48,7 @@ use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::channel_state::ChannelDetails;
use lightning::ln::channelmanager::{
ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails,
RecipientOnionFields, Retry,
RecipientOnionFields,
};
use lightning::ln::functional_test_utils::*;
use lightning::ln::inbound_payment::ExpandedKey;
@@ -82,7 +82,6 @@ use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey}

use lightning::io::Cursor;
use std::cmp::{self, Ordering};
use std::collections::VecDeque;
use std::mem;
use std::sync::atomic;
use std::sync::{Arc, Mutex};
@@ -113,22 +112,14 @@ impl FeeEstimator for FuzzEstimator {
}
}

struct FuzzRouter {
pub next_routes: Mutex<VecDeque<Route>>,
}
struct FuzzRouter {}

impl Router for FuzzRouter {
fn find_route(
&self, _payer: &PublicKey, _params: &RouteParameters,
_first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs,
) -> Result<Route, msgs::LightningError> {
if let Some(route) = self.next_routes.lock().unwrap().pop_front() {
return Ok(route);
}
Err(msgs::LightningError {
err: String::from("Not implemented"),
action: msgs::ErrorAction::IgnoreError,
})
unreachable!()
}

fn create_blinded_payment_paths<T: secp256k1::Signing + secp256k1::Verification>(
@@ -518,7 +509,7 @@ fn send_payment(
PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV),
amt,
);
source.router.next_routes.lock().unwrap().push_back(Route {
let route = Route {
paths: vec![Path {
hops: vec![RouteHop {
pubkey: dest.get_our_node_id(),
blinded_tail: None,
}],
route_params: Some(route_params.clone()),
});
};
let onion = RecipientOnionFields::secret_only(payment_secret);
let payment_id = PaymentId(payment_id);
let res =
source.send_payment(payment_hash, onion, payment_id, route_params, Retry::Attempts(0));
let res = source.send_payment_with_route(route, payment_hash, onion, payment_id);
match res {
Err(err) => {
panic!("Errored with {:?} on initial payment send", err);
@@ -592,7 +582,7 @@ fn send_hop_payment(
PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV),
amt,
);
source.router.next_routes.lock().unwrap().push_back(Route {
let route = Route {
paths: vec![Path {
hops: vec![
RouteHop {
blinded_tail: None,
}],
route_params: Some(route_params.clone()),
});
};
let onion = RecipientOnionFields::secret_only(payment_secret);
let payment_id = PaymentId(payment_id);
let res =
source.send_payment(payment_hash, onion, payment_id, route_params, Retry::Attempts(0));
let res = source.send_payment_with_route(route, payment_hash, onion, payment_id);
match res {
Err(err) => {
panic!("Errored with {:?} on initial payment send", err);
pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
let out = SearchingOutput::new(underlying_out);
let broadcast = Arc::new(TestBroadcaster {});
let router = FuzzRouter { next_routes: Mutex::new(VecDeque::new()) };
let router = FuzzRouter {};

macro_rules! make_node {
($node_id: expr, $fee_estimator: expr) => {{
6 changes: 3 additions & 3 deletions lightning-background-processor/src/lib.rs
@@ -1099,7 +1099,7 @@ mod tests {
SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
};
use lightning::util::ser::Writeable;
use lightning::util::sweep::{OutputSpendStatus, OutputSweeper};
use lightning::util::sweep::{OutputSpendStatus, OutputSweeper, PRUNE_DELAY_BLOCKS};
use lightning::util::test_utils;
use lightning::{get_event, get_event_msg};
use lightning_persister::fs_store::FilesystemStore;
@@ -2282,8 +2282,8 @@ mod tests {
}

// Check we stop tracking the spendable outputs when one of the txs reaches
// ANTI_REORG_DELAY confirmations.
confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY);
// PRUNE_DELAY_BLOCKS confirmations.
confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, PRUNE_DELAY_BLOCKS);
assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0);

if !std::thread::panicking() {
23 changes: 20 additions & 3 deletions lightning-invoice/src/lib.rs
@@ -48,6 +48,7 @@ use core::iter::FilterMap;
use core::num::ParseIntError;
use core::ops::Deref;
use core::slice::Iter;
use core::str::FromStr;
use core::time::Duration;

#[cfg(feature = "serde")]
@@ -78,8 +79,12 @@ use crate::prelude::*;
/// Re-export serialization traits
#[cfg(fuzzing)]
pub use crate::de::FromBase32;
#[cfg(not(fuzzing))]
use crate::de::FromBase32;
#[cfg(fuzzing)]
pub use crate::ser::Base32Iterable;
#[cfg(not(fuzzing))]
use crate::ser::Base32Iterable;

/// Errors that indicate what is wrong with the invoice. They have some granularity for debug
/// reasons, but should generally result in an "invalid BOLT11 invoice" message for the user.
@@ -1086,9 +1091,6 @@ impl RawBolt11Invoice {

/// Calculate the hash of the encoded `RawBolt11Invoice` which should be signed.
pub fn signable_hash(&self) -> [u8; 32] {
#[cfg(not(fuzzing))]
use crate::ser::Base32Iterable;

Self::hash_from_parts(self.hrp.to_string().as_bytes(), self.data.fe_iter())
}

@@ -1189,6 +1191,21 @@ impl RawBolt11Invoice {
pub fn currency(&self) -> Currency {
self.hrp.currency.clone()
}

/// Convert to HRP prefix and Fe32 encoded data part.
/// Can be used to transmit unsigned invoices for remote signing.
pub fn to_raw(&self) -> (String, Vec<Fe32>) {
(self.hrp.to_string(), self.data.fe_iter().collect())
}

/// Convert from HRP prefix and Fe32 encoded data part.
/// Can be used to receive unsigned invoices for remote signing.
pub fn from_raw(hrp: &str, data: &[Fe32]) -> Result<Self, Bolt11ParseError> {
let raw_hrp: RawHrp = RawHrp::from_str(hrp)?;
let data_part = RawDataPart::from_base32(data)?;

Ok(Self { hrp: raw_hrp, data: data_part })
}
}

impl PositiveTimestamp {
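
The new `to_raw` / `from_raw` pair above targets remote-signing setups: the unsigned invoice is split into its HRP string and Fe32 data part, shipped to the signer, and reassembled there. A minimal round-trip sketch (my own illustration, not code from this PR; the transport between the two sides is assumed):

```rust
use lightning_invoice::{Bolt11ParseError, RawBolt11Invoice};

/// Illustrative only: split an unsigned invoice into (hrp, data), rebuild it the way a
/// remote signer would, and confirm both sides agree on the hash that has to be signed.
fn round_trip_for_remote_signing(
	invoice: &RawBolt11Invoice,
) -> Result<[u8; 32], Bolt11ParseError> {
	// Local side: the HRP prefix plus the Fe32-encoded data part.
	let (hrp, data) = invoice.to_raw();

	// Remote side: rebuild the invoice from the transmitted parts.
	let rebuilt = RawBolt11Invoice::from_raw(&hrp, &data)?;

	// Both sides should now derive the same message to sign.
	debug_assert_eq!(rebuilt.signable_hash(), invoice.signable_hash());
	Ok(rebuilt.signable_hash())
}
```
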
2 changes: 1 addition & 1 deletion lightning/src/chain/chaininterface.rs
@@ -176,7 +176,7 @@ pub trait FeeEstimator {
}

/// Minimum relay fee as required by bitcoin network mempool policy.
pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 4000;
pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253;
/// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding.
/// See the following Core Lightning commit for an explanation:
/// <https://github.com/ElementsProject/lightning/commit/2e687b9b352c9092b5e8bd4a688916ac50b44af0>
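
For context on the constant change above: bitcoind's default minimum relay feerate is 1000 sat per 1000 vbytes (1 sat/vB), and one vbyte is four weight units, so the equivalent rate is 250 sat per 1000 weight; 253 is the slightly padded floor that stays at or above 1 sat/vB even after bitcoind rounds weight up to whole vbytes, per the Core Lightning commit referenced above. A quick self-contained check of that claim (my sketch, not part of the PR):

```rust
/// Sketch only: verify that 253 sat per 1000 weight units never pays less than 1 sat/vB
/// once weight is rounded up to virtual bytes the way bitcoind does it.
fn main() {
	let min_relay_fee_sat_per_kw: u64 = 253;
	// Weights below ~250 WU are smaller than any standard transaction anyway.
	for weight in 253..=400_000u64 {
		let fee = min_relay_fee_sat_per_kw * weight / 1000; // fee paid at 253 sat/kW
		let vsize = (weight + 3) / 4; // bitcoind's ceil(weight / 4)
		assert!(fee >= vsize, "253 sat/kW falls below 1 sat/vB at weight {}", weight);
	}
	println!("253 sat/kW stays at or above 1 sat/vB for all checked weights");
}
```
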
103 changes: 90 additions & 13 deletions lightning/src/chain/channelmonitor.rs
@@ -256,6 +256,10 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
// solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not
// keep bumping another claim tx to solve the outpoint.
pub const ANTI_REORG_DELAY: u32 = 6;
/// Number of blocks we wait before assuming a [`ChannelMonitor`] to be fully resolved and
/// considering it safe to archive.
// 4032 blocks are roughly four weeks
pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
/// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
/// refuse to accept a new HTLC.
///
@@ -1023,6 +1027,12 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {

/// The first block height at which we had no remaining claimable balances.
balances_empty_height: Option<u32>,

/// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to
/// a downstream channel force-close remaining unconfirmed by the time the upstream timeout
/// expires. This is used to tell us we already generated an event to fail this HTLC back
/// during a previous block scan.
failed_back_htlc_ids: HashSet<SentHTLCId>,
}

/// Transaction outputs to watch for on-chain spends.
@@ -1445,6 +1455,8 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
counterparty_node_id: Some(counterparty_node_id),
initial_counterparty_commitment_info: None,
balances_empty_height: None,

failed_back_htlc_ids: new_hash_set(),
})
}

@@ -2015,10 +2027,11 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
///
/// This function returns a tuple of two booleans, the first indicating whether the monitor is
/// fully resolved, and the second whether the monitor needs persistence to ensure it is
/// reliably marked as resolved within 4032 blocks.
/// reliably marked as resolved within [`ARCHIVAL_DELAY_BLOCKS`] blocks.
///
/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at least
/// 4032 blocks as an additional protection against any bugs resulting in spuriously empty balance sets.
/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at
/// least [`ARCHIVAL_DELAY_BLOCKS`] blocks as an additional protection against any bugs
/// resulting in spuriously empty balance sets.
pub fn check_and_update_full_resolution_status<L: Logger>(&self, logger: &L) -> (bool, bool) {
let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
let current_height = self.current_best_block().height;
// once processed, implies the preimage exists in the corresponding inbound channel.
let preimages_not_needed_elsewhere = inner.pending_monitor_events.is_empty();

const BLOCKS_THRESHOLD: u32 = 4032; // ~four weeks
match (inner.balances_empty_height, is_all_funds_claimed, preimages_not_needed_elsewhere) {
(Some(balances_empty_height), true, true) => {
// Claimed all funds, check if reached the blocks threshold.
(current_height >= balances_empty_height + BLOCKS_THRESHOLD, false)
(current_height >= balances_empty_height + ARCHIVAL_DELAY_BLOCKS, false)
},
(Some(_), false, _)|(Some(_), _, false) => {
// previously assumed we claimed all funds, but we have new funds to claim or
// None. It is set to the current block height.
log_debug!(logger,
"ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks",
inner.get_funding_txo().0, BLOCKS_THRESHOLD);
inner.get_funding_txo().0, ARCHIVAL_DELAY_BLOCKS);
inner.balances_empty_height = Some(current_height);
(false, true)
},
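
The newly exported `ARCHIVAL_DELAY_BLOCKS` constant and the two booleans returned above are meant to drive a pruning loop. A rough illustration of how a caller might consume them (assumed usage for this sketch, not code from the PR; in practice the library's `ChainMonitor` drives this itself):

```rust
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::sign::ecdsa::EcdsaChannelSigner;
use lightning::util::logger::Logger;

/// Illustrative helper: returns true once a monitor may be moved to archival storage.
fn should_archive<Signer: EcdsaChannelSigner, L: Logger>(
	monitor: &ChannelMonitor<Signer>, logger: &L,
) -> bool {
	let (is_fully_resolved, needs_persistence) =
		monitor.check_and_update_full_resolution_status(logger);
	if needs_persistence {
		// `balances_empty_height` was just recorded; a real caller would persist the
		// monitor here so the ARCHIVAL_DELAY_BLOCKS (~four weeks) countdown survives a
		// restart.
	}
	// True only once the balances have stayed empty for ARCHIVAL_DELAY_BLOCKS blocks.
	is_fully_resolved
}
```
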
@@ -3274,7 +3286,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}

if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update {
if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update {
log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
Err(())
} else { ret }
@@ -4221,6 +4233,71 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}

if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed {
// Fail back HTLCs on backwards channels if they expire within
// `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. we're at a
// point where no further off-chain updates will be accepted). If we haven't seen the
// preimage for an HTLC by the time the previous hop's timeout expires, we've lost that
// HTLC, so we might as well fail it back instead of having our counterparty force-close
// the inbound channel.
let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter()
.map(|&(ref a, _, ref b)| (a, b.as_ref()));

let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid {
if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) {
Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
} else { None }
} else { None }.into_iter().flatten();

let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid {
if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) {
Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
} else { None }
} else { None }.into_iter().flatten();

let htlcs = current_holder_htlcs
.chain(current_counterparty_htlcs)
.chain(prev_counterparty_htlcs);

let height = self.best_block.height;
for (htlc, source_opt) in htlcs {
// Only check forwarded HTLCs' previous hops
let source = match source_opt {
Some(source) => source,
None => continue,
};
let inbound_htlc_expiry = match source.inbound_htlc_expiry() {
Some(cltv_expiry) => cltv_expiry,
None => continue,
};
let max_expiry_height = height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS);
if inbound_htlc_expiry > max_expiry_height {
continue;
}
let duplicate_event = self.pending_monitor_events.iter().any(
|update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
upd.source == *source
} else { false });
if duplicate_event {
continue;
}
if !self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)) {
continue;
}
if !duplicate_event {
log_error!(logger, "Failing back HTLC {} upstream to preserve the \
channel as the forward HTLC hasn't resolved and our backward HTLC \
expires soon at {}", log_bytes!(htlc.payment_hash.0), inbound_htlc_expiry);
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
source: source.clone(),
payment_preimage: None,
payment_hash: htlc.payment_hash,
htlc_value_satoshis: Some(htlc.amount_msat / 1000),
}));
}
}
}

let conf_target = self.closure_conf_target();
self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger);
self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, fee_estimator, logger);
@@ -5066,6 +5143,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
counterparty_node_id,
initial_counterparty_commitment_info,
balances_empty_height,
failed_back_htlc_ids: new_hash_set(),
})))
}
}
use crate::chain::chaininterface::LowerBoundedFeeEstimator;

use super::ChannelMonitorUpdateStep;
use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err};
use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash};
use crate::chain::{BestBlock, Confirm};
use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor};
use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
use crate::types::payment::{PaymentPreimage, PaymentHash};
use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
use crate::ln::functional_test_utils::*;
use crate::ln::script::ShutdownScript;
use crate::util::errors::APIError;
use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::logger::Logger;
@@ -5166,9 +5243,9 @@ mod tests {
// If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
// the update through to the ChannelMonitor which will refuse it (as the channel is closed).
let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
), false, APIError::MonitorUpdateInProgress, {});
nodes[1].node.send_payment_with_route(route, payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
).unwrap();
check_added_monitors!(nodes[1], 1);

// Build a new ChannelMonitorUpdate which contains both the failing commitment tx update
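
The new fail-back path added above boils down to a simple trigger: once the channel can no longer accept off-chain updates, any forwarded HTLC whose upstream expiry is within LATENCY_GRACE_PERIOD_BLOCKS of the current height is failed backwards, deduplicated via `failed_back_htlc_ids` and the pending monitor events. A toy restatement of that predicate (illustrative only; the constant value is copied from the diff):

```rust
/// Illustrative only: mirrors the expiry check used when deciding to fail an HTLC back
/// upstream after the downstream channel has been closed.
fn should_fail_back_upstream(best_block_height: u32, inbound_htlc_expiry: u32) -> bool {
	const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // same value as in channelmonitor.rs
	inbound_htlc_expiry <= best_block_height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS)
}

fn main() {
	// At height 1_000: an upstream HTLC expiring at 1_002 is failed back now, while one
	// expiring at 1_010 is left for a later block.
	assert!(should_fail_back_upstream(1_000, 1_002));
	assert!(!should_fail_back_upstream(1_000, 1_010));
}
```
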
1 change: 1 addition & 0 deletions lightning/src/chain/onchaintx.rs
@@ -215,6 +215,7 @@ pub(crate) enum OnchainClaim {
}

/// Represents the different feerate strategies a pending request can use when generating a claim.
#[derive(Debug)]
pub(crate) enum FeerateStrategy {
/// We must reuse the most recently used feerate, if any.
RetryPrevious,