Skip to content

Commit db123f7

Browse files
committed
Implement pending claim rebroadcast on force-closed channels
This attempts to rebroadcast/fee-bump each pending claim a monitor is tracking for a force-closed channel. This is crucial in preventing certain classes of pinning attacks and ensures reliability if broadcasting fails. For implementations of `FeeEstimator` that also support mempool fee estimation, we may broadcast a fee-bumped claim instead, ensuring we can also react to mempool fee spikes between blocks.
1 parent e496d62 commit db123f7

File tree

6 files changed

+263
-0
lines changed

6 files changed

+263
-0
lines changed

lightning/src/chain/chainmonitor.rs

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -217,8 +217,15 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> Deref for LockedChannelMonitor<
217217
/// or used independently to monitor channels remotely. See the [module-level documentation] for
218218
/// details.
219219
///
220+
/// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
221+
/// a force-closed channel. This is crucial in preventing certain classes of pinning attacks,
222+
/// detecting substantial mempool feerate changes between blocks, and ensuring reliability if
223+
/// broadcasting fails. We recommend invoking this every 30 seconds, or more frequently if
224+
/// running in an environment with spotty connections, like on mobile.
225+
///
220226
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
221227
/// [module-level documentation]: crate::chain::chainmonitor
228+
/// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
222229
pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
223230
where C::Target: chain::Filter,
224231
T::Target: BroadcasterInterface,
@@ -533,6 +540,20 @@ where C::Target: chain::Filter,
533540
pub fn get_update_future(&self) -> Future {
534541
self.event_notifier.get_future()
535542
}
543+
544+
/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
545+
/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
546+
/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
547+
/// invoking this every 30 seconds, or more frequently if running in an environment with
548+
/// spotty connections, like on mobile.
549+
pub fn rebroadcast_pending_claims(&self) {
550+
let monitors = self.monitors.read().unwrap();
551+
for (_, monitor_holder) in &*monitors {
552+
monitor_holder.monitor.rebroadcast_pending_claims(
553+
&*self.broadcaster, &*self.fee_estimator, &*self.logger
554+
)
555+
}
556+
}
536557
}
537558

538559
impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>

lightning/src/chain/channelmonitor.rs

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1467,6 +1467,27 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
14671467
pub fn current_best_block(&self) -> BestBlock {
14681468
self.inner.lock().unwrap().best_block.clone()
14691469
}
1470+
1471+
/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
1472+
/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
1473+
/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
1474+
/// invoking this every 30 seconds, or lower if running in an environment with spotty
1475+
/// connections, like on mobile.
1476+
pub fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
1477+
&self, broadcaster: B, fee_estimator: F, logger: L,
1478+
)
1479+
where
1480+
B::Target: BroadcasterInterface,
1481+
F::Target: FeeEstimator,
1482+
L::Target: Logger,
1483+
{
1484+
let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
1485+
let mut inner = self.inner.lock().unwrap();
1486+
let current_height = inner.best_block.height;
1487+
inner.onchain_tx_handler.rebroadcast_pending_claims(
1488+
current_height, &broadcaster, &fee_estimator, &logger,
1489+
);
1490+
}
14701491
}
14711492

14721493
impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {

lightning/src/chain/onchaintx.rs

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -481,6 +481,59 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
481481
events.into_iter().map(|(_, event)| event).collect()
482482
}
483483

484+
/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
485+
/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
486+
/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
487+
/// invoking this every 30 seconds, or lower if running in an environment with spotty
488+
/// connections, like on mobile.
489+
pub(crate) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
490+
&mut self, current_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>,
491+
logger: &L,
492+
)
493+
where
494+
B::Target: BroadcasterInterface,
495+
F::Target: FeeEstimator,
496+
L::Target: Logger,
497+
{
498+
let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len());
499+
for (package_id, request) in self.pending_claim_requests.iter() {
500+
let inputs = request.outpoints();
501+
log_info!(logger, "Triggering rebroadcast/fee-bump for request with inputs {:?}", inputs);
502+
bump_requests.push((*package_id, request.clone()));
503+
}
504+
for (package_id, request) in bump_requests {
505+
self.generate_claim(current_height, &request, false /* force_feerate_bump */, fee_estimator, logger)
506+
.map(|(_, new_feerate, claim)| {
507+
let mut bumped_feerate = false;
508+
if let Some(mut_request) = self.pending_claim_requests.get_mut(&package_id) {
509+
bumped_feerate = request.previous_feerate() > new_feerate;
510+
mut_request.set_feerate(new_feerate);
511+
}
512+
match claim {
513+
OnchainClaim::Tx(tx) => {
514+
let log_start = if bumped_feerate { "Broadcasting RBF-bumped" } else { "Rebroadcasting" };
515+
log_info!(logger, "{} onchain {}", log_start, log_tx!(tx));
516+
broadcaster.broadcast_transaction(&tx);
517+
},
518+
#[cfg(anchors)]
519+
OnchainClaim::Event(event) => {
520+
let log_start = if bumped_feerate { "Yielding fee-bumped" } else { "Replaying" };
521+
log_info!(logger, "{} onchain event to spend inputs {:?}", log_start,
522+
request.outpoints());
523+
#[cfg(debug_assertions)] {
524+
debug_assert!(request.requires_external_funding());
525+
let num_existing = self.pending_claim_events.iter()
526+
.filter(|entry| entry.0 == package_id).count();
527+
assert!(num_existing == 0 || num_existing == 1);
528+
}
529+
self.pending_claim_events.retain(|event| event.0 != package_id);
530+
self.pending_claim_events.push((package_id, event));
531+
}
532+
}
533+
});
534+
}
535+
}
536+
484537
/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
485538
/// onchain) lays on the assumption of claim transactions getting confirmed before timelock
486539
/// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck

lightning/src/chain/package.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -554,6 +554,9 @@ impl PackageTemplate {
554554
pub(crate) fn aggregable(&self) -> bool {
555555
self.aggregable
556556
}
557+
pub(crate) fn previous_feerate(&self) -> u64 {
558+
self.feerate_previous
559+
}
557560
pub(crate) fn set_feerate(&mut self, new_feerate: u64) {
558561
self.feerate_previous = new_feerate;
559562
}

lightning/src/ln/functional_test_utils.rs

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,20 @@ impl ConnectStyle {
151151
}
152152
}
153153

154+
pub fn updates_best_block_first(&self) -> bool {
155+
match self {
156+
ConnectStyle::BestBlockFirst => true,
157+
ConnectStyle::BestBlockFirstSkippingBlocks => true,
158+
ConnectStyle::BestBlockFirstReorgsOnlyTip => true,
159+
ConnectStyle::TransactionsFirst => false,
160+
ConnectStyle::TransactionsFirstSkippingBlocks => false,
161+
ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks => false,
162+
ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks => false,
163+
ConnectStyle::TransactionsFirstReorgsOnlyTip => false,
164+
ConnectStyle::FullBlockViaListen => false,
165+
}
166+
}
167+
154168
fn random_style() -> ConnectStyle {
155169
#[cfg(feature = "std")] {
156170
use core::hash::{BuildHasher, Hasher};

lightning/src/ln/monitor_tests.rs

Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1778,6 +1778,157 @@ fn test_restored_packages_retry() {
17781778
do_test_restored_packages_retry(true);
17791779
}
17801780

1781+
fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) {
1782+
// Test that we will retry broadcasting pending claims for a force-closed channel on every
1783+
// `ChainMonitor::rebroadcast_pending_claims` call.
1784+
if anchors {
1785+
assert!(cfg!(anchors));
1786+
}
1787+
let secp = Secp256k1::new();
1788+
let mut chanmon_cfgs = create_chanmon_cfgs(2);
1789+
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1790+
let mut config = test_default_channel_config();
1791+
if anchors {
1792+
#[cfg(anchors)] {
1793+
config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
1794+
}
1795+
}
1796+
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
1797+
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1798+
1799+
let (_, _, _, chan_id, funding_tx) = create_chan_between_nodes_with_value(
1800+
&nodes[0], &nodes[1], 1_000_000, 500_000_000
1801+
);
1802+
const HTLC_AMT_MSAT: u64 = 1_000_000;
1803+
const HTLC_AMT_SAT: u64 = HTLC_AMT_MSAT / 1000;
1804+
route_payment(&nodes[0], &[&nodes[1]], HTLC_AMT_MSAT);
1805+
1806+
let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1;
1807+
1808+
let commitment_txn = get_local_commitment_txn!(&nodes[0], &chan_id);
1809+
assert_eq!(commitment_txn.len(), if anchors { 1 /* commitment tx only */} else { 2 /* commitment and htlc timeout tx */ });
1810+
check_spends!(&commitment_txn[0], &funding_tx);
1811+
mine_transaction(&nodes[0], &commitment_txn[0]);
1812+
check_closed_broadcast!(&nodes[0], true);
1813+
check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
1814+
check_added_monitors(&nodes[0], 1);
1815+
1816+
// Set up a helper closure we'll use throughout our test. We should only expect retries without
1817+
// bumps if fees have not increased after a block has been connected (assuming the height timer
1818+
// re-evaluates at every block) or after `ChainMonitor::rebroadcast_pending_claims` is called.
1819+
let mut prev_htlc_tx_feerate = None;
1820+
let mut check_htlc_retry = |should_retry: bool, should_bump: bool| -> Option<Transaction> {
1821+
let (htlc_tx, htlc_tx_feerate) = if anchors {
1822+
assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
1823+
let mut events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
1824+
assert_eq!(events.len(), if should_retry { 1 } else { 0 });
1825+
if !should_retry {
1826+
return None;
1827+
}
1828+
#[allow(unused_assignments)]
1829+
let mut tx = Transaction {
1830+
version: 2,
1831+
lock_time: bitcoin::PackedLockTime::ZERO,
1832+
input: vec![],
1833+
output: vec![],
1834+
};
1835+
#[allow(unused_assignments)]
1836+
let mut feerate = 0;
1837+
#[cfg(anchors)] {
1838+
feerate = if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
1839+
target_feerate_sat_per_1000_weight, mut htlc_descriptors, tx_lock_time,
1840+
}) = events.pop().unwrap() {
1841+
assert_eq!(htlc_descriptors.len(), 1);
1842+
let descriptor = htlc_descriptors.pop().unwrap();
1843+
assert_eq!(descriptor.commitment_txid, commitment_txn[0].txid());
1844+
let htlc_output_idx = descriptor.htlc.transaction_output_index.unwrap() as usize;
1845+
assert!(htlc_output_idx < commitment_txn[0].output.len());
1846+
tx.lock_time = tx_lock_time;
1847+
// Note that we don't care about actually making the HTLC transaction meet the
1848+
// feerate for the test, we just want to make sure the feerates we receive from
1849+
// the events never decrease.
1850+
tx.input.push(descriptor.unsigned_tx_input());
1851+
let signer = nodes[0].keys_manager.derive_channel_keys(
1852+
descriptor.channel_value_satoshis, &descriptor.channel_keys_id,
1853+
);
1854+
let per_commitment_point = signer.get_per_commitment_point(
1855+
descriptor.per_commitment_number, &secp
1856+
);
1857+
tx.output.push(descriptor.tx_output(&per_commitment_point, &secp));
1858+
let our_sig = signer.sign_holder_htlc_transaction(&mut tx, 0, &descriptor, &secp).unwrap();
1859+
let witness_script = descriptor.witness_script(&per_commitment_point, &secp);
1860+
tx.input[0].witness = descriptor.tx_input_witness(&our_sig, &witness_script);
1861+
target_feerate_sat_per_1000_weight as u64
1862+
} else { panic!("unexpected event"); };
1863+
}
1864+
(tx, feerate)
1865+
} else {
1866+
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
1867+
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
1868+
assert_eq!(txn.len(), if should_retry { 1 } else { 0 });
1869+
if !should_retry {
1870+
return None;
1871+
}
1872+
let htlc_tx = txn.pop().unwrap();
1873+
check_spends!(htlc_tx, commitment_txn[0]);
1874+
let htlc_tx_fee = HTLC_AMT_SAT - htlc_tx.output[0].value;
1875+
let htlc_tx_feerate = htlc_tx_fee * 1000 / htlc_tx.weight() as u64;
1876+
(htlc_tx, htlc_tx_feerate)
1877+
};
1878+
if should_bump {
1879+
assert!(htlc_tx_feerate > prev_htlc_tx_feerate.take().unwrap());
1880+
} else if let Some(prev_feerate) = prev_htlc_tx_feerate.take() {
1881+
assert_eq!(htlc_tx_feerate, prev_feerate);
1882+
}
1883+
prev_htlc_tx_feerate = Some(htlc_tx_feerate);
1884+
Some(htlc_tx)
1885+
};
1886+
1887+
// Connect blocks up to one before the HTLC expires. This should not result in a claim/retry.
1888+
connect_blocks(&nodes[0], htlc_expiry - nodes[0].best_block_info().1 - 2);
1889+
check_htlc_retry(false, false);
1890+
1891+
// Connect one more block, producing our first claim.
1892+
connect_blocks(&nodes[0], 1);
1893+
check_htlc_retry(true, false);
1894+
1895+
// Connect one more block, expecting a retry with a fee bump. Unfortunately, we cannot bump HTLC
1896+
// transactions pre-anchors.
1897+
connect_blocks(&nodes[0], 1);
1898+
check_htlc_retry(true, anchors);
1899+
1900+
// Trigger a call and we should have another retry, but without a bump.
1901+
nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
1902+
check_htlc_retry(true, false);
1903+
1904+
// Double the feerate and trigger a call, expecting a fee-bumped retry.
1905+
*nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
1906+
nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
1907+
check_htlc_retry(true, anchors);
1908+
1909+
// Connect one more block, expecting a retry with a fee bump. Unfortunately, we cannot bump HTLC
1910+
// transactions pre-anchors.
1911+
connect_blocks(&nodes[0], 1);
1912+
let htlc_tx = check_htlc_retry(true, anchors).unwrap();
1913+
1914+
// Mine the HTLC transaction to ensure we don't retry claims while they're confirmed.
1915+
mine_transaction(&nodes[0], &htlc_tx);
1916+
// If we have a `ConnectStyle` that advertises the new block first without the transactions,
1917+
// we'll receive an extra bumped claim.
1918+
if nodes[0].connect_style.borrow().updates_best_block_first() {
1919+
check_htlc_retry(true, anchors);
1920+
}
1921+
nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
1922+
check_htlc_retry(false, false);
1923+
}
1924+
1925+
#[test]
1926+
fn test_monitor_timer_based_claim() {
1927+
do_test_monitor_rebroadcast_pending_claims(false);
1928+
#[cfg(anchors)]
1929+
do_test_monitor_rebroadcast_pending_claims(true);
1930+
}
1931+
17811932
#[cfg(anchors)]
17821933
#[test]
17831934
fn test_yield_anchors_events() {

0 commit comments

Comments
 (0)