
Make Channel's block connection API more electrum-friendly #838


Merged: 8 commits, Apr 5, 2021
6 changes: 3 additions & 3 deletions fuzz/src/chanmon_consistency.rs
@@ -435,11 +435,11 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
let chain_hash = genesis_block(Network::Bitcoin).block_hash();
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
$node.block_connected(&header, &txdata, 1);
for i in 2..100 {
$node.transactions_confirmed(&header, 1, &txdata);
for _ in 2..100 {
header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
$node.block_connected(&header, &[], i);
}
$node.update_best_block(&header, 99);
} }
}
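To spell out the pattern the updated fuzz target uses: instead of one `block_connected` call per block, the channel transactions are confirmed once and only the final tip is announced. A minimal self-contained sketch of that flow, assuming only the two new `ChannelManager` method signatures introduced in this PR (the `ConfirmTarget` trait and `connect_chain` helper are ours, for illustration):

```rust
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;

// Hypothetical stand-in for the two new ChannelManager methods exercised above.
trait ConfirmTarget {
    fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &[(usize, &Transaction)]);
    fn update_best_block(&self, header: &BlockHeader, height: u32);
}

// Mirror of the fuzz target: confirm `txdata` in block 1, build 98 more
// headers on top, then make a single best-block call at the final height.
fn connect_chain<T: ConfirmTarget>(node: &T, mut header: BlockHeader, txdata: &[(usize, &Transaction)]) {
    node.transactions_confirmed(&header, 1, txdata);
    for _ in 2..100 {
        header = BlockHeader {
            version: 0x20000000,
            prev_blockhash: header.block_hash(),
            merkle_root: Default::default(),
            time: 42,
            bits: 42,
            nonce: 42,
        };
    }
    node.update_best_block(&header, 99);
}
```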

6 changes: 4 additions & 2 deletions fuzz/src/full_stack.rs
@@ -27,6 +27,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};

use lightning::chain;
use lightning::chain::Listen;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::chainmonitor;
use lightning::chain::transaction::OutPoint;
@@ -202,7 +203,8 @@ impl<'a> MoneyLossDetector<'a> {
self.blocks_connected += 1;
let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
self.height += 1;
self.manager.block_connected(&header, &txdata, self.height as u32);
self.manager.transactions_confirmed(&header, self.height as u32, &txdata);
self.manager.update_best_block(&header, self.height as u32);
Comment on lines -205 to +207
Contributor

Does this need to be changed if ChannelManager provides block_connected still?

Collaborator Author

Hmm, no, we could swap it for creating a block and then passing in the full Block object. Do you have a preference?

Contributor

Oh, I missed that this wasn't using the Listen version of block_connected. Nevermind.
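For reference, the `Listen`-trait route discussed in this thread would look roughly like the sketch below: `Listen::block_connected` takes a whole `Block` and, per the impl later in this diff, internally performs `transactions_confirmed` followed by `update_best_block` (the helper name is ours):

```rust
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::Transaction;
use lightning::chain::Listen;

// Build a full Block and hand it to the Listen impl rather than calling the
// two lower-level ChannelManager methods directly.
fn connect_full_block<L: Listen>(listener: &L, header: BlockHeader, txdata: Vec<Transaction>, height: u32) {
    let block = Block { header, txdata };
    listener.block_connected(&block, height);
}
```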

(*self.monitor).block_connected(&header, &txdata, self.height as u32);
if self.header_hashes.len() > self.height {
self.header_hashes[self.height] = (header.block_hash(), self.blocks_connected);
@@ -216,7 +218,7 @@ impl<'a> MoneyLossDetector<'a> {
fn disconnect_block(&mut self) {
if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: Default::default(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
self.manager.block_disconnected(&header);
self.manager.block_disconnected(&header, self.height as u32);
self.monitor.block_disconnected(&header, self.height as u32);
self.height -= 1;
let removal_height = self.height;
4 changes: 2 additions & 2 deletions lightning-persister/src/lib.rs
@@ -241,15 +241,15 @@ mod tests {
// Force close because cooperative close doesn't result in any persisted
// updates.
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);

let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);

let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);

// Make sure everything is persisted as expected after close.
2 changes: 1 addition & 1 deletion lightning/src/ln/chanmon_update_fail_tests.rs
@@ -241,7 +241,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
// ...and make sure we can force-close a frozen channel
nodes[0].node.force_close_channel(&channel_id).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);

// TODO: Once we hit the chain with the failure transaction we should check that we get a
// PaymentFailed event
272 changes: 151 additions & 121 deletions lightning/src/ln/channel.rs

Large diffs are not rendered by default.

197 changes: 112 additions & 85 deletions lightning/src/ln/channelmanager.rs
@@ -1002,16 +1002,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
}
}

fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
if let Some(node_id) = peer_node_id {
if chan.get().get_counterparty_node_id() != *node_id {
// Error or Ok here doesn't matter - the result is only exposed publicly
// when peer_node_id is None anyway.
return Ok(());
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
}
if let Some(short_id) = chan.get().get_short_channel_id() {
@@ -1031,14 +1029,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
});
}

Ok(())
Ok(chan.get_counterparty_node_id())
}

/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.force_close_channel_with_peer(channel_id, None)
match self.force_close_channel_with_peer(channel_id, None) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
},
}
);
Ok(())
},
Err(e) => Err(e)
}
}
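To illustrate the behavior change in the hunk above: `force_close_channel` no longer silently returns `Ok(())` for an unknown `channel_id`, and on success it now queues a `HandleError` message for the counterparty. A caller-side sketch under those assumptions (the `ForceClose` trait and `close_or_report` helper are ours; only the method signature shown in this diff is assumed):

```rust
use lightning::util::errors::APIError;

// Hypothetical stand-in for the one ChannelManager method used here.
trait ForceClose {
    fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError>;
}

fn close_or_report<M: ForceClose>(manager: &M, channel_id: &[u8; 32]) {
    match manager.force_close_channel(channel_id) {
        // Success: the latest local commitment transaction is broadcast and a
        // "Channel force-closed" error message is queued for the peer.
        Ok(()) => println!("channel force-closed"),
        // Unknown channels now surface as an error instead of a silent Ok(()).
        Err(APIError::ChannelUnavailable { err }) => eprintln!("no such channel: {}", err),
        Err(e) => eprintln!("force-close failed: {:?}", e),
    }
}
```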

/// Force close all channels, immediately broadcasting the latest local commitment transaction
@@ -3194,6 +3205,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
msg: update
});
}
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
},
});
}
},
}
@@ -3276,12 +3293,26 @@ where
L::Target: Logger,
{
fn block_connected(&self, block: &Block, height: u32) {
assert_eq!(*self.last_block_hash.read().unwrap(), block.header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
ChannelManager::block_connected(self, &block.header, &txdata, height);
self.transactions_confirmed(&block.header, height, &txdata);
self.update_best_block(&block.header, height);
}

fn block_disconnected(&self, header: &BlockHeader, _height: u32) {
ChannelManager::block_disconnected(self, header);
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");

let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
let new_height = self.latest_block_height.fetch_sub(1, Ordering::AcqRel) as u32 - 1;
assert_eq!(new_height, height - 1,
"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
*self.last_block_hash.write().unwrap() = header.prev_blockhash;

self.do_chain_event(new_height, |channel| channel.update_best_block(new_height, header.time));
}
}

@@ -3292,22 +3323,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
F::Target: FeeEstimator,
L::Target: Logger,
{
/// Updates channel state based on transactions seen in a connected block.
pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
(&self, height: u32, f: FN) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let block_hash = header.block_hash();
log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);

let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

assert_eq!(*self.last_block_hash.read().unwrap(), header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.write().unwrap() = block_hash;

let mut failed_channels = Vec::new();
let mut timed_out_htlcs = Vec::new();
@@ -3317,7 +3337,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, channel| {
let res = channel.block_connected(header, txdata, height);
let res = f(channel);
if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
@@ -3343,32 +3363,23 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
}
} else if let Err(e) = res {
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
// It looks like our counterparty went on-chain or the funding transaction was
// reorged out of the main chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
});
return false;
}
if let Some(funding_txo) = channel.get_funding_txo() {
for &(_, tx) in txdata.iter() {
for inp in tx.input.iter() {
if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
// It looks like our counterparty went on-chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
return false;
}
}
}
}
true
});

@@ -3397,6 +3408,64 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
}
}

/// Updates channel state to take note of transactions which were confirmed in the given block
/// at the given height.
///
/// Note that you must still call (or have called) [`update_best_block`] with the block
/// information which is included here.
///
/// This method may be called before or after [`update_best_block`] for a given block's
/// transaction data and may be called multiple times with additional transaction data for a
/// given block.
///
/// This method may be called for a previous block after an [`update_best_block`] call has
/// been made for a later block; however, it must *not* be called with transaction data from a
/// block which is no longer in the best chain (i.e., one where [`update_best_block`] has
/// already been informed about a blockchain reorganization which removed the block
/// corresponding to `header` from the best chain).
///
/// [`update_best_block`]: `Self::update_best_block`
pub fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &TransactionData) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.

let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);

let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.do_chain_event(height, |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
}
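To make the ordering rules in the doc comment above concrete, here is a sketch of a legal call sequence for a client that learns about confirmations with some lag. All names are ours; only the two method signatures documented above are assumed:

```rust
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;

// Hypothetical stand-in for the two methods documented above.
trait Confirm {
    fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &[(usize, &Transaction)]);
    fn update_best_block(&self, header: &BlockHeader, height: u32);
}

// A legal sequence: new tips first, matching transaction data afterwards.
fn lagging_confirmations<C: Confirm>(
    manager: &C,
    header_100: &BlockHeader,
    header_101: &BlockHeader,
    txdata_100: &[(usize, &Transaction)],
    more_txdata_100: &[(usize, &Transaction)],
) {
    // The client hears about the new tips first ...
    manager.update_best_block(header_100, 100);
    manager.update_best_block(header_101, 101);
    // ... then receives block 100's transaction data, after a later block was
    // already announced, and even split across two calls. Both are allowed.
    manager.transactions_confirmed(header_100, 100, txdata_100);
    manager.transactions_confirmed(header_100, 100, more_txdata_100);
    // Not allowed: transactions_confirmed for a block that a reorg (reported
    // via update_best_block) has since removed from the best chain.
}
```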

/// Updates channel state with the current best blockchain tip. You should attempt to call this
/// quickly after a new block becomes available; however, if multiple new blocks become
/// available at the same time, only a single `update_best_block()` call needs to be made.
///
/// This method should also be called immediately after any block disconnections, once at the
/// reorganization fork point, and once with the new chain tip. Calling this method at the
/// blockchain reorganization fork point ensures we learn when a funding transaction which was
/// previously confirmed is reorganized out of the blockchain, ensuring we do not continue to
/// accept payments which cannot be enforced on-chain.
///
/// In both the block-connection and block-disconnection case, this method may be called either
/// once per block connected or disconnected, or simply at the fork point and new tip(s),
/// skipping any intermediary blocks.
pub fn update_best_block(&self, header: &BlockHeader, height: u32) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.

let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);

let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.write().unwrap() = block_hash;

self.do_chain_event(height, |channel| channel.update_best_block(height, header.time));

loop {
// Update last_node_announcement_serial to be the max of its current value and the
@@ -3412,48 +3481,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
}
}

/// Updates channel state based on a disconnected block.
///
/// If necessary, the channel may be force-closed without letting the counterparty participate
/// in the shutdown.
pub fn block_disconnected(&self, header: &BlockHeader) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
*self.last_block_hash.write().unwrap() = header.prev_blockhash;

let mut failed_channels = Vec::new();
{
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, v| {
if v.block_disconnected(header) {
if let Some(short_id) = v.get_short_channel_id() {
short_to_id.remove(&short_id);
}
failed_channels.push(v.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&v) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
false
} else {
true
}
});
}

self.handle_init_event_channel_failures(failed_channels);
}

/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
/// indicating whether persistence is necessary. Only one listener on
/// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
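Taken together, the two new methods give an Electrum-style client a simple reorg recipe, per the `update_best_block` docs: one call at the fork point, confirmations for the replacement blocks, and one call at the new tip, skipping any intermediary blocks. A closing sketch under the same assumptions as the earlier examples (all names and heights ours, for illustration):

```rust
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;

// Hypothetical stand-in for the two ChannelManager methods added in this PR.
trait Confirm {
    fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &[(usize, &Transaction)]);
    fn update_best_block(&self, header: &BlockHeader, height: u32);
}

// Blocks 105 and 106 were reorged out in favour of 105' and 106'. Intermediary
// blocks may be skipped, so three calls suffice.
fn handle_reorg<C: Confirm>(
    manager: &C,
    fork_header_104: &BlockHeader,
    header_105_new: &BlockHeader,
    header_106_new: &BlockHeader,
    txdata_105_new: &[(usize, &Transaction)],
) {
    // 1. Rewind to the fork point, so anything confirmed only in the stale
    //    blocks (e.g. a funding transaction) stops being treated as confirmed.
    manager.update_best_block(fork_header_104, 104);
    // 2. Confirm relevant transactions found in the replacement block(s).
    manager.transactions_confirmed(header_105_new, 105, txdata_105_new);
    // 3. Announce the new tip.
    manager.update_best_block(header_106_new, 106);
}
```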