Skip to content

Commit c783fd3

Browse files
Aditya SharmaAditya Sharma
Aditya Sharma
authored and
Aditya Sharma
committed
lightning: Handle the peer storage message, persist it, and send it back to the respective peer upon reconnection.
1 parent 203dc1e commit c783fd3

File tree

3 files changed

+129
-4
lines changed

3 files changed

+129
-4
lines changed

lightning/src/chain/channelmonitor.rs

+31-2
Original file line numberDiff line numberDiff line change
@@ -544,6 +544,9 @@ pub(crate) enum ChannelMonitorUpdateStep {
544544
ShutdownScript {
545545
scriptpubkey: ScriptBuf,
546546
},
547+
LatestPeerStorage {
548+
data: Vec<u8>,
549+
},
547550
}
548551

549552
impl ChannelMonitorUpdateStep {
@@ -555,6 +558,7 @@ impl ChannelMonitorUpdateStep {
555558
ChannelMonitorUpdateStep::CommitmentSecret { .. } => "CommitmentSecret",
556559
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => "ChannelForceClosed",
557560
ChannelMonitorUpdateStep::ShutdownScript { .. } => "ShutdownScript",
561+
ChannelMonitorUpdateStep::LatestPeerStorage { .. } => "LatestPeerStorage",
558562
}
559563
}
560564
}
@@ -588,6 +592,9 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
588592
(5, ShutdownScript) => {
589593
(0, scriptpubkey, required),
590594
},
595+
(6, LatestPeerStorage) => {
596+
(0, data, required_vec),
597+
},
591598
);
592599

593600
/// Details about the balance(s) available for spending once the channel appears on chain.
@@ -835,6 +842,8 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
835842
/// revoked.
836843
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
837844

845+
peer_storage: Vec<u8>,
846+
838847
// Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated
839848
// during chain data processing. This prevents a race in `ChainMonitor::update_channel` (and
840849
// presumably user implementations thereof as well) where we update the in-memory channel
@@ -1110,6 +1119,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signe
11101119
(15, self.counterparty_fulfilled_htlcs, required),
11111120
(17, self.initial_counterparty_commitment_info, option),
11121121
(19, self.channel_id, required),
1122+
(21, self.peer_storage, required_vec),
11131123
});
11141124

11151125
Ok(())
@@ -1289,7 +1299,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
12891299
confirmed_commitment_tx_counterparty_output: None,
12901300
htlcs_resolved_on_chain: Vec::new(),
12911301
spendable_txids_confirmed: Vec::new(),
1292-
1302+
peer_storage: Vec::new(),
12931303
best_block,
12941304
counterparty_node_id: Some(counterparty_node_id),
12951305
initial_counterparty_commitment_info: None,
@@ -1406,6 +1416,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
14061416
self.inner.lock().unwrap().channel_id()
14071417
}
14081418

1419+
/// Gets the latest peer storage received from this peer.
1420+
pub fn get_peer_storage(&self) -> Vec<u8> {
1421+
self.inner.lock().unwrap().peer_storage()
1422+
}
1423+
14091424
/// Gets a list of txids, with their output scripts (in the order they appear in the
14101425
/// transaction), which we must learn about spends of via block_connected().
14111426
pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
@@ -2727,6 +2742,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
27272742
}
27282743
}
27292744

2745+
fn update_peer_storage(&mut self, new_data: Vec<u8>) {
2746+
self.peer_storage = new_data;
2747+
}
2748+
27302749
fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
27312750
let funding_outp = HolderFundingOutput::build(
27322751
self.funding_redeemscript.clone(),
@@ -2896,6 +2915,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
28962915
panic!("Attempted to replace shutdown script {} with {}", shutdown_script, scriptpubkey);
28972916
}
28982917
},
2918+
ChannelMonitorUpdateStep::LatestPeerStorage { data } => {
2919+
log_trace!(logger, "Updating ChannelMonitor with latest recieved PeerStorage");
2920+
self.update_peer_storage(data.clone());
2921+
}
28992922
}
29002923
}
29012924

@@ -2927,6 +2950,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
29272950
&self.funding_info
29282951
}
29292952

2953+
pub fn peer_storage(&self) -> Vec<u8> {
2954+
self.peer_storage.clone()
2955+
}
2956+
29302957
pub fn channel_id(&self) -> ChannelId {
29312958
self.channel_id
29322959
}
@@ -4592,6 +4619,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
45924619
let mut counterparty_fulfilled_htlcs = Some(new_hash_map());
45934620
let mut initial_counterparty_commitment_info = None;
45944621
let mut channel_id = None;
4622+
let mut peer_storage = Some(Vec::new());
45954623
read_tlv_fields!(reader, {
45964624
(1, funding_spend_confirmed, option),
45974625
(3, htlcs_resolved_on_chain, optional_vec),
@@ -4603,6 +4631,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
46034631
(15, counterparty_fulfilled_htlcs, option),
46044632
(17, initial_counterparty_commitment_info, option),
46054633
(19, channel_id, option),
4634+
(21, peer_storage, optional_vec),
46064635
});
46074636

46084637
// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
@@ -4667,7 +4696,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
46674696
confirmed_commitment_tx_counterparty_output,
46684697
htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
46694698
spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),
4670-
4699+
peer_storage: peer_storage.unwrap(),
46714700
best_block,
46724701
counterparty_node_id,
46734702
initial_counterparty_commitment_info,

lightning/src/chain/transaction.rs

+4
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,10 @@ impl OutPoint {
6666
vout: self.index as u32,
6767
}
6868
}
69+
70+
pub fn get_txid(self) -> Txid {
71+
self.txid
72+
}
6973
}
7074

7175
impl core::fmt::Display for OutPoint {

lightning/src/ln/channelmanager.rs

+94-2
Original file line numberDiff line numberDiff line change
@@ -1415,6 +1415,7 @@ where
14151415
node_signer: NS,
14161416
signer_provider: SP,
14171417

1418+
peer_storage: RwLock<HashMap<PublicKey, Vec<u8>>>,
14181419
logger: L,
14191420
}
14201421

@@ -2505,7 +2506,7 @@ where
25052506
entropy_source,
25062507
node_signer,
25072508
signer_provider,
2508-
2509+
peer_storage: RwLock::new(HashMap::new()),
25092510
logger,
25102511
}
25112512
}
@@ -6493,6 +6494,73 @@ where
64936494
}
64946495
}
64956496

6497+
fn internal_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::PeerStorageMessage) -> Result<(), ()> {
6498+
let per_peer_state = self.per_peer_state.read().unwrap();
6499+
let peer_state_mutex = match per_peer_state.get(counterparty_node_id) {
6500+
Some(peer_state_mutex) => peer_state_mutex,
6501+
None => return Err(()),
6502+
};
6503+
6504+
// Check if we have any channels with the peer (currently we only provide the service to peers we have a channel with).
6505+
let chan_info = self.list_channels_with_counterparty(counterparty_node_id);
6506+
if chan_info.is_empty() {
6507+
return Err(());
6508+
}
6509+
6510+
// Send ChannelMonitor Update.
6511+
let sorted_chan_info: Vec<ChannelDetails> = chan_info.iter().cloned().collect();
6512+
let sorted_chan_info = {
6513+
let mut sorted_vec: Vec<ChannelDetails> = sorted_chan_info;
6514+
sorted_vec.sort_by_cached_key(|s| s.funding_txo.unwrap().get_txid());
6515+
sorted_vec
6516+
};
6517+
6518+
let mut found_funded_chan = false;
6519+
for chan in &sorted_chan_info {
6520+
if let Some(funding_txo) = chan.funding_txo {
6521+
found_funded_chan = true;
6522+
let peer_storage_update = ChannelMonitorUpdate {
6523+
update_id: CLOSED_CHANNEL_UPDATE_ID,
6524+
counterparty_node_id: None,
6525+
updates: vec![ChannelMonitorUpdateStep::LatestPeerStorage {
6526+
data: msg.data.clone(),
6527+
}],
6528+
channel_id: Some(chan.channel_id),
6529+
};
6530+
// Since the channel monitor is already loaded, we do not need to push this onto a background event.
6531+
let update_res: ChannelMonitorUpdateStatus = self.chain_monitor.update_channel(funding_txo, &peer_storage_update);
6532+
if update_res != ChannelMonitorUpdateStatus::Completed {
6533+
log_error!(WithContext::from(&self.logger, None, Some(chan.channel_id)),
6534+
"Critical error: failed to update channel monitor with latest peer storage {:?}: {:?}",
6535+
msg.data, update_res);
6536+
}
6537+
break;
6538+
}
6539+
}
6540+
6541+
if !found_funded_chan {
6542+
return Err(());
6543+
}
6544+
6545+
6546+
// Update the store.
6547+
let mut peer_storage = self.peer_storage.write().unwrap();
6548+
peer_storage.insert(*counterparty_node_id, msg.data.clone());
6549+
6550+
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6551+
let peer_state = &mut *peer_state_lock;
6552+
6553+
// Send ACK.
6554+
peer_state.pending_msg_events.push(events::MessageSendEvent::SendYourPeerStorageMessage {
6555+
node_id: counterparty_node_id.clone(),
6556+
msg: msgs::YourPeerStorageMessage {
6557+
data: msg.data.clone()
6558+
},
6559+
});
6560+
6561+
Ok(())
6562+
}
6563+
64966564
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
64976565
let best_block = *self.best_block.read().unwrap();
64986566
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8857,6 +8925,13 @@ where
88578925
}
88588926

88598927
fn handle_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::PeerStorageMessage) {
8928+
match self.internal_peer_storage(counterparty_node_id, msg) {
8929+
Ok(_) => {},
8930+
Err(_err) => {
8931+
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
8932+
log_debug!(logger, "Could not store Peer Storage for peer {}", log_pubkey!(counterparty_node_id));
8933+
},
8934+
}
88608935
}
88618936

88628937
fn handle_your_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::YourPeerStorageMessage) {
@@ -9244,6 +9319,19 @@ where
92449319
},
92459320
}
92469321
}
9322+
9323+
let peer_storage = self.peer_storage.read().unwrap();
9324+
9325+
if let Some(value) = peer_storage.get(counterparty_node_id) {
9326+
log_debug!(logger, "Generating peerstorage for {}", log_pubkey!(counterparty_node_id));
9327+
9328+
pending_msg_events.push(events::MessageSendEvent::SendYourPeerStorageMessage {
9329+
node_id: counterparty_node_id.clone(),
9330+
msg: msgs::YourPeerStorageMessage {
9331+
data: value.clone()
9332+
},
9333+
});
9334+
}
92479335
}
92489336

92499337
return NotifyOption::SkipPersistHandleEvents;
@@ -10564,6 +10652,7 @@ where
1056410652
let mut channel_closures = VecDeque::new();
1056510653
let mut close_background_events = Vec::new();
1056610654
let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
10655+
let mut peer_storage_dir: HashMap<PublicKey, Vec<u8>> = HashMap::new();
1056710656
for _ in 0..channel_count {
1056810657
let mut channel: Channel<SP> = Channel::read(reader, (
1056910658
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -10573,6 +10662,9 @@ where
1057310662
funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
1057410663
funding_txo_set.insert(funding_txo.clone());
1057510664
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
10665+
// Load peer storage from the ChannelMonitor into memory.
10666+
peer_storage_dir.insert(channel.context.get_counterparty_node_id(), monitor.get_peer_storage());
10667+
1057610668
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
1057710669
channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
1057810670
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
@@ -11382,7 +11474,7 @@ where
1138211474
entropy_source: args.entropy_source,
1138311475
node_signer: args.node_signer,
1138411476
signer_provider: args.signer_provider,
11385-
11477+
peer_storage: RwLock::new(peer_storage_dir),
1138611478
logger: args.logger,
1138711479
default_configuration: args.default_config,
1138811480
};

0 commit comments

Comments
 (0)