@@ -2242,6 +2242,182 @@ where
2242
2242
logger: L,
2243
2243
}
2244
2244
2245
+
2246
+ /// This works as a mock [`ChannelMessageHandler`] it is used mainly when a user wants to run their node in
2247
+ /// offline mode i.e. This node won't communicate with any peer except sending a BogusChannelReestablish
2248
+ /// for all the [`StubChannelMonitors`] being tracked by the [`ChainMonitor`].
2249
+ ///
2250
+ /// [`FundRecoverer`] is parameterized by a number of components to achieve this.
2251
+ /// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
2252
+ /// channel
2253
+ /// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
2254
+ /// - [`Logger`] for logging operational information of varying degrees
2255
+ ///
2256
+ /// Additionally, it implements the following traits:
2257
+ /// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
2258
+ /// - [`MessageSendEventsProvider`] to similarly send such messages to peers
2259
+ ///
2260
+ pub struct FundRecoverer<SP: Deref, L:Deref, M: Deref>
2261
+ where
2262
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2263
+ SP::Target: SignerProvider,
2264
+ L::Target: Logger
2265
+ {
2266
+ default_configuration: UserConfig,
2267
+ chain_monitor: M,
2268
+ chain_hash: ChainHash,
2269
+ per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2270
+ logger: L,
2271
+ }
2272
+
2273
+ impl<SP:Deref, L:Deref, M:Deref> MessageSendEventsProvider for FundRecoverer<SP, L, M>
2274
+ where
2275
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2276
+ SP::Target: SignerProvider,
2277
+ L::Target: Logger
2278
+ {
2279
+ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
2280
+ let mut pending_events = Vec::new();
2281
+ let events = RefCell::new(Vec::new());
2282
+ let per_peer_state = self.per_peer_state.read().unwrap();
2283
+ for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
2284
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
2285
+ let peer_state = &mut *peer_state_lock;
2286
+ if peer_state.pending_msg_events.len() > 0 {
2287
+ pending_events.append(&mut peer_state.pending_msg_events);
2288
+ }
2289
+ }
2290
+ if !pending_events.is_empty() {
2291
+ events.replace(pending_events);
2292
+ }
2293
+ events.into_inner()
2294
+ }
2295
+ }
2296
+
2297
+ impl<SP:Deref, L: Deref, M:Deref> FundRecoverer<SP, L, M>
2298
+ where
2299
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2300
+ SP::Target: SignerProvider,
2301
+ L::Target: Logger
2302
+ {
2303
+ pub fn new(chain_monitor: M, logger: L, config: UserConfig, params: ChainParameters) -> Self {
2304
+ return Self { default_configuration: config.clone(),
2305
+ chain_monitor,
2306
+ chain_hash: ChainHash::using_genesis_block(params.network),
2307
+ per_peer_state: FairRwLock::new(new_hash_map()),
2308
+ logger
2309
+ }
2310
+ }
2311
+ }
2312
+
2313
impl<SP:Deref, L:Deref, M:Deref> ChannelMessageHandler for FundRecoverer<SP, L, M>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	SP::Target: SignerProvider,
	L::Target: Logger
{
	// All channel-protocol messages below are intentionally ignored: in
	// recovery/offline mode this node never progresses channel state, it only
	// reacts to a peer connecting (see `peer_connected`).
	fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &msgs::OpenChannel) {}
	fn handle_accept_channel(&self, _their_node_id: &PublicKey, _msg: &msgs::AcceptChannel) {}
	fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) {}
	fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) {}
	fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReady) {}
	fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) {}
	fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) {}
	fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) {}
	fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) {}
	fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) {}
	fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) {}
	fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) {}
	fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) {}
	fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) {}
	fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) {}
	fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
	fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, _msg: &msgs::OpenChannelV2) {}
	fn handle_accept_channel_v2(&self, _their_node_id: &PublicKey, _msg: &msgs::AcceptChannelV2) {}
	fn handle_stfu(&self, _their_node_id: &PublicKey, _msg: &msgs::Stfu) {}
	#[cfg(splicing)]
	fn handle_splice_init(&self, _their_node_id: &PublicKey, _msg: &msgs::SpliceInit) {}
	#[cfg(splicing)]
	fn handle_splice_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::SpliceAck) {}
	#[cfg(splicing)]
	fn handle_splice_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::SpliceLocked) {}
	fn handle_tx_add_input(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAddInput) {}
	fn handle_tx_add_output(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAddOutput) {}
	fn handle_tx_remove_input(&self, _their_node_id: &PublicKey, _msg: &msgs::TxRemoveInput) {}
	fn handle_tx_remove_output(&self, _their_node_id: &PublicKey, _msg: &msgs::TxRemoveOutput) {}
	fn handle_tx_complete(&self, _their_node_id: &PublicKey, _msg: &msgs::TxComplete) {}
	fn handle_tx_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::TxSignatures) {}
	fn handle_tx_init_rbf(&self, _their_node_id: &PublicKey, _msg: &msgs::TxInitRbf) {}
	fn handle_tx_ack_rbf(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAckRbf) {}
	fn handle_tx_abort(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAbort) {}
	fn handle_peer_storage(&self, _their_node_id: &PublicKey, _msg: &msgs::PeerStorageMessage) {}
	fn handle_your_peer_storage(&self, _their_node_id: &PublicKey, _msg: &msgs::YourPeerStorageMessage) {}
	fn peer_disconnected(&self, _their_node_id: &PublicKey) {}

	/// Records the newly connected peer and queues one bogus `channel_reestablish`
	/// per stub channel the [`chain::Watch`] tracks for that counterparty.
	///
	/// Returns `Ok(())` unconditionally — this handler never rejects a connection.
	fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None);

		// Phase 1: take the write lock only long enough to insert/refresh the
		// peer entry, then drop it before generating events below.
		{
			let mut peer_state_lock = self.per_peer_state.write().unwrap();
			match peer_state_lock.entry(counterparty_node_id.clone()) {
				hash_map::Entry::Vacant(e) => {
					// First time we see this peer: start from an empty state,
					// remembering only the features they advertised in `init`.
					e.insert(Mutex::new(PeerState {
						channel_by_id: new_hash_map(),
						inbound_channel_request_by_id: new_hash_map(),
						latest_features: init_msg.features.clone(),
						pending_msg_events: Vec::new(),
						in_flight_monitor_updates: BTreeMap::new(),
						monitor_update_blocked_actions: BTreeMap::new(),
						actions_blocking_raa_monitor_updates: BTreeMap::new(),
						is_connected: true,
					}));
				},
				hash_map::Entry::Occupied(e) => {
					// Reconnecting peer: keep existing state, refresh features.
					let mut peer_state = e.get().lock().unwrap();
					peer_state.latest_features = init_msg.features.clone();

					debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
					peer_state.is_connected = true;
				},
			}
		}
		log_debug!(logger, "Generating Bogus channel_reestablish events for all the stub channels with peer {}", log_pubkey!(counterparty_node_id));

		// Phase 2: under the (shared) read lock, queue a reestablish message for
		// every stub channel id the chain monitor knows for this counterparty.
		let per_peer_state = self.per_peer_state.read().unwrap();
		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			let pending_msg_events = &mut peer_state.pending_msg_events;
			for cid in self.chain_monitor.get_stub_cids_with_counterparty(*counterparty_node_id) {
				// Deliberately bogus field values: zero commitment numbers and a
				// dummy per-commitment secret/point. NOTE(review): presumably this
				// prompts the peer to respond with their latest channel state for
				// fund recovery — confirm against the recovery flow.
				pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
					node_id: *counterparty_node_id,
					msg: msgs::ChannelReestablish {
						channel_id: cid,
						next_local_commitment_number: 0,
						next_remote_commitment_number: 0,
						your_last_per_commitment_secret: [1u8; 32],
						// NOTE(review): assumes 0x02 repeated 33 bytes decodes as a
						// valid compressed secp256k1 point; the unwrap relies on it.
						my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
						next_funding_txid: None,
					},
				})
			}
		}
		Ok(())
	}

	// Incoming reestablish/error messages are ignored; we only ever send.
	fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) {}
	fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
	/// Node features derived from the stored configuration.
	fn provided_node_features(&self) -> NodeFeatures {
		provided_node_features(&self.default_configuration)
	}
	/// Init features derived from the stored configuration (peer-independent).
	fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
		provided_init_features(&self.default_configuration)
	}
	/// The single chain this recoverer was constructed for.
	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
		Some(vec![self.chain_hash])
	}
}
2420
+
2245
2421
/// Chain-related parameters used to construct a new `ChannelManager`.
2246
2422
///
2247
2423
/// Typically, the block-specific parameters are derived from the best block hash for the network,
0 commit comments