 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
-use crate::chain::channelmonitor;
+use crate::chain::{channelmonitor, BestBlock};
 use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
 use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
-use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
+
+use crate::ln::channelmanager::{self, ChainParameters, FundRecoverer, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
@@ -174,6 +175,138 @@ fn test_funding_exceeds_no_wumbo_limit() {
 }
 }
 
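+// Test of the peer storage backup flow: a node that has lost its channel state recovers a
+// channel stub from its peer's backup and uses a FundRecoverer to get its funds back on chain.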
+#[test]
+fn test_peer_storage() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let (persister, chain_monitor);
+	let (persister_fund_recoverer, chain_monitor_fund_recoverer);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes_0_deserialized;
+	let nodes_0_deserialized_fund_recoverer;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
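+	// Serialize nodes[0] before any channel exists; reloading from this snapshot later
+	// simulates a node that has lost all of its channel state.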
+	let nodes_0_serialized = nodes[0].node.encode();
+
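+	// Open a channel and make a few payments so the channel state advances well past the
+	// snapshot taken above.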
+	let (_a, _b, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	send_payment(&nodes[0], &vec!(&nodes[1])[..], 1000);
+	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10000);
+	send_payment(&nodes[0], &vec!(&nodes[1])[..], 9999);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Reconnect peers
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert_eq!(reestablish_1.len(), 1);
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_2.len(), 1);
+
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
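+	// Reload nodes[0] from the stale, pre-channel serialization with no ChannelMonitors.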
+	reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[], persister, chain_monitor, nodes_0_deserialized);
+
+	// Reconnect the peers to check that we send YourPeerStorage after the reload.
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_2.len(), 1);
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	// The reloaded nodes[0] has no channel state, so it sends no channel_reestablish.
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert_eq!(reestablish_1.len(), 0);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
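+	// Encode the channel stub recovered for the lost channel; it is passed back in when
+	// reloading below.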
+	let chanstub = get_stub!(nodes[0], channel_id).encode();
+
+	// Let's drop the monitor and clear the watched transactions and outputs from the chain source as well.
+	nodes[0].chain_source.remove_watched_txn_and_outputs(
+		OutPoint { txid: funding_tx.txid(), index: 0 },
+		funding_tx.output[0].script_pubkey.clone()
+	);
+
+	reload_node_with_stubs!(nodes[0], &nodes_0_serialized, &[], &[&chanstub], persister_fund_recoverer, chain_monitor_fund_recoverer, nodes_0_deserialized_fund_recoverer);
+
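+	// Spin up a FundRecoverer for the stubbed node so it can recover funds for the lost channel.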
+	let fundrecoverer: FundRecoverer<&test_utils::TestKeysInterface, &TestLogger, &test_utils::TestChainMonitor> =
+		FundRecoverer::new(&chain_monitor, node_cfgs[0].logger, test_default_channel_config(),
+			ChainParameters { network: Network::Testnet, best_block: BestBlock::from_network(Network::Testnet) });
+
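+	// On connection the recoverer queues a channel_reestablish for the lost channel, which
+	// should prompt nodes[1] to broadcast its latest commitment transaction.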
+	fundrecoverer.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let mut closing_msg_events = fundrecoverer.get_and_clear_pending_msg_events();
+	assert_eq!(closing_msg_events.len(), 1);
+	let bogus_chan_reestablish = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut closing_msg_events);
+
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_1.len(), 1);
+
+	match bogus_chan_reestablish {
+		MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
+			assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
+			nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), msg);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	fundrecoverer.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_1[0]);
+
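+	// nodes[1] should have broadcast its commitment transaction.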
+	let commitment_tx = {
+		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 1);
+		node_txn.remove(0)
+	};
+
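+	// Confirm the commitment transaction on both nodes.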
+	let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx.clone()]);
+	connect_block(&nodes[1], &block);
+	connect_block(&nodes[0], &block);
+	check_closed_broadcast!(nodes[1], true);
+
+	let events_2 = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events_2.len(), 1);
+	match events_2[0] {
+		Event::ChannelClosed { .. } => {}, // nodes[1]'s channel is closed now that its commitment transaction has confirmed.
+		_ => panic!("Unexpected event"),
+	}
+
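+	// The recovered stub monitor on nodes[0] should track the closed channel and report the
+	// confirmed commitment output as a balance awaiting confirmations.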
+	check_added_monitors!(nodes[1], 1);
+	{
+		let (txo, cid) = nodes[0].chain_monitor.chain_monitor.list_stub_monitors()[0];
+		assert_eq!(cid, channel_id);
+		let monitor = nodes[0].chain_monitor.chain_monitor.get_stub_monitor(txo).unwrap();
+		let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
+			match balance {
+				channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis,
+				_ => panic!("Unexpected balance type"),
+			}
+		});
+		assert_eq!(commitment_tx.output[0].value.to_sat(), total_claimable_balance);
+	}
+
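+	// Connect further blocks so the claim can mature.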
+	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+}
+
 fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
 	// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
 	// but only for them. Because some LSPs do it with some level of trust of the clients (for a