Skip to content

Commit bbeccad

Browse files
wip
1 parent 2b3b00f commit bbeccad

File tree

4 files changed

+113
-20
lines changed

4 files changed

+113
-20
lines changed

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1959,7 +1959,7 @@ fn test_path_paused_mpp() {
19591959
// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
19601960
// (for the path 0 -> 2 -> 3) fails.
19611961
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1962-
chanmon_cfgs[0].persister.set_next_update_ret(Some(ChannelMonitorUpdateStatus::InProgress));
1962+
chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::InProgress);
19631963

19641964
// Now check that we get the right return value, indicating that the first path succeeded but
19651965
// the second got a MonitorUpdateInProgress err. This implies

lightning/src/ln/functional_test_utils.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -939,7 +939,7 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b,
939939
}
940940

941941
pub fn create_announced_chan_between_nodes<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
942-
create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001, a_flags, b_flags)
942+
create_announced_chan_between_nodes_with_value(nodes, a, b, 100_000, 10_001, a_flags, b_flags)
943943
}
944944

945945
pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {

lightning/src/ln/payment_tests.rs

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1754,3 +1754,71 @@ fn do_automatic_retries(test: AutoRetry) {
17541754
}
17551755
}
17561756
}
1757+
1758+
#[test]
1759+
fn auto_retry_partial_failure() {
1760+
// Test that we'll retry appropriately when the initial send partially fails and when a retry also partially fails.
1761+
let chanmon_cfgs = create_chanmon_cfgs(4);
1762+
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1763+
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1764+
let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1765+
1766+
let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
1767+
let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1768+
let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
1769+
let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
1770+
1771+
1772+
// This amount should force the router to split across nodes[0]'s two channels.
1773+
let amt_msat = 18_000_000;
1774+
1775+
// Marshall data to send the payment
1776+
let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], amt_msat);
1777+
#[cfg(feature = "std")]
1778+
let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
1779+
#[cfg(not(feature = "std"))]
1780+
let payment_expiry_secs = 60 * 60;
1781+
let mut invoice_features = InvoiceFeatures::empty();
1782+
invoice_features.set_variable_length_onion_required();
1783+
invoice_features.set_payment_secret_required();
1784+
invoice_features.set_basic_mpp_optional();
1785+
let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
1786+
.with_expiry_time(payment_expiry_secs as u64)
1787+
.with_features(invoice_features);
1788+
let route_params = RouteParameters {
1789+
payment_params,
1790+
final_value_msat: amt_msat,
1791+
final_cltv_expiry_delta: TEST_FINAL_CLTV,
1792+
};
1793+
1794+
// Ensure the first monitor update (for the initial send path 0 -> 1 -> 3) succeeds, but the
1795+
// second (for the initial send path 0 -> 2 -> 3) fails.
1796+
chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::Completed);
1797+
chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
1798+
// Ensure the third monitor update (for retry0's path 0 -> 1 -> 3) succeeds, but the fourth (for
1799+
// retry0's path 0 -> 2 -> 3) fails, and that monitor updates succeed after that.
1800+
chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::Completed);
1801+
chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
1802+
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1803+
1804+
// Configure the initial send, retry0, and retry1 paths.
1805+
1806+
// Send a payment that will partially fail on send, then partially fail on retry, then succeed.
1807+
match nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)) {
1808+
Err(PaymentSendFailure::PartialFailure { results, ..}) => {
1809+
assert_eq!(results[0], Ok(()));
1810+
assert_eq!(results[1], Err(APIError::ChannelUnavailable {
1811+
err: "ChannelMonitor storage failure".to_owned(),
1812+
}));
1813+
},
1814+
_ => panic!("Unexpected result"),
1815+
}
1816+
1817+
// Pass the first part of the payment along the path.
1818+
check_added_monitors!(nodes[0], 1);
1819+
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1820+
assert_eq!(msg_events.len(), 1);
1821+
pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None);
1822+
1823+
//
1824+
}

lightning/src/util/test_utils.rs

Lines changed: 43 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,16 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
7575

7676
pub struct TestRouter<'a> {
7777
pub network_graph: Arc<NetworkGraph<&'a TestLogger>>,
78+
pub next_routes: Mutex<VecDeque<Route>>,
79+
}
80+
81+
impl<'a> TestRouter<'a> {
82+
pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>) -> Self {
83+
Self {
84+
network_graph,
85+
next_routes: Mutex::new(VecDeque::new()),
86+
}
87+
}
7888
}
7989

8090
impl<'a> TestRouter<'a> {
@@ -88,6 +98,9 @@ impl<'a> Router for TestRouter<'a> {
8898
&self, payer: &PublicKey, params: &RouteParameters, first_hops: Option<&[&channelmanager::ChannelDetails]>,
8999
inflight_htlcs: &InFlightHtlcs
90100
) -> Result<Route, msgs::LightningError> {
101+
if let Some(route) = self.next_routes.lock().unwrap().pop_front() {
102+
return Ok(route)
103+
}
91104
let logger = TestLogger::new();
92105
find_route(
93106
payer, params, &self.network_graph, first_hops, &logger,
@@ -101,6 +114,15 @@ impl<'a> Router for TestRouter<'a> {
101114
fn notify_payment_probe_failed(&self, _path: &[&RouteHop], _short_channel_id: u64) {}
102115
}
103116

117+
impl<'a> Drop for TestRouter<'a> {
118+
fn drop(&mut self) {
119+
if std::thread::panicking() {
120+
return;
121+
}
122+
assert!(self.next_routes.lock().unwrap().is_empty());
123+
}
124+
}
125+
104126
pub struct OnlyReadsKeysInterface {}
105127

106128
impl EntropySource for OnlyReadsKeysInterface {
@@ -224,10 +246,9 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
224246
}
225247

226248
pub struct TestPersister {
227-
pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
228-
/// If this is set to Some(), after the next return, we'll always return this until update_ret
229-
/// is changed:
230-
pub next_update_ret: Mutex<Option<chain::ChannelMonitorUpdateStatus>>,
249+
/// The queue of update statuses we'll return. If only one is queued, we'll always return it. If
250+
/// none are queued, ::Completed will always be returned.
251+
pub update_rets: Mutex<VecDeque<chain::ChannelMonitorUpdateStatus>>,
231252
/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
232253
/// MonitorUpdateId here.
233254
pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
@@ -238,35 +259,39 @@ pub struct TestPersister {
238259
impl TestPersister {
239260
pub fn new() -> Self {
240261
Self {
241-
update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
242-
next_update_ret: Mutex::new(None),
262+
update_rets: Mutex::new(VecDeque::new()),
243263
chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
244264
offchain_monitor_updates: Mutex::new(HashMap::new()),
245265
}
246266
}
247267

268+
/// Clear the queue of update statuses and set the one we'll always return.
248269
pub fn set_update_ret(&self, ret: chain::ChannelMonitorUpdateStatus) {
249-
*self.update_ret.lock().unwrap() = ret;
270+
let mut update_rets = self.update_rets.lock().unwrap();
271+
update_rets.clear();
272+
update_rets.push_front(ret);
250273
}
251274

252-
pub fn set_next_update_ret(&self, next_ret: Option<chain::ChannelMonitorUpdateStatus>) {
253-
*self.next_update_ret.lock().unwrap() = next_ret;
275+
/// Queue an update status to return.
276+
pub fn set_next_update_ret(&self, next_ret: chain::ChannelMonitorUpdateStatus) {
277+
self.update_rets.lock().unwrap().push_back(next_ret);
254278
}
255279
}
256280
impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
257281
fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
258-
let ret = self.update_ret.lock().unwrap().clone();
259-
if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
260-
*self.update_ret.lock().unwrap() = next_ret;
261-
}
262-
ret
282+
let mut update_rets = self.update_rets.lock().unwrap();
283+
if update_rets.len() > 1 { return update_rets.pop_front().unwrap() }
284+
else if update_rets.len() == 1 { return *update_rets.front().clone().unwrap() }
285+
chain::ChannelMonitorUpdateStatus::Completed
263286
}
264287

265288
fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
266-
let ret = self.update_ret.lock().unwrap().clone();
267-
if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
268-
*self.update_ret.lock().unwrap() = next_ret;
269-
}
289+
let ret = {
290+
let mut update_rets = self.update_rets.lock().unwrap();
291+
if update_rets.len() > 1 { update_rets.pop_front().unwrap() }
292+
else if update_rets.len() == 1 { *update_rets.front().clone().unwrap() }
293+
else { chain::ChannelMonitorUpdateStatus::Completed }
294+
};
270295
if update.is_none() {
271296
self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
272297
} else {

0 commit comments

Comments
 (0)