diff --git a/fuzz/ci-fuzz.sh b/fuzz/ci-fuzz.sh
index 9894178458d..6f0074b6a1a 100755
--- a/fuzz/ci-fuzz.sh
+++ b/fuzz/ci-fuzz.sh
@@ -1,5 +1,6 @@
 #!/bin/bash
 set -e
+set -x
 
 pushd src/msg_targets
 rm msg_*.rs
diff --git a/fuzz/src/bin/chanmon_consistency_target.rs b/fuzz/src/bin/chanmon_consistency_target.rs
index c01eb57b2b0..c480b3d17c1 100644
--- a/fuzz/src/bin/chanmon_consistency_target.rs
+++ b/fuzz/src/bin/chanmon_consistency_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/chanmon_deser_target.rs b/fuzz/src/bin/chanmon_deser_target.rs
index 8f426c402f8..8b57874cf6c 100644
--- a/fuzz/src/bin/chanmon_deser_target.rs
+++ b/fuzz/src/bin/chanmon_deser_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/full_stack_target.rs b/fuzz/src/bin/full_stack_target.rs
index 527fac217d7..7dae655316b 100644
--- a/fuzz/src/bin/full_stack_target.rs
+++ b/fuzz/src/bin/full_stack_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_accept_channel_target.rs b/fuzz/src/bin/msg_accept_channel_target.rs
index 8e36dbbb64a..d28895a0fe9 100644
--- a/fuzz/src/bin/msg_accept_channel_target.rs
+++ b/fuzz/src/bin/msg_accept_channel_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_announcement_signatures_target.rs b/fuzz/src/bin/msg_announcement_signatures_target.rs
index 1b15d2fd3e9..dcfc006eb0c 100644
--- a/fuzz/src/bin/msg_announcement_signatures_target.rs
+++ b/fuzz/src/bin/msg_announcement_signatures_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_channel_announcement_target.rs b/fuzz/src/bin/msg_channel_announcement_target.rs
index 5363423076d..0efc0b41fc6 100644
--- a/fuzz/src/bin/msg_channel_announcement_target.rs
+++ b/fuzz/src/bin/msg_channel_announcement_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_channel_reestablish_target.rs b/fuzz/src/bin/msg_channel_reestablish_target.rs
index 5d8d028684c..0111d2b2754 100644
--- a/fuzz/src/bin/msg_channel_reestablish_target.rs
+++ b/fuzz/src/bin/msg_channel_reestablish_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_channel_update_target.rs b/fuzz/src/bin/msg_channel_update_target.rs
index beac3f0ce56..4bfb9860c8c 100644
--- a/fuzz/src/bin/msg_channel_update_target.rs
+++ b/fuzz/src/bin/msg_channel_update_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_closing_signed_target.rs b/fuzz/src/bin/msg_closing_signed_target.rs
index dfb9c18df89..12dbb31cd20 100644
--- a/fuzz/src/bin/msg_closing_signed_target.rs
+++ b/fuzz/src/bin/msg_closing_signed_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_commitment_signed_target.rs b/fuzz/src/bin/msg_commitment_signed_target.rs
index 585d97aaad4..407f0288cf5 100644
--- a/fuzz/src/bin/msg_commitment_signed_target.rs
+++ b/fuzz/src/bin/msg_commitment_signed_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs b/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
index 0be01b95cf3..9f76e69679a 100644
--- a/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
+++ b/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_error_message_target.rs b/fuzz/src/bin/msg_error_message_target.rs
index dbe6590ab42..dc418c93d69 100644
--- a/fuzz/src/bin/msg_error_message_target.rs
+++ b/fuzz/src/bin/msg_error_message_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_funding_created_target.rs b/fuzz/src/bin/msg_funding_created_target.rs
index 8f174cce5d8..6fb87c4082a 100644
--- a/fuzz/src/bin/msg_funding_created_target.rs
+++ b/fuzz/src/bin/msg_funding_created_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_funding_locked_target.rs b/fuzz/src/bin/msg_funding_locked_target.rs
index 47ce4a905b8..7b8f21f8a0b 100644
--- a/fuzz/src/bin/msg_funding_locked_target.rs
+++ b/fuzz/src/bin/msg_funding_locked_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_funding_signed_target.rs b/fuzz/src/bin/msg_funding_signed_target.rs
index c1b756fe673..ea05acdcee1 100644
--- a/fuzz/src/bin/msg_funding_signed_target.rs
+++ b/fuzz/src/bin/msg_funding_signed_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs b/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
index 1ebd769035a..1aba4e3fefe 100644
--- a/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
+++ b/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_init_target.rs b/fuzz/src/bin/msg_init_target.rs
index 2a39565fbbf..dd7b197a42e 100644
--- a/fuzz/src/bin/msg_init_target.rs
+++ b/fuzz/src/bin/msg_init_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_node_announcement_target.rs b/fuzz/src/bin/msg_node_announcement_target.rs
index 1c150beb0bd..63b23bd669d 100644
--- a/fuzz/src/bin/msg_node_announcement_target.rs
+++ b/fuzz/src/bin/msg_node_announcement_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_onion_hop_data_target.rs b/fuzz/src/bin/msg_onion_hop_data_target.rs
index 73b7d5f27e6..493817c7eea 100644
--- a/fuzz/src/bin/msg_onion_hop_data_target.rs
+++ b/fuzz/src/bin/msg_onion_hop_data_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_open_channel_target.rs b/fuzz/src/bin/msg_open_channel_target.rs
index 515cba9e87c..d6c9de48f28 100644
--- a/fuzz/src/bin/msg_open_channel_target.rs
+++ b/fuzz/src/bin/msg_open_channel_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_ping_target.rs b/fuzz/src/bin/msg_ping_target.rs
index 2a02fac5638..e739cd58052 100644
--- a/fuzz/src/bin/msg_ping_target.rs
+++ b/fuzz/src/bin/msg_ping_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_pong_target.rs b/fuzz/src/bin/msg_pong_target.rs
index 20304c4530e..73113472c33 100644
--- a/fuzz/src/bin/msg_pong_target.rs
+++ b/fuzz/src/bin/msg_pong_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_query_channel_range_target.rs b/fuzz/src/bin/msg_query_channel_range_target.rs
index b7e242467b9..db22fa525b8 100644
--- a/fuzz/src/bin/msg_query_channel_range_target.rs
+++ b/fuzz/src/bin/msg_query_channel_range_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_query_short_channel_ids_target.rs b/fuzz/src/bin/msg_query_short_channel_ids_target.rs
index ad18f21e2c0..6c470e3b12e 100644
--- a/fuzz/src/bin/msg_query_short_channel_ids_target.rs
+++ b/fuzz/src/bin/msg_query_short_channel_ids_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_reply_channel_range_target.rs b/fuzz/src/bin/msg_reply_channel_range_target.rs
index 283ac0d4662..68651347b6f 100644
--- a/fuzz/src/bin/msg_reply_channel_range_target.rs
+++ b/fuzz/src/bin/msg_reply_channel_range_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs b/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
index c8b0e7804db..de3e9f1cce3 100644
--- a/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
+++ b/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_revoke_and_ack_target.rs b/fuzz/src/bin/msg_revoke_and_ack_target.rs
index 34df3579d7d..e0f781986d4 100644
--- a/fuzz/src/bin/msg_revoke_and_ack_target.rs
+++ b/fuzz/src/bin/msg_revoke_and_ack_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_shutdown_target.rs b/fuzz/src/bin/msg_shutdown_target.rs
index f682e2afaa8..6220fc92803 100644
--- a/fuzz/src/bin/msg_shutdown_target.rs
+++ b/fuzz/src/bin/msg_shutdown_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_add_htlc_target.rs b/fuzz/src/bin/msg_update_add_htlc_target.rs
index a8b3d4bab32..dffdb830939 100644
--- a/fuzz/src/bin/msg_update_add_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_add_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fail_htlc_target.rs b/fuzz/src/bin/msg_update_fail_htlc_target.rs
index ee0b5206789..2828b396ad4 100644
--- a/fuzz/src/bin/msg_update_fail_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_fail_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs b/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
index 1b5435c6789..8bf82a517cf 100644
--- a/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fee_target.rs b/fuzz/src/bin/msg_update_fee_target.rs
index b06d958ede6..e9e63f28a8c 100644
--- a/fuzz/src/bin/msg_update_fee_target.rs
+++ b/fuzz/src/bin/msg_update_fee_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fulfill_htlc_target.rs b/fuzz/src/bin/msg_update_fulfill_htlc_target.rs
index 26f53cfcf54..4820b0adf9f 100644
--- a/fuzz/src/bin/msg_update_fulfill_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_fulfill_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/peer_crypt_target.rs b/fuzz/src/bin/peer_crypt_target.rs
index e0fc97b5798..8f62626983c 100644
--- a/fuzz/src/bin/peer_crypt_target.rs
+++ b/fuzz/src/bin/peer_crypt_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/router_target.rs b/fuzz/src/bin/router_target.rs
index c2ef438bb8d..4df8b2a0065 100644
--- a/fuzz/src/bin/router_target.rs
+++ b/fuzz/src/bin/router_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/target_template.txt b/fuzz/src/bin/target_template.txt
index f73fc6d83d3..7db3bc80b9b 100644
--- a/fuzz/src/bin/target_template.txt
+++ b/fuzz/src/bin/target_template.txt
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/zbase32_target.rs b/fuzz/src/bin/zbase32_target.rs
index ae96ce409fc..0b9d629151d 100644
--- a/fuzz/src/bin/zbase32_target.rs
+++ b/fuzz/src/bin/zbase32_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
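The hunk above is propagated from fuzz/src/bin/target_template.txt to every generated target: instead of panicking on the first failing test case, the runner now joins every worker thread, prints each failure, and panics once at the end with a summary list. A minimal sketch of that pattern (hypothetical names, not the harness itself):

```rust
use std::thread;

// Join every worker before panicking so one failure cannot hide the others.
fn run_all(cases: Vec<(String, thread::JoinHandle<Option<String>>)>) {
    let mut failed = Vec::new();
    for (name, handle) in cases {
        // `Some(output)` signals a failing case; keep going instead of panicking here.
        if let Some(output) = handle.join().unwrap() {
            println!("\nOutput of {}:\n{}\n", name, output);
            failed.push(name);
        }
    }
    if !failed.is_empty() {
        println!("Test cases which failed:");
        for name in &failed {
            println!("{}", name);
        }
        panic!();
    }
}

fn main() {
    let cases = vec![
        ("case_a".to_string(), thread::spawn(|| -> Option<String> { None })),
        ("case_b".to_string(), thread::spawn(|| -> Option<String> { None })),
    ];
    run_all(cases);
}
```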
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 4da65f7a2ab..732ccd27e88 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -30,9 +30,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::Confirm;
-use lightning::chain::chainmonitor;
-use lightning::chain::channelmonitor;
+use lightning::chain::{chainmonitor, channelmonitor, Confirm, Watch};
 use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
@@ -40,7 +38,7 @@ use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs};
 use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
-use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, ErrorAction, UpdateAddHTLC, Init};
+use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER};
 use lightning::util::errors::APIError;
 use lightning::util::events;
@@ -48,7 +46,6 @@ use lightning::util::logger::Logger;
 use lightning::util::config::UserConfig;
 use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use lightning::util::test_utils::OnlyReadsKeysInterface;
 
 use lightning::routing::router::{Route, RouteHop};
 
@@ -91,6 +88,7 @@ impl Writer for VecWriter {
 
 struct TestChainMonitor {
 	pub logger: Arc<dyn Logger>,
+	pub keys: Arc<KeyProvider>,
 	pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
 	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
 	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
@@ -102,10 +100,11 @@ struct TestChainMonitor {
 	pub should_update_manager: atomic::AtomicBool,
 }
 impl TestChainMonitor {
-	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>) -> Self {
+	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
 		Self {
 			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)),
 			logger,
+			keys,
 			update_ret: Mutex::new(Ok(())),
 			latest_monitors: Mutex::new(HashMap::new()),
 			should_update_manager: atomic::AtomicBool::new(false),
@@ -131,12 +130,13 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
 			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
 		};
 		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
-			read(&mut Cursor::new(&map_entry.get().1), &OnlyReadsKeysInterface {}).unwrap().1;
+			read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
 		deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
 		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
+		assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
 		self.update_ret.lock().unwrap().clone()
 	}
 
@@ -334,9 +334,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 
 	macro_rules! make_node {
 		($node_id: expr) => { {
 			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));
-			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), revoked_commitments: Mutex::new(HashMap::new()) });
+			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager)));
+
 			let mut config = UserConfig::default();
 			config.channel_options.fee_proportional_millionths = 0;
 			config.channel_options.announced_channel = true;
@@ -354,7 +354,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 		($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { {
 			let keys_manager = Arc::clone(& $keys_manager);
 			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));
+			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager)));
 
 			let mut config = UserConfig::default();
 			config.channel_options.fee_proportional_millionths = 0;
@@ -363,7 +363,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 			let mut monitors = HashMap::new();
 			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
 			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
-				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &OnlyReadsKeysInterface {}).expect("Failed to read monitor").1);
+				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &*$keys_manager).expect("Failed to read monitor").1);
 				chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
 			}
 			let mut monitor_refs = HashMap::new();
@@ -381,7 +381,11 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 				channel_monitors: monitor_refs,
 			};
 
-			(<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
+			let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
+			for (funding_txo, mon) in monitors.drain() {
+				assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
+			}
+			res
 		} }
 	}
@@ -603,6 +607,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 							events::MessageSendEvent::SendFundingLocked { .. } => {
 								// Can be generated as a reestablish response
 							},
+							events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
+								// Can be generated as a reestablish response
+							},
 							events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
 								// Can be generated due to a payment forward being rejected due to a
 								// channel having previously failed a monitor update
@@ -623,8 +630,8 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
 						events::MessageSendEvent::SendChannelReestablish { .. } => {},
 						events::MessageSendEvent::SendFundingLocked { .. } => {},
+						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
 						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
-						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
 						_ => panic!("Unhandled message event"),
 					}
 				}
@@ -636,8 +643,8 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
 						events::MessageSendEvent::SendChannelReestablish { .. } => {},
 						events::MessageSendEvent::SendFundingLocked { .. } => {},
+						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
 						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
-						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
 						_ => panic!("Unhandled message event"),
 					}
 				}
@@ -649,17 +656,17 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 				for event in events.drain(..) {
 					let push = match event {
 						events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
-							if *node_id != drop_node_id { true } else { false }
+							if *node_id != drop_node_id { true } else { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
 						},
 						events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
-							if *node_id != drop_node_id { true } else { false }
+							if *node_id != drop_node_id { true } else { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
 						},
 						events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
-							if *node_id != drop_node_id { true } else { false }
+							if *node_id != drop_node_id { true } else { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
 						},
 						events::MessageSendEvent::SendFundingLocked { .. } => false,
+						events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
 						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
-						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
 						_ => panic!("Unhandled message event"),
 					};
 					if push { msg_sink.push(event); }
@@ -797,6 +804,10 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						chan_a_disconnected = true;
 						drain_msg_events_on_disconnect!(0);
 					}
+					if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
+						node_a_ser.0.clear();
+						nodes[0].write(&mut node_a_ser).unwrap();
+					}
 					let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a);
 					nodes[0] = new_node_a;
 					monitor_a = new_monitor_a;
@@ -824,6 +835,10 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						chan_b_disconnected = true;
 						drain_msg_events_on_disconnect!(2);
 					}
+					if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
+						node_c_ser.0.clear();
+						nodes[2].write(&mut node_c_ser).unwrap();
+					}
					let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c);
 					nodes[2] = new_node_c;
 					monitor_c = new_monitor_c;
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index d1da9f7ef30..79bbfadc768 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -20,11 +20,12 @@ use chain::transaction::OutPoint;
 use chain::Listen;
 use chain::Watch;
 use ln::{PaymentPreimage, PaymentHash};
-use ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
+use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
@@ -37,6 +38,8 @@ use ln::functional_test_utils::*;
 
 use util::test_utils;
 
+use std::collections::HashMap;
+
 // If persister_fail is true, we have the persister return a PermanentFailure
 // instead of the higher-level ChainMonitor.
 fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
@@ -1971,3 +1974,202 @@ fn test_path_paused_mpp() {
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
 }
+
+fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
+	// Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
+	// properly free them on reconnect. We previously failed such HTLCs upon serialization, but
+	// that behavior was both somewhat unexpected and also broken (there was a debug assertion
+	// which failed in such a case).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2;
+	let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]);
+	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
+
+	// Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed
+	// set but AwaitingRemoteRevoke unset. When this test was written, any attempts to send an HTLC
+	// while MonitorUpdateFailed is set are immediately failed-backwards. Thus, the only way to get
+	// an AddHTLC into the holding cell is to add it while AwaitingRemoteRevoke is set but
+	// MonitorUpdateFailed is unset, and then swap the flags.
+	//
+	// We do this by:
+	//  a) routing a payment from node B to node A,
+	//  b) sending a payment from node A to node B without delivering any of the generated messages,
+	//     putting node A in AwaitingRemoteRevoke,
+	//  c) sending a second payment from node A to node B, which is immediately placed in the
+	//     holding cell,
+	//  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
+	//     when we try to persist the payment preimage,
+	//  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
+	//     clearing AwaitingRemoteRevoke on node A.
+	//
+	// Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c)
+	// will not be freed from the holding cell.
+	let (payment_preimage_0, _, _) = route_payment(&nodes[1], &[&nodes[0]], 100000);
+
+	let route = {
+		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+		get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap()
+	};
+
+	nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let send = SendEvent::from_node(&nodes[0]);
+	assert_eq!(send.msgs.len(), 1);
+
+	nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+	check_added_monitors!(nodes[0], 0);
+
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+	assert!(nodes[0].node.claim_funds(payment_preimage_0));
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
+	check_added_monitors!(nodes[1], 1);
+
+	let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+	check_added_monitors!(nodes[0], 1);
+
+	if disconnect {
+		// Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
+		// disconnect the peers. Note that the fuzzer originally found this issue because
+		// deserializing a ChannelManager in this state causes an assertion failure.
+		if reload_a {
+			let nodes_0_serialized = nodes[0].node.encode();
+			let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+			nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
+			persister = test_utils::TestPersister::new();
+			let keys_manager = &chanmon_cfgs[0].keys_manager;
+			new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+			nodes[0].chain_monitor = &new_chain_monitor;
+			let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+			let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+				&mut chan_0_monitor_read, keys_manager).unwrap();
+			assert!(chan_0_monitor_read.is_empty());
+
+			let mut nodes_0_read = &nodes_0_serialized[..];
+			let config = UserConfig::default();
+			nodes_0_deserialized = {
+				let mut channel_monitors = HashMap::new();
+				channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+				<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+					default_config: config,
+					keys_manager,
+					fee_estimator: node_cfgs[0].fee_estimator,
+					chain_monitor: nodes[0].chain_monitor,
+					tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+					logger: nodes[0].logger,
+					channel_monitors,
+				}).unwrap().1
+			};
+			nodes[0].node = &nodes_0_deserialized;
+			assert!(nodes_0_read.is_empty());
+
+			nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+			check_added_monitors!(nodes[0], 1);
+		} else {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		}
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		// Now reconnect the two
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+		assert_eq!(reestablish_1.len(), 1);
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		assert_eq!(reestablish_2.len(), 1);
+
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+		let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		check_added_monitors!(nodes[1], 0);
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+		let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+
+		assert!(resp_0.0.is_none());
+		assert!(resp_0.1.is_none());
+		assert!(resp_0.2.is_none());
+		assert!(resp_1.0.is_none());
+		assert!(resp_1.1.is_none());
+
+		// Check that the freshly-generated cs is equal to the original (which we will deliver in a
+		// moment).
+		if let Some(pending_cs) = resp_1.2 {
+			assert!(pending_cs.update_add_htlcs.is_empty());
+			assert!(pending_cs.update_fail_htlcs.is_empty());
+			assert!(pending_cs.update_fulfill_htlcs.is_empty());
+			assert_eq!(pending_cs.commitment_signed, cs);
+		} else { panic!(); }
+
+		// There should be no monitor updates as we are still pending awaiting a failed one.
+		check_added_monitors!(nodes[0], 0);
+		check_added_monitors!(nodes[1], 0);
+	}
+
+	// If we finish updating the monitor, we should free the holding cell right away (this did
+	// not occur prior to #756).
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
+	let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+	nodes[0].node.channel_monitor_updated(&funding_txo, mon_id);
+
+	// New outbound messages should be generated immediately upon a call to
+	// get_and_clear_pending_msg_events (but not before).
+	check_added_monitors!(nodes[0], 0);
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	check_added_monitors!(nodes[0], 1);
+	assert_eq!(events.len(), 1);
+
+	// Deliver the pending in-flight CS
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+	check_added_monitors!(nodes[0], 1);
+
+	let commitment_msg = match events.pop().unwrap() {
+		MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+			assert_eq!(node_id, nodes[1].node.get_our_node_id());
+			assert!(updates.update_fail_htlcs.is_empty());
+			assert!(updates.update_fail_malformed_htlcs.is_empty());
+			assert!(updates.update_fee.is_none());
+			assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+			nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			expect_payment_sent!(nodes[1], payment_preimage_0);
+			assert_eq!(updates.update_add_htlcs.len(), 1);
+			nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+			updates.commitment_signed
+		},
+		_ => panic!("Unexpected event type!"),
+	};
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
+	check_added_monitors!(nodes[1], 1);
+
+	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000);
+	check_added_monitors!(nodes[1], 1);
+
+	commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000);
+
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+#[test]
+fn channel_holding_cell_serialize() {
+	do_channel_holding_cell_serialize(true, true);
+	do_channel_holding_cell_serialize(true, false);
+	do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
+}
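The new test's core assertion is that a full serialize/drop/deserialize roundtrip of node A preserves pending holding-cell HTLCs instead of failing them. A self-contained toy harness (not the functional-test macros) showing that roundtrip invariant, with a hypothetical encoding:

```rust
// TryInto is in the 2021-edition prelude; the import keeps older editions happy.
use std::convert::TryInto;

#[derive(Debug, PartialEq, Clone)]
struct NodeState { holding_cell: Vec<u64> }

impl NodeState {
    // Toy serialization: big-endian u64s back to back.
    fn encode(&self) -> Vec<u8> {
        self.holding_cell.iter().flat_map(|v| v.to_be_bytes()).collect()
    }
    fn read(bytes: &[u8]) -> NodeState {
        let holding_cell = bytes.chunks(8)
            .map(|c| u64::from_be_bytes(c.try_into().unwrap()))
            .collect();
        NodeState { holding_cell }
    }
}

fn main() {
    let node = NodeState { holding_cell: vec![1, 2] };
    let reloaded = NodeState::read(&node.encode());
    // The point of the test above: pending holding-cell entries must survive a reload.
    assert_eq!(node, reloaded);
}
```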
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index a21375483da..639ac84495e 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1332,7 +1332,7 @@ impl<Signer: Sign> Channel<Signer> {
 	///
 	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
 	/// but then have a reorg which settles on an HTLC-failure on chain.
-	pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> {
+	pub fn get_update_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
 			panic!("Was asked to fail an HTLC when channel was not in an operational state");
 		}
@@ -1382,6 +1382,7 @@ impl<Signer: Sign> Channel<Signer> {
 				_ => {}
 			}
 		}
+		log_trace!(logger, "Placing failure for HTLC ID {} in holding cell", htlc_id_arg);
 		self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
 			htlc_id: htlc_id_arg,
 			err_packet,
@@ -1389,6 +1390,7 @@ impl<Signer: Sign> Channel<Signer> {
 			return Ok(None);
 		}
 
+		log_trace!(logger, "Failing HTLC ID {} back with an update_fail_htlc message", htlc_id_arg);
 		{
 			let htlc = &mut self.pending_inbound_htlcs[pending_idx];
 			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
@@ -2308,6 +2310,16 @@ impl<Signer: Sign> Channel<Signer> {
 		}, commitment_signed, closing_signed, monitor_update))
 	}
 
+	/// Public version of the below, checking relevant preconditions first.
+	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
+	/// returns `(None, Vec::new())`.
+	pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+		if self.channel_state >= ChannelState::ChannelFunded as u32 &&
+		   (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 {
+			self.free_holding_cell_htlcs(logger)
+		} else { Ok((None, Vec::new())) }
+	}
+
 	/// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
 	/// fulfilling or failing the last pending HTLC)
 	fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
@@ -2372,7 +2384,7 @@ impl<Signer: Sign> Channel<Signer> {
 					},
 					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
-						match self.get_update_fail_htlc(htlc_id, err_packet.clone()) {
+						match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
 							Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
 							Err(e) => {
 								if let ChannelError::Ignore(_) = e {}
@@ -2685,19 +2697,16 @@ impl<Signer: Sign> Channel<Signer> {
 		}
 	}
 
-	/// Removes any uncommitted HTLCs, to be used on peer disconnection, including any pending
-	/// HTLCs that we intended to add but haven't as we were waiting on a remote revoke.
-	/// Returns the set of PendingHTLCStatuses from remote uncommitted HTLCs (which we're
-	/// implicitly dropping) and the payment_hashes of HTLCs we tried to add but are dropping.
+	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
+	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
+	/// resent.
 	/// No further message handling calls may be made until a channel_reestablish dance has
 	/// completed.
-	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Vec<(HTLCSource, PaymentHash)> where L::Target: Logger {
-		let mut outbound_drops = Vec::new();
-
+	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
 		assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
 		if self.channel_state < ChannelState::FundingSent as u32 {
 			self.channel_state = ChannelState::ShutdownComplete as u32;
-			return outbound_drops;
+			return;
 		}
 		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
 		// will be retransmitted.
@@ -2740,23 +2749,8 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 
-		self.holding_cell_htlc_updates.retain(|htlc_update| {
-			match htlc_update {
-				// Note that currently on channel reestablish we assert that there are
-				// no holding cell HTLC update_adds, so if in the future we stop
-				// dropping added HTLCs here and failing them backwards, then there will
-				// need to be corresponding changes made in the Channel's re-establish
-				// logic.
-				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
-					outbound_drops.push((source.clone(), payment_hash.clone()));
-					false
-				},
-				&HTLCUpdateAwaitingACK::ClaimHTLC {..} | &HTLCUpdateAwaitingACK::FailHTLC {..} => true,
-			}
-		});
 		self.channel_state |= ChannelState::PeerDisconnected as u32;
-		log_debug!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops and {} waiting-to-locally-announced HTLC drops on channel {}", outbound_drops.len(), inbound_drop_count, log_bytes!(self.channel_id()));
-		outbound_drops
+		log_debug!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
 	}
 
 	/// Indicates that a ChannelMonitor update failed to be stored by the client and further
@@ -2915,7 +2909,7 @@ impl<Signer: Sign> Channel<Signer> {
 	/// May panic if some calls other than message-handling calls (which will all Err immediately)
 	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
-	pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitorUpdate>, RAACommitmentOrder, Option<msgs::Shutdown>), ChannelError> where L::Target: Logger {
+	pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitorUpdate>, RAACommitmentOrder, Vec<(HTLCSource, PaymentHash)>, Option<msgs::Shutdown>), ChannelError> where L::Target: Logger {
 		if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
 			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
 			// almost certainly indicates we are going to end up out-of-sync in some way, so we
@@ -2966,7 +2960,7 @@ impl<Signer: Sign> Channel<Signer> {
 				return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet".to_owned()));
 			}
 			// Short circuit the whole handler as there is nothing we can resend them
-			return Ok((None, None, None, None, RAACommitmentOrder::CommitmentFirst, shutdown_msg));
+			return Ok((None, None, None, None, RAACommitmentOrder::CommitmentFirst, Vec::new(), shutdown_msg));
 		}
 
 		// We have OurFundingLocked set!
@@ -2974,7 +2968,7 @@ impl<Signer: Sign> Channel<Signer> {
 			return Ok((Some(msgs::FundingLocked {
 				channel_id: self.channel_id(),
 				next_per_commitment_point,
-			}), None, None, None, RAACommitmentOrder::CommitmentFirst, shutdown_msg));
+			}), None, None, None, RAACommitmentOrder::CommitmentFirst, Vec::new(), shutdown_msg));
 		}
 
 		let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
@@ -3015,14 +3009,6 @@ impl<Signer: Sign> Channel<Signer> {
 		}
 
 			if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 {
-				// Note that if in the future we no longer drop holding cell update_adds on peer
-				// disconnect, this logic will need to be updated.
-				for htlc_update in self.holding_cell_htlc_updates.iter() {
-					if let &HTLCUpdateAwaitingACK::AddHTLC { .. } = htlc_update {
-						debug_assert!(false, "There shouldn't be any add-HTLCs in the holding cell now because they should have been dropped on peer disconnect. Panic here because said HTLCs won't be handled correctly.");
-					}
-				}
-
 				// We're up-to-date and not waiting on a remote revoke (if we are our
 				// channel_reestablish should result in them sending a revoke_and_ack), but we may
 				// have received some updates while we were disconnected. Free the holding cell
@@ -3031,20 +3017,14 @@ impl<Signer: Sign> Channel<Signer> {
 					Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
 					Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
 					Ok((Some((commitment_update, monitor_update)), htlcs_to_fail)) => {
-						// If in the future we no longer drop holding cell update_adds on peer
-						// disconnect, we may be handed some HTLCs to fail backwards here.
-						assert!(htlcs_to_fail.is_empty());
-						return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), shutdown_msg));
+						return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), htlcs_to_fail, shutdown_msg));
 					},
 					Ok((None, htlcs_to_fail)) => {
-						// If in the future we no longer drop holding cell update_adds on peer
-						// disconnect, we may be handed some HTLCs to fail backwards here.
-						assert!(htlcs_to_fail.is_empty());
-						return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg));
+						return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), htlcs_to_fail, shutdown_msg));
 					},
 				}
 			} else {
-				return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg));
+				return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), Vec::new(), shutdown_msg));
 			}
 		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
 			if required_revoke.is_some() {
@@ -3055,10 +3035,10 @@ impl<Signer: Sign> Channel<Signer> {
 
 			if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
 				self.monitor_pending_commitment_signed = true;
-				return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), shutdown_msg));
+				return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), Vec::new(), shutdown_msg));
 			}
 
-			return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update(logger)), None, self.resend_order.clone(), shutdown_msg));
+			return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update(logger)), None, self.resend_order.clone(), Vec::new(), shutdown_msg));
 		} else {
 			return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()));
 		}
@@ -4394,7 +4374,7 @@ impl Readable for ChannelUpdateStatus {
 
 impl<Signer: Sign> Writeable for Channel<Signer> {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
-		// called but include holding cell updates (and obviously we don't modify self).
+		// called.
 		writer.write_all(&[SERIALIZATION_VERSION; 1])?;
 		writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
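The new `maybe_free_holding_cell_htlcs` above only frees the holding cell when the channel is funded and none of AwaitingRemoteRevoke, PeerDisconnected, or MonitorUpdateFailed is set; otherwise it is a no-op. A self-contained sketch of that gating check (the flag values are illustrative, not lightning's actual ones; in the real code ChannelFunded is part of an ordered state machine, hence `>=` rather than a mask test):

```rust
const CHANNEL_FUNDED: u32         = 1 << 0;
const AWAITING_REMOTE_REVOKE: u32 = 1 << 1;
const PEER_DISCONNECTED: u32      = 1 << 2;
const MONITOR_UPDATE_FAILED: u32  = 1 << 3;

fn can_free_holding_cell(channel_state: u32) -> bool {
    // Funded, and not blocked by a pending revoke, a disconnected peer, or a
    // failed monitor update; otherwise freeing must be a no-op.
    channel_state >= CHANNEL_FUNDED &&
        (channel_state & (AWAITING_REMOTE_REVOKE | PEER_DISCONNECTED | MONITOR_UPDATE_FAILED)) == 0
}

fn main() {
    assert!(can_free_holding_cell(CHANNEL_FUNDED));
    assert!(!can_free_holding_cell(CHANNEL_FUNDED | MONITOR_UPDATE_FAILED));
}
```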
 macro_rules! break_chan_entry {
 	($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
 		match $res {
 			Ok(res) => res,
-			Err(ChannelError::Ignore(msg)) => {
-				break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
-			},
-			Err(ChannelError::Close(msg)) => {
-				log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
+			Err(e) => {
+				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+				if drop {
+					$entry.remove_entry();
 				}
-				break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
-			},
-			Err(ChannelError::CloseDelayBroadcast(_)) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+				break Err(res);
+			}
 		}
 	}
 }
@@ -790,25 +812,12 @@ macro_rules! try_chan_entry {
 	($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
 		match $res {
 			Ok(res) => res,
-			Err(ChannelError::Ignore(msg)) => {
-				return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
-			},
-			Err(ChannelError::Close(msg)) => {
-				log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
-				}
-				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
-			},
-			Err(ChannelError::CloseDelayBroadcast(msg)) => {
-				log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
+			Err(e) => {
+				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+				if drop {
+					$entry.remove_entry();
 				}
-				let shutdown_res = chan.force_shutdown(false);
-				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+				return Err(res);
 			}
 		}
 	}
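Both macros now funnel into convert_chan_err! and only afterwards evict the map entry. The borrow-friendly ordering, mutate through the occupied entry first and consume the entry handle last, looks like this in miniature (again with toy types):

use std::collections::{hash_map::Entry, HashMap};

struct Channel {
    force_closed: bool,
}

impl Channel {
    fn force_shutdown(&mut self) {
        self.force_closed = true;
    }
}

// Process a channel-level result; on fatal errors shut the channel down in
// place, then remove the map entry, mirroring `if drop { $entry.remove_entry(); }`.
fn process(map: &mut HashMap<u32, Channel>, id: u32, fatal: bool) -> Result<(), String> {
    match map.entry(id) {
        Entry::Occupied(mut entry) => {
            // First mutate through the entry (the macro does this via convert_chan_err!)...
            let should_drop = if fatal {
                entry.get_mut().force_shutdown();
                true
            } else {
                false
            };
            // ...and only afterwards consume the entry handle to evict it.
            if should_drop {
                entry.remove_entry();
                return Err(format!("channel {} closed", id));
            }
            Ok(())
        }
        Entry::Vacant(_) => Err("no such channel".to_owned()),
    }
}

fn main() {
    let mut channels = HashMap::new();
    channels.insert(7, Channel { force_closed: false });
    assert!(process(&mut channels, 7, true).is_err());
    assert!(!channels.contains_key(&7));
}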
@@ -818,13 +827,12 @@ macro_rules! handle_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
 		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
 	};
-	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+	($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
 		match $err {
 			ChannelMonitorUpdateErr::PermanentFailure => {
-				log_error!($self.logger, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
+				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+				if let Some(short_id) = $chan.get_short_channel_id() {
+					$short_to_id.remove(&short_id);
 				}
 				// TODO: $failed_fails is dropped here, which will cause other channels to hit the
 				// chain in a confused state! We need to move them into the ChannelMonitor which
@@ -835,12 +843,12 @@ macro_rules! handle_monitor_err {
 				// splitting hairs we'd prefer to claim payments that were to us, but we haven't
 				// given up the preimage yet, so might as well just wait until the payment is
 				// retried, avoiding the on-chain fees.
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
-				res
+				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
+				(res, true)
 			},
 			ChannelMonitorUpdateErr::TemporaryFailure => {
 				log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
-					log_bytes!($entry.key()[..]),
+					log_bytes!($chan_id[..]),
 					if $resend_commitment && $resend_raa {
 						match $action_type {
 							RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
@@ -857,11 +865,18 @@ macro_rules! handle_monitor_err {
 				if !$resend_raa {
 					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
 				}
-				$entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-				Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
+				$chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+				(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
 			},
 		}
-	}
+	};
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
+		let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+		if drop {
+			$entry.remove_entry();
+		}
+		res
+	} };
 }

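The rewritten handle_monitor_err! has two arms: a low-level arm that works on a bare channel reference and reports (result, should_remove), plus the original entry-based arm expressed in terms of it, so existing call sites compile unchanged. A self-contained toy version of that delegation idiom, under the assumption that negative values stand in for failed monitor updates:

// A toy version of the two-arm delegation: the detailed arm reports extra
// state, the convenience arm consumes that state so call sites stay terse.
macro_rules! handle_err {
    // Low-level arm: operates on a bare slot, reports (result, should_remove).
    ($val: expr, $chan: expr) => {{
        let failed = $val < 0;
        if failed {
            *$chan = None; // stand-in for force_shutdown() bookkeeping
        }
        (if failed { Err("monitor update failed") } else { Ok(()) }, failed)
    }};
    // Convenience arm: also "evicts" the slot, mirroring $entry.remove_entry().
    ($val: expr, $chan: expr, remove) => {{
        let (res, should_remove) = handle_err!($val, $chan);
        if should_remove {
            // In LDK this is where the HashMap entry itself is removed.
        }
        res
    }};
}

fn main() {
    let mut chan: Option<u8> = Some(1);
    let res: Result<(), &str> = handle_err!(-1i32, &mut chan, remove);
    assert!(res.is_err());
    assert!(chan.is_none());
}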
 macro_rules! return_monitor_err {
@@ -885,6 +900,133 @@ macro_rules! maybe_break_monitor_err {
 	}
 }

+macro_rules! handle_chan_restoration_locked {
+	($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
+		$raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+		$pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr) => { {
+		let mut htlc_forwards = None;
+		let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
+
+		let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
+		let chanmon_update_is_none = chanmon_update.is_none();
+		let res = loop {
+			let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
+			if !forwards.is_empty() {
+				htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+					$channel_entry.get().get_funding_txo().unwrap(), forwards));
+			}
+
+			if chanmon_update.is_some() {
+				// On reconnect, we, by definition, only resend a funding_locked if there have been
+				// no commitment updates, so the only channel monitor update which could also be
+				// associated with a funding_locked would be the funding_created/funding_signed
+				// monitor update. That monitor update failing implies that we won't send
+				// funding_locked until it's been updated, so we can't have a funding_locked and a
+				// monitor update here (so we don't bother to handle it correctly below).
+				assert!($funding_locked.is_none());
+				// A channel monitor update makes no sense without either a funding_locked or a
+				// commitment update to process after it. Since we can't have a funding_locked, we
+				// only bother to handle the monitor-update + commitment_update case below.
+				assert!($commitment_update.is_some());
+			}
+
+			if let Some(msg) = $funding_locked {
+				// Similar to the above, this implies that we're letting the funding_locked fly
+				// before it should be allowed to.
+				assert!(chanmon_update.is_none());
+				$channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+					node_id: counterparty_node_id,
+					msg,
+				});
+				if let Some(announcement_sigs) = $self.get_announcement_sigs($channel_entry.get()) {
+					$channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+						node_id: counterparty_node_id,
+						msg: announcement_sigs,
+					});
+				}
+				$channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
+			}
+
+			let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
+			if let Some(monitor_update) = chanmon_update {
+				// We only ever broadcast a funding transaction in response to a funding_signed
+				// message and the resulting monitor update. Thus, on channel_reestablish
+				// message handling we can't have a funding transaction to broadcast. When
+				// processing the completion of a monitor update which resulted in a funding
+				// broadcast, we cannot have a second monitor update, thus this case would
+				// indicate a bug.
+				assert!(funding_broadcastable.is_none());
+				// Given we were just reconnected or finished updating a channel monitor, the
+				// only case where we can get a new ChannelMonitorUpdate would be if we also
+				// have some commitment updates to send as well.
+				assert!($commitment_update.is_some());
+				if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+					// channel_reestablish doesn't guarantee the order it returns is sensible
+					// for the messages it returns, but if we're setting what messages to
+					// re-transmit on monitor update success, we need to make sure it is sane.
+					let mut order = $order;
+					if $raa.is_none() {
+						order = RAACommitmentOrder::CommitmentFirst;
+					}
+					break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
+				}
+			}
+
+			macro_rules! handle_cs { () => {
+				if let Some(update) = $commitment_update {
+					$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+						node_id: counterparty_node_id,
+						updates: update,
+					});
+				}
+			} }
+			macro_rules! handle_raa { () => {
+				if let Some(revoke_and_ack) = $raa {
+					$channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+						node_id: counterparty_node_id,
+						msg: revoke_and_ack,
+					});
+				}
+			} }
+			match $order {
+				RAACommitmentOrder::CommitmentFirst => {
+					handle_cs!();
+					handle_raa!();
+				},
+				RAACommitmentOrder::RevokeAndACKFirst => {
+					handle_raa!();
+					handle_cs!();
+				},
+			}
+			if let Some(tx) = funding_broadcastable {
+				log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+				$self.tx_broadcaster.broadcast_transaction(&tx);
+			}
+			break Ok(());
+		};
+
+		if chanmon_update_is_none {
+			// If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
+			// above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
+			// should *never* end up calling back to `chain_monitor.update_channel()`.
+			assert!(res.is_ok());
+		}
+
+		(htlc_forwards, res, counterparty_node_id)
+	} }
+}
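Note the loop { ... break Ok(()); } wrapper in the macro body above. It is Rust's plain-loop early-exit idiom: any step can break out with an Err while the post-processing after the loop still runs on every path. In isolation:

// The `loop { ... break value }` idiom: a block you can exit early from with
// a value, without a helper function or a `?`-compatible error type.
fn run_steps(fail_at: u32) -> Result<(), String> {
    let res = loop {
        if fail_at == 1 {
            break Err("step 1 failed".to_owned());
        }
        if fail_at == 2 {
            break Err("step 2 failed".to_owned());
        }
        // All steps succeeded; fall out with Ok like the macro's `break Ok(());`.
        break Ok(());
    };
    // Post-processing that must happen on every path goes here.
    res
}

fn main() {
    assert!(run_steps(0).is_ok());
    assert_eq!(run_steps(2).unwrap_err(), "step 2 failed");
}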
+
+macro_rules! post_handle_chan_restoration {
+	($self: ident, $locked_res: expr) => { {
+		let (htlc_forwards, res, counterparty_node_id) = $locked_res;
+
+		let _ = handle_error!($self, res, counterparty_node_id);
+
+		if let Some(forwards) = htlc_forwards {
+			$self.forward_htlcs(&mut [forwards][..]);
+		}
+	} }
+}
+
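post_handle_chan_restoration! exists because handle_error! and forward_htlcs can call back into user code or re-take the channel_state lock, so they must run after the lock held by the first phase is dropped. A minimal sketch of that compute-under-lock, act-after-unlock split, with a toy Manager type and illustrative method names:

use std::sync::Mutex;

struct Manager {
    state: Mutex<Vec<u64>>,
}

impl Manager {
    // Phase 1: compute everything that needs the lock, return plain data.
    fn restore_locked(&self) -> Vec<u64> {
        let mut state = self.state.lock().unwrap();
        state.drain(..).collect()
    }

    // Phase 2: act on that data with no locks held, so callbacks or user code
    // invoked here can re-enter and take `state` without deadlocking.
    fn post_restore(&self, forwards: Vec<u64>) {
        for f in forwards {
            // e.g. forward_htlcs / handle_error! in the real code
            self.state.lock().unwrap().push(f + 1);
        }
    }
}

fn main() {
    let mgr = Manager { state: Mutex::new(vec![1, 2, 3]) };
    let pending = mgr.restore_locked(); // lock released when this call returns
    mgr.post_restore(pending);
    assert_eq!(*mgr.state.lock().unwrap(), vec![2, 3, 4]);
}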
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 	where M::Target: chain::Watch<Signer>,
 		T::Target: BroadcasterInterface,
@@ -1927,7 +2069,7 @@ impl ChannelMana
 				},
 				HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
 					log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
-					match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+					match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
 						Err(e) => {
 							if let ChannelError::Ignore(msg) = e {
 								log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
@@ -2593,85 +2735,24 @@ impl ChannelMana
 	pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

-		let mut close_results = Vec::new();
-		let mut htlc_forwards = Vec::new();
-		let mut htlc_failures = Vec::new();
-		let mut pending_events = Vec::new();
-
-		{
+		let (mut pending_failures, chan_restoration_res) = {
 			let mut channel_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_lock;
-			let short_to_id = &mut channel_state.short_to_id;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
-			let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
-				Some(chan) => chan,
-				None => return,
+			let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
+				hash_map::Entry::Occupied(chan) => chan,
+				hash_map::Entry::Vacant(_) => return,
 			};
-			if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+			if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
 				return;
 			}

-			let (raa, commitment_update, order, pending_forwards, mut pending_failures, funding_broadcastable, funding_locked) = channel.monitor_updating_restored(&self.logger);
-			if !pending_forwards.is_empty() {
-				htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
-			}
-			htlc_failures.append(&mut pending_failures);
-
-			macro_rules! handle_cs { () => {
-				if let Some(update) = commitment_update {
-					pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-						node_id: channel.get_counterparty_node_id(),
-						updates: update,
-					});
-				}
-			} }
-			macro_rules! handle_raa { () => {
-				if let Some(revoke_and_ack) = raa {
-					pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-						node_id: channel.get_counterparty_node_id(),
-						msg: revoke_and_ack,
-					});
-				}
-			} }
-			match order {
-				RAACommitmentOrder::CommitmentFirst => {
-					handle_cs!();
-					handle_raa!();
-				},
-				RAACommitmentOrder::RevokeAndACKFirst => {
-					handle_raa!();
-					handle_cs!();
-				},
-			}
-			if let Some(tx) = funding_broadcastable {
-				log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
-				self.tx_broadcaster.broadcast_transaction(&tx);
-			}
-			if let Some(msg) = funding_locked {
-				pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-					node_id: channel.get_counterparty_node_id(),
-					msg,
-				});
-				if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
-					pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-						node_id: channel.get_counterparty_node_id(),
-						msg: announcement_sigs,
-					});
-				}
-				short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
-			}
-		}
-
-		self.pending_events.lock().unwrap().append(&mut pending_events);
-
-		for failure in htlc_failures.drain(..) {
+			let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
+			(pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+		};
+		post_handle_chan_restoration!(self, chan_restoration_res);
+		for failure in pending_failures.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
 		}
-		self.forward_htlcs(&mut htlc_forwards[..]);
-
-		for res in close_results.drain(..) {
-			self.finish_force_close_channel(res);
-		}
 	}

 	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
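In the hunk above, channel_monitor_updated switches from by_id.get_mut(..) to by_id.entry(..): the restoration and error macros want an OccupiedEntry handle, which can both hand out mutable access and later remove the channel outright. The difference between the two lookups, in minimal standalone form:

use std::collections::{hash_map, HashMap};

fn main() {
    let mut by_id: HashMap<[u8; 32], String> = HashMap::new();
    by_id.insert([0u8; 32], "chan".to_owned());

    // get_mut: a bare &mut String; enough to mutate, but the map entry
    // itself cannot be removed through this borrow.
    if let Some(chan) = by_id.get_mut(&[0u8; 32]) {
        chan.push_str("-updated");
    }

    // entry: an owned handle that supports get()/get_mut() *and*
    // remove_entry(), which is what the error-handling macros need.
    match by_id.entry([0u8; 32]) {
        hash_map::Entry::Occupied(mut chan) => {
            chan.get_mut().push_str("-again");
            if chan.get().len() > 64 {
                chan.remove_entry(); // evict on a (toy) error condition
            }
        }
        hash_map::Entry::Vacant(_) => return,
    }
}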
@@ -3282,77 +3363,35 @@ impl ChannelMana
 	}

 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state_lock = self.channel_state.lock().unwrap();
-		let channel_state = &mut *channel_state_lock;
+		let (htlcs_failed_forward, chan_restoration_res) = {
+			let mut channel_state_lock = self.channel_state.lock().unwrap();
+			let channel_state = &mut *channel_state_lock;

-		match channel_state.by_id.entry(msg.channel_id) {
-			hash_map::Entry::Occupied(mut chan) => {
-				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-				}
-				// Currently, we expect all holding cell update_adds to be dropped on peer
-				// disconnect, so Channel's reestablish will never hand us any holding cell
-				// freed HTLCs to fail backwards. If in the future we no longer drop pending
-				// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-				let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
-					try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
-				if let Some(monitor_update) = monitor_update_opt {
-					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-						// channel_reestablish doesn't guarantee the order it returns is sensical
-						// for the messages it returns, but if we're setting what messages to
-						// re-transmit on monitor update success, we need to make sure it is sane.
-						if revoke_and_ack.is_none() {
-							order = RAACommitmentOrder::CommitmentFirst;
-						}
-						if commitment_update.is_none() {
-							order = RAACommitmentOrder::RevokeAndACKFirst;
-						}
-						return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
-						//TODO: Resend the funding_locked if needed once we get the monitor running again
-					}
-				}
-				if let Some(msg) = funding_locked {
-					channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-						node_id: counterparty_node_id.clone(),
-						msg
-					});
-				}
-				macro_rules! send_raa { () => {
-					if let Some(msg) = revoke_and_ack {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-							node_id: counterparty_node_id.clone(),
-							msg
-						});
+			match channel_state.by_id.entry(msg.channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
 					}
-				} }
-				macro_rules! send_cu { () => {
-					if let Some(updates) = commitment_update {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+					// Currently, we expect all holding cell update_adds to be dropped on peer
+					// disconnect, so Channel's reestablish will never hand us any holding cell
+					// freed HTLCs to fail backwards. If in the future we no longer drop pending
+					// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+					let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
+						try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+					if let Some(msg) = shutdown {
+						channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 							node_id: counterparty_node_id.clone(),
-							updates
+							msg,
 						});
 					}
-				} }
-				match order {
-					RAACommitmentOrder::RevokeAndACKFirst => {
-						send_raa!();
-						send_cu!();
-					},
-					RAACommitmentOrder::CommitmentFirst => {
-						send_cu!();
-						send_raa!();
-					},
-				}
-				if let Some(msg) = shutdown {
-					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-						node_id: counterparty_node_id.clone(),
-						msg,
-					});
-				}
-				Ok(())
-			},
-			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-		}
+					(htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+				},
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+			}
+		};
+		post_handle_chan_restoration!(self, chan_restoration_res);
+		self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+		Ok(())
 	}

 	/// Begin Update fee process. Allowed only on an outbound channel.
@@ -3458,6 +3497,57 @@ impl ChannelMana
 		}
 	}

+	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
+	/// This should only apply to HTLCs which were added to the holding cell because we were
+	/// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
+	/// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
+	/// code to inform them of a channel monitor update.
+	fn check_free_holding_cells(&self) {
+		let mut failed_htlcs = Vec::new();
+		let mut handle_errors = Vec::new();
+		{
+			let mut channel_state_lock = self.channel_state.lock().unwrap();
+			let channel_state = &mut *channel_state_lock;
+			let by_id = &mut channel_state.by_id;
+			let short_to_id = &mut channel_state.short_to_id;
+			let pending_msg_events = &mut channel_state.pending_msg_events;
+
+			by_id.retain(|channel_id, chan| {
+				match chan.maybe_free_holding_cell_htlcs(&self.logger) {
+					Ok((None, ref htlcs)) if htlcs.is_empty() => true,
+					Ok((commitment_opt, holding_cell_failed_htlcs)) => {
+						failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
+						if let Some((commitment_update, monitor_update)) = commitment_opt {
+							if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+								let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+								handle_errors.push((chan.get_counterparty_node_id(), res));
+								if close_channel { return false; }
+							} else {
+								pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+									node_id: chan.get_counterparty_node_id(),
+									updates: commitment_update,
+								});
+							}
+						}
+						true
+					},
+					Err(e) => {
+						let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+						handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+						!close_channel
+					}
+				}
+			});
+		}
+		for (failures, channel_id) in failed_htlcs.drain(..) {
+			self.fail_holding_cell_htlcs(failures, channel_id);
+		}
+
+		for (counterparty_node_id, err) in handle_errors.drain(..) {
+			let _ = handle_error!(self, err, counterparty_node_id);
+		}
+	}
+
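check_free_holding_cells above leans on HashMap::retain to visit every channel, drain freeable work, and drop channels that hit close-required errors, while only collecting failures for handling after the map borrow ends. The same shape as a toy sketch (illustrative names, not LDK's types):

use std::collections::HashMap;

struct Chan {
    queued: Vec<u32>,
    broken: bool,
}

// Sweep every channel, draining queued work; drop channels that error.
// Errors are only *collected* here and handled after the map borrow ends,
// the same shape as check_free_holding_cells' by_id.retain(..) loop.
fn sweep(by_id: &mut HashMap<u8, Chan>) -> (Vec<(u8, Vec<u32>)>, Vec<(u8, String)>) {
    let mut freed = Vec::new();
    let mut errors = Vec::new();
    by_id.retain(|id, chan| {
        if chan.broken {
            errors.push((*id, "close-required error".to_owned()));
            return false; // returning false removes the channel
        }
        if !chan.queued.is_empty() {
            freed.push((*id, std::mem::take(&mut chan.queued)));
        }
        true
    });
    (freed, errors)
}

fn main() {
    let mut by_id = HashMap::new();
    by_id.insert(1, Chan { queued: vec![10, 11], broken: false });
    by_id.insert(2, Chan { queued: vec![], broken: true });
    let (freed, errors) = sweep(&mut by_id);
    assert_eq!(freed, vec![(1, vec![10, 11])]);
    assert_eq!(errors.len(), 1);
    assert!(!by_id.contains_key(&2));
}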
 	/// Handle a list of channel failures during a block_connected or block_disconnected call,
 	/// pushing the channel monitor update (if any) to the background events queue and removing the
 	/// Channel object.
@@ -3594,6 +3684,8 @@ impl MessageSend
 		// ChannelMonitors when clearing other events.
 		self.process_pending_monitor_events();

+		self.check_free_holding_cells();
+
 		let mut ret = Vec::new();
 		let mut channel_state = self.channel_state.lock().unwrap();
 		mem::swap(&mut ret, &mut channel_state.pending_msg_events);
@@ -3961,7 +4053,6 @@ impl
 	fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut failed_channels = Vec::new();
-		let mut failed_payments = Vec::new();
 		let mut no_channels_remain = true;
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
@@ -3990,15 +4081,7 @@ impl
 			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
 			channel_state.by_id.retain(|_, chan| {
 				if chan.get_counterparty_node_id() == *counterparty_node_id {
-					// Note that currently on channel reestablish we assert that there are no
-					// holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
-					// on peer disconnect here, there will need to be corresponding changes in
-					// reestablish logic.
-					let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-					if !failed_adds.is_empty() {
-						let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
-						failed_payments.push((chan_update, failed_adds));
-					}
+					chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
 					if chan.is_shutdown() {
 						if let Some(short_id) = chan.get_short_channel_id() {
 							short_to_id.remove(&short_id);
@@ -4042,11 +4125,6 @@ impl
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
-		for (chan_update, mut htlc_sources) in failed_payments {
-			for (htlc_source, payment_hash) in htlc_sources.drain(..) {
-				self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
-			}
-		}
 	}

 	fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 78fd0644ccb..c1ea9072226 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1515,6 +1515,11 @@ macro_rules! handle_chan_reestablish_msgs {
 			None
 		};

+		if let Some(&MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ }) = msg_events.get(idx) {
+			idx += 1;
+			assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+		}
+
 		let mut revoke_and_ack = None;
 		let mut commitment_update = None;
 		let order = if let Some(ev) = msg_events.get(idx) {