@@ -2726,20 +2726,21 @@ fn is_gossip_msg(type_id: u16) -> bool {
 
 #[cfg(test)]
 mod tests {
+	use super::*;
+
 	use crate::sign::{NodeSigner, Recipient};
 	use crate::events;
 	use crate::io;
 	use crate::ln::types::ChannelId;
 	use crate::ln::features::{InitFeatures, NodeFeatures};
 	use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
-	use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses, ErroringMessageHandler, MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER};
 	use crate::ln::{msgs, wire};
 	use crate::ln::msgs::{Init, LightningError, SocketAddress};
 	use crate::util::test_utils;
 
 	use bitcoin::Network;
 	use bitcoin::constants::ChainHash;
-	use bitcoin::secp256k1::{PublicKey, SecretKey};
+	use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1};
 
 	use crate::sync::{Arc, Mutex};
 	use core::convert::Infallible;
@@ -3197,6 +3198,8 @@ mod tests {
 		let cfgs = create_peermgr_cfgs(2);
 		cfgs[0].routing_handler.request_full_sync.store(true, Ordering::Release);
 		cfgs[1].routing_handler.request_full_sync.store(true, Ordering::Release);
+		cfgs[0].routing_handler.announcement_available_for_sync.store(true, Ordering::Release);
+		cfgs[1].routing_handler.announcement_available_for_sync.store(true, Ordering::Release);
 		let peers = create_network(2, &cfgs);
 
 		// By calling establish_connect, we trigger do_attempt_write_data between
@@ -3360,6 +3363,79 @@ mod tests {
 		assert_eq!(peer_b.peers.read().unwrap().len(), 0);
 	}
 
+	#[test]
+	fn test_gossip_flood_pause() {
+		use crate::routing::test_utils::channel_announcement;
+		use lightning_types::features::ChannelFeatures;
+
+		// Simple test which connects two nodes to a PeerManager and checks that if we run out of
+		// socket buffer space we'll stop forwarding gossip but still push our own gossip.
+		let cfgs = create_peermgr_cfgs(2);
+		let mut peers = create_network(2, &cfgs);
+		let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]);
+
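+		// Helper which shuttles any queued bytes back and forth between the two peers until both
+		// outbound buffers are empty.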
+		macro_rules! drain_queues { () => {
+			loop {
+				peers[0].process_events();
+				peers[1].process_events();
+
+				let msg = fd_a.outbound_data.lock().unwrap().split_off(0);
+				if !msg.is_empty() {
+					assert_eq!(peers[1].read_event(&mut fd_b, &msg).unwrap(), false);
+					continue;
+				}
+				let msg = fd_b.outbound_data.lock().unwrap().split_off(0);
+				if !msg.is_empty() {
+					assert_eq!(peers[0].read_event(&mut fd_a, &msg).unwrap(), false);
+					continue;
+				}
+				break;
+			}
+		} }
+
+		// First, make sure all pending messages have been processed and queues drained.
+		drain_queues!();
+
+		let secp_ctx = Secp256k1::new();
+		let key = SecretKey::from_slice(&[1; 32]).unwrap();
+		let msg = channel_announcement(&key, &key, ChannelFeatures::empty(), 42, &secp_ctx);
+		let msg_ev = MessageSendEvent::BroadcastChannelAnnouncement {
+			msg,
+			update_msg: None,
+		};
+
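+		// Simulate running out of socket buffer space: with writes hung on fd_a nothing can be
+		// flushed, so any gossip we broadcast from here on has to queue in the peer's buffers.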
+		fd_a.hang_writes.store(true, Ordering::Relaxed);
+
+		// Now push an arbitrarily large number of messages and check that only
+		// `OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP` messages end up in the queue.
+		for i in 0..OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP * 2 {
+			cfgs[0].routing_handler.pending_events.lock().unwrap().push(msg_ev.clone());
+			peers[0].process_events();
+		}
+
+		assert_eq!(peers[0].peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.len(),
+			OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP);
+
+		// Check that if a broadcast message comes in from the channel handler (i.e. it is an
+		// announcement for our own channel), it gets queued anyway.
+		cfgs[0].chan_handler.pending_events.lock().unwrap().push(msg_ev);
+		peers[0].process_events();
+		assert_eq!(peers[0].peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.len(),
+			OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 1);
+
+		// Finally, deliver all the messages and make sure we got the right count. Note that there
+		// was an extra message that had already moved from the broadcast queue to the encrypted
+		// message queue so we actually receive `OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 2` messages.
+		fd_a.hang_writes.store(false, Ordering::Relaxed);
+		cfgs[1].routing_handler.chan_anns_recvd.store(0, Ordering::Relaxed);
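+		// Tell the PeerManager the socket has room again so it resumes draining the queued gossip.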
+		peers[0].write_buffer_space_avail(&mut fd_a).unwrap();
+
+		drain_queues!();
+		assert!(peers[0].peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.is_empty());
+		assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Relaxed),
+			OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 2);
+	}
+
 	#[test]
 	fn test_filter_addresses() {
 		// Tests the filter_addresses function.