@@ -497,7 +497,7 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 	if (!ingress) {
 		if (!sock_writeable(psock->sk))
 			return -EAGAIN;
-		return skb_send_sock_locked(psock->sk, skb, off, len);
+		return skb_send_sock(psock->sk, skb, off, len);
 	}
 	return sk_psock_skb_ingress(psock, skb);
 }
@@ -511,8 +511,7 @@ static void sk_psock_backlog(struct work_struct *work)
 	u32 len, off;
 	int ret;
 
-	/* Lock sock to avoid losing sk_socket during loop. */
-	lock_sock(psock->sk);
+	mutex_lock(&psock->work_mutex);
 	if (state->skb) {
 		skb = state->skb;
 		len = state->len;
@@ -529,7 +528,7 @@ static void sk_psock_backlog(struct work_struct *work)
 		skb_bpf_redirect_clear(skb);
 		do {
 			ret = -EIO;
-			if (likely(psock->sk->sk_socket))
+			if (!sock_flag(psock->sk, SOCK_DEAD))
 				ret = sk_psock_handle_skb(psock, skb, off,
 							  len, ingress);
 			if (ret <= 0) {
@@ -553,7 +552,7 @@ static void sk_psock_backlog(struct work_struct *work)
 		kfree_skb(skb);
 	}
 end:
-	release_sock(psock->sk);
+	mutex_unlock(&psock->work_mutex);
 }
 
 struct sk_psock *sk_psock_init(struct sock *sk, int node)
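Taken together, the hunks above move sk_psock_backlog() off the socket lock: the worker now serializes on a psock-private work_mutex, checks SOCK_DEAD instead of sk->sk_socket to decide whether the socket is still usable, and consequently must call skb_send_sock() rather than skb_send_sock_locked(), since the socket lock is no longer held around the send. A condensed, illustrative sketch of the post-patch locking shape (the standalone fragment below is not part of the file):

	/* Illustrative sketch only: how the backlog worker serializes
	 * after this patch. work_mutex replaces lock_sock()/release_sock(),
	 * and SOCK_DEAD replaces the sk->sk_socket liveness check.
	 */
	mutex_lock(&psock->work_mutex);
	ret = -EIO;
	if (!sock_flag(psock->sk, SOCK_DEAD))
		ret = sk_psock_handle_skb(psock, skb, off, len, ingress);
	mutex_unlock(&psock->work_mutex);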
@@ -591,6 +590,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 	spin_lock_init(&psock->link_lock);
 
 	INIT_WORK(&psock->work, sk_psock_backlog);
+	mutex_init(&psock->work_mutex);
 	INIT_LIST_HEAD(&psock->ingress_msg);
 	spin_lock_init(&psock->ingress_lock);
 	skb_queue_head_init(&psock->ingress_skb);
@@ -631,17 +631,15 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 	}
 }
 
-static void sk_psock_zap_ingress(struct sk_psock *psock)
+static void __sk_psock_zap_ingress(struct sk_psock *psock)
 {
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
 		skb_bpf_redirect_clear(skb);
 		kfree_skb(skb);
 	}
-	spin_lock_bh(&psock->ingress_lock);
 	__sk_psock_purge_ingress_msg(psock);
-	spin_unlock_bh(&psock->ingress_lock);
 }
 
 static void sk_psock_link_destroy(struct sk_psock *psock)
@@ -654,6 +652,18 @@ static void sk_psock_link_destroy(struct sk_psock *psock)
 	}
 }
 
+void sk_psock_stop(struct sk_psock *psock, bool wait)
+{
+	spin_lock_bh(&psock->ingress_lock);
+	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+	sk_psock_cork_free(psock);
+	__sk_psock_zap_ingress(psock);
+	spin_unlock_bh(&psock->ingress_lock);
+
+	if (wait)
+		cancel_work_sync(&psock->work);
+}
+
 static void sk_psock_done_strp(struct sk_psock *psock);
 
 static void sk_psock_destroy_deferred(struct work_struct *gc)
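The new sk_psock_stop() clears SK_PSOCK_TX_ENABLED, frees any cork, and zaps the ingress queue all under ingress_lock, so once the lock is dropped no producer that honors the flag can requeue anything; passing wait=true additionally flushes the backlog worker. A hedged usage sketch (the surrounding teardown context is assumed, not shown in this patch):

	/* Assumed teardown ordering based on this patch: disable and
	 * purge first; with wait=true, cancel_work_sync() guarantees
	 * sk_psock_backlog() is no longer running on return.
	 */
	sk_psock_stop(psock, true);
	/* from here, ingress_skb is empty and stays empty */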
@@ -665,12 +675,12 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
 	sk_psock_done_strp(psock);
 
 	cancel_work_sync(&psock->work);
+	mutex_destroy(&psock->work_mutex);
 
 	psock_progs_drop(&psock->progs);
 
 	sk_psock_link_destroy(psock);
 	sk_psock_cork_free(psock);
-	sk_psock_zap_ingress(psock);
 
 	if (psock->sk_redir)
 		sock_put(psock->sk_redir);
@@ -688,8 +698,7 @@ static void sk_psock_destroy(struct rcu_head *rcu)
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-	sk_psock_cork_free(psock);
-	sk_psock_zap_ingress(psock);
+	sk_psock_stop(psock, false);
 
 	write_lock_bh(&sk->sk_callback_lock);
 	sk_psock_restore_proto(sk, psock);
@@ -699,7 +708,6 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 	else if (psock->progs.stream_verdict)
 		sk_psock_stop_verdict(sk, psock);
 	write_unlock_bh(&sk->sk_callback_lock);
-	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 
 	call_rcu(&psock->rcu, sk_psock_destroy);
 }
@@ -770,14 +778,20 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
 	 * error that caused the pipe to break. We can't send a packet on
 	 * a socket that is in this state so we drop the skb.
 	 */
-	if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
-	    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
+	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
+		kfree_skb(skb);
+		return;
+	}
+	spin_lock_bh(&psock_other->ingress_lock);
+	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
+		spin_unlock_bh(&psock_other->ingress_lock);
 		kfree_skb(skb);
 		return;
 	}
 
 	skb_queue_tail(&psock_other->ingress_skb, skb);
 	schedule_work(&psock_other->work);
+	spin_unlock_bh(&psock_other->ingress_lock);
 }
 
 static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
@@ -845,8 +859,12 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
 			err = sk_psock_skb_ingress_self(psock, skb);
 		}
 		if (err < 0) {
-			skb_queue_tail(&psock->ingress_skb, skb);
-			schedule_work(&psock->work);
+			spin_lock_bh(&psock->ingress_lock);
+			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+				skb_queue_tail(&psock->ingress_skb, skb);
+				schedule_work(&psock->work);
+			}
+			spin_unlock_bh(&psock->ingress_lock);
 		}
 		break;
 	case __SK_REDIRECT:
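This hunk and the sk_psock_skb_redirect() hunk close the same race: previously, sk_psock_stop() could clear SK_PSOCK_TX_ENABLED and purge ingress_skb between the state test and skb_queue_tail(), stranding an skb on a psock whose worker would never run again. Holding ingress_lock across the test-and-enqueue makes the two paths mutually exclusive; a minimal sketch of the shared pattern, as used in both hunks:

	/* Test-then-enqueue must be atomic w.r.t. sk_psock_stop(),
	 * which clears TX_ENABLED and purges ingress_skb under the
	 * same ingress_lock.
	 */
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		skb_queue_tail(&psock->ingress_skb, skb);
		schedule_work(&psock->work);
	}
	spin_unlock_bh(&psock->ingress_lock);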