@@ -525,35 +525,6 @@ clean_from_lists(struct nf_conn *ct)
	nf_ct_remove_expectations(ct);
}

-/* must be called with local_bh_disable */
-static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
-{
-	struct ct_pcpu *pcpu;
-
-	/* add this conntrack to the (per cpu) unconfirmed list */
-	ct->cpu = smp_processor_id();
-	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
-	spin_lock(&pcpu->lock);
-	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-			     &pcpu->unconfirmed);
-	spin_unlock(&pcpu->lock);
-}
-
-/* must be called with local_bh_disable */
-static void nf_ct_del_from_unconfirmed_list(struct nf_conn *ct)
-{
-	struct ct_pcpu *pcpu;
-
-	/* We overload first tuple to link into unconfirmed list.*/
-	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
-	spin_lock(&pcpu->lock);
-	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
-	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
-	spin_unlock(&pcpu->lock);
-}
-
#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via nf_ct_destroy() */
@@ -625,19 +596,13 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
	if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
		destroy_gre_conntrack(ct);

-	local_bh_disable();
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too.
	 */
	nf_ct_remove_expectations(ct);

-	if (unlikely(!nf_ct_is_confirmed(ct)))
-		nf_ct_del_from_unconfirmed_list(ct);
-
-	local_bh_enable();
-
	if (ct->master)
		nf_ct_put(ct->master);
@@ -1248,7 +1213,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
	 * user context, else we insert an already 'dead' hash, blocking
	 * further use of that particular connection -JM.
	 */
-	nf_ct_del_from_unconfirmed_list(ct);
	ct->status |= IPS_CONFIRMED;

	if (unlikely(nf_ct_is_dying(ct))) {
@@ -1803,9 +1767,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
	if (!exp)
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

-	/* Now it is inserted into the unconfirmed list, set refcount to 1. */
+	/* Now it is going to be associated with an sk_buff, set refcount to 1. */
	refcount_set(&ct->ct_general.use, 1);
-	nf_ct_add_to_unconfirmed_list(ct);

	local_bh_enable();
@@ -2594,7 +2557,6 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
		nf_conntrack_ecache_pernet_fini(net);
		nf_conntrack_expect_pernet_fini(net);
		free_percpu(net->ct.stat);
-		free_percpu(net->ct.pcpu_lists);
	}
}
@@ -2805,26 +2767,14 @@ int nf_conntrack_init_net(struct net *net)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
	int ret = -ENOMEM;
-	int cpu;

	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
	BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
	atomic_set(&cnet->count, 0);

-	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
-	if (!net->ct.pcpu_lists)
-		goto err_stat;
-
-	for_each_possible_cpu(cpu) {
-		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-		spin_lock_init(&pcpu->lock);
-		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
-	}
-
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat)
-		goto err_pcpu_lists;
+		return ret;

	ret = nf_conntrack_expect_pernet_init(net);
	if (ret < 0)
@@ -2840,8 +2790,5 @@ int nf_conntrack_init_net(struct net *net)

err_expect:
	free_percpu(net->ct.stat);
-err_pcpu_lists:
-	free_percpu(net->ct.pcpu_lists);
-err_stat:
	return ret;
}
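
For reference, the removed helpers and the deleted per-netns init/cleanup code all operate on the per-CPU bookkeeping behind net->ct.pcpu_lists. Below is a minimal sketch of that structure, reconstructed only from the calls visible in this diff (spin_lock_init(&pcpu->lock), INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, ...), per_cpu_ptr(net->ct.pcpu_lists, cpu)); the authoritative definition lives elsewhere in the tree and may carry additional members.

#include <linux/spinlock.h>
#include <linux/list_nulls.h>

/* Sketch only: per-CPU state that backed the unconfirmed list.
 * One instance per possible CPU, allocated with alloc_percpu(struct ct_pcpu)
 * and reached via per_cpu_ptr(net->ct.pcpu_lists, cpu).
 */
struct ct_pcpu {
	spinlock_t		lock;		/* serializes add/del on this CPU's list */
	struct hlist_nulls_head	unconfirmed;	/* unconfirmed entries, linked through
						 * tuplehash[IP_CT_DIR_ORIGINAL].hnnode */
};

With this change an unconfirmed conntrack entry is no longer linked on any list before confirmation; as the updated comment in init_conntrack puts it, the entry is only associated with the sk_buff that carries it.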