@@ -36,12 +36,50 @@ static void netns_bpf_run_array_detach(struct net *net,
 	bpf_prog_array_free(run_array);
 }
 
+static int link_index(struct net *net, enum netns_bpf_attach_type type,
+		      struct bpf_netns_link *link)
+{
+	struct bpf_netns_link *pos;
+	int i = 0;
+
+	list_for_each_entry(pos, &net->bpf.links[type], node) {
+		if (pos == link)
+			return i;
+		i++;
+	}
+	return -ENOENT;
+}
+
+static int link_count(struct net *net, enum netns_bpf_attach_type type)
+{
+	struct list_head *pos;
+	int i = 0;
+
+	list_for_each(pos, &net->bpf.links[type])
+		i++;
+	return i;
+}
+
+static void fill_prog_array(struct net *net, enum netns_bpf_attach_type type,
+			    struct bpf_prog_array *prog_array)
+{
+	struct bpf_netns_link *pos;
+	unsigned int i = 0;
+
+	list_for_each_entry(pos, &net->bpf.links[type], node) {
+		prog_array->items[i].prog = pos->link.prog;
+		i++;
+	}
+}
+
 static void bpf_netns_link_release(struct bpf_link *link)
 {
 	struct bpf_netns_link *net_link =
 		container_of(link, struct bpf_netns_link, link);
 	enum netns_bpf_attach_type type = net_link->netns_type;
+	struct bpf_prog_array *old_array, *new_array;
 	struct net *net;
+	int cnt, idx;
 
 	mutex_lock(&netns_bpf_mutex);
 
@@ -53,9 +91,27 @@ static void bpf_netns_link_release(struct bpf_link *link)
 	if (!net)
 		goto out_unlock;
 
-	netns_bpf_run_array_detach(net, type);
+	/* Remember link position in case of safe delete */
+	idx = link_index(net, type, net_link);
 	list_del(&net_link->node);
 
+	cnt = link_count(net, type);
+	if (!cnt) {
+		netns_bpf_run_array_detach(net, type);
+		goto out_unlock;
+	}
+
+	old_array = rcu_dereference_protected(net->bpf.run_array[type],
+					      lockdep_is_held(&netns_bpf_mutex));
+	new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL);
+	if (!new_array) {
+		WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx));
+		goto out_unlock;
+	}
+	fill_prog_array(net, type, new_array);
+	rcu_assign_pointer(net->bpf.run_array[type], new_array);
+	bpf_prog_array_free(old_array);
+
 out_unlock:
 	mutex_unlock(&netns_bpf_mutex);
 }
@@ -77,7 +133,7 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
 	enum netns_bpf_attach_type type = net_link->netns_type;
 	struct bpf_prog_array *run_array;
 	struct net *net;
-	int ret = 0;
+	int idx, ret;
 
 	if (old_prog && old_prog != link->prog)
 		return -EPERM;
@@ -95,7 +151,10 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
 
 	run_array = rcu_dereference_protected(net->bpf.run_array[type],
 					      lockdep_is_held(&netns_bpf_mutex));
-	WRITE_ONCE(run_array->items[0].prog, new_prog);
+	idx = link_index(net, type, net_link);
+	ret = bpf_prog_array_update_at(run_array, idx, new_prog);
+	if (ret)
+		goto out_unlock;
 
 	old_prog = xchg(&link->prog, new_prog);
 	bpf_prog_put(old_prog);
@@ -309,18 +368,28 @@ int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
 	return ret;
 }
 
+static int netns_bpf_max_progs(enum netns_bpf_attach_type type)
+{
+	switch (type) {
+	case NETNS_BPF_FLOW_DISSECTOR:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
 static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
 				 enum netns_bpf_attach_type type)
 {
 	struct bpf_netns_link *net_link =
 		container_of(link, struct bpf_netns_link, link);
 	struct bpf_prog_array *run_array;
-	int err;
+	int cnt, err;
 
 	mutex_lock(&netns_bpf_mutex);
 
-	/* Allow attaching only one prog or link for now */
-	if (!list_empty(&net->bpf.links[type])) {
+	cnt = link_count(net, type);
+	if (cnt >= netns_bpf_max_progs(type)) {
 		err = -E2BIG;
 		goto out_unlock;
 	}
@@ -341,16 +410,19 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
 	if (err)
 		goto out_unlock;
 
-	run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+	run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL);
 	if (!run_array) {
 		err = -ENOMEM;
 		goto out_unlock;
 	}
-	run_array->items[0].prog = link->prog;
-	rcu_assign_pointer(net->bpf.run_array[type], run_array);
 
 	list_add_tail(&net_link->node, &net->bpf.links[type]);
 
+	fill_prog_array(net, type, run_array);
+	run_array = rcu_replace_pointer(net->bpf.run_array[type], run_array,
+					lockdep_is_held(&netns_bpf_mutex));
+	bpf_prog_array_free(run_array);
+
 out_unlock:
 	mutex_unlock(&netns_bpf_mutex);
 	return err;
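
Note on the attach path above: the new scheme is count, allocate, fill, publish. link_count() sizes a fresh bpf_prog_array, fill_prog_array() copies the programs from the links list into it while netns_bpf_mutex is held, and rcu_replace_pointer() swaps the new array in before the old one is freed. A minimal userspace sketch of that copy-and-publish pattern, using hypothetical toy types (toy_link, toy_array, snapshot_links) in place of the kernel's list, RCU, and prog-array machinery, might look like this:

```c
/* Toy model of the copy-and-publish pattern in netns_bpf_link_attach().
 * All names here are hypothetical; the real code uses bpf_prog_array,
 * RCU and netns_bpf_mutex rather than a plain malloc'd snapshot.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_link {
	int prog_id;            /* stands in for bpf_netns_link::link.prog */
	struct toy_link *next;
};

struct toy_array {
	int cnt;
	int prog_ids[];         /* stands in for bpf_prog_array::items[] */
};

/* Count the attached links, like link_count(). */
static int toy_link_count(const struct toy_link *head)
{
	int i = 0;

	for (; head; head = head->next)
		i++;
	return i;
}

/* Build a fresh snapshot sized for the current list, like
 * bpf_prog_array_alloc() followed by fill_prog_array(). */
static struct toy_array *snapshot_links(const struct toy_link *head)
{
	int cnt = toy_link_count(head);
	struct toy_array *arr = malloc(sizeof(*arr) + cnt * sizeof(int));
	int i = 0;

	if (!arr)
		return NULL;
	arr->cnt = cnt;
	for (; head; head = head->next)
		arr->prog_ids[i++] = head->prog_id;
	return arr;
}

int main(void)
{
	struct toy_link c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct toy_array *arr = snapshot_links(&a);

	if (!arr)
		return 1;
	for (int i = 0; i < arr->cnt; i++)
		printf("slot %d -> prog %d\n", i, arr->prog_ids[i]);
	free(arr);
	return 0;
}
```

The release path in the second hunk follows the same idea in reverse: the array is rebuilt one slot smaller after list_del(), and only if that allocation fails does the code fall back to bpf_prog_array_delete_safe_at() on the existing array.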