Skip to content

Commit 9fd1ff5

Browse files
klassert authored and davem330 committed
udp: Support UDP fraglist GRO/GSO.
This patch extends UDP GRO to support fraglist GRO/GSO by using the previously introduced infrastructure. If the feature is enabled, all UDP packets are going to fraglist GRO (local input and forward).

After validating the csum, we mark ip_summed as CHECKSUM_UNNECESSARY for fraglist GRO packets to make sure that the csum is not touched.

Signed-off-by: Steffen Klassert <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 3a1296a commit 9fd1ff5

File tree

3 files changed

+107
-26
lines changed

3 files changed

+107
-26
lines changed

include/net/udp.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
167167
__be16 dport);
168168

169169
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
170-
struct udphdr *uh, udp_lookup_t lookup);
170+
struct udphdr *uh, struct sock *sk);
171171
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
172172

173173
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,

net/ipv4/udp_offload.c

Lines changed: 81 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -184,6 +184,20 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
184184
}
185185
EXPORT_SYMBOL(skb_udp_tunnel_segment);
186186

187+
static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
188+
netdev_features_t features)
189+
{
190+
unsigned int mss = skb_shinfo(skb)->gso_size;
191+
192+
skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
193+
if (IS_ERR(skb))
194+
return skb;
195+
196+
udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
197+
198+
return skb;
199+
}
200+
187201
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
188202
netdev_features_t features)
189203
{
@@ -196,6 +210,9 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
196210
__sum16 check;
197211
__be16 newlen;
198212

213+
if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
214+
return __udp_gso_segment_list(gso_skb, features);
215+
199216
mss = skb_shinfo(gso_skb)->gso_size;
200217
if (gso_skb->len <= sizeof(*uh) + mss)
201218
return ERR_PTR(-EINVAL);
@@ -354,6 +371,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
354371
struct udphdr *uh2;
355372
struct sk_buff *p;
356373
unsigned int ulen;
374+
int ret = 0;
357375

358376
/* requires non zero csum, for symmetry with GSO */
359377
if (!uh->check) {
@@ -369,7 +387,6 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
369387
}
370388
/* pull encapsulating udp header */
371389
skb_gro_pull(skb, sizeof(struct udphdr));
372-
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
373390

374391
list_for_each_entry(p, head, list) {
375392
if (!NAPI_GRO_CB(p)->same_flow)
@@ -383,14 +400,40 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
383400
continue;
384401
}
385402

403+
if (NAPI_GRO_CB(skb)->is_flist != NAPI_GRO_CB(p)->is_flist) {
404+
NAPI_GRO_CB(skb)->flush = 1;
405+
return p;
406+
}
407+
386408
/* Terminate the flow on len mismatch or if it grow "too much".
387409
* Under small packet flood GRO count could elsewhere grow a lot
388410
* leading to excessive truesize values.
389411
* On len mismatch merge the first packet shorter than gso_size,
390412
* otherwise complete the GRO packet.
391413
*/
392-
if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
393-
ulen != ntohs(uh2->len) ||
414+
if (ulen > ntohs(uh2->len)) {
415+
pp = p;
416+
} else {
417+
if (NAPI_GRO_CB(skb)->is_flist) {
418+
if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
419+
NAPI_GRO_CB(skb)->flush = 1;
420+
return NULL;
421+
}
422+
if ((skb->ip_summed != p->ip_summed) ||
423+
(skb->csum_level != p->csum_level)) {
424+
NAPI_GRO_CB(skb)->flush = 1;
425+
return NULL;
426+
}
427+
ret = skb_gro_receive_list(p, skb);
428+
} else {
429+
skb_gro_postpull_rcsum(skb, uh,
430+
sizeof(struct udphdr));
431+
432+
ret = skb_gro_receive(p, skb);
433+
}
434+
}
435+
436+
if (ret || ulen != ntohs(uh2->len) ||
394437
NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
395438
pp = p;
396439

@@ -401,36 +444,29 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
401444
return NULL;
402445
}
403446

404-
INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
405-
__be16 sport, __be16 dport));
406447
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
407-
struct udphdr *uh, udp_lookup_t lookup)
448+
struct udphdr *uh, struct sock *sk)
408449
{
409450
struct sk_buff *pp = NULL;
410451
struct sk_buff *p;
411452
struct udphdr *uh2;
412453
unsigned int off = skb_gro_offset(skb);
413454
int flush = 1;
414-
struct sock *sk;
415455

416-
rcu_read_lock();
417-
sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
418-
udp4_lib_lookup_skb, skb, uh->source, uh->dest);
419-
if (!sk)
420-
goto out_unlock;
456+
if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
457+
NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
421458

422-
if (udp_sk(sk)->gro_enabled) {
459+
if ((sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
423460
pp = call_gro_receive(udp_gro_receive_segment, head, skb);
424-
rcu_read_unlock();
425461
return pp;
426462
}
427463

428-
if (NAPI_GRO_CB(skb)->encap_mark ||
464+
if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
429465
(skb->ip_summed != CHECKSUM_PARTIAL &&
430466
NAPI_GRO_CB(skb)->csum_cnt == 0 &&
431467
!NAPI_GRO_CB(skb)->csum_valid) ||
432468
!udp_sk(sk)->gro_receive)
433-
goto out_unlock;
469+
goto out;
434470

435471
/* mark that this skb passed once through the tunnel gro layer */
436472
NAPI_GRO_CB(skb)->encap_mark = 1;
@@ -457,8 +493,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
457493
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
458494
pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
459495

460-
out_unlock:
461-
rcu_read_unlock();
496+
out:
462497
skb_gro_flush_final(skb, pp, flush);
463498
return pp;
464499
}
@@ -468,8 +503,10 @@ INDIRECT_CALLABLE_SCOPE
468503
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
469504
{
470505
struct udphdr *uh = udp_gro_udphdr(skb);
506+
struct sk_buff *pp;
507+
struct sock *sk;
471508

472-
if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
509+
if (unlikely(!uh))
473510
goto flush;
474511

475512
/* Don't bother verifying checksum if we're going to flush anyway. */
@@ -484,7 +521,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
484521
inet_gro_compute_pseudo);
485522
skip:
486523
NAPI_GRO_CB(skb)->is_ipv6 = 0;
487-
return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
524+
rcu_read_lock();
525+
sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
526+
pp = udp_gro_receive(head, skb, uh, sk);
527+
rcu_read_unlock();
528+
return pp;
488529

489530
flush:
490531
NAPI_GRO_CB(skb)->flush = 1;
@@ -517,9 +558,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
517558
rcu_read_lock();
518559
sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
519560
udp4_lib_lookup_skb, skb, uh->source, uh->dest);
520-
if (sk && udp_sk(sk)->gro_enabled) {
521-
err = udp_gro_complete_segment(skb);
522-
} else if (sk && udp_sk(sk)->gro_complete) {
561+
if (sk && udp_sk(sk)->gro_complete) {
523562
skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
524563
: SKB_GSO_UDP_TUNNEL;
525564

@@ -529,6 +568,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
529568
skb->encapsulation = 1;
530569
err = udp_sk(sk)->gro_complete(sk, skb,
531570
nhoff + sizeof(struct udphdr));
571+
} else {
572+
err = udp_gro_complete_segment(skb);
532573
}
533574
rcu_read_unlock();
534575

@@ -544,6 +585,23 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
544585
const struct iphdr *iph = ip_hdr(skb);
545586
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
546587

588+
if (NAPI_GRO_CB(skb)->is_flist) {
589+
uh->len = htons(skb->len - nhoff);
590+
591+
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
592+
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
593+
594+
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
595+
if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
596+
skb->csum_level++;
597+
} else {
598+
skb->ip_summed = CHECKSUM_UNNECESSARY;
599+
skb->csum_level = 0;
600+
}
601+
602+
return 0;
603+
}
604+
547605
if (uh->check)
548606
uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
549607
iph->daddr, 0);

net/ipv6/udp_offload.c

Lines changed: 25 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -115,8 +115,10 @@ INDIRECT_CALLABLE_SCOPE
115115
struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
116116
{
117117
struct udphdr *uh = udp_gro_udphdr(skb);
118+
struct sk_buff *pp;
119+
struct sock *sk;
118120

119-
if (unlikely(!uh) || !static_branch_unlikely(&udpv6_encap_needed_key))
121+
if (unlikely(!uh))
120122
goto flush;
121123

122124
/* Don't bother verifying checksum if we're going to flush anyway. */
@@ -132,7 +134,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
132134

133135
skip:
134136
NAPI_GRO_CB(skb)->is_ipv6 = 1;
135-
return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb);
137+
rcu_read_lock();
138+
sk = static_branch_unlikely(&udpv6_encap_needed_key) ? udp6_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
139+
pp = udp_gro_receive(head, skb, uh, sk);
140+
rcu_read_unlock();
141+
return pp;
136142

137143
flush:
138144
NAPI_GRO_CB(skb)->flush = 1;
@@ -144,6 +150,23 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
144150
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
145151
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
146152

153+
if (NAPI_GRO_CB(skb)->is_flist) {
154+
uh->len = htons(skb->len - nhoff);
155+
156+
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
157+
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
158+
159+
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
160+
if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
161+
skb->csum_level++;
162+
} else {
163+
skb->ip_summed = CHECKSUM_UNNECESSARY;
164+
skb->csum_level = 0;
165+
}
166+
167+
return 0;
168+
}
169+
147170
if (uh->check)
148171
uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
149172
&ipv6h->daddr, 0);

0 commit comments

Comments (0)