@@ -184,6 +184,20 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
+static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
+					      netdev_features_t features)
+{
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+
+	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+	if (IS_ERR(skb))
+		return skb;
+
+	udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
+
+	return skb;
+}
+
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 				  netdev_features_t features)
 {
@@ -196,6 +210,9 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	__sum16 check;
 	__be16 newlen;
 
+	if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
+		return __udp_gso_segment_list(gso_skb, features);
+
 	mss = skb_shinfo(gso_skb)->gso_size;
 	if (gso_skb->len <= sizeof(*uh) + mss)
 		return ERR_PTR(-EINVAL);
@@ -354,6 +371,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 	struct udphdr *uh2;
 	struct sk_buff *p;
 	unsigned int ulen;
+	int ret = 0;
 
 	/* requires non zero csum, for symmetry with GSO */
 	if (!uh->check) {
@@ -369,7 +387,6 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 	}
 	/* pull encapsulating udp header */
 	skb_gro_pull(skb, sizeof(struct udphdr));
-	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 
 	list_for_each_entry(p, head, list) {
 		if (!NAPI_GRO_CB(p)->same_flow)
@@ -383,14 +400,40 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 			continue;
 		}
 
+		if (NAPI_GRO_CB(skb)->is_flist != NAPI_GRO_CB(p)->is_flist) {
+			NAPI_GRO_CB(skb)->flush = 1;
+			return p;
+		}
+
 		/* Terminate the flow on len mismatch or if it grow "too much".
 		 * Under small packet flood GRO count could elsewhere grow a lot
 		 * leading to excessive truesize values.
 		 * On len mismatch merge the first packet shorter than gso_size,
 		 * otherwise complete the GRO packet.
 		 */
-		if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
-		    ulen != ntohs(uh2->len) ||
+		if (ulen > ntohs(uh2->len)) {
+			pp = p;
+		} else {
+			if (NAPI_GRO_CB(skb)->is_flist) {
+				if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
+					NAPI_GRO_CB(skb)->flush = 1;
+					return NULL;
+				}
+				if ((skb->ip_summed != p->ip_summed) ||
+				    (skb->csum_level != p->csum_level)) {
+					NAPI_GRO_CB(skb)->flush = 1;
+					return NULL;
+				}
+				ret = skb_gro_receive_list(p, skb);
+			} else {
+				skb_gro_postpull_rcsum(skb, uh,
+						       sizeof(struct udphdr));
+
+				ret = skb_gro_receive(p, skb);
+			}
+		}
+
+		if (ret || ulen != ntohs(uh2->len) ||
 		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
 			pp = p;
 
@@ -401,36 +444,29 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 	return NULL;
 }
 
-INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
-						__be16 sport, __be16 dport));
 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
-				struct udphdr *uh, udp_lookup_t lookup)
+				struct udphdr *uh, struct sock *sk)
 {
 	struct sk_buff *pp = NULL;
 	struct sk_buff *p;
 	struct udphdr *uh2;
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
-	struct sock *sk;
 
-	rcu_read_lock();
-	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
-				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
-	if (!sk)
-		goto out_unlock;
+	if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
+		NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
 
-	if (udp_sk(sk)->gro_enabled) {
+	if ((sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
 		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
-		rcu_read_unlock();
 		return pp;
 	}
 
-	if (NAPI_GRO_CB(skb)->encap_mark ||
+	if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 	     !NAPI_GRO_CB(skb)->csum_valid) ||
 	    !udp_sk(sk)->gro_receive)
-		goto out_unlock;
+		goto out;
 
 	/* mark that this skb passed once through the tunnel gro layer */
 	NAPI_GRO_CB(skb)->encap_mark = 1;
@@ -457,8 +493,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
 
-out_unlock:
-	rcu_read_unlock();
+out:
 	skb_gro_flush_final(skb, pp, flush);
 	return pp;
 }
@@ -468,8 +503,10 @@ INDIRECT_CALLABLE_SCOPE
 struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
 	struct udphdr *uh = udp_gro_udphdr(skb);
+	struct sk_buff *pp;
+	struct sock *sk;
 
-	if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
+	if (unlikely(!uh))
 		goto flush;
 
 	/* Don't bother verifying checksum if we're going to flush anyway. */
@@ -484,7 +521,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 				     inet_gro_compute_pseudo);
 skip:
 	NAPI_GRO_CB(skb)->is_ipv6 = 0;
-	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
+	rcu_read_lock();
+	sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
+	pp = udp_gro_receive(head, skb, uh, sk);
+	rcu_read_unlock();
+	return pp;
 
 flush:
 	NAPI_GRO_CB(skb)->flush = 1;
@@ -517,9 +558,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
 	rcu_read_lock();
 	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
 				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
-	if (sk && udp_sk(sk)->gro_enabled) {
-		err = udp_gro_complete_segment(skb);
-	} else if (sk && udp_sk(sk)->gro_complete) {
+	if (sk && udp_sk(sk)->gro_complete) {
 		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
 					: SKB_GSO_UDP_TUNNEL;
 
@@ -529,6 +568,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
 		skb->encapsulation = 1;
 		err = udp_sk(sk)->gro_complete(sk, skb,
 				nhoff + sizeof(struct udphdr));
+	} else {
+		err = udp_gro_complete_segment(skb);
 	}
 	rcu_read_unlock();
 
@@ -544,6 +585,23 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
+	if (NAPI_GRO_CB(skb)->is_flist) {
+		uh->len = htons(skb->len - nhoff);
+
+		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
+		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+				skb->csum_level++;
+		} else {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->csum_level = 0;
+		}
+
+		return 0;
+	}
+
 	if (uh->check)
 		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
 					  iph->daddr, 0);
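
Not part of the diff above, just orientation: the new NAPI_GRO_CB(skb)->is_flist path handles aggregation when the device advertises NETIF_F_GRO_FRAGLIST (the "rx-gro-list" feature toggled via ethtool) and no local socket has asked for GRO, while the udp_sk(sk)->gro_enabled branch still serves sockets that opted in with the UDP_GRO socket option. As a rough sketch of that second case, a userspace receiver could look like the following; the port number, buffer sizes, and the UDP_GRO fallback define are assumptions for illustration, not taken from this patch.

/* Illustrative sketch only (not from this patch): a receiver that enables
 * the UDP_GRO socket option -- the udp_sk(sk)->gro_enabled path above --
 * and reads the segment size the kernel reports in a SOL_UDP/UDP_GRO cmsg.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef UDP_GRO
#define UDP_GRO 104	/* from include/uapi/linux/udp.h */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family	 = AF_INET,
		.sin_port	 = htons(8000),		/* arbitrary test port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char buf[65535], ctl[CMSG_SPACE(sizeof(int))];
	int fd, one = 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* Opt in to GRO on this socket; sets up->gro_enabled in the kernel. */
	if (setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one)) < 0)
		return 1;

	for (;;) {
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		struct msghdr msg = {
			.msg_iov	= &iov,
			.msg_iovlen	= 1,
			.msg_control	= ctl,
			.msg_controllen	= sizeof(ctl),
		};
		struct cmsghdr *cm;
		int gso_size = 0;
		ssize_t len;

		len = recvmsg(fd, &msg, 0);
		if (len < 0)
			return 1;

		/* gso_size says how to split the coalesced payload back up. */
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
			if (cm->cmsg_level == IPPROTO_UDP &&
			    cm->cmsg_type == UDP_GRO)
				memcpy(&gso_size, CMSG_DATA(cm),
				       sizeof(gso_size));

		printf("received %zd bytes, gso_size %d\n", len, gso_size);
	}
}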