
Commit 3a1296a

klassert authored and davem330 committed
net: Support GRO/GSO fraglist chaining.
This patch adds the core functions to chain/unchain GSO skbs at the
frag_list pointer. It also adds a new GSO type, SKB_GSO_FRAGLIST, and an
is_flist flag to napi_gro_cb which indicates that this flow will be GROed
by fraglist chaining.

Signed-off-by: Steffen Klassert <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent: 1a3c998

File tree: 4 files changed, +97 −2 lines

include/linux/netdevice.h

Lines changed: 3 additions & 1 deletion
@@ -2326,7 +2326,8 @@ struct napi_gro_cb {
 	/* Number of gro_receive callbacks this packet already went through */
 	u8 recursion_counter:4;
 
-	/* 1 bit hole */
+	/* GRO is done by frag_list pointer chaining. */
+	u8	is_flist:1;
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2694,6 +2695,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
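For orientation, a minimal sketch of how a protocol's GRO merge step might use the new bit, assuming the flow has already been marked for fraglist GRO elsewhere. The helper name and the decision to set is_flist are hypothetical and not part of this commit; only skb_gro_receive_list() and NAPI_GRO_CB(p)->is_flist come from the patch itself.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical merge helper: if the held packet was marked for fraglist
 * GRO, chain the new skb at the frag_list; otherwise use the classic
 * frag-based merge.
 */
static int example_gro_merge(struct sk_buff *p, struct sk_buff *skb)
{
	if (NAPI_GRO_CB(p)->is_flist)
		return skb_gro_receive_list(p, skb);

	return skb_gro_receive(p, skb);
}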

include/linux/skbuff.h

Lines changed: 2 additions & 0 deletions
@@ -3535,6 +3535,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+				 unsigned int offset);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
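A sketch of the segmentation-side counterpart, assuming a caller that checks the SKB_GSO_FRAGLIST type named in the commit message. The callback name is hypothetical, and passing the MAC header length as the offset is one plausible choice for a plain Ethernet path, not something this commit prescribes.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical gso_segment callback: fraglist GSO skbs are split by
 * unchaining the frag_list; everything else goes through skb_segment().
 */
static struct sk_buff *example_gso_segment(struct sk_buff *skb,
					   netdev_features_t features)
{
	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
		return skb_segment_list(skb, features,
					skb_mac_header_len(skb));

	return skb_segment(skb, features);
}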

net/core/dev.c

Lines changed: 1 addition & 1 deletion
@@ -3249,7 +3249,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 
 	segs = skb_mac_gso_segment(skb, features);
 
-	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
+	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
 		skb_warn_bad_offload(skb);
 
 	return segs;
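The extra segs != skb test is needed because skb_segment_list() (added below) can hand back the very same skb after taking a reference with skb_get(), so segs == skb is now a legitimate, non-error result that must not trigger skb_warn_bad_offload(). A rough sketch of the consumer pattern this relies on, paraphrased from the generic transmit path rather than taken from this commit:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Rough consumer sketch: the caller drops its own reference and carries
 * on with the returned list. Because skb_segment_list() returns the same
 * skb with an extra reference, this works even when segs == skb.
 */
static struct sk_buff *example_xmit_gso(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs)) {
		kfree_skb(skb);
		return NULL;
	}
	if (segs) {
		consume_skb(skb);	/* caller's reference */
		skb = segs;		/* may be the original skb */
	}

	return skb;
}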

net/core/skbuff.c

Lines changed: 91 additions & 0 deletions
@@ -3639,6 +3639,97 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
 	return head_frag;
 }
 
+struct sk_buff *skb_segment_list(struct sk_buff *skb,
+				 netdev_features_t features,
+				 unsigned int offset)
+{
+	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
+	unsigned int tnl_hlen = skb_tnl_header_len(skb);
+	unsigned int delta_truesize = 0;
+	unsigned int delta_len = 0;
+	struct sk_buff *tail = NULL;
+	struct sk_buff *nskb;
+
+	skb_push(skb, -skb_network_offset(skb) + offset);
+
+	skb_shinfo(skb)->frag_list = NULL;
+
+	do {
+		nskb = list_skb;
+		list_skb = list_skb->next;
+
+		if (!tail)
+			skb->next = nskb;
+		else
+			tail->next = nskb;
+
+		tail = nskb;
+
+		delta_len += nskb->len;
+		delta_truesize += nskb->truesize;
+
+		skb_push(nskb, -skb_network_offset(nskb) + offset);
+
+		__copy_skb_header(nskb, skb);
+
+		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
+		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+						 nskb->data - tnl_hlen,
+						 offset + tnl_hlen);
+
+		if (skb_needs_linearize(nskb, features) &&
+		    __skb_linearize(nskb))
+			goto err_linearize;
+
+	} while (list_skb);
+
+	skb->truesize = skb->truesize - delta_truesize;
+	skb->data_len = skb->data_len - delta_len;
+	skb->len = skb->len - delta_len;
+
+	skb_gso_reset(skb);
+
+	skb->prev = tail;
+
+	if (skb_needs_linearize(skb, features) &&
+	    __skb_linearize(skb))
+		goto err_linearize;
+
+	skb_get(skb);
+
+	return skb;
+
+err_linearize:
+	kfree_skb_list(skb->next);
+	skb->next = NULL;
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(skb_segment_list);
+
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+	if (unlikely(p->len + skb->len >= 65536))
+		return -E2BIG;
+
+	if (NAPI_GRO_CB(p)->last == p)
+		skb_shinfo(p)->frag_list = skb;
+	else
+		NAPI_GRO_CB(p)->last->next = skb;
+
+	skb_pull(skb, skb_gro_offset(skb));
+
+	NAPI_GRO_CB(p)->last = skb;
+	NAPI_GRO_CB(p)->count++;
+	p->data_len += skb->len;
+	p->truesize += skb->truesize;
+	p->len += skb->len;
+
+	NAPI_GRO_CB(skb)->same_flow = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skb_gro_receive_list);
+
 /**
  *	skb_segment - Perform protocol segmentation on skb.
  *	@head_skb:	buffer to segment
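To make the resulting layout concrete, here is a small hypothetical helper, not part of the commit, that walks what skb_segment_list() returns: the head skb keeps its own payload, the former frag_list members are now ordinary skbs chained through skb->next, and each of them carries a copy of the head's outer headers (offset bytes plus any tunnel header) in front of its data.

#include <linux/printk.h>
#include <linux/skbuff.h>

/* Hypothetical debug helper: dump the per-segment lengths of the list
 * produced by skb_segment_list().
 */
static void example_dump_segments(struct sk_buff *segs)
{
	struct sk_buff *seg;

	for (seg = segs; seg; seg = seg->next)
		pr_info("seg len=%u data_len=%u truesize=%u\n",
			seg->len, seg->data_len, seg->truesize);
}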
