@@ -842,12 +842,22 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	u8 protocol = 0;
 
 	if (ctx->mss) {	/* TSO */
-		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
-		ctx->l4_hdr_size = tcp_hdrlen(skb);
-		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
+		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+			ctx->l4_offset = skb_inner_transport_offset(skb);
+			ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
+			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+		} else {
+			ctx->l4_offset = skb_transport_offset(skb);
+			ctx->l4_hdr_size = tcp_hdrlen(skb);
+			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+		}
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
+			/* For encap packets, skb_checksum_start_offset refers
+			 * to inner L4 offset. Thus, below works for encap as
+			 * well as non-encap case
+			 */
+			ctx->l4_offset = skb_checksum_start_offset(skb);
 
 			if (ctx->ipv4) {
 				const struct iphdr *iph = ip_hdr(skb);
@@ -871,10 +881,10 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 				break;
 			}
 
-			ctx->copy_size = min(ctx->eth_ip_hdr_size +
+			ctx->copy_size = min(ctx->l4_offset +
 					     ctx->l4_hdr_size, skb->len);
 		} else {
-			ctx->eth_ip_hdr_size = 0;
+			ctx->l4_offset = 0;
 			ctx->l4_hdr_size = 0;
 			/* copy as much as allowed */
 			ctx->copy_size = min_t(unsigned int,
@@ -929,6 +939,25 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	}
 
 
+static void
+vmxnet3_prepare_inner_tso(struct sk_buff *skb,
+			  struct vmxnet3_tx_ctx *ctx)
+{
+	struct tcphdr *tcph = inner_tcp_hdr(skb);
+	struct iphdr *iph = inner_ip_hdr(skb);
+
+	if (ctx->ipv4) {
+		iph->check = 0;
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+						 IPPROTO_TCP, 0);
+	} else if (ctx->ipv6) {
+		struct ipv6hdr *iph = inner_ipv6_hdr(skb);
+
+		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
+					       IPPROTO_TCP, 0);
+	}
+}
+
 static void
 vmxnet3_prepare_tso(struct sk_buff *skb,
 		    struct vmxnet3_tx_ctx *ctx)
@@ -987,6 +1016,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	/* Use temporary descriptor to avoid touching bits multiple times */
 	union Vmxnet3_GenericDesc tempTxDesc;
 #endif
+	struct udphdr *udph;
 
 	count = txd_estimate(skb);
 
@@ -1003,7 +1033,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			}
 			tq->stats.copy_skb_header++;
 		}
-		vmxnet3_prepare_tso(skb, &ctx);
+		if (skb->encapsulation) {
+			vmxnet3_prepare_inner_tso(skb, &ctx);
+		} else {
+			vmxnet3_prepare_tso(skb, &ctx);
+		}
 	} else {
 		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
 
@@ -1026,14 +1060,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	BUG_ON(ret <= 0 && ctx.copy_size != 0);
 	/* hdrs parsed, check against other limits */
 	if (ctx.mss) {
-		if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
+		if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
 			     VMXNET3_MAX_TX_BUF_SIZE)) {
 			tq->stats.drop_oversized_hdr++;
 			goto drop_pkt;
 		}
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (unlikely(ctx.eth_ip_hdr_size +
+			if (unlikely(ctx.l4_offset +
 				     skb->csum_offset >
 				     VMXNET3_MAX_CSUM_OFFSET)) {
 				tq->stats.drop_oversized_hdr++;
@@ -1080,16 +1114,34 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 #endif
 	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
 	if (ctx.mss) {
-		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
-		gdesc->txd.om = VMXNET3_OM_TSO;
-		gdesc->txd.msscof = ctx.mss;
+		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+			gdesc->txd.om = VMXNET3_OM_ENCAP;
+			gdesc->txd.msscof = ctx.mss;
+
+			udph = udp_hdr(skb);
+			if (udph->check)
+				gdesc->txd.oco = 1;
+		} else {
+			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+			gdesc->txd.om = VMXNET3_OM_TSO;
+			gdesc->txd.msscof = ctx.mss;
+		}
 		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
-			gdesc->txd.om = VMXNET3_OM_CSUM;
-			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
-					    skb->csum_offset;
+			if (VMXNET3_VERSION_GE_4(adapter) &&
+			    skb->encapsulation) {
+				gdesc->txd.hlen = ctx.l4_offset +
+						  ctx.l4_hdr_size;
+				gdesc->txd.om = VMXNET3_OM_ENCAP;
+				gdesc->txd.msscof = 0;		/* Reserved */
+			} else {
+				gdesc->txd.hlen = ctx.l4_offset;
+				gdesc->txd.om = VMXNET3_OM_CSUM;
+				gdesc->txd.msscof = ctx.l4_offset +
+						    skb->csum_offset;
+			}
 		} else {
 			gdesc->txd.om = 0;
 			gdesc->txd.msscof = 0;
@@ -1168,13 +1220,21 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 		    (le32_to_cpu(gdesc->dword[3]) &
 		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(gdesc->rcd.frg);
+			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+			WARN_ON_ONCE(gdesc->rcd.frg &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
 		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
 					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(gdesc->rcd.frg);
+			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+			WARN_ON_ONCE(gdesc->rcd.frg &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
 		} else {
 			if (gdesc->rcd.csum) {
 				skb->csum = htons(gdesc->rcd.csum);
@@ -2429,6 +2489,10 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 
+	if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
+					 NETIF_F_GSO_UDP_TUNNEL_CSUM))
+		devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
+
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
 	devRead->misc.queueDescLen = cpu_to_le32(
@@ -2561,8 +2625,8 @@ vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
 	unsigned long flags;
 
-	if (!VMXNET3_VERSION_GE_4(adapter))
-		return;
+	if (!VMXNET3_VERSION_GE_4(adapter))
+		return;
 
 	spin_lock_irqsave(&adapter->cmd_lock, flags);
 
@@ -3073,6 +3137,18 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
 		NETIF_F_LRO;
+
+	if (VMXNET3_VERSION_GE_4(adapter)) {
+		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+			NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+		netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
+			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+			NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	}
+
 	if (dma64)
 		netdev->hw_features |= NETIF_F_HIGHDMA;
 	netdev->vlan_features = netdev->hw_features &