@@ -46,7 +46,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	if (!vsi->rx_rings)
 		goto err_rings;
 
-	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+	/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
+	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq),
 				    sizeof(*vsi->txq_map), GFP_KERNEL);
 
 	if (!vsi->txq_map)
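The doubled allocation reserves the upper half of txq_map for the XDP Tx queues, so every LAN Tx queue gets an XDP sibling at a fixed offset. A minimal sketch of the indexing this implies, assuming vsi->num_xdp_txq == vsi->alloc_txq as set in the rebuild hunk further down (the helper itself is hypothetical, not part of the patch):

	/* Hypothetical helper: the XDP queue paired with LAN Tx queue i
	 * occupies the upper half of vsi->txq_map.
	 */
	static u16 ice_xdp_txq_map_slot(struct ice_vsi *vsi, u16 i)
	{
		return vsi->alloc_txq + i;
	}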
@@ -1183,6 +1184,20 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
 	return err;
 }
 
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
+		vsi->max_frame = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+	else
+		vsi->max_frame = ICE_RXBUF_2048;
+
+	vsi->rx_buf_len = ICE_RXBUF_2048;
+}
+
 /**
  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
  * @vsi: the VSI being configured
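ice_vsi_cfg_frame_size() consolidates the sizing logic previously inlined in ice_vsi_cfg_rxqs() (removed in the next hunk) so that other paths, such as XDP setup, can reuse it. ICE_ETH_PKT_HDR_PAD presumably covers the L2 overhead spelled out in the removed code; a sketch of the assumed definition (the actual macro lives in the driver headers and may reserve room for a second VLAN tag):

	/* Assumed equivalent of the ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN
	 * arithmetic removed from ice_vsi_cfg_rxqs() below.
	 */
	#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)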
@@ -1197,13 +1212,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 	if (vsi->type == ICE_VSI_VF)
 		goto setup_rings;
 
-	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
-		vsi->max_frame = vsi->netdev->mtu +
-				 ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	else
-		vsi->max_frame = ICE_RXBUF_2048;
-
-	vsi->rx_buf_len = ICE_RXBUF_2048;
+	ice_vsi_cfg_frame_size(vsi);
 setup_rings:
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_rxq; i++) {
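Keeping rx_buf_len pinned at ICE_RXBUF_2048 also fits XDP's usual single-buffer model: a frame that fits in one 2 KB buffer can be handed to the BPF program without multi-buffer handling. VF VSIs still skip the computation entirely and jump straight to per-ring setup.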
@@ -1265,6 +1274,18 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
 	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
 }
 
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+	return ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+}
+
 /**
  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
  * @intrl: interrupt rate limit in usecs
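The new wrapper reuses ice_vsi_cfg_txqs() unchanged; only the ring array differs. A hedged usage sketch, assuming the caller is the XDP setup path and the cleanup label is hypothetical:

	/* Sketch: configure freshly allocated XDP rings for Tx */
	err = ice_vsi_cfg_xdp_txqs(vsi);
	if (err)
		goto clear_xdp_rings;	/* hypothetical cleanup label */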
@@ -1488,6 +1509,15 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
 }
 
+/**
+ * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
+ * @vsi: the VSI being configured
+ */
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+{
+	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+}
+
 /**
  * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
  * @vsi: VSI to enable or disable VLAN pruning on
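Hard-coding ICE_NO_RESET and a zero VM/VF number is reasonable here: XDP ring teardown is initiated by the stack (program removal or VSI rebuild), not by a reset source, so no relative VF index applies.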
@@ -1885,6 +1915,11 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
 		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
 		for (q = 0; q < q_vector->num_ring_tx; q++) {
 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+			if (ice_is_xdp_ena_vsi(vsi)) {
+				u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
+			}
 			txq++;
 		}
 
@@ -2259,6 +2294,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		vsi->base_vector = 0;
 	}
 
+	if (ice_is_xdp_ena_vsi(vsi))
+		/* return value check can be skipped here, it always returns
+		 * 0 if reset is in progress
+		 */
+		ice_destroy_xdp_rings(vsi);
 	ice_vsi_put_qs(vsi);
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_arrays(vsi);
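Destroying the XDP rings before ice_vsi_put_qs() and ice_vsi_clear_rings() presumably keeps the queue accounting consistent: the XDP Tx queues must be handed back before the VSI's queue maps are released and reallocated for the rebuild.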
@@ -2299,6 +2339,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 			goto err_vectors;
 
 		ice_vsi_map_rings_to_vectors(vsi);
+		if (ice_is_xdp_ena_vsi(vsi)) {
+			vsi->num_xdp_txq = vsi->alloc_txq;
+			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+			if (ret)
+				goto err_vectors;
+		}
 		/* Do not exit if configuring RSS had an issue, at least
 		 * receive traffic on first queue. Hence no need to capture
 		 * return value
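On rebuild the XDP state is reconstructed from scratch: num_xdp_txq is reset to alloc_txq (one XDP Tx queue per LAN Tx queue, matching the doubled txq_map above), and ice_prepare_xdp_rings() is re-run with the BPF program pointer retained in vsi->xdp_prog.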
@@ -2325,9 +2371,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 	}
 
 	/* configure VSI nodes based on number of queues and TC's */
-	for (i = 0; i < vsi->tc_cfg.numtc; i++)
+	for (i = 0; i < vsi->tc_cfg.numtc; i++) {
 		max_txqs[i] = vsi->alloc_txq;
 
+		if (ice_is_xdp_ena_vsi(vsi))
+			max_txqs[i] += vsi->num_xdp_txq;
+	}
+
 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 				 max_txqs);
 	if (status) {
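The scheduler configuration must account for the extra queues as well: with XDP enabled, each TC's max_txqs entry doubles. Worked example: for a single TC and alloc_txq == 16, ice_cfg_vsi_lan() is now asked for 32 Tx queues (16 LAN + 16 XDP).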