Commit fe96d71

skotur-brcm authored and kuba-moo committed
bnxt_en: Extend queue stop/start for TX rings
In order to use queue_stop/queue_start to support the new Steering Tags, we need to free the TX ring and TX completion ring if it is a combined channel with TX/RX sharing the same NAPI. Otherwise TX completions will not have the updated Steering Tag. If TPH is not enabled, we just stop the TX ring without freeing the TX/TX cmpl rings. With that we can now add napi_disable() and napi_enable() during queue_stop()/queue_start(). This will guarantee that NAPI will stop processing the completion entries in case there are additional pending entries in the completion rings after queue_stop().

There could be some NQEs sitting unprocessed while NAPI is disabled, thereby leaving the NQ unarmed. Explicitly re-arm the NQ after napi_enable() in queue_start() so that NAPI will resume properly.

Error handling in bnxt_queue_start() requires a reset. If a TX ring cannot be allocated or initialized properly, it will cause a TX timeout. The reset will also free any partially allocated rings. We don't expect to hit this error path because re-allocating previously reserved and allocated rings with the same parameters should never fail.

Reviewed-by: Ajit Khaparde <[email protected]>
Reviewed-by: Michal Swiatkowski <[email protected]>
Signed-off-by: Somnath Kotur <[email protected]>
Signed-off-by: Michael Chan <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
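For context, queue_stop()/queue_start() are the per-queue management callbacks defined by struct netdev_queue_mgmt_ops (include/net/netdev_queues.h). A minimal sketch of how bnxt wires these up follows; the ops structure is the real kernel API this commit implements against, while the bnxt_queue_mem_* callbacks live elsewhere in bnxt.c, outside this diff, and are shown only for orientation.

/* Sketch: how the queue management callbacks are registered.
 * The bnxt_queue_* callbacks are defined elsewhere in bnxt.c.
 */
static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
        .ndo_queue_mem_size     = sizeof(struct bnxt_rx_ring_info),
        .ndo_queue_mem_alloc    = bnxt_queue_mem_alloc,
        .ndo_queue_mem_free     = bnxt_queue_mem_free,
        .ndo_queue_start        = bnxt_queue_start,
        .ndo_queue_stop         = bnxt_queue_stop,
};

/* Assigned at probe time so the core can restart one queue at a time
 * (for example when a Steering Tag changes) without a full ifdown/ifup:
 *         dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
 */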
1 parent c8a0f76 commit fe96d71

File tree

  • drivers/net/ethernet/broadcom/bnxt

1 file changed: +110, -9 lines changed

drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 110 additions & 9 deletions
@@ -11279,6 +11279,78 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 	return 0;
 }
 
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+	struct bnxt_napi *bnapi;
+	int i;
+
+	bnapi = bp->bnapi[idx];
+	bnxt_for_each_napi_tx(i, bnapi, txr) {
+		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+		synchronize_net();
+
+		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+			if (txq) {
+				__netif_tx_lock_bh(txq);
+				netif_tx_stop_queue(txq);
+				__netif_tx_unlock_bh(txq);
+			}
+		}
+
+		if (!bp->tph_mode)
+			continue;
+
+		bnxt_hwrm_tx_ring_free(bp, txr, true);
+		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+	}
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+	struct bnxt_napi *bnapi;
+	int rc, i;
+
+	bnapi = bp->bnapi[idx];
+	/* All rings have been reserved and previously allocated.
+	 * Reallocating with the same parameters should never fail.
+	 */
+	bnxt_for_each_napi_tx(i, bnapi, txr) {
+		if (!bp->tph_mode)
+			goto start_tx;
+
+		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+		if (rc)
+			return rc;
+
+		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+		if (rc)
+			return rc;
+
+		txr->tx_prod = 0;
+		txr->tx_cons = 0;
+		txr->tx_hw_cons = 0;
+start_tx:
+		WRITE_ONCE(txr->dev_state, 0);
+		synchronize_net();
+
+		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+			continue;
+
+		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+		if (txq)
+			netif_tx_start_queue(txq);
+	}
+
+	return 0;
+}
+
 static void bnxt_free_irq(struct bnxt *bp)
 {
 	struct bnxt_irq *irq;
@@ -15641,7 +15713,9 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_rx_ring_info *rxr, *clone;
+	struct bnxt_cp_ring_info *cpr;
 	struct bnxt_vnic_info *vnic;
+	struct bnxt_napi *bnapi;
 	int i, rc;
 
 	rxr = &bp->rx_ring[idx];
@@ -15659,27 +15733,39 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 
 	bnxt_copy_rx_ring(bp, rxr, clone);
 
+	bnapi = rxr->bnapi;
+	cpr = &bnapi->cp_ring;
 	/* All rings have been reserved and previously allocated.
 	 * Reallocating with the same parameters should never fail.
 	 */
 	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
 	if (rc)
-		return rc;
+		goto err_reset;
 
 	if (bp->tph_mode) {
 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
 		if (rc)
-			goto err_free_hwrm_rx_ring;
+			goto err_reset;
 	}
 
 	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
 	if (rc)
-		goto err_free_hwrm_cp_ring;
+		goto err_reset;
 
 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+		rc = bnxt_tx_queue_start(bp, idx);
+		if (rc)
+			goto err_reset;
+	}
+
+	napi_enable(&bnapi->napi);
+	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
 	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
 		vnic = &bp->vnic_info[i];
 
@@ -15696,19 +15782,22 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 
 	return 0;
 
-err_free_hwrm_cp_ring:
-	if (bp->tph_mode)
-		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
-err_free_hwrm_rx_ring:
-	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+err_reset:
+	netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+		   rc);
+	napi_enable(&bnapi->napi);
+	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+	bnxt_reset_task(bp, true);
 	return rc;
 }
 
 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
 	struct bnxt_vnic_info *vnic;
+	struct bnxt_napi *bnapi;
 	int i;
 
 	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
@@ -15720,17 +15809,29 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 	/* Make sure NAPI sees that the VNIC is disabled */
 	synchronize_net();
 	rxr = &bp->rx_ring[idx];
-	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+	bnapi = rxr->bnapi;
+	cpr = &bnapi->cp_ring;
+	cancel_work_sync(&cpr->dim.work);
 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 	page_pool_disable_direct_recycling(rxr->page_pool);
 	if (bnxt_separate_head_pool())
 		page_pool_disable_direct_recycling(rxr->head_pool);
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+		bnxt_tx_queue_stop(bp, idx);
+
+	/* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+	 * completion is handled in NAPI to guarantee no more DMA on that ring
+	 * after seeing the completion.
+	 */
+	napi_disable(&bnapi->napi);
+
 	if (bp->tph_mode) {
 		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
 		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
 	}
+	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
 
 	memcpy(qmem, rxr, sizeof(*rxr));
 	bnxt_init_rx_ring_struct(bp, qmem);
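For reference, these callbacks are driven by the core's per-queue restart helper. Below is a simplified paraphrase of netdev_rx_queue_restart() (net/core/netdev_rx_queue.c) showing the order in which the ops run; the function name is illustrative, and locking plus some recovery paths are trimmed.

/* Illustrative paraphrase of netdev_rx_queue_restart(); not the exact
 * upstream code. Shows the order in which the queue_mgmt_ops run.
 */
static int queue_restart_sketch(struct net_device *dev, unsigned int idx)
{
	const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	new_mem = kvzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	old_mem = kvzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem || !old_mem) {
		err = -ENOMEM;
		goto out;
	}

	/* Allocate replacement ring memory before touching the live queue. */
	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx);
	if (err)
		goto out;

	/* Stop the old queue; with this commit, bnxt_queue_stop() also stops
	 * the shared TX ring and disables NAPI for a combined channel.
	 */
	err = ops->ndo_queue_stop(dev, old_mem, idx);
	if (err)
		goto out_free_new;

	/* Start the replacement; bnxt_queue_start() re-enables NAPI, re-arms
	 * the NQ doorbell, and schedules a reset if ring allocation fails.
	 */
	err = ops->ndo_queue_start(dev, new_mem, idx);
	if (err)
		goto out_free_new; /* upstream also tries to revive the old queue */

	ops->ndo_queue_mem_free(dev, old_mem);
	goto out;

out_free_new:
	ops->ndo_queue_mem_free(dev, new_mem);
out:
	kvfree(new_mem);
	kvfree(old_mem);
	return err;
}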
