
Commit 2d694c2

spikeh authored and davem330 committed
bnxt_en: implement netdev_queue_mgmt_ops
Implement netdev_queue_mgmt_ops for bnxt; the interface was added in [1].

Two bnxt_rx_ring_info structs are allocated to hold the new/old queue memory. Queue memory is copied from/to the main bp->rx_ring[idx] bnxt_rx_ring_info.

Queue memory is pre-allocated in bnxt_queue_mem_alloc() into a clone, and then copied into bp->rx_ring[idx] in bnxt_queue_start(). Similarly, when bp->rx_ring[idx] is stopped, its queue memory is copied into a clone, and then freed later in bnxt_queue_mem_free().

I tested this patchset with netdev_rx_queue_restart(), including inducing errors in all places that return an error code. In all cases, the queue is left in a good working state.

Rx queues are created/destroyed using bnxt_hwrm_rx_ring_alloc() and bnxt_hwrm_rx_ring_free(), which issue HWRM_RING_ALLOC and HWRM_RING_FREE commands respectively to the firmware. By the time a HWRM_RING_FREE response is received, there won't be any more completions from that queue.

Thanks to Somnath for helping me with this patch. With their permission I've added them as Acked-by.

[1]: https://lore.kernel.org/netdev/[email protected]/

Acked-by: Somnath Kotur <[email protected]>
Signed-off-by: David Wei <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 88f5625 commit 2d694c2
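
For context: [1] adds the core helper netdev_rx_queue_restart(), which drives the callbacks implemented in this commit. The code below is a minimal sketch of that alloc/stop/start/free ordering against the ops table this commit registers; the helper name is real, but this body is an illustration rather than the core implementation, and the recovery path (re-starting the old queue when the new one fails to start) is elided.

/* Minimal sketch of the sequence netdev_rx_queue_restart() performs,
 * per [1]; error recovery for a failed ndo_queue_start is elided.
 */
static int rx_queue_restart_sketch(struct net_device *dev, unsigned int idx)
{
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem || !old_mem) {
		err = -ENOMEM;
		goto out;
	}

	/* Pre-allocate replacement ring memory into a clone. */
	err = qops->ndo_queue_mem_alloc(dev, new_mem, idx);
	if (err)
		goto out;

	/* Quiesce the live queue; its state is copied out into old_mem. */
	err = qops->ndo_queue_stop(dev, old_mem, idx);
	if (err)
		goto out_free_new;

	/* Bring the queue back up on the pre-allocated clone. */
	err = qops->ndo_queue_start(dev, new_mem, idx);
	if (err)
		goto out_free_new;

	/* The displaced ring memory can now be released. */
	qops->ndo_queue_mem_free(dev, old_mem);
	goto out;

out_free_new:
	qops->ndo_queue_mem_free(dev, new_mem);
out:
	kvfree(new_mem);
	kvfree(old_mem);
	return err;
}

In this commit, ndo_queue_mem_size is sizeof(struct bnxt_rx_ring_info), so the qmem pointer passed to each callback is the bnxt_rx_ring_info clone that the driver code below copies to and from bp->rx_ring[idx].
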

File tree

1 file changed: +275 −0 lines changed

  • drivers/net/ethernet/broadcom/bnxt/bnxt.c


drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 275 additions & 0 deletions
@@ -3996,6 +3996,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 	return 0;
 }
 
+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+				     struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_mem_info *rmem;
+	struct bnxt_ring_struct *ring;
+
+	ring = &rxr->rx_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->nr_pages = bp->rx_nr_pages;
+	rmem->page_size = HW_RXBD_RING_SIZE;
+	rmem->pg_arr = (void **)rxr->rx_desc_ring;
+	rmem->dma_arr = rxr->rx_desc_mapping;
+	rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+	rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+	ring = &rxr->rx_agg_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->nr_pages = bp->rx_agg_nr_pages;
+	rmem->page_size = HW_RXBD_RING_SIZE;
+	rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+	rmem->dma_arr = rxr->rx_agg_desc_mapping;
+	rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+	rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+				      struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_mem_info *rmem;
+	struct bnxt_ring_struct *ring;
+	int i;
+
+	rxr->page_pool->p.napi = NULL;
+	rxr->page_pool = NULL;
+
+	ring = &rxr->rx_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->pg_tbl = NULL;
+	rmem->pg_tbl_map = 0;
+	for (i = 0; i < rmem->nr_pages; i++) {
+		rmem->pg_arr[i] = NULL;
+		rmem->dma_arr[i] = 0;
+	}
+	*rmem->vmem = NULL;
+
+	ring = &rxr->rx_agg_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->pg_tbl = NULL;
+	rmem->pg_tbl_map = 0;
+	for (i = 0; i < rmem->nr_pages; i++) {
+		rmem->pg_arr[i] = NULL;
+		rmem->dma_arr[i] = 0;
+	}
+	*rmem->vmem = NULL;
+}
+
 static void bnxt_init_ring_struct(struct bnxt *bp)
 {
 	int i, j;
@@ -14914,6 +14970,224 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
 	.get_base_stats = bnxt_get_base_stats,
 };
 
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	u16 mem_size;
+
+	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+	mem_size = rxr->rx_agg_bmap_size / 8;
+	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+	if (!rxr->rx_agg_bmap)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+	struct bnxt_rx_ring_info *rxr, *clone;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ring_struct *ring;
+	int rc;
+
+	rxr = &bp->rx_ring[idx];
+	clone = qmem;
+	memcpy(clone, rxr, sizeof(*rxr));
+	bnxt_init_rx_ring_struct(bp, clone);
+	bnxt_reset_rx_ring_struct(bp, clone);
+
+	clone->rx_prod = 0;
+	clone->rx_agg_prod = 0;
+	clone->rx_sw_agg_prod = 0;
+	clone->rx_next_cons = 0;
+
+	rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+	if (rc)
+		return rc;
+
+	ring = &clone->rx_ring_struct;
+	rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+	if (rc)
+		goto err_free_rx_ring;
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		ring = &clone->rx_agg_ring_struct;
+		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+		if (rc)
+			goto err_free_rx_agg_ring;
+
+		rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+		if (rc)
+			goto err_free_rx_agg_ring;
+	}
+
+	bnxt_init_one_rx_ring_rxbd(bp, clone);
+	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+
+	return 0;
+
+err_free_rx_agg_ring:
+	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+	clone->page_pool->p.napi = NULL;
+	page_pool_destroy(clone->page_pool);
+	clone->page_pool = NULL;
+	return rc;
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+	struct bnxt_rx_ring_info *rxr = qmem;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ring_struct *ring;
+
+	bnxt_free_one_rx_ring(bp, rxr);
+	bnxt_free_one_rx_agg_ring(bp, rxr);
+
+	/* At this point, this NAPI instance has another page pool associated
+	 * with it. Disconnect here before freeing the old page pool to avoid
+	 * warnings.
+	 */
+	rxr->page_pool->p.napi = NULL;
+	page_pool_destroy(rxr->page_pool);
+	rxr->page_pool = NULL;
+
+	ring = &rxr->rx_ring_struct;
+	bnxt_free_ring(bp, &ring->ring_mem);
+
+	ring = &rxr->rx_agg_ring_struct;
+	bnxt_free_ring(bp, &ring->ring_mem);
+
+	kfree(rxr->rx_agg_bmap);
+	rxr->rx_agg_bmap = NULL;
+}
+
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+			      struct bnxt_rx_ring_info *dst,
+			      struct bnxt_rx_ring_info *src)
+{
+	struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+	struct bnxt_ring_struct *dst_ring, *src_ring;
+	int i;
+
+	dst_ring = &dst->rx_ring_struct;
+	dst_rmem = &dst_ring->ring_mem;
+	src_ring = &src->rx_ring_struct;
+	src_rmem = &src_ring->ring_mem;
+
+	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+	WARN_ON(dst_rmem->flags != src_rmem->flags);
+	WARN_ON(dst_rmem->depth != src_rmem->depth);
+	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+	dst_rmem->pg_tbl = src_rmem->pg_tbl;
+	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+	*dst_rmem->vmem = *src_rmem->vmem;
+	for (i = 0; i < dst_rmem->nr_pages; i++) {
+		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+	}
+
+	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+		return;
+
+	dst_ring = &dst->rx_agg_ring_struct;
+	dst_rmem = &dst_ring->ring_mem;
+	src_ring = &src->rx_agg_ring_struct;
+	src_rmem = &src_ring->ring_mem;
+
+	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+	WARN_ON(dst_rmem->flags != src_rmem->flags);
+	WARN_ON(dst_rmem->depth != src_rmem->depth);
+	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+	WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+	dst_rmem->pg_tbl = src_rmem->pg_tbl;
+	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+	*dst_rmem->vmem = *src_rmem->vmem;
+	for (i = 0; i < dst_rmem->nr_pages; i++) {
+		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+	}
+
+	dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rx_ring_info *rxr, *clone;
+	struct bnxt_cp_ring_info *cpr;
+	int rc;
+
+	rxr = &bp->rx_ring[idx];
+	clone = qmem;
+
+	rxr->rx_prod = clone->rx_prod;
+	rxr->rx_agg_prod = clone->rx_agg_prod;
+	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+	rxr->rx_next_cons = clone->rx_next_cons;
+	rxr->page_pool = clone->page_pool;
+
+	bnxt_copy_rx_ring(bp, rxr, clone);
+
+	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+	if (rc)
+		return rc;
+	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+	if (rc)
+		goto err_free_hwrm_rx_ring;
+
+	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+	napi_enable(&rxr->bnapi->napi);
+
+	cpr = &rxr->bnapi->cp_ring;
+	cpr->sw_stats->rx.rx_resets++;
+
+	return 0;
+
+err_free_hwrm_rx_ring:
+	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+	return rc;
+}
+
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rx_ring_info *rxr;
+
+	rxr = &bp->rx_ring[idx];
+	napi_disable(&rxr->bnapi->napi);
+	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+	rxr->rx_next_cons = 0;
+
+	memcpy(qmem, rxr, sizeof(*rxr));
+	bnxt_init_rx_ring_struct(bp, qmem);
+
+	return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+	.ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
+	.ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
+	.ndo_queue_mem_free = bnxt_queue_mem_free,
+	.ndo_queue_start = bnxt_queue_start,
+	.ndo_queue_stop = bnxt_queue_stop,
+};
+
 static void bnxt_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -15379,6 +15653,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->stat_ops = &bnxt_stat_ops;
 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
 	dev->ethtool_ops = &bnxt_ethtool_ops;
+	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
 	pci_set_drvdata(pdev, dev);
 
 	rc = bnxt_alloc_hwrm_resources(bp);
