
Commit 47dfd7a

Erni Sri Satya Vennela authored and kuba-moo committed
net: mana: Add debug logs in MANA network driver
Add more logs to assist in debugging and monitoring driver behaviour, making it easier to identify potential issues during development and testing.

Signed-off-by: Erni Sri Satya Vennela <[email protected]>
Reviewed-by: Haiyang Zhang <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent a60a27c commit 47dfd7a

File tree

3 files changed: +94 -20 lines changed

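Every hunk below follows the same convention: error paths gain a dev_err()/netdev_err() that records the queue type, size, and error code, while success paths gain a dev_dbg() that stays silent unless debug logging is enabled. The sketch below is illustrative only and is not part of the commit: example_create_queue() is a hypothetical helper written to show the pattern, while struct gdma_context, struct gdma_queue_spec, struct gdma_mem_info and mana_gd_alloc_memory() are the driver's own.

/* Illustrative sketch only -- not part of this commit. example_create_queue()
 * is hypothetical; the GDMA types and mana_gd_alloc_memory() come from the
 * MANA driver's gdma.h header.
 */
#include <linux/device.h>       /* dev_err(), dev_dbg() */
#include <net/mana/gdma.h>      /* GDMA types and helpers (current tree layout) */

static int example_create_queue(struct gdma_context *gc,
                                const struct gdma_queue_spec *spec,
                                struct gdma_mem_info *gmi)
{
        int err;

        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err) {
                /* Failure path: always logged, with enough context to debug. */
                dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
                        spec->type, spec->queue_size, err);
                return err;
        }

        /* Success path: only emitted when debug logging (e.g. dynamic debug) is enabled. */
        dev_dbg(gc->dev, "GDMA queue type: %d, size: %u allocated\n",
                spec->type, spec->queue_size);
        return 0;
}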

drivers/net/ethernet/microsoft/mana/gdma_main.c

Lines changed: 42 additions & 8 deletions
@@ -666,8 +666,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
 
 	gmi = &queue->mem_info;
 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+			spec->type, spec->queue_size, err);
 		goto free_q;
+	}
 
 	queue->head = 0;
 	queue->tail = 0;
@@ -688,6 +691,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
 	*queue_ptr = queue;
 	return 0;
 out:
+	dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+		spec->type, spec->queue_size, err);
 	mana_gd_free_memory(gmi);
 free_q:
 	kfree(queue);
@@ -770,7 +775,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	}
 
 	gmi->dma_region_handle = resp.dma_region_handle;
+	dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+		gmi->dma_region_handle);
 out:
+	if (err)
+		dev_dbg(gc->dev,
+			"Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+			length, req->gdma_page_type, resp.hdr.status, err);
 	kfree(req);
 	return err;
 }
@@ -793,8 +804,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
 
 	gmi = &queue->mem_info;
 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+			spec->type, spec->queue_size, err);
 		goto free_q;
+	}
 
 	err = mana_gd_create_dma_region(gd, gmi);
 	if (err)
@@ -815,6 +829,8 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
 	*queue_ptr = queue;
 	return 0;
 out:
+	dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+		spec->type, spec->queue_size, err);
 	mana_gd_free_memory(gmi);
 free_q:
 	kfree(queue);
@@ -841,8 +857,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
 
 	gmi = &queue->mem_info;
 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+			spec->type, spec->queue_size, err);
 		goto free_q;
+	}
 
 	err = mana_gd_create_dma_region(gd, gmi);
 	if (err)
@@ -862,6 +881,8 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
 	*queue_ptr = queue;
 	return 0;
 out:
+	dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+		spec->type, spec->queue_size, err);
 	mana_gd_free_memory(gmi);
 free_q:
 	kfree(queue);
@@ -1157,8 +1178,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
 	int err;
 
 	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+			queue->type, queue->queue_size, err);
 		return err;
+	}
 
 	mana_gd_wq_ring_doorbell(gc, queue);
 
@@ -1435,8 +1459,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
 	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
 
 	err = mana_gd_setup_irqs(pdev);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
 		return err;
+	}
 
 	err = mana_hwc_create_channel(gc);
 	if (err)
@@ -1454,12 +1480,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
 	if (err)
 		goto destroy_hwc;
 
+	dev_dbg(&pdev->dev, "mana gdma setup successful\n");
 	return 0;
 
 destroy_hwc:
 	mana_hwc_destroy_channel(gc);
 remove_irq:
 	mana_gd_remove_irqs(pdev);
+	dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
 	return err;
 }
 
@@ -1470,6 +1498,7 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
 	mana_hwc_destroy_channel(gc);
 
 	mana_gd_remove_irqs(pdev);
+	dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
 }
 
 static bool mana_is_pf(unsigned short dev_id)
@@ -1488,8 +1517,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
 
 	err = pci_enable_device(pdev);
-	if (err)
+	if (err) {
+		dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
 		return -ENXIO;
+	}
 
 	pci_set_master(pdev);
 
@@ -1498,9 +1529,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto disable_dev;
 
 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (err)
+	if (err) {
+		dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
 		goto release_region;
-
+	}
 	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	err = -ENOMEM;
@@ -1575,6 +1607,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
+
+	dev_dbg(&pdev->dev, "mana gdma remove successful\n");
 }
 
 /* The 'state' parameter is not used. */

drivers/net/ethernet/microsoft/mana/hw_channel.c

Lines changed: 5 additions & 1 deletion
@@ -440,7 +440,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
 	gmi = &dma_buf->mem_info;
 	err = mana_gd_alloc_memory(gc, buf_size, gmi);
 	if (err) {
-		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+		dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+			buf_size, err);
 		goto out;
 	}
 
@@ -529,6 +530,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
 out:
 	if (err)
 		mana_hwc_destroy_wq(hwc, hwc_wq);
+
+	dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+		queue_size, q_type, err);
 	return err;
 }

drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 47 additions & 11 deletions
@@ -52,10 +52,12 @@ static int mana_open(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
 	int err;
-
 	err = mana_alloc_queues(ndev);
-	if (err)
+
+	if (err) {
+		netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
 		return err;
+	}
 
 	apc->port_is_up = true;
 
@@ -64,7 +66,7 @@ static int mana_open(struct net_device *ndev)
 
 	netif_carrier_on(ndev);
 	netif_tx_wake_all_queues(ndev);
-
+	netdev_dbg(ndev, "%s successful\n", __func__);
 	return 0;
 }
 
@@ -176,6 +178,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
 	return 0;
 
 frag_err:
+	if (net_ratelimit())
+		netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+			   skb->len);
 	for (i = sg_i - 1; i >= hsg; i--)
 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
 			       DMA_TO_DEVICE);
@@ -690,6 +695,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
 	return 0;
 
 error:
+	netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
 	mana_pre_dealloc_rxbufs(mpc);
 	return -ENOMEM;
 }
@@ -1307,8 +1313,10 @@ static int mana_create_eq(struct mana_context *ac)
 	for (i = 0; i < gc->max_num_queues; i++) {
 		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
-		if (err)
+		if (err) {
+			dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
 			goto out;
+		}
 		mana_create_eq_debugfs(ac, i);
 	}
 
@@ -2083,6 +2091,8 @@ static int mana_create_txq(struct mana_port_context *apc,
 
 	return 0;
 out:
+	netdev_err(net, "Failed to create %d TX queues, %d\n",
+		   apc->num_queues, err);
 	mana_destroy_txq(apc);
 	return err;
 }
@@ -2418,6 +2428,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
 		if (!rxq) {
 			err = -ENOMEM;
+			netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
 			goto out;
 		}
 
@@ -2664,12 +2675,18 @@ int mana_alloc_queues(struct net_device *ndev)
 	int err;
 
 	err = mana_create_vport(apc, ndev);
-	if (err)
+	if (err) {
+		netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
 		return err;
+	}
 
 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
-	if (err)
+	if (err) {
+		netdev_err(ndev,
+			   "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
+			   apc->num_queues, err);
 		goto destroy_vport;
+	}
 
 	err = mana_add_rx_queues(apc, ndev);
 	if (err)
@@ -2678,14 +2695,20 @@ int mana_alloc_queues(struct net_device *ndev)
 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
 
 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
-	if (err)
+	if (err) {
+		netdev_err(ndev,
+			   "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
+			   apc->num_queues, err);
 		goto destroy_vport;
+	}
 
 	mana_rss_table_init(apc);
 
 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
-	if (err)
+	if (err) {
+		netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
 		goto destroy_vport;
+	}
 
 	if (gd->gdma_context->is_pf) {
 		err = mana_pf_register_filter(apc);
@@ -2826,8 +2849,10 @@ int mana_detach(struct net_device *ndev, bool from_close)
 
 	if (apc->port_st_save) {
 		err = mana_dealloc_queues(ndev);
-		if (err)
+		if (err) {
+			netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
 			return err;
+		}
 	}
 
 	if (!from_close) {
@@ -2973,6 +2998,8 @@ static int add_adev(struct gdma_dev *gd)
 		goto add_fail;
 
 	gd->adev = adev;
+	dev_dbg(gd->gdma_context->dev,
+		"Auxiliary device added successfully\n");
 	return 0;
 
 add_fail:
@@ -3014,8 +3041,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 	}
 
 	err = mana_create_eq(ac);
-	if (err)
+	if (err) {
+		dev_err(dev, "Failed to create EQs: %d\n", err);
 		goto out;
+	}
 
 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
 				    MANA_MICRO_VERSION, &num_ports);
@@ -3071,8 +3100,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 
 	err = add_adev(gd);
 out:
-	if (err)
+	if (err) {
 		mana_remove(gd, false);
+	} else {
+		dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+			gd, gd->dev_id.as_uint32, ac->num_ports,
+			gd->dev_id.type, gd->dev_id.instance);
+		dev_dbg(dev, "%s succeeded\n", __func__);
+	}
 
 	return err;
 }
@@ -3134,6 +3169,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
 	gd->driver_data = NULL;
 	gd->gdma_context = NULL;
 	kfree(ac);
+	dev_dbg(dev, "%s succeeded\n", __func__);
 }
 
 struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
