
Commit 850b971

Jiawen Wu authored and davem330 committed
net: libwx: Allocate Rx and Tx resources
Setup Rx and Tx descriptors for specific rings.

Signed-off-by: Jiawen Wu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 18b5b8a commit 850b971
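For orientation, the two entry points this patch exports, wx_setup_resources() and wx_free_resources(), are intended to be called by the Wangxun drivers built on libwx. Below is a minimal sketch of how a consumer driver might wire them into its open/stop path; the callback names, the netdev_priv() layout, and the surrounding steps are illustrative assumptions, not part of this commit.

/* Hypothetical sketch: a libwx-based driver allocating and releasing ring
 * resources around its open/stop callbacks. Only wx_setup_resources() and
 * wx_free_resources() come from this commit; everything else is assumed.
 */
static int example_open(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int err;

	err = wx_setup_resources(wx);	/* Tx/Rx descriptor rings + ISB */
	if (err)
		return err;

	/* ... configure hardware, request IRQs, start the interface ... */

	return 0;
}

static int example_stop(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	/* ... stop the interface, free IRQs ... */

	wx_free_resources(wx);	/* releases everything set up in open */

	return 0;
}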

5 files changed, +388 −0 lines changed

drivers/net/ethernet/wangxun/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
 
 config LIBWX
 	tristate
+	select PAGE_POOL
 	help
 	  Common library for Wangxun(R) Ethernet drivers.
 

drivers/net/ethernet/wangxun/libwx/wx_hw.c

Lines changed: 8 additions & 0 deletions
@@ -1335,12 +1335,16 @@ static void wx_configure_tx_ring(struct wx *wx,
 {
 	u32 txdctl = WX_PX_TR_CFG_ENABLE;
 	u8 reg_idx = ring->reg_idx;
+	u64 tdba = ring->dma;
 	int ret;
 
 	/* disable queue to avoid issues while updating state */
 	wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
 	WX_WRITE_FLUSH(wx);
 
+	wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));
+
 	/* reset head and tail pointers */
 	wr32(wx, WX_PX_TR_RP(reg_idx), 0);
 	wr32(wx, WX_PX_TR_WP(reg_idx), 0);
@@ -1364,12 +1368,16 @@ static void wx_configure_rx_ring(struct wx *wx,
 				 struct wx_ring *ring)
 {
 	u16 reg_idx = ring->reg_idx;
+	u64 rdba = ring->dma;
 	u32 rxdctl;
 
 	/* disable queue to avoid issues while updating state */
 	rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
 	wx_disable_rx_queue(wx, ring);
 
+	wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
+	wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));
+
 	if (ring->count == WX_MAX_RXD)
 		rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
 	else
drivers/net/ethernet/wangxun/libwx/wx_lib.c

Lines changed: 303 additions & 0 deletions
@@ -2,6 +2,7 @@
 /* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
 
 #include <linux/etherdevice.h>
+#include <net/page_pool.h>
 #include <linux/iopoll.h>
 #include <linux/pci.h>

@@ -192,6 +193,8 @@ static int wx_alloc_q_vector(struct wx *wx,
 	wx->q_vector[v_idx] = q_vector;
 	q_vector->wx = wx;
 	q_vector->v_idx = v_idx;
+	if (cpu_online(v_idx))
+		q_vector->numa_node = cpu_to_node(v_idx);
 
 	/* initialize pointer to rings */
 	ring = q_vector->ring;
@@ -606,4 +609,304 @@ void wx_configure_vectors(struct wx *wx)
 }
 EXPORT_SYMBOL(wx_configure_vectors);
 
+/**
+ * wx_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_rx_resources(struct wx_ring *rx_ring)
+{
+	kvfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+
+	if (rx_ring->page_pool) {
+		page_pool_destroy(rx_ring->page_pool);
+		rx_ring->page_pool = NULL;
+	}
+}
+
+/**
+ * wx_free_all_rx_resources - Free Rx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_all_rx_resources(struct wx *wx)
+{
+	int i;
+
+	for (i = 0; i < wx->num_rx_queues; i++)
+		wx_free_rx_resources(wx->rx_ring[i]);
+}
+
+/**
+ * wx_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_tx_resources(struct wx_ring *tx_ring)
+{
+	kvfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
+	tx_ring->desc = NULL;
+}
+
+/**
+ * wx_free_all_tx_resources - Free Tx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_all_tx_resources(struct wx *wx)
+{
+	int i;
+
+	for (i = 0; i < wx->num_tx_queues; i++)
+		wx_free_tx_resources(wx->tx_ring[i]);
+}
+
+void wx_free_resources(struct wx *wx)
+{
+	wx_free_isb_resources(wx);
+	wx_free_all_rx_resources(wx);
+	wx_free_all_tx_resources(wx);
+}
+EXPORT_SYMBOL(wx_free_resources);
+
+static int wx_alloc_page_pool(struct wx_ring *rx_ring)
+{
+	int ret = 0;
+
+	struct page_pool_params pp_params = {
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.order = 0,
+		.pool_size = rx_ring->size,
+		.nid = dev_to_node(rx_ring->dev),
+		.dev = rx_ring->dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = 0,
+		.max_len = PAGE_SIZE,
+	};
+
+	rx_ring->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rx_ring->page_pool)) {
+		rx_ring->page_pool = NULL;
+		ret = PTR_ERR(rx_ring->page_pool);
+	}
+
+	return ret;
+}
+
+/**
+ * wx_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int wx_setup_rx_resources(struct wx_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size, ret;
+
+	size = sizeof(struct wx_rx_buffer) * rx_ring->count;
+
+	if (rx_ring->q_vector)
+		numa_node = rx_ring->q_vector->numa_node;
+
+	rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+	if (!rx_ring->rx_buffer_info)
+		rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+	if (!rx_ring->desc) {
+		set_dev_node(dev, orig_node);
+		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+						   &rx_ring->dma, GFP_KERNEL);
+	}
+
+	if (!rx_ring->desc)
+		goto err;
+
+	ret = wx_alloc_page_pool(rx_ring);
+	if (ret < 0) {
+		dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	kvfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_rx_resources - allocate all queues Rx resources
+ * @wx: pointer to hardware structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_rx_resources(struct wx *wx)
+{
+	int i, err = 0;
+
+	for (i = 0; i < wx->num_rx_queues; i++) {
+		err = wx_setup_rx_resources(wx->rx_ring[i]);
+		if (!err)
+			continue;
+
+		wx_err(wx, "Allocation for Rx Queue %u failed\n", i);
+		goto err_setup_rx;
+	}
+
+	return 0;
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		wx_free_rx_resources(wx->rx_ring[i]);
+	return err;
+}
+
+/**
+ * wx_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_tx_resources(struct wx_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	size = sizeof(struct wx_tx_buffer) * tx_ring->count;
+
+	if (tx_ring->q_vector)
+		numa_node = tx_ring->q_vector->numa_node;
+
+	tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+	if (!tx_ring->tx_buffer_info)
+		tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc) {
+		set_dev_node(dev, orig_node);
+		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+						   &tx_ring->dma, GFP_KERNEL);
+	}
+
+	if (!tx_ring->desc)
+		goto err;
+
+	return 0;
+
+err:
+	kvfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_tx_resources - allocate all queues Tx resources
+ * @wx: pointer to private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_tx_resources(struct wx *wx)
+{
+	int i, err = 0;
+
+	for (i = 0; i < wx->num_tx_queues; i++) {
+		err = wx_setup_tx_resources(wx->tx_ring[i]);
+		if (!err)
+			continue;
+
+		wx_err(wx, "Allocation for Tx Queue %u failed\n", i);
+		goto err_setup_tx;
+	}
+
+	return 0;
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		wx_free_tx_resources(wx->tx_ring[i]);
+	return err;
+}
+
+int wx_setup_resources(struct wx *wx)
+{
+	int err;
+
+	/* allocate transmit descriptors */
+	err = wx_setup_all_tx_resources(wx);
+	if (err)
+		return err;
+
+	/* allocate receive descriptors */
+	err = wx_setup_all_rx_resources(wx);
+	if (err)
+		goto err_free_tx;
+
+	err = wx_setup_isb_resources(wx);
+	if (err)
+		goto err_free_rx;
+
+	return 0;
+
+err_free_rx:
+	wx_free_all_rx_resources(wx);
+err_free_tx:
+	wx_free_all_tx_resources(wx);
+
+	return err;
+}
+EXPORT_SYMBOL(wx_setup_resources);
+
 MODULE_LICENSE("GPL");
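The Rx ring setup above creates a page_pool per ring (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, order-0 pages) but does not yet fill the ring with buffers; that comes in later patches. As a rough sketch of how an Rx path typically draws pages from such a pool and returns them, using the generic page_pool API rather than anything added here (the function names and the wx_rx_buffer field names are assumptions):

/* Illustrative sketch of page_pool usage, not code from this commit.
 * Pages come out of the per-ring pool already DMA-mapped (PP_FLAG_DMA_MAP)
 * and are recycled back into the pool instead of being freed.
 * The wx_rx_buffer fields (page, dma, page_offset) are assumed here.
 */
static int example_alloc_rx_buffer(struct wx_ring *rx_ring,
				   struct wx_rx_buffer *bi)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx_ring->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	bi->page = page;
	bi->dma = page_pool_get_dma_addr(page);	/* mapped by the pool */
	bi->page_offset = 0;

	return 0;
}

static void example_free_rx_buffer(struct wx_ring *rx_ring,
				   struct wx_rx_buffer *bi)
{
	/* hand the page back to the pool for reuse */
	page_pool_put_full_page(rx_ring->page_pool, bi->page, false);
	bi->page = NULL;
}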

drivers/net/ethernet/wangxun/libwx/wx_lib.h

Lines changed: 2 additions & 0 deletions
@@ -16,5 +16,7 @@ int wx_setup_isb_resources(struct wx *wx);
 void wx_free_isb_resources(struct wx *wx);
 u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
 void wx_configure_vectors(struct wx *wx);
+void wx_free_resources(struct wx *wx);
+int wx_setup_resources(struct wx *wx);
 
 #endif /* _NGBE_LIB_H_ */
