@@ -89,6 +89,16 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
 #define FEC_ENET_OPD_V	0xFFF0
 #define FEC_MDIO_PM_TIMEOUT	100 /* ms */
 
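+/* XDP verdict bits returned by fec_enet_run_xdp(): PASS hands the frame
+ * to the regular stack, CONSUMED means the Rx page was already recycled
+ * or dropped, and REDIR tells the caller to run xdp_do_flush_map() after
+ * the NAPI poll. FEC_ENET_XDP_TX is reserved for future XDP_TX support.
+ */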
+#define FEC_ENET_XDP_PASS	0
+#define FEC_ENET_XDP_CONSUMED	BIT(0)
+#define FEC_ENET_XDP_TX		BIT(1)
+#define FEC_ENET_XDP_REDIR	BIT(2)
+
 struct fec_devinfo {
 	u32 quirks;
 };
@@ -418,13 +428,17 @@ static int
 fec_enet_create_page_pool(struct fec_enet_private *fep,
 			  struct fec_enet_priv_rx_q *rxq, int size)
 {
+	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	struct page_pool_params pp_params = {
 		.order = 0,
 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.pool_size = size,
 		.nid = dev_to_node(&fep->pdev->dev),
 		.dev = &fep->pdev->dev,
-		.dma_dir = DMA_FROM_DEVICE,
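+		/* XDP_TX/REDIRECT may DMA out of these pages, so map
+		 * them bidirectionally when a program is attached.
+		 */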
+		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
 		.offset = FEC_ENET_XDP_HEADROOM,
 		.max_len = FEC_ENET_RX_FRSIZE,
 	};
@@ -1499,6 +1505,59 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
 }
 
+static u32
+fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
+		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+{
+	unsigned int sync, len = xdp->data_end - xdp->data;
+	u32 ret = FEC_ENET_XDP_PASS;
+	struct page *page;
+	int err;
+	u32 act;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+
+	/* The program may have called xdp_adjust_tail(), so the for_device
+	 * DMA sync must cover the maximum length the CPU may have touched.
+	 */
+	sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+	sync = max(sync, len);
+
+	switch (act) {
+	case XDP_PASS:
+		ret = FEC_ENET_XDP_PASS;
+		break;
+
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(fep->netdev, xdp, prog);
+		if (!err) {
+			ret = FEC_ENET_XDP_REDIR;
+		} else {
+			ret = FEC_ENET_XDP_CONSUMED;
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
+		}
+		break;
+
+	default:
+	case XDP_TX:
+		/* XDP_TX is not yet supported: warn and drop the frame */
+		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+		fallthrough;
+
+	case XDP_ABORTED:
+		fallthrough;	/* handle aborts by dropping packet */
+
+	case XDP_DROP:
+		ret = FEC_ENET_XDP_CONSUMED;
+		page = virt_to_head_page(xdp->data);
+		page_pool_put_page(rxq->page_pool, page, sync, true);
+		break;
+	}
+
+	return ret;
+}
+
 /* During a receive, the bd_rx.cur points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -1520,6 +1587,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	u16 vlan_tag;
 	int index = 0;
 	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
+	struct xdp_buff xdp;
 	struct page *page;
 
 #ifdef CONFIG_M532x
@@ -1531,6 +1601,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	 * These get messed up if we get called due to a busy condition.
 	 */
 	bdp = rxq->bd.cur;
+	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
 
 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
@@ -1580,6 +1651,20 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		prefetch(page_address(page));
 		fec_enet_update_cbd(rxq, bdp, index);
 
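+		/* Run the attached XDP program before an skb is built; any
+		 * verdict other than PASS consumes the page (drop/redirect).
+		 */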
+		if (xdp_prog) {
+			xdp_buff_clear_frags_flag(&xdp);
+			xdp_prepare_buff(&xdp, page_address(page),
+					 FEC_ENET_XDP_HEADROOM, pkt_len, false);
+
+			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+			xdp_result |= ret;
+			if (ret != FEC_ENET_XDP_PASS)
+				goto rx_processing_done;
+		}
+
 		/* The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
@@ -1675,6 +1760,10 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		writel(0, rxq->bd.reg_desc_active);
 	}
 	rxq->bd.cur = bdp;
+
+	if (xdp_result & FEC_ENET_XDP_REDIR)
+		xdp_do_flush_map();
+
 	return pkt_received;
 }
 
@@ -3518,6 +3607,157 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
 }
 
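+/* .ndo_bpf handler: install or remove an XDP program. NAPI and the Tx
+ * queues are quiesced around the program swap so no packet can observe
+ * a half-installed program.
+ */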
+static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	bool is_run = netif_running(dev);
+	struct bpf_prog *old_prog;
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		if (is_run) {
+			napi_disable(&fep->napi);
+			netif_tx_disable(dev);
+		}
+
+		old_prog = xchg(&fep->xdp_prog, bpf->prog);
+		fec_restart(dev);
+
+		if (is_run) {
+			napi_enable(&fep->napi);
+			netif_tx_start_all_queues(dev);
+		}
+
+		if (old_prog)
+			bpf_prog_put(old_prog);
+
+		return 0;
+
+	case XDP_SETUP_XSK_POOL:
+		return -EOPNOTSUPP;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int cpu)
+{
+	if (unlikely(cpu < 0))
+		return 0;
+
+	/* Simple modulo mapping of the CPU to a Tx queue */
+	return cpu % fep->num_tx_queues;
+}
+
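+/* Enqueue a single xdp_frame on @txq. Called with the netdev Tx queue
+ * lock held. Returns 0 on success or a negative errno when no Tx
+ * descriptor or DMA mapping is available; the frame is not freed here.
+ */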
+static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
+				   struct fec_enet_priv_tx_q *txq,
+				   struct xdp_frame *frame)
+{
+	unsigned int index, status, estatus;
+	struct bufdesc *bdp, *last_bdp;
+	dma_addr_t dma_addr;
+	int entries_free;
+
+	entries_free = fec_enet_get_free_txdesc_num(txq);
+	if (entries_free < MAX_SKB_FRAGS + 1) {
+		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+		return -EBUSY;
+	}
+
+	/* Fill in a Tx ring entry */
+	bdp = txq->bd.cur;
+	last_bdp = bdp;
+	status = fec16_to_cpu(bdp->cbd_sc);
+	status &= ~BD_ENET_TX_STATS;
+
+	dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
+				  frame->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+		return -ENOMEM;
+
+	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+	if (fep->bufdesc_ex)
+		estatus = BD_ENET_TX_INT;
+
+	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
+	bdp->cbd_datlen = cpu_to_fec16(frame->len);
+
+	if (fep->bufdesc_ex) {
+		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+		if (fep->quirks & FEC_QUIRK_HAS_AVB)
+			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+		ebdp->cbd_bdu = 0;
+		ebdp->cbd_esc = cpu_to_fec32(estatus);
+	}
+
+	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
+	txq->tx_skbuff[index] = NULL;
+
+	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+	bdp->cbd_sc = cpu_to_fec16(status);
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+
+	txq->bd.cur = bdp;
+
+	return 0;
+}
+
+static int fec_enet_xdp_xmit(struct net_device *dev,
+			     int num_frames,
+			     struct xdp_frame **frames,
+			     u32 flags)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_priv_tx_q *txq;
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	unsigned int queue;
+	int sent_frames = 0;
+	int i;
+
+	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(fep->netdev, queue);
+
+	__netif_tx_lock(nq, cpu);
+
+	/* Stop at the first frame that cannot be queued; the core frees
+	 * any frames we do not report as sent.
+	 */
+	for (i = 0; i < num_frames; i++) {
+		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+			break;
+		sent_frames++;
+	}
+
+	/* Make sure the updates to bdp and tx_skbuff are performed. */
+	wmb();
+
+	/* Trigger transmission start */
+	writel(0, txq->bd.reg_desc_active);
+
+	__netif_tx_unlock(nq);
+
+	return sent_frames;
+}
+
 static const struct net_device_ops fec_netdev_ops = {
 	.ndo_open		= fec_enet_open,
 	.ndo_stop		= fec_enet_close,
@@ -3532,6 +3772,8 @@ static const struct net_device_ops fec_netdev_ops = {
 	.ndo_poll_controller	= fec_poll_controller,
 #endif
 	.ndo_set_features	= fec_set_features,
+	.ndo_bpf		= fec_enet_bpf,
+	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
 };
 
 static const unsigned short offset_des_active_rxq[] = {