@@ -761,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 unsigned int *offset,
 					 gfp_t gfp)
 {
-	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -774,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
-				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(dev, *mapping)) {
-		page_pool_recycle_direct(rxr->page_pool, page);
-		return NULL;
-	}
+	*mapping = page_pool_get_dma_addr(page) + *offset;
 	return page;
 }
 
@@ -998,8 +992,8 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
@@ -1032,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1149,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 			return 0;
 		}
 
-		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-				     bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
+		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+					bp->rx_dir);
 
 		total_frag_len += frag_len;
 		prod = NEXT_RX_AGG(prod);
@@ -2947,10 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
-			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping,
-					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
 			dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -2971,9 +2960,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
 		rx_agg_buf->page = NULL;
 		__clear_bit(i, rxr->rx_agg_bmap);
 
@@ -3205,7 +3191,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.nid = dev_to_node(&bp->pdev->dev);
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
-	pp.dma_dir = DMA_BIDIRECTIONAL;
+	pp.dma_dir = bp->rx_dir;
+	pp.max_len = PAGE_SIZE;
+	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
 		pp.flags |= PP_FLAG_PAGE_FRAG;
 
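Note: every hunk above follows from the one change in bnxt_alloc_rx_page_pool(). With PP_FLAG_DMA_MAP set, the page pool maps each page once at allocation and unmaps it only when the page leaves the pool; with PP_FLAG_DMA_SYNC_DEV, the pool syncs up to max_len bytes for the device whenever a page is recycled. That lets the driver drop all of its dma_map_page_attrs()/dma_unmap_page_attrs() calls and keep only a CPU-direction sync before reading a completed buffer. A minimal sketch of the pattern, using only page_pool/DMA APIs as they existed in this kernel tree; the names my_*, MY_POOL_SIZE and the fixed DMA_FROM_DEVICE direction are illustrative stand-ins for driver specifics:

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

#define MY_POOL_SIZE	1024	/* hypothetical ring depth */

/* Pool creation: the pool owns the mapping for each page's lifetime
 * and re-syncs max_len bytes for the device on every recycle.
 */
static struct page_pool *my_create_pool(struct device *dev,
					struct napi_struct *napi)
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = MY_POOL_SIZE;
	pp.nid = dev_to_node(dev);
	pp.dev = dev;
	pp.napi = napi;
	pp.dma_dir = DMA_FROM_DEVICE;
	pp.max_len = PAGE_SIZE;
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

/* RX refill: no dma_map_page_attrs() and no dma_mapping_error()
 * check; the mapping already exists and page_pool_get_dma_addr()
 * simply reads it back.
 */
static struct page *my_alloc_rx_buf(struct page_pool *pool,
				    dma_addr_t *mapping)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return NULL;
	*mapping = page_pool_get_dma_addr(page);
	return page;
}

/* RX completion: only a CPU-direction sync is needed before the
 * driver touches the data; the explicit unmap is gone entirely.
 */
static void my_rx_complete(struct device *dev, dma_addr_t mapping,
			   unsigned int len)
{
	dma_sync_single_for_cpu(dev, mapping, len, DMA_FROM_DEVICE);
	/* ... build the skb, or page_pool_recycle_direct() on error ... */
}

The device-direction sync on recycle is also what lets the teardown hunks in bnxt_free_one_rx_ring_skbs() simply drop their unmap calls: a page returned via page_pool_recycle_direct() stays mapped until the pool itself is destroyed.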