@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
4646 {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_50GE_RDMA ), 0 },
4747 {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_50GE_RDMA_MACSEC ), 0 },
4848 {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_100G_RDMA_MACSEC ), 0 },
49- /* Required last entry */
50- {0 , }
51- };
52-
53- static const struct pci_device_id roce_pci_tbl [] = {
54- {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_25GE_RDMA ), 0 },
55- {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_25GE_RDMA_MACSEC ), 0 },
56- {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_50GE_RDMA ), 0 },
57- {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_50GE_RDMA_MACSEC ), 0 },
58- {PCI_VDEVICE (HUAWEI , HNAE3_DEV_ID_100G_RDMA_MACSEC ), 0 },
59- /* Required last entry */
49+ /* required last entry */
6050 {0 , }
6151};
6252
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
894884 hdev -> num_tqps = __le16_to_cpu (req -> tqp_num );
895885 hdev -> pkt_buf_size = __le16_to_cpu (req -> buf_size ) << HCLGE_BUF_UNIT_S ;
896886
897- if (hnae_get_bit (hdev -> ae_dev -> flag , HNAE_DEV_SUPPORT_ROCE_B )) {
887+ if (hnae3_dev_roce_supported (hdev )) {
898888 hdev -> num_roce_msix =
899889 hnae_get_field (__le16_to_cpu (req -> pf_intr_vector_number ),
900890 HCLGE_PF_VEC_NUM_M , HCLGE_PF_VEC_NUM_S );
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
14541444 tc_num = hclge_get_tc_num (hdev );
14551445 pfc_enable_num = hclge_get_pfc_enalbe_num (hdev );
14561446
1457- shared_buf_min = 2 * hdev -> mps + HCLGE_DEFAULT_DV ;
1447+ if (hnae3_dev_dcb_supported (hdev ))
1448+ shared_buf_min = 2 * hdev -> mps + HCLGE_DEFAULT_DV ;
1449+ else
1450+ shared_buf_min = 2 * hdev -> mps + HCLGE_DEFAULT_NON_DCB_DV ;
1451+
14581452 shared_buf_tc = pfc_enable_num * hdev -> mps +
14591453 (tc_num - pfc_enable_num ) * hdev -> mps / 2 +
14601454 hdev -> mps ;
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
14951489 struct hclge_priv_buf * priv ;
14961490 int i ;
14971491
1492+ /* When DCB is not supported, rx private
1493+ * buffer is not allocated.
1494+ */
1495+ if (!hnae3_dev_dcb_supported (hdev )) {
1496+ if (!hclge_is_rx_buf_ok (hdev , rx_all ))
1497+ return - ENOMEM ;
1498+
1499+ return 0 ;
1500+ }
1501+
14981502 /* step 1, try to alloc private buffer for all enabled tc */
14991503 for (i = 0 ; i < HCLGE_MAX_TC_NUM ; i ++ ) {
15001504 priv = & hdev -> priv_buf [i ];
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
15101514 priv -> wl .high = 2 * hdev -> mps ;
15111515 priv -> buf_size = priv -> wl .high ;
15121516 }
1517+ } else {
1518+ priv -> enable = 0 ;
1519+ priv -> wl .low = 0 ;
1520+ priv -> wl .high = 0 ;
1521+ priv -> buf_size = 0 ;
15131522 }
15141523 }
15151524
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
15221531 for (i = 0 ; i < HCLGE_MAX_TC_NUM ; i ++ ) {
15231532 priv = & hdev -> priv_buf [i ];
15241533
1525- if (hdev -> hw_tc_map & BIT (i ))
1526- priv -> enable = 1 ;
1534+ priv -> enable = 0 ;
1535+ priv -> wl .low = 0 ;
1536+ priv -> wl .high = 0 ;
1537+ priv -> buf_size = 0 ;
1538+
1539+ if (!(hdev -> hw_tc_map & BIT (i )))
1540+ continue ;
1541+
1542+ priv -> enable = 1 ;
15271543
15281544 if (hdev -> tm_info .hw_pfc_map & BIT (i )) {
15291545 priv -> wl .low = 128 ;
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
16161632 cpu_to_le16 (true << HCLGE_TC0_PRI_BUF_EN_B );
16171633 }
16181634
1635+ req -> shared_buf =
1636+ cpu_to_le16 ((hdev -> s_buf .buf_size >> HCLGE_BUF_UNIT_S ) |
1637+ (1 << HCLGE_TC0_PRI_BUF_EN_B ));
1638+
16191639 ret = hclge_cmd_send (& hdev -> hw , & desc , 1 );
16201640 if (ret ) {
16211641 dev_err (& hdev -> pdev -> dev ,
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
17821802 return ret ;
17831803 }
17841804
1785- ret = hclge_rx_priv_wl_config (hdev );
1786- if (ret ) {
1787- dev_err (& hdev -> pdev -> dev ,
1788- "could not configure rx private waterline %d\n" , ret );
1789- return ret ;
1790- }
1805+ if (hnae3_dev_dcb_supported (hdev )) {
1806+ ret = hclge_rx_priv_wl_config (hdev );
1807+ if (ret ) {
1808+ dev_err (& hdev -> pdev -> dev ,
1809+ "could not configure rx private waterline %d\n" ,
1810+ ret );
1811+ return ret ;
1812+ }
17911813
1792- ret = hclge_common_thrd_config (hdev );
1793- if (ret ) {
1794- dev_err (& hdev -> pdev -> dev ,
1795- "could not configure common threshold %d\n" , ret );
1796- return ret ;
1814+ ret = hclge_common_thrd_config (hdev );
1815+ if (ret ) {
1816+ dev_err (& hdev -> pdev -> dev ,
1817+ "could not configure common threshold %d\n" ,
1818+ ret );
1819+ return ret ;
1820+ }
17971821 }
17981822
17991823 ret = hclge_common_wl_config (hdev );
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
25822606 u16 tc_valid [HCLGE_MAX_TC_NUM ];
25832607 u16 tc_size [HCLGE_MAX_TC_NUM ];
25842608 u32 * rss_indir = NULL ;
2609+ u16 rss_size = 0 , roundup_size ;
25852610 const u8 * key ;
25862611 int i , ret , j ;
25872612
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
25962621 for (j = 0 ; j < hdev -> num_vmdq_vport + 1 ; j ++ ) {
25972622 for (i = 0 ; i < HCLGE_RSS_IND_TBL_SIZE ; i ++ ) {
25982623 vport [j ].rss_indirection_tbl [i ] =
2599- i % hdev -> rss_size_max ;
2624+ i % vport [j ].alloc_rss_size ;
2625+
2626+ /* vport 0 is for PF */
2627+ if (j != 0 )
2628+ continue ;
2629+
2630+ rss_size = vport [j ].alloc_rss_size ;
26002631 rss_indir [i ] = vport [j ].rss_indirection_tbl [i ];
26012632 }
26022633 }
@@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
26132644 if (ret )
26142645 goto err ;
26152646
2647+ /* Each TC have the same queue size, and tc_size set to hardware is
2648+ * the log2 of roundup power of two of rss_size, the actual queue
2649+ * size is limited by indirection table.
2650+ */
2651+ if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0 ) {
2652+ dev_err (& hdev -> pdev -> dev ,
2653+ "Configure rss tc size failed, invalid TC_SIZE = %d\n" ,
2654+ rss_size );
2655+ return - EINVAL ;
2656+ }
2657+
2658+ roundup_size = roundup_pow_of_two (rss_size );
2659+ roundup_size = ilog2 (roundup_size );
2660+
26162661 for (i = 0 ; i < HCLGE_MAX_TC_NUM ; i ++ ) {
2617- if (hdev -> hw_tc_map & BIT (i ))
2618- tc_valid [i ] = 1 ;
2619- else
2620- tc_valid [i ] = 0 ;
2662+ tc_valid [i ] = 0 ;
26212663
2622- switch (hdev -> rss_size_max ) {
2623- case HCLGE_RSS_TC_SIZE_0 :
2624- tc_size [i ] = 0 ;
2625- break ;
2626- case HCLGE_RSS_TC_SIZE_1 :
2627- tc_size [i ] = 1 ;
2628- break ;
2629- case HCLGE_RSS_TC_SIZE_2 :
2630- tc_size [i ] = 2 ;
2631- break ;
2632- case HCLGE_RSS_TC_SIZE_3 :
2633- tc_size [i ] = 3 ;
2634- break ;
2635- case HCLGE_RSS_TC_SIZE_4 :
2636- tc_size [i ] = 4 ;
2637- break ;
2638- case HCLGE_RSS_TC_SIZE_5 :
2639- tc_size [i ] = 5 ;
2640- break ;
2641- case HCLGE_RSS_TC_SIZE_6 :
2642- tc_size [i ] = 6 ;
2643- break ;
2644- case HCLGE_RSS_TC_SIZE_7 :
2645- tc_size [i ] = 7 ;
2646- break ;
2647- default :
2648- break ;
2649- }
2650- tc_offset [i ] = hdev -> rss_size_max * i ;
2664+ if (!(hdev -> hw_tc_map & BIT (i )))
2665+ continue ;
2666+
2667+ tc_valid [i ] = 1 ;
2668+ tc_size [i ] = roundup_size ;
2669+ tc_offset [i ] = rss_size * i ;
26512670 }
2671+
26522672 ret = hclge_set_rss_tc_mode (hdev , tc_valid , tc_size , tc_offset );
26532673
26542674err :
@@ -3932,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
39323952 goto err ;
39333953
39343954 if (hdev -> roce_client &&
3935- hnae_get_bit (hdev -> ae_dev -> flag ,
3936- HNAE_DEV_SUPPORT_ROCE_B )) {
3955+ hnae3_dev_roce_supported (hdev )) {
39373956 struct hnae3_client * rc = hdev -> roce_client ;
39383957
39393958 ret = hclge_init_roce_base_info (vport );
@@ -3956,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
39563975
39573976 break ;
39583977 case HNAE3_CLIENT_ROCE :
3959- if (hnae_get_bit (hdev -> ae_dev -> flag ,
3960- HNAE_DEV_SUPPORT_ROCE_B )) {
3978+ if (hnae3_dev_roce_supported (hdev )) {
39613979 hdev -> roce_client = client ;
39623980 vport -> roce .client = client ;
39633981 }
@@ -4069,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
40694087static int hclge_init_ae_dev (struct hnae3_ae_dev * ae_dev )
40704088{
40714089 struct pci_dev * pdev = ae_dev -> pdev ;
4072- const struct pci_device_id * id ;
40734090 struct hclge_dev * hdev ;
40744091 int ret ;
40754092
@@ -4084,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
40844101 hdev -> ae_dev = ae_dev ;
40854102 ae_dev -> priv = hdev ;
40864103
4087- id = pci_match_id (roce_pci_tbl , ae_dev -> pdev );
4088- if (id )
4089- hnae_set_bit (ae_dev -> flag , HNAE_DEV_SUPPORT_ROCE_B , 1 );
4090-
40914104 ret = hclge_pci_init (hdev );
40924105 if (ret ) {
40934106 dev_err (& pdev -> dev , "PCI init failed\n" );
@@ -4150,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
41504163 return ret ;
41514164 }
41524165
4153- ret = hclge_rss_init_hw (hdev );
4154- if (ret ) {
4155- dev_err (& pdev -> dev , "Rss init fail, ret =%d\n" , ret );
4156- return ret ;
4157- }
4158-
41594166 ret = hclge_init_vlan_config (hdev );
41604167 if (ret ) {
41614168 dev_err (& pdev -> dev , "VLAN init fail, ret =%d\n" , ret );
@@ -4168,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
41684175 return ret ;
41694176 }
41704177
4178+ ret = hclge_rss_init_hw (hdev );
4179+ if (ret ) {
4180+ dev_err (& pdev -> dev , "Rss init fail, ret =%d\n" , ret );
4181+ return ret ;
4182+ }
4183+
41714184 setup_timer (& hdev -> service_timer , hclge_service_timer ,
41724185 (unsigned long )hdev );
41734186 INIT_WORK (& hdev -> service_task , hclge_service_task );
0 commit comments