@@ -67,6 +67,7 @@ enum sched_node_type {
67
67
SCHED_NODE_TYPE_TC_ARBITER_TSAR ,
68
68
SCHED_NODE_TYPE_RATE_LIMITER ,
69
69
SCHED_NODE_TYPE_VPORT_TC ,
70
+ SCHED_NODE_TYPE_VPORTS_TC_TSAR ,
70
71
};
71
72
72
73
static const char * const sched_node_type_str [] = {
@@ -75,6 +76,7 @@ static const char * const sched_node_type_str[] = {
75
76
[SCHED_NODE_TYPE_TC_ARBITER_TSAR ] = "TC Arbiter TSAR" ,
76
77
[SCHED_NODE_TYPE_RATE_LIMITER ] = "Rate Limiter" ,
77
78
[SCHED_NODE_TYPE_VPORT_TC ] = "vport TC" ,
79
+ [SCHED_NODE_TYPE_VPORTS_TC_TSAR ] = "vports TC TSAR" ,
78
80
};
79
81
80
82
struct mlx5_esw_sched_node {
@@ -159,6 +161,11 @@ mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport)
159
161
static void esw_qos_sched_elem_warn (struct mlx5_esw_sched_node * node , int err , const char * op )
160
162
{
161
163
switch (node -> type ) {
164
+ case SCHED_NODE_TYPE_VPORTS_TC_TSAR :
165
+ esw_warn (node -> esw -> dev ,
166
+ "E-Switch %s %s scheduling element failed (tc=%d,err=%d)\n" ,
167
+ op , sched_node_type_str [node -> type ], node -> tc , err );
168
+ break ;
162
169
case SCHED_NODE_TYPE_VPORT_TC :
163
170
esw_warn (node -> esw -> dev ,
164
171
"E-Switch %s %s scheduling element failed (vport=%d,tc=%d,err=%d)\n" ,
@@ -344,7 +351,11 @@ static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw,
344
351
if (node -> esw != esw || node -> ix == esw -> qos .root_tsar_ix )
345
352
continue ;
346
353
347
- esw_qos_update_sched_node_bw_share (node , divider , extack );
354
+ /* Vports TC TSARs don't have a minimum rate configured,
355
+ * so there's no need to update the bw_share on them.
356
+ */
357
+ if (node -> type != SCHED_NODE_TYPE_VPORTS_TC_TSAR )
358
+ esw_qos_update_sched_node_bw_share (node , divider , extack );
348
359
349
360
if (list_empty (& node -> children ))
350
361
continue ;
@@ -476,6 +487,129 @@ static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlin
476
487
__esw_qos_free_node (node );
477
488
}
478
489
490
+ static int esw_qos_create_vports_tc_node (struct mlx5_esw_sched_node * parent , u8 tc ,
491
+ struct netlink_ext_ack * extack )
492
+ {
493
+ u32 tsar_ctx [MLX5_ST_SZ_DW (scheduling_context )] = {};
494
+ struct mlx5_core_dev * dev = parent -> esw -> dev ;
495
+ struct mlx5_esw_sched_node * vports_tc_node ;
496
+ void * attr ;
497
+ int err ;
498
+
499
+ if (!mlx5_qos_element_type_supported (dev ,
500
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR ,
501
+ SCHEDULING_HIERARCHY_E_SWITCH ) ||
502
+ !mlx5_qos_tsar_type_supported (dev ,
503
+ TSAR_ELEMENT_TSAR_TYPE_DWRR ,
504
+ SCHEDULING_HIERARCHY_E_SWITCH ))
505
+ return - EOPNOTSUPP ;
506
+
507
+ vports_tc_node = __esw_qos_alloc_node (parent -> esw , 0 , SCHED_NODE_TYPE_VPORTS_TC_TSAR ,
508
+ parent );
509
+ if (!vports_tc_node ) {
510
+ NL_SET_ERR_MSG_MOD (extack , "E-Switch alloc node failed" );
511
+ esw_warn (dev , "Failed to alloc vports TC node (tc=%d)\n" , tc );
512
+ return - ENOMEM ;
513
+ }
514
+
515
+ attr = MLX5_ADDR_OF (scheduling_context , tsar_ctx , element_attributes );
516
+ MLX5_SET (tsar_element , attr , tsar_type , TSAR_ELEMENT_TSAR_TYPE_DWRR );
517
+ MLX5_SET (tsar_element , attr , traffic_class , tc );
518
+ MLX5_SET (scheduling_context , tsar_ctx , parent_element_id , parent -> ix );
519
+ MLX5_SET (scheduling_context , tsar_ctx , element_type , SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR );
520
+
521
+ err = esw_qos_node_create_sched_element (vports_tc_node , tsar_ctx , extack );
522
+ if (err )
523
+ goto err_create_sched_element ;
524
+
525
+ vports_tc_node -> tc = tc ;
526
+
527
+ return 0 ;
528
+
529
+ err_create_sched_element :
530
+ __esw_qos_free_node (vports_tc_node );
531
+ return err ;
532
+ }
533
+
534
+ static void
535
+ esw_qos_tc_arbiter_get_bw_shares (struct mlx5_esw_sched_node * tc_arbiter_node , u32 * tc_bw )
536
+ {
537
+ struct mlx5_esw_sched_node * vports_tc_node ;
538
+
539
+ list_for_each_entry (vports_tc_node , & tc_arbiter_node -> children , entry )
540
+ tc_bw [vports_tc_node -> tc ] = vports_tc_node -> bw_share ;
541
+ }
542
+
543
+ static void esw_qos_set_tc_arbiter_bw_shares (struct mlx5_esw_sched_node * tc_arbiter_node ,
544
+ u32 * tc_bw , struct netlink_ext_ack * extack )
545
+ {
546
+ struct mlx5_esw_sched_node * vports_tc_node ;
547
+
548
+ list_for_each_entry (vports_tc_node , & tc_arbiter_node -> children , entry ) {
549
+ u32 bw_share ;
550
+ u8 tc ;
551
+
552
+ tc = vports_tc_node -> tc ;
553
+ bw_share = tc_bw [tc ] ?: MLX5_MIN_BW_SHARE ;
554
+ esw_qos_sched_elem_config (vports_tc_node , 0 , bw_share , extack );
555
+ }
556
+ }
557
+
558
+ static void esw_qos_destroy_vports_tc_nodes (struct mlx5_esw_sched_node * tc_arbiter_node ,
559
+ struct netlink_ext_ack * extack )
560
+ {
561
+ struct mlx5_esw_sched_node * vports_tc_node , * tmp ;
562
+
563
+ list_for_each_entry_safe (vports_tc_node , tmp , & tc_arbiter_node -> children , entry )
564
+ esw_qos_destroy_node (vports_tc_node , extack );
565
+ }
566
+
567
+ static int esw_qos_create_vports_tc_nodes (struct mlx5_esw_sched_node * tc_arbiter_node ,
568
+ struct netlink_ext_ack * extack )
569
+ {
570
+ struct mlx5_eswitch * esw = tc_arbiter_node -> esw ;
571
+ int err , i , num_tcs = esw_qos_num_tcs (esw -> dev );
572
+
573
+ for (i = 0 ; i < num_tcs ; i ++ ) {
574
+ err = esw_qos_create_vports_tc_node (tc_arbiter_node , i , extack );
575
+ if (err )
576
+ goto err_tc_node_create ;
577
+ }
578
+
579
+ return 0 ;
580
+
581
+ err_tc_node_create :
582
+ esw_qos_destroy_vports_tc_nodes (tc_arbiter_node , NULL );
583
+ return err ;
584
+ }
585
+
586
+ static int esw_qos_create_tc_arbiter_sched_elem (struct mlx5_esw_sched_node * tc_arbiter_node ,
587
+ struct netlink_ext_ack * extack )
588
+ {
589
+ u32 tsar_ctx [MLX5_ST_SZ_DW (scheduling_context )] = {};
590
+ u32 tsar_parent_ix ;
591
+ void * attr ;
592
+
593
+ if (!mlx5_qos_tsar_type_supported (tc_arbiter_node -> esw -> dev ,
594
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB ,
595
+ SCHEDULING_HIERARCHY_E_SWITCH )) {
596
+ NL_SET_ERR_MSG_MOD (extack ,
597
+ "E-Switch TC Arbiter scheduling element is not supported" );
598
+ return - EOPNOTSUPP ;
599
+ }
600
+
601
+ attr = MLX5_ADDR_OF (scheduling_context , tsar_ctx , element_attributes );
602
+ MLX5_SET (tsar_element , attr , tsar_type , TSAR_ELEMENT_TSAR_TYPE_TC_ARB );
603
+ tsar_parent_ix = tc_arbiter_node -> parent ? tc_arbiter_node -> parent -> ix :
604
+ tc_arbiter_node -> esw -> qos .root_tsar_ix ;
605
+ MLX5_SET (scheduling_context , tsar_ctx , parent_element_id , tsar_parent_ix );
606
+ MLX5_SET (scheduling_context , tsar_ctx , element_type , SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR );
607
+ MLX5_SET (scheduling_context , tsar_ctx , max_average_bw , tc_arbiter_node -> max_rate );
608
+ MLX5_SET (scheduling_context , tsar_ctx , bw_share , tc_arbiter_node -> bw_share );
609
+
610
+ return esw_qos_node_create_sched_element (tc_arbiter_node , tsar_ctx , extack );
611
+ }
612
+
479
613
static struct mlx5_esw_sched_node *
480
614
__esw_qos_create_vports_sched_node (struct mlx5_eswitch * esw , struct mlx5_esw_sched_node * parent ,
481
615
struct netlink_ext_ack * extack )
@@ -539,6 +673,9 @@ static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netl
539
673
{
540
674
struct mlx5_eswitch * esw = node -> esw ;
541
675
676
+ if (node -> type == SCHED_NODE_TYPE_TC_ARBITER_TSAR )
677
+ esw_qos_destroy_vports_tc_nodes (node , extack );
678
+
542
679
trace_mlx5_esw_node_qos_destroy (esw -> dev , node , node -> ix );
543
680
esw_qos_destroy_node (node , extack );
544
681
esw_qos_normalize_min_rate (esw , NULL , extack );
@@ -628,13 +765,38 @@ static void esw_qos_put(struct mlx5_eswitch *esw)
628
765
629
766
/* Tear down a TC arbiter: first remove every vports TC TSAR below it,
 * then destroy the arbiter's own scheduling element.
 */
static void esw_qos_tc_arbiter_scheduling_teardown(struct mlx5_esw_sched_node *node,
						   struct netlink_ext_ack *extack)
{
	esw_qos_destroy_vports_tc_nodes(node, extack);
	esw_qos_node_destroy_sched_element(node, extack);
}
632
774
633
775
static int esw_qos_tc_arbiter_scheduling_setup (struct mlx5_esw_sched_node * node ,
634
776
struct netlink_ext_ack * extack )
635
777
{
636
- NL_SET_ERR_MSG_MOD (extack , "TC arbiter elements are not supported." );
637
- return - EOPNOTSUPP ;
778
+ u32 curr_ix = node -> ix ;
779
+ int err ;
780
+
781
+ err = esw_qos_create_tc_arbiter_sched_elem (node , extack );
782
+ if (err )
783
+ return err ;
784
+ /* Initialize the vports TC nodes within created TC arbiter TSAR. */
785
+ err = esw_qos_create_vports_tc_nodes (node , extack );
786
+ if (err )
787
+ goto err_vports_tc_nodes ;
788
+
789
+ node -> type = SCHED_NODE_TYPE_TC_ARBITER_TSAR ;
790
+
791
+ return 0 ;
792
+
793
+ err_vports_tc_nodes :
794
+ /* If initialization fails, clean up the scheduling element
795
+ * for the TC arbiter node.
796
+ */
797
+ esw_qos_node_destroy_sched_element (node , NULL );
798
+ node -> ix = curr_ix ;
799
+ return err ;
638
800
}
639
801
640
802
static int esw_qos_create_vport_tc_sched_node (struct mlx5_vport * vport ,
@@ -965,6 +1127,7 @@ static int esw_qos_vport_update(struct mlx5_vport *vport, enum sched_node_type t
965
1127
{
966
1128
struct mlx5_esw_sched_node * curr_parent = vport -> qos .sched_node -> parent ;
967
1129
enum sched_node_type curr_type = vport -> qos .sched_node -> type ;
1130
+ u32 curr_tc_bw [IEEE_8021QAZ_MAX_TCS ] = {0 };
968
1131
int err ;
969
1132
970
1133
esw_assert_qos_lock_held (vport -> dev -> priv .eswitch );
@@ -976,11 +1139,19 @@ static int esw_qos_vport_update(struct mlx5_vport *vport, enum sched_node_type t
976
1139
if (err )
977
1140
return err ;
978
1141
1142
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type )
1143
+ esw_qos_tc_arbiter_get_bw_shares (vport -> qos .sched_node , curr_tc_bw );
1144
+
979
1145
esw_qos_vport_disable (vport , extack );
980
1146
981
1147
err = esw_qos_vport_enable (vport , type , parent , extack );
982
- if (err )
1148
+ if (err ) {
983
1149
esw_qos_vport_enable (vport , curr_type , curr_parent , NULL );
1150
+ extack = NULL ;
1151
+ }
1152
+
1153
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type )
1154
+ esw_qos_set_tc_arbiter_bw_shares (vport -> qos .sched_node , curr_tc_bw , extack );
984
1155
985
1156
return err ;
986
1157
}
@@ -1415,6 +1586,8 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf, void *p
1415
1586
} else {
1416
1587
err = esw_qos_vport_update (vport , SCHED_NODE_TYPE_TC_ARBITER_TSAR , NULL , extack );
1417
1588
}
1589
+ if (!err )
1590
+ esw_qos_set_tc_arbiter_bw_shares (vport_node , tc_bw , extack );
1418
1591
unlock :
1419
1592
esw_qos_unlock (esw );
1420
1593
return err ;
@@ -1441,6 +1614,8 @@ int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node, void *p
1441
1614
}
1442
1615
1443
1616
err = esw_qos_node_enable_tc_arbitration (node , extack );
1617
+ if (!err )
1618
+ esw_qos_set_tc_arbiter_bw_shares (node , tc_bw , extack );
1444
1619
unlock :
1445
1620
esw_qos_unlock (esw );
1446
1621
return err ;
0 commit comments