@@ -78,6 +78,8 @@ struct taprio_sched {
 	struct sched_gate_list __rcu *admin_sched;
 	struct hrtimer advance_timer;
 	struct list_head taprio_list;
+	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
+	u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
 	u32 txtime_delay;
 };
 
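Aside (not part of the patch): two arrays are kept deliberately. max_sdu[] records the value exactly as the user configured it, with 0 meaning "no limit", which is what dump and offload must report back; max_frm_len[] caches the same limit converted to a full frame length, so the enqueue fast path can compare it against skb->len with a single lookup.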
@@ -415,6 +417,9 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 			      struct Qdisc *child, struct sk_buff **to_free)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	int prio = skb->priority;
+	u8 tc;
 
 	/* sk_flags are only safe to use on full sockets. */
 	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
@@ -426,6 +431,11 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 		return qdisc_drop(skb, sch, to_free);
 	}
 
+	/* Devices with full offload are expected to honor this in hardware */
+	tc = netdev_get_prio_tc_map(dev, prio);
+	if (skb->len > q->max_frm_len[tc])
+		return qdisc_drop(skb, sch, to_free);
+
 	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 
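Illustration (not part of the patch): the new fast-path decision in isolation. netdev_get_prio_tc_map() just indexes dev->prio_tc_map with the skb priority, so the per-packet cost is two array loads and a compare. The helper name taprio_skb_oversized below is hypothetical:

static bool taprio_skb_oversized(const struct taprio_sched *q,
				 const struct net_device *dev,
				 const struct sk_buff *skb)
{
	/* Map skb->priority to its traffic class, then compare the frame
	 * length against the precomputed per-TC limit; TCs without a
	 * configured queueMaxSDU hold U32_MAX and never match.
	 */
	u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

	return skb->len > q->max_frm_len[tc];
}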
@@ -754,6 +764,11 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
 	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]	   = { .type = NLA_U32 },
 };
 
+static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
+	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = { .type = NLA_U32 },
+	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
+};
+
 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
 		.len = sizeof(struct tc_mqprio_qopt)
@@ -766,6 +781,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
 	[TCA_TAPRIO_ATTR_FLAGS]			     = { .type = NLA_U32 },
 	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
+	[TCA_TAPRIO_ATTR_TC_ENTRY]		     = { .type = NLA_NESTED },
 };
 
 static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
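Illustration (reconstructed from the two policies above): TCA_TAPRIO_ATTR_TC_ENTRY is a nested attribute inside TCA_OPTIONS that may be repeated, once per traffic class:

TCA_OPTIONS
  TCA_TAPRIO_ATTR_TC_ENTRY      (NLA_NESTED, repeatable)
    TCA_TAPRIO_TC_ENTRY_INDEX   (NLA_U32, mandatory, the TC number)
    TCA_TAPRIO_TC_ENTRY_MAX_SDU (NLA_U32, optional, 0 = no limit)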
@@ -1216,14 +1232,28 @@ static int taprio_enable_offload(struct net_device *dev,
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct tc_taprio_qopt_offload *offload;
-	int err = 0;
+	struct tc_taprio_caps caps;
+	int tc, err = 0;
 
 	if (!ops->ndo_setup_tc) {
 		NL_SET_ERR_MSG(extack,
 			       "Device does not support taprio offload");
 		return -EOPNOTSUPP;
 	}
 
+	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
+				 &caps, sizeof(caps));
+
+	if (!caps.supports_queue_max_sdu) {
+		for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+			if (q->max_sdu[tc]) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "Device does not handle queueMaxSDU");
+				return -EOPNOTSUPP;
+			}
+		}
+	}
+
 	offload = taprio_offload_alloc(sched->num_entries);
 	if (!offload) {
 		NL_SET_ERR_MSG(extack,
@@ -1233,6 +1263,9 @@ static int taprio_enable_offload(struct net_device *dev,
 	offload->enable = 1;
 	taprio_sched_to_offload(dev, sched, offload);
 
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+		offload->max_sdu[tc] = q->max_sdu[tc];
+
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
 	if (err < 0) {
 		NL_SET_ERR_MSG(extack,
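Driver-side sketch (for illustration: the driver name foo is hypothetical; the TC_QUERY_CAPS hook and struct tc_query_caps_base come from the capability-query mechanism that qdisc_offload_query_caps() relies on). A driver whose hardware can enforce per-queue max SDU would report it roughly like this:

static int foo_tc_query_caps(struct tc_query_caps_base *base)
{
	switch (base->type) {
	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		/* Hardware enforces queueMaxSDU, so taprio will pass
		 * the per-TC limits through offload->max_sdu[].
		 */
		caps->supports_queue_max_sdu = true;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			void *type_data)
{
	switch (type) {
	case TC_QUERY_CAPS:
		return foo_tc_query_caps(type_data);
	/* ... TC_SETUP_QDISC_TAPRIO and other offloads ... */
	default:
		return -EOPNOTSUPP;
	}
}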
@@ -1367,6 +1400,89 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
 	return err;
 }
 
+static int taprio_parse_tc_entry(struct Qdisc *sch,
+				 struct nlattr *opt,
+				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
+				 unsigned long *seen_tcs,
+				 struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
+	struct net_device *dev = qdisc_dev(sch);
+	u32 val = 0;
+	int err, tc;
+
+	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
+			       taprio_tc_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
+		NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
+		return -EINVAL;
+	}
+
+	tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
+	if (tc >= TC_QOPT_MAX_QUEUE) {
+		NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
+		return -ERANGE;
+	}
+
+	if (*seen_tcs & BIT(tc)) {
+		NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
+		return -EINVAL;
+	}
+
+	*seen_tcs |= BIT(tc);
+
+	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
+		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
+
+	if (val > dev->max_mtu) {
+		NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
+		return -ERANGE;
+	}
+
+	max_sdu[tc] = val;
+
+	return 0;
+}
+
+static int taprio_parse_tc_entries(struct Qdisc *sch,
+				   struct nlattr *opt,
+				   struct netlink_ext_ack *extack)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	u32 max_sdu[TC_QOPT_MAX_QUEUE];
+	unsigned long seen_tcs = 0;
+	struct nlattr *n;
+	int tc, rem;
+	int err = 0;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		max_sdu[tc] = q->max_sdu[tc];
+
+	nla_for_each_nested(n, opt, rem) {
+		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
+			continue;
+
+		err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack);
+		if (err)
+			goto out;
+	}
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+		q->max_sdu[tc] = max_sdu[tc];
+		if (max_sdu[tc])
+			q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len;
+		else
+			q->max_frm_len[tc] = U32_MAX; /* never oversized */
+	}
+
+out:
+	return err;
+}
+
 static int taprio_mqprio_cmp(const struct net_device *dev,
 			     const struct tc_mqprio_qopt *mqprio)
 {
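Worked example (assuming a plain Ethernet netdev, where dev->hard_header_len == ETH_HLEN == 14): configuring a queueMaxSDU of 200 on a TC yields max_frm_len = 214, so a 250-byte frame mapped to that TC is dropped at enqueue; a TC left at 0 keeps max_frm_len = U32_MAX and is never dropped on size.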
@@ -1445,6 +1561,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	if (err < 0)
 		return err;
 
+	err = taprio_parse_tc_entries(sch, opt, extack);
+	if (err)
+		return err;
+
 	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
 	if (!new_admin) {
 		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
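Note that taprio_parse_tc_entries() runs before the new admin schedule is allocated, so a malformed TC entry rejects the change early; it also writes the validated limits straight into q->max_sdu[] and q->max_frm_len[], making them visible to the enqueue fast path as soon as parsing succeeds.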
@@ -1825,6 +1945,33 @@ static int dump_schedule(struct sk_buff *msg,
 	return -1;
 }
 
+static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
+{
+	struct nlattr *n;
+	int tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+		n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
+		if (!n)
+			return -EMSGSIZE;
+
+		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
+			goto nla_put_failure;
+
+		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
+				q->max_sdu[tc]))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, n);
+	}
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, n);
+	return -EMSGSIZE;
+}
+
 static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
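Note that the dump loop above emits one nested TC_ENTRY per traffic class, including classes still at the default 0, so userspace always receives an explicit per-TC value.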
@@ -1863,6 +2010,9 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
 		goto options_error;
 
+	if (taprio_dump_tc_entries(q, skb))
+		goto options_error;
+
 	if (oper && dump_schedule(skb, oper))
 		goto options_error;
 
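Userspace note (an assumption about the companion iproute2 change): tc-taprio exposes this as a per-TC max-sdu list, e.g. "max-sdu 0 0 0 200 0 0 0 0" to cap TC 3 at 200 bytes while leaving the other classes unlimited; exact option naming may differ by iproute2 version.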