Skip to content

Commit dedd53c

Browse files
Paolo Abeni authored and davem330 committed
veth: factor out initialization helper
Extract in simpler helpers the code to enable and disable a range of xdp/napi instance, with the common property that "disable" helpers can't fail. Will be used by the next patch. No functional change intended. Signed-off-by: Paolo Abeni <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent f7918b7 commit dedd53c

File tree

1 file changed

+92
-49
lines changed

1 file changed

+92
-49
lines changed

drivers/net/veth.c

Lines changed: 92 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -926,40 +926,46 @@ static int veth_poll(struct napi_struct *napi, int budget)
926926
return done;
927927
}
928928

929-
static int __veth_napi_enable(struct net_device *dev)
929+
static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
930930
{
931931
struct veth_priv *priv = netdev_priv(dev);
932932
int err, i;
933933

934-
for (i = 0; i < dev->real_num_rx_queues; i++) {
934+
for (i = start; i < end; i++) {
935935
struct veth_rq *rq = &priv->rq[i];
936936

937937
err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
938938
if (err)
939939
goto err_xdp_ring;
940940
}
941941

942-
for (i = 0; i < dev->real_num_rx_queues; i++) {
942+
for (i = start; i < end; i++) {
943943
struct veth_rq *rq = &priv->rq[i];
944944

945945
napi_enable(&rq->xdp_napi);
946946
rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
947947
}
948948

949949
return 0;
950+
950951
err_xdp_ring:
951-
for (i--; i >= 0; i--)
952+
for (i--; i >= start; i--)
952953
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
953954

954955
return err;
955956
}
956957

957-
static void veth_napi_del(struct net_device *dev)
958+
static int __veth_napi_enable(struct net_device *dev)
959+
{
960+
return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
961+
}
962+
963+
static void veth_napi_del_range(struct net_device *dev, int start, int end)
958964
{
959965
struct veth_priv *priv = netdev_priv(dev);
960966
int i;
961967

962-
for (i = 0; i < dev->real_num_rx_queues; i++) {
968+
for (i = start; i < end; i++) {
963969
struct veth_rq *rq = &priv->rq[i];
964970

965971
rcu_assign_pointer(priv->rq[i].napi, NULL);
@@ -968,49 +974,98 @@ static void veth_napi_del(struct net_device *dev)
968974
}
969975
synchronize_net();
970976

971-
for (i = 0; i < dev->real_num_rx_queues; i++) {
977+
for (i = start; i < end; i++) {
972978
struct veth_rq *rq = &priv->rq[i];
973979

974980
rq->rx_notify_masked = false;
975981
ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
976982
}
977983
}
978984

985+
static void veth_napi_del(struct net_device *dev)
986+
{
987+
veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
988+
}
989+
979990
static bool veth_gro_requested(const struct net_device *dev)
980991
{
981992
return !!(dev->wanted_features & NETIF_F_GRO);
982993
}
983994

984-
static int veth_enable_xdp(struct net_device *dev)
995+
static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
996+
bool napi_already_on)
985997
{
986-
bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
987998
struct veth_priv *priv = netdev_priv(dev);
988999
int err, i;
9891000

990-
if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
991-
for (i = 0; i < dev->real_num_rx_queues; i++) {
992-
struct veth_rq *rq = &priv->rq[i];
1001+
for (i = start; i < end; i++) {
1002+
struct veth_rq *rq = &priv->rq[i];
9931003

994-
if (!napi_already_on)
995-
netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
996-
err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
997-
if (err < 0)
998-
goto err_rxq_reg;
1004+
if (!napi_already_on)
1005+
netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
1006+
err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
1007+
if (err < 0)
1008+
goto err_rxq_reg;
9991009

1000-
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1001-
MEM_TYPE_PAGE_SHARED,
1002-
NULL);
1003-
if (err < 0)
1004-
goto err_reg_mem;
1010+
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1011+
MEM_TYPE_PAGE_SHARED,
1012+
NULL);
1013+
if (err < 0)
1014+
goto err_reg_mem;
10051015

1006-
/* Save original mem info as it can be overwritten */
1007-
rq->xdp_mem = rq->xdp_rxq.mem;
1008-
}
1016+
/* Save original mem info as it can be overwritten */
1017+
rq->xdp_mem = rq->xdp_rxq.mem;
1018+
}
1019+
return 0;
1020+
1021+
err_reg_mem:
1022+
xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1023+
err_rxq_reg:
1024+
for (i--; i >= start; i--) {
1025+
struct veth_rq *rq = &priv->rq[i];
1026+
1027+
xdp_rxq_info_unreg(&rq->xdp_rxq);
1028+
if (!napi_already_on)
1029+
netif_napi_del(&rq->xdp_napi);
1030+
}
1031+
1032+
return err;
1033+
}
1034+
1035+
static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
1036+
bool delete_napi)
1037+
{
1038+
struct veth_priv *priv = netdev_priv(dev);
1039+
int i;
1040+
1041+
for (i = start; i < end; i++) {
1042+
struct veth_rq *rq = &priv->rq[i];
1043+
1044+
rq->xdp_rxq.mem = rq->xdp_mem;
1045+
xdp_rxq_info_unreg(&rq->xdp_rxq);
1046+
1047+
if (delete_napi)
1048+
netif_napi_del(&rq->xdp_napi);
1049+
}
1050+
}
1051+
1052+
static int veth_enable_xdp(struct net_device *dev)
1053+
{
1054+
bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
1055+
struct veth_priv *priv = netdev_priv(dev);
1056+
int err, i;
1057+
1058+
if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
1059+
err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
1060+
if (err)
1061+
return err;
10091062

10101063
if (!napi_already_on) {
10111064
err = __veth_napi_enable(dev);
1012-
if (err)
1013-
goto err_rxq_reg;
1065+
if (err) {
1066+
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
1067+
return err;
1068+
}
10141069

10151070
if (!veth_gro_requested(dev)) {
10161071
/* user-space did not require GRO, but adding XDP
@@ -1028,18 +1083,6 @@ static int veth_enable_xdp(struct net_device *dev)
10281083
}
10291084

10301085
return 0;
1031-
err_reg_mem:
1032-
xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1033-
err_rxq_reg:
1034-
for (i--; i >= 0; i--) {
1035-
struct veth_rq *rq = &priv->rq[i];
1036-
1037-
xdp_rxq_info_unreg(&rq->xdp_rxq);
1038-
if (!napi_already_on)
1039-
netif_napi_del(&rq->xdp_napi);
1040-
}
1041-
1042-
return err;
10431086
}
10441087

10451088
static void veth_disable_xdp(struct net_device *dev)
@@ -1062,28 +1105,23 @@ static void veth_disable_xdp(struct net_device *dev)
10621105
}
10631106
}
10641107

1065-
for (i = 0; i < dev->real_num_rx_queues; i++) {
1066-
struct veth_rq *rq = &priv->rq[i];
1067-
1068-
rq->xdp_rxq.mem = rq->xdp_mem;
1069-
xdp_rxq_info_unreg(&rq->xdp_rxq);
1070-
}
1108+
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
10711109
}
10721110

1073-
static int veth_napi_enable(struct net_device *dev)
1111+
static int veth_napi_enable_range(struct net_device *dev, int start, int end)
10741112
{
10751113
struct veth_priv *priv = netdev_priv(dev);
10761114
int err, i;
10771115

1078-
for (i = 0; i < dev->real_num_rx_queues; i++) {
1116+
for (i = start; i < end; i++) {
10791117
struct veth_rq *rq = &priv->rq[i];
10801118

10811119
netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
10821120
}
10831121

1084-
err = __veth_napi_enable(dev);
1122+
err = __veth_napi_enable_range(dev, start, end);
10851123
if (err) {
1086-
for (i = 0; i < dev->real_num_rx_queues; i++) {
1124+
for (i = start; i < end; i++) {
10871125
struct veth_rq *rq = &priv->rq[i];
10881126

10891127
netif_napi_del(&rq->xdp_napi);
@@ -1093,6 +1131,11 @@ static int veth_napi_enable(struct net_device *dev)
10931131
return err;
10941132
}
10951133

1134+
static int veth_napi_enable(struct net_device *dev)
1135+
{
1136+
return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1137+
}
1138+
10961139
static int veth_open(struct net_device *dev)
10971140
{
10981141
struct veth_priv *priv = netdev_priv(dev);

0 commit comments

Comments (0)