Skip to content

Commit b09522b

Browse files
bjototsipa
authored and committed
From: Björn Töpel <[email protected]>
Make the AF_XDP zero-copy path aware that the reason for redirect failure was due to full Rx queue. If so, exit the napi loop as soon as possible (exit the softirq processing), so that the userspace AF_XDP process can hopefully empty the Rx queue. This mainly helps the "one core scenario", where the userland process and Rx softirq processing is on the same core. Note that the early exit can only be performed if the "need wakeup" feature is enabled, because otherwise there is no notification mechanism available from the kernel side. This requires that the driver starts using the newly introduced xdp_do_redirect_ext() and xsk_do_redirect_rx_full() functions. Signed-off-by: Björn Töpel <[email protected]> --- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 23 ++++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-)
1 parent b5b5e14 commit b09522b

File tree

1 file changed

+16
-7
lines changed

1 file changed

+16
-7
lines changed

drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -93,9 +93,11 @@ int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
9393

9494
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
9595
struct ixgbe_ring *rx_ring,
96-
struct xdp_buff *xdp)
96+
struct xdp_buff *xdp,
97+
bool *early_exit)
9798
{
9899
int err, result = IXGBE_XDP_PASS;
100+
enum bpf_map_type map_type;
99101
struct bpf_prog *xdp_prog;
100102
struct xdp_frame *xdpf;
101103
u32 act;
@@ -116,8 +118,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
116118
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
117119
break;
118120
case XDP_REDIRECT:
119-
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
120-
result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
121+
err = xdp_do_redirect_ext(rx_ring->netdev, xdp, xdp_prog, &map_type);
122+
if (err) {
123+
*early_exit = xsk_do_redirect_rx_full(err, map_type);
124+
result = IXGBE_XDP_CONSUMED;
125+
} else {
126+
result = IXGBE_XDP_REDIR;
127+
}
121128
break;
122129
default:
123130
bpf_warn_invalid_xdp_action(act);
@@ -235,8 +242,8 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
235242
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
236243
struct ixgbe_adapter *adapter = q_vector->adapter;
237244
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
245+
bool early_exit = false, failure = false;
238246
unsigned int xdp_res, xdp_xmit = 0;
239-
bool failure = false;
240247
struct sk_buff *skb;
241248

242249
while (likely(total_rx_packets < budget)) {
@@ -288,7 +295,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
288295

289296
bi->xdp->data_end = bi->xdp->data + size;
290297
xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
291-
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
298+
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp, &early_exit);
292299

293300
if (xdp_res) {
294301
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
@@ -302,6 +309,8 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
302309

303310
cleaned_count++;
304311
ixgbe_inc_ntc(rx_ring);
312+
if (early_exit)
313+
break;
305314
continue;
306315
}
307316

@@ -346,12 +355,12 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
346355
q_vector->rx.total_bytes += total_rx_bytes;
347356

348357
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
349-
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
358+
if (early_exit || failure || rx_ring->next_to_clean == rx_ring->next_to_use)
350359
xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
351360
else
352361
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
353362

354-
return (int)total_rx_packets;
363+
return early_exit ? 0 : (int)total_rx_packets;
355364
}
356365
return failure ? budget : (int)total_rx_packets;
357366
}

0 commit comments

Comments (0)