/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <[email protected]>
 */
|
|
13 | 14 |
|
14 | 15 | #include <stdio.h>
|
15 | 16 | #include <stdint.h>
|
| 17 | +#include <stdbool.h> |
16 | 18 | #include <linux/if_xdp.h>
|
17 | 19 |
|
18 | 20 | #include "libbpf.h"
|
19 |
| -#include "libbpf_util.h" |
20 | 21 |
|
21 | 22 | #ifdef __cplusplus
|
22 | 23 | extern "C" {
|
23 | 24 | #endif
|
24 | 25 |
|
/* Load-Acquire/Store-Release barriers used by the XDP socket
 * library.  The following macros should *NOT* be considered part of
 * the xsk.h API, and are subject to change at any time.
 *
 * LIBRARY INTERNAL
 */

/* Volatile access helpers: force exactly one load/store of the object
 * so the compiler cannot tear, fuse, or re-load the access.  The
 * argument is fully parenthesized before taking its address so that
 * lvalue expressions such as ring->producer or arr[i] expand safely.
 * __typeof__ (rather than typeof) keeps this header usable from
 * applications compiled with a strict -std=c11 (typeof is a GNU
 * extension that strict modes disable).
 */
#define __XSK_READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define __XSK_WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x)) = (v)

#if defined(__i386__) || defined(__x86_64__)
/* x86 is strongly ordered (TSO): plain loads and stores already have
 * acquire/release semantics, so only a compiler barrier is needed.
 * __asm__ is the reserved spelling that survives strict -std= modes.
 */
# define libbpf_smp_store_release(p, v)					\
	do {								\
		__asm__ volatile("" : : : "memory");			\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
# define libbpf_smp_load_acquire(p)					\
	({								\
		__typeof__(*p) ___p1 = __XSK_READ_ONCE(*p);		\
		__asm__ volatile("" : : : "memory");			\
		___p1;							\
	})
#elif defined(__aarch64__)
/* NOTE(review): the "%w" operand constraints make these macros
 * 32-bit only.  That matches the __u32 ring indices they are used
 * with in this library, but do not apply them to 64-bit objects.
 */
# define libbpf_smp_store_release(p, v)					\
		__asm__ volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
# define libbpf_smp_load_acquire(p)					\
	({								\
		__typeof__(*p) ___p1;					\
		__asm__ volatile ("ldar %w0, %1"			\
			      : "=r" (___p1) : "Q" (*p) : "memory");	\
		___p1;							\
	})
#elif defined(__riscv)
/* RISC-V: explicit fences ordered against the volatile access. */
# define libbpf_smp_store_release(p, v)					\
	do {								\
		__asm__ volatile ("fence rw,w" : : : "memory");		\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
# define libbpf_smp_load_acquire(p)					\
	({								\
		__typeof__(*p) ___p1 = __XSK_READ_ONCE(*p);		\
		__asm__ volatile ("fence r,rw" : : : "memory");		\
		___p1;							\
	})
#endif

/* Portable fallback for any other architecture: a full memory
 * barrier is stronger than required but always correct.
 */
#ifndef libbpf_smp_store_release
#define libbpf_smp_store_release(p, v)					\
	do {								\
		__sync_synchronize();					\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
#endif

#ifndef libbpf_smp_load_acquire
#define libbpf_smp_load_acquire(p)					\
	({								\
		__typeof__(*p) ___p1 = __XSK_READ_ONCE(*p);		\
		__sync_synchronize();					\
		___p1;							\
	})
#endif

/* LIBRARY INTERNAL -- END */
| 90 | + |
25 | 91 | /* Do not access these members directly. Use the functions below. */
|
26 | 92 | #define DEFINE_XSK_RING(name) \
|
27 | 93 | struct name { \
|
|
0 commit comments