Commit c2cf73e

peilin-ye authored and Kernel Patches Daemon committed
selftests/bpf: Add selftests for load-acquire and store-release instructions
Add the following ./test_progs tests:

  * atomics/load_acquire
  * atomics/store_release
  * arena_atomics/load_acquire
  * arena_atomics/store_release

They depend on the pre-defined __BPF_FEATURE_LOAD_ACQ_STORE_REL feature
macro, which implies -mcpu>=v4.

  $ ALLOWLIST=atomics/load_acquire,atomics/store_release,
  $ ALLOWLIST+=arena_atomics/load_acquire,arena_atomics/store_release

  $ ./test_progs-cpuv4 -a $ALLOWLIST

  #3/9    arena_atomics/load_acquire:OK
  #3/10   arena_atomics/store_release:OK
  ...
  #10/8   atomics/load_acquire:OK
  #10/9   atomics/store_release:OK

  $ ./test_progs -v -a $ALLOWLIST

  test_load_acquire:SKIP:Clang does not support BPF load-acquire or addr_space_cast
  #3/9    arena_atomics/load_acquire:SKIP
  test_store_release:SKIP:Clang does not support BPF store-release or addr_space_cast
  #3/10   arena_atomics/store_release:SKIP
  ...
  test_load_acquire:SKIP:Clang does not support BPF load-acquire
  #10/8   atomics/load_acquire:SKIP
  test_store_release:SKIP:Clang does not support BPF store-release
  #10/9   atomics/store_release:SKIP

Additionally, add several ./test_verifier tests:

  #65/u atomic BPF_LOAD_ACQ access through non-pointer  OK
  #65/p atomic BPF_LOAD_ACQ access through non-pointer  OK
  #66/u atomic BPF_STORE_REL access through non-pointer  OK
  #66/p atomic BPF_STORE_REL access through non-pointer  OK

  #67/u BPF_ATOMIC load-acquire, 8-bit  OK
  #67/p BPF_ATOMIC load-acquire, 8-bit  OK
  #68/u BPF_ATOMIC load-acquire, 16-bit  OK
  #68/p BPF_ATOMIC load-acquire, 16-bit  OK
  #69/u BPF_ATOMIC load-acquire, 32-bit  OK
  #69/p BPF_ATOMIC load-acquire, 32-bit  OK
  #70/u BPF_ATOMIC load-acquire, 64-bit  OK
  #70/p BPF_ATOMIC load-acquire, 64-bit  OK
  #71/u Cannot load-acquire from uninitialized src_reg  OK
  #71/p Cannot load-acquire from uninitialized src_reg  OK

  #76/u BPF_ATOMIC store-release, 8-bit  OK
  #76/p BPF_ATOMIC store-release, 8-bit  OK
  #77/u BPF_ATOMIC store-release, 16-bit  OK
  #77/p BPF_ATOMIC store-release, 16-bit  OK
  #78/u BPF_ATOMIC store-release, 32-bit  OK
  #78/p BPF_ATOMIC store-release, 32-bit  OK
  #79/u BPF_ATOMIC store-release, 64-bit  OK
  #79/p BPF_ATOMIC store-release, 64-bit  OK
  #80/u Cannot store-release from uninitialized src_reg  OK
  #80/p Cannot store-release from uninitialized src_reg  OK

Reviewed-by: Josh Don <[email protected]>
Signed-off-by: Peilin Ye <[email protected]>
1 parent 14eb323 commit c2cf73e

File tree: 8 files changed, +393 -18 lines

include/linux/filter.h

Lines changed: 2 additions & 0 deletions
@@ -364,6 +364,8 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
  * BPF_XOR | BPF_FETCH	src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
  * BPF_XCHG		src_reg = atomic_xchg(dst_reg + off16, src_reg)
  * BPF_CMPXCHG		r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ * BPF_LOAD_ACQ		dst_reg = smp_load_acquire(src_reg + off16)
+ * BPF_STORE_REL	smp_store_release(dst_reg + off16, src_reg)
  */

 #define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
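
For orientation, a minimal sketch (not part of this commit) of how the two new operations could be emitted with the existing BPF_ATOMIC_OP() helper, assuming it accepts the new opcodes and following the operand convention documented in the comment above: the memory pointer lives in src_reg for BPF_LOAD_ACQ and in dst_reg for BPF_STORE_REL.

	struct bpf_insn insns[] = {
		/* r0 = smp_load_acquire((u64 *)(r1 + 0)) */
		BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0),
		/* smp_store_release((u64 *)(r1 + 0), r0) */
		BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_1, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};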

tools/testing/selftests/bpf/prog_tests/arena_atomics.c

Lines changed: 60 additions & 1 deletion
@@ -162,6 +162,60 @@ static void test_uaf(struct arena_atomics *skel)
 	ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
 }
 
+static void test_load_acquire(struct arena_atomics *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	if (skel->data->skip_lacq_srel_tests) {
+		printf("%s:SKIP:Clang does not support BPF load-acquire or addr_space_cast\n",
+		       __func__);
+		test__skip();
+		return;
+	}
+
+	/* No need to attach it, just run it directly */
+	prog_fd = bpf_program__fd(skel->progs.load_acquire);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+
+	ASSERT_EQ(skel->arena->load_acquire8_result, 0x12, "load_acquire8_result");
+	ASSERT_EQ(skel->arena->load_acquire16_result, 0x1234, "load_acquire16_result");
+	ASSERT_EQ(skel->arena->load_acquire32_result, 0x12345678, "load_acquire32_result");
+	ASSERT_EQ(skel->arena->load_acquire64_result, 0x1234567890abcdef,
+		  "load_acquire64_result");
+}
+
+static void test_store_release(struct arena_atomics *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	if (skel->data->skip_lacq_srel_tests) {
+		printf("%s:SKIP:Clang does not support BPF store-release or addr_space_cast\n",
+		       __func__);
+		test__skip();
+		return;
+	}
+
+	/* No need to attach it, just run it directly */
+	prog_fd = bpf_program__fd(skel->progs.store_release);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+
+	ASSERT_EQ(skel->arena->store_release8_result, 0x12, "store_release8_result");
+	ASSERT_EQ(skel->arena->store_release16_result, 0x1234, "store_release16_result");
+	ASSERT_EQ(skel->arena->store_release32_result, 0x12345678, "store_release32_result");
+	ASSERT_EQ(skel->arena->store_release64_result, 0x1234567890abcdef,
+		  "store_release64_result");
+}
+
 void test_arena_atomics(void)
 {
 	struct arena_atomics *skel;
@@ -171,7 +225,7 @@ void test_arena_atomics(void)
 	if (!ASSERT_OK_PTR(skel, "arena atomics skeleton open"))
 		return;
 
-	if (skel->data->skip_tests) {
+	if (skel->data->skip_all_tests) {
 		printf("%s:SKIP:no ENABLE_ATOMICS_TESTS or no addr_space_cast support in clang",
 		       __func__);
 		test__skip();
@@ -199,6 +253,11 @@ void test_arena_atomics(void)
 	if (test__start_subtest("uaf"))
 		test_uaf(skel);
 
+	if (test__start_subtest("load_acquire"))
+		test_load_acquire(skel);
+	if (test__start_subtest("store_release"))
+		test_store_release(skel);
+
 cleanup:
 	arena_atomics__destroy(skel);
 }
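
Outside the test harness, driving one of these programs reduces to a short libbpf sequence. A hedged sketch, assuming the usual bpftool-generated skeleton helper names (arena_atomics__open_and_load() and friends are not shown in this hunk) and that the zero-initialized pid global lands in .bss:

	#include <unistd.h>
	#include <bpf/libbpf.h>
	#include "arena_atomics.skel.h"

	static int run_load_acquire_once(void)
	{
		LIBBPF_OPTS(bpf_test_run_opts, topts);
		struct arena_atomics *skel;
		int err;

		skel = arena_atomics__open_and_load();	/* assumed skeleton helper name */
		if (!skel)
			return -1;

		skel->bss->pid = getpid();	/* the BPF programs bail out for other PIDs */

		/* BPF_PROG_TEST_RUN executes the program without attaching it */
		err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.load_acquire), &topts);

		arena_atomics__destroy(skel);
		return err ?: (int)topts.retval;
	}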

tools/testing/selftests/bpf/prog_tests/atomics.c

Lines changed: 56 additions & 1 deletion
@@ -162,6 +162,56 @@ static void test_xchg(struct atomics_lskel *skel)
 	ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
 }
 
+static void test_load_acquire(struct atomics_lskel *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	if (skel->data->skip_lacq_srel_tests) {
+		printf("%s:SKIP:Clang does not support BPF load-acquire\n", __func__);
+		test__skip();
+		return;
+	}
+
+	/* No need to attach it, just run it directly */
+	prog_fd = skel->progs.load_acquire.prog_fd;
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+
+	ASSERT_EQ(skel->bss->load_acquire8_result, 0x12, "load_acquire8_result");
+	ASSERT_EQ(skel->bss->load_acquire16_result, 0x1234, "load_acquire16_result");
+	ASSERT_EQ(skel->bss->load_acquire32_result, 0x12345678, "load_acquire32_result");
+	ASSERT_EQ(skel->bss->load_acquire64_result, 0x1234567890abcdef, "load_acquire64_result");
+}
+
+static void test_store_release(struct atomics_lskel *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	if (skel->data->skip_lacq_srel_tests) {
+		printf("%s:SKIP:Clang does not support BPF store-release\n", __func__);
+		test__skip();
+		return;
+	}
+
+	/* No need to attach it, just run it directly */
+	prog_fd = skel->progs.store_release.prog_fd;
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+
+	ASSERT_EQ(skel->bss->store_release8_result, 0x12, "store_release8_result");
+	ASSERT_EQ(skel->bss->store_release16_result, 0x1234, "store_release16_result");
+	ASSERT_EQ(skel->bss->store_release32_result, 0x12345678, "store_release32_result");
+	ASSERT_EQ(skel->bss->store_release64_result, 0x1234567890abcdef, "store_release64_result");
+}
+
 void test_atomics(void)
 {
 	struct atomics_lskel *skel;
@@ -170,7 +220,7 @@ void test_atomics(void)
 	if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
 		return;
 
-	if (skel->data->skip_tests) {
+	if (skel->data->skip_all_tests) {
 		printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
 		       __func__);
 		test__skip();
@@ -193,6 +243,11 @@ void test_atomics(void)
 	if (test__start_subtest("xchg"))
 		test_xchg(skel);
 
+	if (test__start_subtest("load_acquire"))
+		test_load_acquire(skel);
+	if (test__start_subtest("store_release"))
+		test_store_release(skel);
+
 cleanup:
 	atomics_lskel__destroy(skel);
 }
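
One small difference from the arena variant above: atomics.c is built as a light skeleton (atomics_lskel), so the test reads the program FD straight from the generated struct instead of going through bpf_program__fd(). Side by side (both lines appear in the hunks above):

	/* libbpf skeleton (arena_atomics.skel.h) */
	prog_fd = bpf_program__fd(skel->progs.load_acquire);

	/* light skeleton (atomics.lskel.h): the FD is a plain struct member */
	prog_fd = skel->progs.load_acquire.prog_fd;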

tools/testing/selftests/bpf/progs/arena_atomics.c

Lines changed: 60 additions & 2 deletions
@@ -19,9 +19,15 @@ struct {
 } arena SEC(".maps");
 
 #if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
-bool skip_tests __attribute((__section__(".data"))) = false;
+bool skip_all_tests __attribute((__section__(".data"))) = false;
 #else
-bool skip_tests = true;
+bool skip_all_tests = true;
+#endif
+
+#if defined(__BPF_FEATURE_LOAD_ACQ_STORE_REL) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_lacq_srel_tests = true;
 #endif
 
 __u32 pid = 0;
@@ -274,4 +280,56 @@ int uaf(const void *ctx)
 	return 0;
 }
 
+__u8 __arena_global load_acquire8_value = 0x12;
+__u16 __arena_global load_acquire16_value = 0x1234;
+__u32 __arena_global load_acquire32_value = 0x12345678;
+__u64 __arena_global load_acquire64_value = 0x1234567890abcdef;
+
+__u8 __arena_global load_acquire8_result = 0;
+__u16 __arena_global load_acquire16_result = 0;
+__u32 __arena_global load_acquire32_result = 0;
+__u64 __arena_global load_acquire64_result = 0;
+
+SEC("raw_tp/sys_enter")
+int load_acquire(const void *ctx)
+{
+	if (pid != (bpf_get_current_pid_tgid() >> 32))
+		return 0;
+
+#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
+	load_acquire8_result = __atomic_load_n(&load_acquire8_value, __ATOMIC_ACQUIRE);
+	load_acquire16_result = __atomic_load_n(&load_acquire16_value, __ATOMIC_ACQUIRE);
+	load_acquire32_result = __atomic_load_n(&load_acquire32_value, __ATOMIC_ACQUIRE);
+	load_acquire64_result = __atomic_load_n(&load_acquire64_value, __ATOMIC_ACQUIRE);
+#endif
+
+	return 0;
+}
+
+__u8 __arena_global store_release8_result = 0;
+__u16 __arena_global store_release16_result = 0;
+__u32 __arena_global store_release32_result = 0;
+__u64 __arena_global store_release64_result = 0;
+
+SEC("raw_tp/sys_enter")
+int store_release(const void *ctx)
+{
+	if (pid != (bpf_get_current_pid_tgid() >> 32))
+		return 0;
+
+#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
+	__u8 val8 = 0x12;
+	__u16 val16 = 0x1234;
+	__u32 val32 = 0x12345678;
+	__u64 val64 = 0x1234567890abcdef;
+
+	__atomic_store_n(&store_release8_result, val8, __ATOMIC_RELEASE);
+	__atomic_store_n(&store_release16_result, val16, __ATOMIC_RELEASE);
+	__atomic_store_n(&store_release32_result, val32, __ATOMIC_RELEASE);
+	__atomic_store_n(&store_release64_result, val64, __ATOMIC_RELEASE);
+#endif
+
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/progs/atomics.c

Lines changed: 60 additions & 2 deletions
@@ -5,9 +5,15 @@
 #include <stdbool.h>
 
 #ifdef ENABLE_ATOMICS_TESTS
-bool skip_tests __attribute((__section__(".data"))) = false;
+bool skip_all_tests __attribute((__section__(".data"))) = false;
 #else
-bool skip_tests = true;
+bool skip_all_tests = true;
+#endif
+
+#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
+bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_lacq_srel_tests = true;
 #endif
 
 __u32 pid = 0;
@@ -168,3 +174,55 @@ int xchg(const void *ctx)
 
 	return 0;
 }
+
+__u8 load_acquire8_value = 0x12;
+__u16 load_acquire16_value = 0x1234;
+__u32 load_acquire32_value = 0x12345678;
+__u64 load_acquire64_value = 0x1234567890abcdef;
+
+__u8 load_acquire8_result = 0;
+__u16 load_acquire16_result = 0;
+__u32 load_acquire32_result = 0;
+__u64 load_acquire64_result = 0;
+
+SEC("raw_tp/sys_enter")
+int load_acquire(const void *ctx)
+{
+	if (pid != (bpf_get_current_pid_tgid() >> 32))
+		return 0;
+
+#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
+	load_acquire8_result = __atomic_load_n(&load_acquire8_value, __ATOMIC_ACQUIRE);
+	load_acquire16_result = __atomic_load_n(&load_acquire16_value, __ATOMIC_ACQUIRE);
+	load_acquire32_result = __atomic_load_n(&load_acquire32_value, __ATOMIC_ACQUIRE);
+	load_acquire64_result = __atomic_load_n(&load_acquire64_value, __ATOMIC_ACQUIRE);
+#endif
+
+	return 0;
+}
+
+__u8 store_release8_result = 0;
+__u16 store_release16_result = 0;
+__u32 store_release32_result = 0;
+__u64 store_release64_result = 0;
+
+SEC("raw_tp/sys_enter")
+int store_release(const void *ctx)
+{
+	if (pid != (bpf_get_current_pid_tgid() >> 32))
+		return 0;
+
+#ifdef __BPF_FEATURE_LOAD_ACQ_STORE_REL
+	__u8 val8 = 0x12;
+	__u16 val16 = 0x1234;
+	__u32 val32 = 0x12345678;
+	__u64 val64 = 0x1234567890abcdef;
+
+	__atomic_store_n(&store_release8_result, val8, __ATOMIC_RELEASE);
+	__atomic_store_n(&store_release16_result, val16, __ATOMIC_RELEASE);
+	__atomic_store_n(&store_release32_result, val32, __ATOMIC_RELEASE);
+	__atomic_store_n(&store_release64_result, val64, __ATOMIC_RELEASE);
+#endif
+
+	return 0;
+}
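
For context on why acquire/release ordering matters here (rather than plain loads and stores), a hedged illustration that is not part of this commit, assuming the usual bpf_helpers.h definitions: a producer writes a payload and then sets a flag with a store-release; a consumer that observes the flag with a load-acquire is guaranteed to also observe the payload.

	__u64 payload;
	__u32 ready;

	static __always_inline void produce(__u64 v)
	{
		payload = v;					/* plain store */
		__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);	/* orders the payload store before the flag */
	}

	static __always_inline __u64 consume(void)
	{
		if (__atomic_load_n(&ready, __ATOMIC_ACQUIRE))	/* orders the payload read after this load */
			return payload;				/* guaranteed to see the producer's value */
		return 0;
	}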
tools/testing/selftests/bpf/verifier/atomic_invalid.c

Lines changed: 14 additions & 12 deletions
@@ -1,4 +1,4 @@
-#define __INVALID_ATOMIC_ACCESS_TEST(op) \
+#define __INVALID_ATOMIC_ACCESS_TEST(op, reg) \
 { \
 	"atomic " #op " access through non-pointer ", \
 	.insns = { \
@@ -9,15 +9,17 @@
 	BPF_EXIT_INSN(), \
 	}, \
 	.result = REJECT, \
-	.errstr = "R1 invalid mem access 'scalar'" \
+	.errstr = #reg " invalid mem access 'scalar'" \
 }
-__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_AND),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_OR),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | BPF_FETCH),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG),
-__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_AND, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_OR, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | BPF_FETCH, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG, R1),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_LOAD_ACQ, R0),
+__INVALID_ATOMIC_ACCESS_TEST(BPF_STORE_REL, R1),