Commit 91f2f78

laoar authored and Kernel Patches Daemon committed
selftests/bpf: Add selftest for fill_link_info
Add selftest for the fill_link_info of uprobe, kprobe and tracepoint.
The result:

$ tools/testing/selftests/bpf/test_progs --name=fill_link_info
#79/1    fill_link_info/kprobe_link_info:OK
#79/2    fill_link_info/kretprobe_link_info:OK
#79/3    fill_link_info/kprobe_invalid_ubuff:OK
#79/4    fill_link_info/tracepoint_link_info:OK
#79/5    fill_link_info/uprobe_link_info:OK
#79/6    fill_link_info/uretprobe_link_info:OK
#79/7    fill_link_info/kprobe_multi_link_info:OK
#79/8    fill_link_info/kretprobe_multi_link_info:OK
#79/9    fill_link_info/kprobe_multi_invalid_ubuff:OK
#79      fill_link_info:OK
Summary: 1/9 PASSED, 0 SKIPPED, 0 FAILED

The test case for kprobe_multi won't be run on aarch64, as it is not supported there.

Signed-off-by: Yafang Shao <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Acked-by: Jiri Olsa <[email protected]>
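
For context, the new test exercises the two-pass bpf_link_get_info_by_fd() pattern: query once with no name buffer to learn the link and perf-event type, then query again with a user buffer so the kernel can copy out the probed symbol name. Outside the test harness that pattern looks roughly like the sketch below; the helper name dump_kprobe_link_info() is an illustrative assumption (not part of this patch), and it presumes UAPI headers recent enough to carry the new perf_event link-info fields.

#include <stdio.h>
#include <string.h>
#include <linux/limits.h>
#include <bpf/bpf.h>

/* fd is assumed to be the fd of an already-attached kprobe bpf_link. */
static int dump_kprobe_link_info(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char name[PATH_MAX];
	int err;

	/* First pass: no user buffer; the kernel fills type, offset and addr. */
	memset(&info, 0, sizeof(info));
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;
	if (info.type != BPF_LINK_TYPE_PERF_EVENT ||
	    info.perf_event.type != BPF_PERF_EVENT_KPROBE)
		return -1;

	/* Second pass: hand over a buffer so the symbol name can be copied. */
	info.perf_event.kprobe.func_name = (__u64)(unsigned long)name;
	info.perf_event.kprobe.name_len = sizeof(name);
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	printf("kprobe %s addr 0x%llx offset %u\n", name,
	       (unsigned long long)info.perf_event.kprobe.addr,
	       info.perf_event.kprobe.offset);
	return 0;
}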
1 parent 8ad8513 commit 91f2f78

3 files changed, +387 -0 lines changed

tools/testing/selftests/bpf/DENYLIST.aarch64

Lines changed: 3 additions & 0 deletions
@@ -12,3 +12,6 @@ kprobe_multi_test/skel_api # libbpf: failed to load BPF sk
 module_attach # prog 'kprobe_multi': failed to auto-attach: -95
 fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
 fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
+fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95

tools/testing/selftests/bpf/prog_tests/fill_link_info.c

Lines changed: 342 additions & 0 deletions (new file)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <[email protected]> */

#include <string.h>
#include <linux/bpf.h>
#include <linux/limits.h>
#include <test_progs.h>
#include "trace_helpers.h"
#include "test_fill_link_info.skel.h"

#define TP_CAT "sched"
#define TP_NAME "sched_switch"

static const char *kmulti_syms[] = {
	"bpf_fentry_test2",
	"bpf_fentry_test1",
	"bpf_fentry_test3",
};
#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
static __u64 kmulti_addrs[KMULTI_CNT];

#define KPROBE_FUNC "bpf_fentry_test1"
static __u64 kprobe_addr;

#define UPROBE_FILE "/proc/self/exe"
static ssize_t uprobe_offset;
/* uprobe attach point */
static noinline void uprobe_func(void)
{
	asm volatile ("");
}

static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
				 ssize_t offset, ssize_t entry_offset)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char buf[PATH_MAX];
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"))
		return -1;
	if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
		return -1;

	switch (info.perf_event.type) {
	case BPF_PERF_EVENT_KPROBE:
	case BPF_PERF_EVENT_KRETPROBE:
		ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");

		/* In case kernel.kptr_restrict is not permitted or MAX_SYMS is reached */
		if (addr)
			ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
				  "kprobe_addr");

		if (!info.perf_event.kprobe.func_name) {
			ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
			info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
			info.perf_event.kprobe.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
			      strlen(KPROBE_FUNC));
		ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
		break;
	case BPF_PERF_EVENT_TRACEPOINT:
		if (!info.perf_event.tracepoint.tp_name) {
			ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
			info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
			info.perf_event.tracepoint.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
			      strlen(TP_NAME));
		ASSERT_EQ(err, 0, "cmp_tp_name");
		break;
	case BPF_PERF_EVENT_UPROBE:
	case BPF_PERF_EVENT_URETPROBE:
		ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");

		if (!info.perf_event.uprobe.file_name) {
			ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
			info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
			info.perf_event.uprobe.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
			      strlen(UPROBE_FILE));
		ASSERT_EQ(err, 0, "cmp_file_name");
		break;
	default:
		err = -1;
		break;
	}
	return err;
}

static void kprobe_fill_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));

	info.perf_event.kprobe.func_name = 0x1; /* invalid address */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");

	info.perf_event.kprobe.name_len = 64;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff");

	info.perf_event.kprobe.func_name = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_len");

	ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
	ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
	ASSERT_EQ(info.perf_event.type, 0, "type");
}

static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type,
				       bool invalid)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LINK,
		.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
	);
	ssize_t entry_offset = 0;
	int link_fd, err;

	skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
								 KPROBE_FUNC, &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
		return;

	link_fd = bpf_link__fd(skel->links.kprobe_run);
	if (!invalid) {
		/* See also arch_adjust_kprobe_addr(). */
		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
			entry_offset = 4;
		err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
		ASSERT_OK(err, "verify_perf_link_info");
	} else {
		kprobe_fill_invalid_user_buffer(link_fd);
	}
	bpf_link__detach(skel->links.kprobe_run);
}

static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
	int link_fd, err;

	skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
	if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp"))
		return;

	link_fd = bpf_link__fd(skel->links.tp_run);
	err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__detach(skel->links.tp_run);
}

static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type)
{
	int link_fd, err;

	skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run,
							    type == BPF_PERF_EVENT_URETPROBE,
							    0, /* self pid */
							    UPROBE_FILE, uprobe_offset);
	if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe"))
		return;

	link_fd = bpf_link__fd(skel->links.uprobe_run);
	err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__detach(skel->links.uprobe_run);
}

static int verify_kmulti_link_info(int fd, bool retprobe)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 addrs[KMULTI_CNT];
	int flags, i, err;

	memset(&info, 0, sizeof(info));

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type"))
		return -1;

	ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
	flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
	if (!retprobe)
		ASSERT_EQ(flags, 0, "kmulti_flags");
	else
		ASSERT_NEQ(flags, 0, "kretmulti_flags");

	if (!info.kprobe_multi.addrs) {
		info.kprobe_multi.addrs = ptr_to_u64(addrs);
		goto again;
	}
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
	return 0;
}

static void verify_kmulti_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 addrs[KMULTI_CNT];
	int err, i;

	memset(&info, 0, sizeof(info));

	info.kprobe_multi.count = KMULTI_CNT;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_addr");

	info.kprobe_multi.addrs = ptr_to_u64(addrs);
	info.kprobe_multi.count = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_cnt");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT - 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
	for (i = 0; i < KMULTI_CNT - 1; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
	ASSERT_EQ(addrs[i], 0, "kmulti_addrs");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT + 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, 0, "bigger_cnt");
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");

	info.kprobe_multi.count = KMULTI_CNT;
	info.kprobe_multi.addrs = 0x1; /* invalid addr */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff");
}

static int symbols_cmp_r(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool invalid)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	int link_fd, err;

	opts.syms = kmulti_syms;
	opts.cnt = KMULTI_CNT;
	opts.retprobe = retprobe;
	skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run,
								       NULL, &opts);
	if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi"))
		return;

	link_fd = bpf_link__fd(skel->links.kmulti_run);
	if (!invalid) {
		err = verify_kmulti_link_info(link_fd, retprobe);
		ASSERT_OK(err, "verify_kmulti_link_info");
	} else {
		verify_kmulti_invalid_user_buffer(link_fd);
	}
	bpf_link__detach(skel->links.kmulti_run);
}

void test_fill_link_info(void)
{
	struct test_fill_link_info *skel;
	int i;

	skel = test_fill_link_info__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* load kallsyms to compare the addr */
	if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
		goto cleanup;

	kprobe_addr = ksym_get_addr(KPROBE_FUNC);
	if (test__start_subtest("kprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false);
	if (test__start_subtest("kretprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false);
	if (test__start_subtest("kprobe_invalid_ubuff"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
	if (test__start_subtest("tracepoint_link_info"))
		test_tp_fill_link_info(skel);

	uprobe_offset = get_uprobe_offset(&uprobe_func);
	if (test__start_subtest("uprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE);
	if (test__start_subtest("uretprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE);

	qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
	for (i = 0; i < KMULTI_CNT; i++)
		kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
	if (test__start_subtest("kprobe_multi_link_info"))
		test_kprobe_multi_fill_link_info(skel, false, false);
	if (test__start_subtest("kretprobe_multi_link_info"))
		test_kprobe_multi_fill_link_info(skel, true, false);
	if (test__start_subtest("kprobe_multi_invalid_ubuff"))
		test_kprobe_multi_fill_link_info(skel, true, true);

cleanup:
	test_fill_link_info__destroy(skel);
}
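
The verify_kmulti_invalid_user_buffer() checks above pin down the kprobe_multi query semantics: passing addrs without a count (or a count without addrs) yields -EINVAL, a count smaller than the attached set yields -ENOSPC with the first count entries still filled, and a larger count succeeds. A minimal standalone read-back under those assumptions might look like the sketch below; read_kmulti_addrs() is a hypothetical helper and fd is assumed to be the fd of an attached kprobe_multi link.

#include <string.h>
#include <bpf/bpf.h>

static int read_kmulti_addrs(int fd, __u64 *addrs, __u32 nslots)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	info.kprobe_multi.addrs = (__u64)(unsigned long)addrs;
	info.kprobe_multi.count = nslots;
	/* Returns 0 on success, -ENOSPC if nslots is smaller than the number
	 * of attached symbols; in that case the first nslots entries are
	 * still written (they may read back as zero without sufficient
	 * privileges).
	 */
	return bpf_link_get_info_by_fd(fd, &info, &len);
}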

tools/testing/selftests/bpf/progs/test_fill_link_info.c

Lines changed: 42 additions & 0 deletions (new file)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <[email protected]> */

#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include <stdbool.h>

extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;

/* This function is here to have CONFIG_X86_KERNEL_IBT
 * used and added to object BTF.
 */
int unused(void)
{
	return CONFIG_X86_KERNEL_IBT ? 0 : 1;
}

SEC("kprobe")
int BPF_PROG(kprobe_run)
{
	return 0;
}

SEC("uprobe")
int BPF_PROG(uprobe_run)
{
	return 0;
}

SEC("tracepoint")
int BPF_PROG(tp_run)
{
	return 0;
}

SEC("kprobe.multi")
int BPF_PROG(kmulti_run)
{
	return 0;
}

char _license[] SEC("license") = "GPL";
