Skip to content

Commit 3b5584b

Browse files
laoar authored and Kernel Patches Daemon committed
selftests/bpf: Add selftest for fill_link_info
Add selftest for the fill_link_info of uprobe, kprobe and tracepoint.
The result:

  $ tools/testing/selftests/bpf/test_progs --name=fill_link_info
  #79/1 fill_link_info/kprobe_link_info:OK
  #79/2 fill_link_info/kretprobe_link_info:OK
  #79/3 fill_link_info/kprobe_fill_invalid_user_buff:OK
  #79/4 fill_link_info/tracepoint_link_info:OK
  #79/5 fill_link_info/uprobe_link_info:OK
  #79/6 fill_link_info/uretprobe_link_info:OK
  #79/7 fill_link_info/kprobe_multi_link_info:OK
  #79/8 fill_link_info/kretprobe_multi_link_info:OK
  #79/9 fill_link_info/kprobe_multi_ubuff:OK
  #79 fill_link_info:OK
  Summary: 1/9 PASSED, 0 SKIPPED, 0 FAILED

The test case for kprobe_multi won't be run on aarch64, as it is not
supported.

Signed-off-by: Yafang Shao <[email protected]>
Acked-by: Yonghong Song <[email protected]>
1 parent 6d8c34f commit 3b5584b

File tree

3 files changed

+382
-0
lines changed

3 files changed

+382
-0
lines changed

tools/testing/selftests/bpf/DENYLIST.aarch64

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,3 +12,6 @@ kprobe_multi_test/skel_api # libbpf: failed to load BPF sk
1212
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
1313
fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
1414
fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
15+
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
16+
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
17+
fill_link_info/kprobe_multi_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
Lines changed: 337 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,337 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (C) 2023 Yafang Shao <[email protected]> */
3+
4+
#include <string.h>
5+
#include <linux/bpf.h>
6+
#include <linux/limits.h>
7+
#include <test_progs.h>
8+
#include "trace_helpers.h"
9+
#include "test_fill_link_info.skel.h"
10+
11+
#define TP_CAT "sched"
12+
#define TP_NAME "sched_switch"
13+
#define KPROBE_FUNC "tcp_rcv_established"
14+
#define UPROBE_FILE "/proc/self/exe"
15+
#define KMULTI_CNT (4)
16+
17+
/* uprobe attach point */
18+
static noinline void uprobe_func(void)
19+
{
20+
asm volatile ("");
21+
}
22+
23+
/*
 * Fetch bpf_link_info for a perf-event based link (kprobe, tracepoint or
 * uprobe) and validate what the kernel reported.
 *
 * The name/file string is retrieved in two passes: the first call supplies
 * no user buffer, so the kernel must report name_len == 0; the buffer is
 * then plugged in and the query is retried via the 'again' label.
 *
 * @fd: link fd to query
 * @type: expected bpf_perf_event_type
 * @addr: expected symbol address, or 0 when unavailable
 * @offset: expected probe offset
 * @entry_offset: extra arch-specific offset added to @addr
 *
 * Returns 0 on success, non-zero on mismatch.
 */
static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
				 ssize_t offset, ssize_t entry_offset)
{
	struct bpf_link_info info;
	__u32 info_len = sizeof(info);
	char name_buf[PATH_MAX];
	int ret = 0;

	memset(&info, 0, sizeof(info));
	name_buf[0] = '\0';

again:
	ret = bpf_link_get_info_by_fd(fd, &info, &info_len);
	if (!ASSERT_OK(ret, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"))
		return -1;
	if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
		return -1;

	switch (info.perf_event.type) {
	case BPF_PERF_EVENT_KPROBE:
	case BPF_PERF_EVENT_KRETPROBE:
		ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");

		/* In case kernel.kptr_restrict is not permitted or MAX_SYMS is reached */
		if (addr)
			ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
				  "kprobe_addr");

		/* Second pass: hand the kernel a buffer for the symbol name. */
		if (!info.perf_event.kprobe.func_name) {
			ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
			info.perf_event.kprobe.func_name = ptr_to_u64(&name_buf);
			info.perf_event.kprobe.name_len = sizeof(name_buf);
			goto again;
		}

		ret = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
			      strlen(KPROBE_FUNC));
		ASSERT_EQ(ret, 0, "cmp_kprobe_func_name");
		break;
	case BPF_PERF_EVENT_TRACEPOINT:
		/* Second pass: hand the kernel a buffer for the tp name. */
		if (!info.perf_event.tracepoint.tp_name) {
			ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
			info.perf_event.tracepoint.tp_name = ptr_to_u64(&name_buf);
			info.perf_event.tracepoint.name_len = sizeof(name_buf);
			goto again;
		}

		ret = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
			      strlen(TP_NAME));
		ASSERT_EQ(ret, 0, "cmp_tp_name");
		break;
	case BPF_PERF_EVENT_UPROBE:
	case BPF_PERF_EVENT_URETPROBE:
		ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");

		/* Second pass: hand the kernel a buffer for the file name. */
		if (!info.perf_event.uprobe.file_name) {
			ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
			info.perf_event.uprobe.file_name = ptr_to_u64(&name_buf);
			info.perf_event.uprobe.name_len = sizeof(name_buf);
			goto again;
		}

		ret = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
			      strlen(UPROBE_FILE));
		ASSERT_EQ(ret, 0, "cmp_file_name");
		break;
	default:
		break;
	}
	return ret;
}
97+
98+
static void kprobe_fill_invalid_user_buffer(int fd)
99+
{
100+
struct bpf_link_info info;
101+
__u32 len = sizeof(info);
102+
int err;
103+
104+
memset(&info, 0, sizeof(info));
105+
106+
info.perf_event.kprobe.func_name = 0x1; /* invalid address */
107+
err = bpf_link_get_info_by_fd(fd, &info, &len);
108+
ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");
109+
110+
info.perf_event.kprobe.name_len = 64;
111+
err = bpf_link_get_info_by_fd(fd, &info, &len);
112+
ASSERT_EQ(err, -EFAULT, "invalid_buff");
113+
114+
info.perf_event.kprobe.func_name = 0;
115+
err = bpf_link_get_info_by_fd(fd, &info, &len);
116+
ASSERT_EQ(err, -EINVAL, "invalid_len");
117+
118+
ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
119+
ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
120+
ASSERT_EQ(info.perf_event.type, 0, "type");
121+
}
122+
123+
/*
 * Attach a (ret)kprobe to KPROBE_FUNC in link mode and check its link
 * info.  When @invalid is set, run the invalid-user-buffer checks instead
 * of the regular verification.
 */
static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type,
				       bool retprobe, bool invalid)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LINK,
		.retprobe = retprobe,
	);
	ssize_t entry_offset = 0;
	int link_fd, ret;
	long addr;

	skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
								 KPROBE_FUNC, &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
		return;

	link_fd = bpf_link__fd(skel->links.kprobe_run);
	addr = ksym_get_addr(KPROBE_FUNC);
	if (invalid) {
		kprobe_fill_invalid_user_buffer(link_fd);
	} else {
		/* See also arch_adjust_kprobe_addr(). */
		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
			entry_offset = 4;
		ret = verify_perf_link_info(link_fd, type, addr, 0, entry_offset);
		ASSERT_OK(ret, "verify_perf_link_info");
	}
	bpf_link__detach(skel->links.kprobe_run);
}
153+
154+
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
155+
{
156+
int link_fd, err;
157+
158+
skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
159+
if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp"))
160+
return;
161+
162+
link_fd = bpf_link__fd(skel->links.tp_run);
163+
err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
164+
ASSERT_OK(err, "verify_perf_link_info");
165+
bpf_link__detach(skel->links.tp_run);
166+
}
167+
168+
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
169+
enum bpf_perf_event_type type, ssize_t offset,
170+
bool retprobe)
171+
{
172+
int link_fd, err;
173+
174+
skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run, retprobe,
175+
0, /* self pid */
176+
UPROBE_FILE, offset);
177+
if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe"))
178+
return;
179+
180+
link_fd = bpf_link__fd(skel->links.uprobe_run);
181+
err = verify_perf_link_info(link_fd, type, 0, offset, 0);
182+
ASSERT_OK(err, "verify_perf_link_info");
183+
bpf_link__detach(skel->links.uprobe_run);
184+
}
185+
186+
/*
 * Fetch bpf_link_info for a kprobe_multi link and check the count, the
 * return-probe flag and the reported addresses.  Like the perf links,
 * the address array is requested in a second pass via the 'again' label.
 *
 * Returns 0 on success, -1 on mismatch.
 */
static int verify_kmulti_link_info(int fd, const __u64 *addrs, bool retprobe)
{
	__u64 reported_addrs[KMULTI_CNT];
	struct bpf_link_info info;
	__u32 info_len = sizeof(info);
	int flags, i, ret;

	memset(&info, 0, sizeof(info));

again:
	ret = bpf_link_get_info_by_fd(fd, &info, &info_len);
	if (!ASSERT_OK(ret, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type"))
		return -1;

	ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
	flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
	if (retprobe)
		ASSERT_NEQ(flags, 0, "kretmulti_flags");
	else
		ASSERT_EQ(flags, 0, "kmulti_flags");

	/* Second pass: hand the kernel a buffer for the addresses. */
	if (!info.kprobe_multi.addrs) {
		info.kprobe_multi.addrs = ptr_to_u64(reported_addrs);
		goto again;
	}
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(reported_addrs[i], addrs[i], "kmulti_addrs");
	return 0;
}
218+
219+
static void verify_kmulti_user_buffer(int fd, const __u64 *addrs)
220+
{
221+
__u64 kmulti_addrs[KMULTI_CNT];
222+
struct bpf_link_info info;
223+
__u32 len = sizeof(info);
224+
int err, i;
225+
226+
memset(&info, 0, sizeof(info));
227+
228+
info.kprobe_multi.count = KMULTI_CNT;
229+
err = bpf_link_get_info_by_fd(fd, &info, &len);
230+
ASSERT_EQ(err, -EINVAL, "no_addr");
231+
232+
info.kprobe_multi.addrs = ptr_to_u64(kmulti_addrs);
233+
info.kprobe_multi.count = 0;
234+
err = bpf_link_get_info_by_fd(fd, &info, &len);
235+
ASSERT_EQ(err, -EINVAL, "no_cnt");
236+
237+
for (i = 0; i < KMULTI_CNT; i++)
238+
kmulti_addrs[i] = 0;
239+
info.kprobe_multi.count = KMULTI_CNT - 1;
240+
err = bpf_link_get_info_by_fd(fd, &info, &len);
241+
ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
242+
for (i = 0; i < KMULTI_CNT - 1; i++)
243+
ASSERT_EQ(kmulti_addrs[i], addrs[i], "kmulti_addrs");
244+
ASSERT_EQ(kmulti_addrs[i], 0, "kmulti_addrs");
245+
246+
for (i = 0; i < KMULTI_CNT; i++)
247+
kmulti_addrs[i] = 0;
248+
info.kprobe_multi.count = KMULTI_CNT + 1;
249+
err = bpf_link_get_info_by_fd(fd, &info, &len);
250+
ASSERT_EQ(err, 0, "bigger_cnt");
251+
for (i = 0; i < KMULTI_CNT; i++)
252+
ASSERT_EQ(kmulti_addrs[i], addrs[i], "kmulti_addrs");
253+
254+
info.kprobe_multi.count = KMULTI_CNT;
255+
info.kprobe_multi.addrs = 0x1; /* invalid addr */
256+
err = bpf_link_get_info_by_fd(fd, &info, &len);
257+
ASSERT_EQ(err, -EFAULT, "invalid_buff");
258+
}
259+
260+
/* qsort()-compatible comparator for an array of symbol-name strings. */
static int symbols_cmp_r(const void *a, const void *b)
{
	const char * const *sym_a = a;
	const char * const *sym_b = b;

	return strcmp(*sym_a, *sym_b);
}
267+
268+
/*
 * Attach a kprobe_multi program to a fixed set of scheduler symbols and
 * verify the link info: the regular path by default, or the user-buffer
 * error checks when @buffer is set.
 */
static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool buffer)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	const char *syms[KMULTI_CNT] = {
		"schedule_timeout_interruptible",
		"schedule_timeout_uninterruptible",
		"schedule_timeout_idle",
		"schedule_timeout_killable",
	};
	__u64 expected_addrs[KMULTI_CNT];
	int link_fd, i, ret = 0;

	qsort(syms, KMULTI_CNT, sizeof(syms[0]), symbols_cmp_r);
	opts.syms = syms;
	opts.cnt = KMULTI_CNT;
	opts.retprobe = retprobe;
	skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run,
								       NULL, &opts);
	if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi"))
		return;

	link_fd = bpf_link__fd(skel->links.kmulti_run);
	for (i = 0; i < KMULTI_CNT; i++)
		expected_addrs[i] = ksym_get_addr(syms[i]);

	if (buffer)
		verify_kmulti_user_buffer(link_fd, expected_addrs);
	else
		ret = verify_kmulti_link_info(link_fd, expected_addrs, retprobe);
	ASSERT_OK(ret, "verify_kmulti_link_info");
	bpf_link__detach(skel->links.kmulti_run);
}
301+
302+
void test_fill_link_info(void)
303+
{
304+
struct test_fill_link_info *skel;
305+
ssize_t offset;
306+
307+
skel = test_fill_link_info__open_and_load();
308+
if (!ASSERT_OK_PTR(skel, "skel_open"))
309+
return;
310+
311+
/* load kallsyms to compare the addr */
312+
if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
313+
goto cleanup;
314+
if (test__start_subtest("kprobe_link_info"))
315+
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false, false);
316+
if (test__start_subtest("kretprobe_link_info"))
317+
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, true, false);
318+
if (test__start_subtest("kprobe_fill_invalid_user_buff"))
319+
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false, true);
320+
if (test__start_subtest("tracepoint_link_info"))
321+
test_tp_fill_link_info(skel);
322+
323+
offset = get_uprobe_offset(&uprobe_func);
324+
if (test__start_subtest("uprobe_link_info"))
325+
test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE, offset, false);
326+
if (test__start_subtest("uretprobe_link_info"))
327+
test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE, offset, true);
328+
if (test__start_subtest("kprobe_multi_link_info"))
329+
test_kprobe_multi_fill_link_info(skel, false, false);
330+
if (test__start_subtest("kretprobe_multi_link_info"))
331+
test_kprobe_multi_fill_link_info(skel, true, false);
332+
if (test__start_subtest("kprobe_multi_ubuff"))
333+
test_kprobe_multi_fill_link_info(skel, true, true);
334+
335+
cleanup:
336+
test_fill_link_info__destroy(skel);
337+
}
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (C) 2023 Yafang Shao <[email protected]> */
3+
4+
#include "vmlinux.h"
5+
#include <bpf/bpf_tracing.h>
6+
#include <stdbool.h>
7+
8+
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
9+
10+
/* This function is here to have CONFIG_X86_KERNEL_IBT
11+
* used and added to object BTF.
12+
*/
13+
int unused(void)
14+
{
15+
return CONFIG_X86_KERNEL_IBT ? 0 : 1;
16+
}
17+
18+
/* Minimal kprobe program; only its link metadata is inspected. */
SEC("kprobe")
int BPF_PROG(kprobe_run)
{
	return 0;
}
23+
24+
/* Minimal uprobe program; only its link metadata is inspected. */
SEC("uprobe")
int BPF_PROG(uprobe_run)
{
	return 0;
}
29+
30+
/* Minimal tracepoint program; only its link metadata is inspected. */
SEC("tracepoint")
int BPF_PROG(tp_run)
{
	return 0;
}
35+
36+
SEC("kprobe.multi")
37+
int BPF_PROG(kmulti_run)
38+
{
39+
return 0;
40+
}
41+
42+
char _license[] SEC("license") = "GPL";

0 commit comments

Comments
 (0)