Skip to content

Commit 2fecdde

Browse files
laoar authored and
Kernel Patches Daemon committed
selftests/bpf: Add selftest for fill_link_info
Add selftest for the fill_link_info of uprobe, kprobe and tracepoint.
The result:

  $ tools/testing/selftests/bpf/test_progs --name=fill_link_info
  #79/1  fill_link_info/kprobe_link_info:OK
  #79/2  fill_link_info/kretprobe_link_info:OK
  #79/3  fill_link_info/kprobe_fill_invalid_user_buff:OK
  #79/4  fill_link_info/tracepoint_link_info:OK
  #79/5  fill_link_info/uprobe_link_info:OK
  #79/6  fill_link_info/uretprobe_link_info:OK
  #79/7  fill_link_info/kprobe_multi_link_info:OK
  #79/8  fill_link_info/kretprobe_multi_link_info:OK
  #79/9  fill_link_info/kprobe_multi_ubuff:OK
  #79    fill_link_info:OK
  Summary: 1/9 PASSED, 0 SKIPPED, 0 FAILED

The test case for kprobe_multi won't be run on aarch64, as it is not
supported.

Signed-off-by: Yafang Shao <[email protected]>
1 parent 0cab7f7 commit 2fecdde

File tree

3 files changed

+414
-0
lines changed

3 files changed

+414
-0
lines changed

tools/testing/selftests/bpf/DENYLIST.aarch64

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,3 +12,6 @@ kprobe_multi_test/skel_api # libbpf: failed to load BPF sk
1212
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
1313
fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
1414
fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
15+
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
16+
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
17+
fill_link_info/kprobe_multi_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
Lines changed: 369 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,369 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (C) 2023 Yafang Shao <[email protected]> */
3+
4+
#include <string.h>
5+
#include <linux/bpf.h>
6+
#include <linux/limits.h>
7+
#include <test_progs.h>
8+
#include "trace_helpers.h"
9+
#include "test_fill_link_info.skel.h"
10+
11+
#define TP_CAT "sched"
12+
#define TP_NAME "sched_switch"
13+
#define KPROBE_FUNC "tcp_rcv_established"
14+
#define UPROBE_FILE "/proc/self/exe"
15+
#define KMULTI_CNT (4)
16+
17+
/* uprobe attach point */
18+
static noinline void uprobe_func(void)
19+
{
20+
asm volatile ("");
21+
}
22+
23+
/* Query the link info via bpf_link_get_info_by_fd() and validate the
 * perf-event fields for kprobe/kretprobe, tracepoint and uprobe/uretprobe
 * links.
 *
 * The query is made twice: the first pass carries no name buffer, so the
 * kernel is expected to report name_len == 0; a buffer is then supplied and
 * the query repeated to fetch and compare the symbol / tp / file name.
 *
 * Returns 0 on success, -1 (or a non-zero strncmp result) on mismatch.
 */
static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
				 ssize_t offset, ssize_t entry_offset)
{
	struct bpf_link_info info;
	__u32 info_len = sizeof(info);
	char name_buf[PATH_MAX];
	int ret = 0;

	memset(&info, 0, sizeof(info));
	name_buf[0] = '\0';

again:
	ret = bpf_link_get_info_by_fd(fd, &info, &info_len);
	if (!ASSERT_OK(ret, "get_link_info"))
		return -1;

	switch (info.type) {
	case BPF_LINK_TYPE_PERF_EVENT:
		if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
			return -1;

		switch (info.perf_event.type) {
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");

			/* In case kptr setting is not permitted or MAX_SYMS is reached */
			if (addr)
				ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
					  "kprobe_addr");

			/* First pass: no buffer yet — supply one and retry. */
			if (!info.perf_event.kprobe.func_name) {
				ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
				info.perf_event.kprobe.func_name = ptr_to_u64(&name_buf);
				info.perf_event.kprobe.name_len = sizeof(name_buf);
				goto again;
			}

			ret = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
				      strlen(KPROBE_FUNC));
			ASSERT_EQ(ret, 0, "cmp_kprobe_func_name");
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			/* First pass: no buffer yet — supply one and retry. */
			if (!info.perf_event.tracepoint.tp_name) {
				ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
				info.perf_event.tracepoint.tp_name = ptr_to_u64(&name_buf);
				info.perf_event.tracepoint.name_len = sizeof(name_buf);
				goto again;
			}

			ret = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
				      strlen(TP_NAME));
			ASSERT_EQ(ret, 0, "cmp_tp_name");
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");

			/* First pass: no buffer yet — supply one and retry. */
			if (!info.perf_event.uprobe.file_name) {
				ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
				info.perf_event.uprobe.file_name = ptr_to_u64(&name_buf);
				info.perf_event.uprobe.name_len = sizeof(name_buf);
				goto again;
			}

			ret = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
				      strlen(UPROBE_FILE));
			ASSERT_EQ(ret, 0, "cmp_file_name");
			break;
		default:
			break;
		}
		break;
	default:
		/* An unexpected link type only counts as a failure when the
		 * caller asked for one of the perf-event based probe types.
		 */
		switch (type) {
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
		case BPF_PERF_EVENT_TRACEPOINT:
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			ret = -1;
			break;
		default:
			break;
		}
		break;
	}
	return ret;
}
112+
113+
static void kprobe_fill_invalid_user_buffer(int fd)
114+
{
115+
struct bpf_link_info info;
116+
__u32 len = sizeof(info);
117+
int err;
118+
119+
memset(&info, 0, sizeof(info));
120+
121+
info.perf_event.kprobe.func_name = 0x1; /* invalid address */
122+
err = bpf_link_get_info_by_fd(fd, &info, &len);
123+
ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");
124+
125+
info.perf_event.kprobe.name_len = 64;
126+
err = bpf_link_get_info_by_fd(fd, &info, &len);
127+
ASSERT_EQ(err, -EFAULT, "invalid_buff");
128+
129+
info.perf_event.kprobe.func_name = 0;
130+
err = bpf_link_get_info_by_fd(fd, &info, &len);
131+
ASSERT_EQ(err, -EINVAL, "invalid_len");
132+
133+
ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
134+
ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
135+
ASSERT_EQ(info.perf_event.type, 0, "type");
136+
}
137+
138+
/* Attach a (k|kret)probe link to KPROBE_FUNC and verify the link info the
 * kernel reports for it, or — when @invalid is set — exercise the
 * invalid-user-buffer error paths instead.
 */
static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type,
				       bool retprobe, bool invalid)
{
	/* LIBBPF_OPTS for consistency with test_kprobe_multi_fill_link_info;
	 * DECLARE_LIBBPF_OPTS is the deprecated spelling of the same macro.
	 */
	LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LINK,
		.retprobe = retprobe,
	);
	ssize_t offset = 0, entry_offset = 0;
	int link_fd, err;
	long addr;

	skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
								 KPROBE_FUNC, &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
		return;

	link_fd = bpf_link__fd(skel->links.kprobe_run);
	if (!ASSERT_GE(link_fd, 0, "link_fd"))
		return;

	/* may be 0 if kallsyms lookup failed; verify_perf_link_info skips
	 * the address check in that case
	 */
	addr = ksym_get_addr(KPROBE_FUNC);
	if (!invalid) {
		/* See also arch_adjust_kprobe_addr(): with X86 IBT the probe
		 * lands past the symbol start, hence the entry offset.
		 */
		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
			entry_offset = 4;
		/* offset is always 0 here, so this reduces to entry_offset */
		err = verify_perf_link_info(link_fd, type, addr, offset, offset ?: entry_offset);
		ASSERT_OK(err, "verify_perf_link_info");
	} else {
		kprobe_fill_invalid_user_buffer(link_fd);
	}
	bpf_link__detach(skel->links.kprobe_run);
}
171+
172+
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
173+
{
174+
int link_fd, err;
175+
176+
skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
177+
if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp"))
178+
return;
179+
180+
link_fd = bpf_link__fd(skel->links.tp_run);
181+
if (!ASSERT_GE(link_fd, 0, "link_fd"))
182+
return;
183+
184+
err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
185+
ASSERT_OK(err, "verify_perf_link_info");
186+
bpf_link__detach(skel->links.tp_run);
187+
}
188+
189+
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
190+
enum bpf_perf_event_type type, ssize_t offset,
191+
bool retprobe)
192+
{
193+
int link_fd, err;
194+
195+
skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run, retprobe,
196+
0, /* self pid */
197+
UPROBE_FILE, offset);
198+
if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe"))
199+
return;
200+
201+
link_fd = bpf_link__fd(skel->links.uprobe_run);
202+
if (!ASSERT_GE(link_fd, 0, "link_fd"))
203+
return;
204+
205+
err = verify_perf_link_info(link_fd, type, 0, offset, 0);
206+
ASSERT_OK(err, "verify_perf_link_info");
207+
bpf_link__detach(skel->links.uprobe_run);
208+
}
209+
210+
/* Validate a kprobe_multi link's info: function count, return-probe flag
 * and — after a second query with a user buffer attached — the resolved
 * addresses against @addrs.
 *
 * Returns 0 on success, -1 on query failure or unexpected link type.
 */
static int verify_kmulti_link_info(int fd, const __u64 *addrs, bool retprobe)
{
	__u64 fetched_addrs[KMULTI_CNT];
	struct bpf_link_info info;
	__u32 info_len = sizeof(info);
	int ret_flag, i, ret = 0;

	memset(&info, 0, sizeof(info));

again:
	ret = bpf_link_get_info_by_fd(fd, &info, &info_len);
	if (!ASSERT_OK(ret, "get_link_info"))
		return -1;

	ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type");
	switch (info.type) {
	case BPF_LINK_TYPE_KPROBE_MULTI:
		ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
		ret_flag = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
		if (!retprobe)
			ASSERT_EQ(ret_flag, 0, "kmulti_flags");
		else
			ASSERT_NEQ(ret_flag, 0, "kretmulti_flags");

		/* First pass carried no buffer; hand one over and re-query. */
		if (!info.kprobe_multi.addrs) {
			info.kprobe_multi.addrs = ptr_to_u64(fetched_addrs);
			goto again;
		}
		for (i = 0; i < KMULTI_CNT; i++)
			ASSERT_EQ(fetched_addrs[i], addrs[i], "kmulti_addrs");
		break;
	default:
		ret = -1;
		break;
	}
	return ret;
}
247+
248+
static void verify_kmulti_user_buffer(int fd, const __u64 *addrs)
249+
{
250+
__u64 kmulti_addrs[KMULTI_CNT];
251+
struct bpf_link_info info;
252+
__u32 len = sizeof(info);
253+
int err, i;
254+
255+
memset(&info, 0, sizeof(info));
256+
257+
info.kprobe_multi.count = KMULTI_CNT;
258+
err = bpf_link_get_info_by_fd(fd, &info, &len);
259+
ASSERT_EQ(err, -EINVAL, "no_addr");
260+
261+
info.kprobe_multi.addrs = ptr_to_u64(kmulti_addrs);
262+
info.kprobe_multi.count = 0;
263+
err = bpf_link_get_info_by_fd(fd, &info, &len);
264+
ASSERT_EQ(err, -EINVAL, "no_cnt");
265+
266+
for (i = 0; i < KMULTI_CNT; i++)
267+
kmulti_addrs[i] = 0;
268+
info.kprobe_multi.count = KMULTI_CNT - 1;
269+
err = bpf_link_get_info_by_fd(fd, &info, &len);
270+
ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
271+
for (i = 0; i < KMULTI_CNT - 1; i++)
272+
ASSERT_EQ(kmulti_addrs[i], addrs[i], "kmulti_addrs");
273+
ASSERT_EQ(kmulti_addrs[i], 0, "kmulti_addrs");
274+
275+
for (i = 0; i < KMULTI_CNT; i++)
276+
kmulti_addrs[i] = 0;
277+
info.kprobe_multi.count = KMULTI_CNT + 1;
278+
err = bpf_link_get_info_by_fd(fd, &info, &len);
279+
ASSERT_EQ(err, 0, "bigger_cnt");
280+
for (i = 0; i < KMULTI_CNT; i++)
281+
ASSERT_EQ(kmulti_addrs[i], addrs[i], "kmulti_addrs");
282+
283+
info.kprobe_multi.count = KMULTI_CNT;
284+
info.kprobe_multi.addrs = 0x1; /* invalid addr */
285+
err = bpf_link_get_info_by_fd(fd, &info, &len);
286+
ASSERT_EQ(err, -EFAULT, "invalid_buff");
287+
}
288+
289+
/* qsort() comparator ordering an array of symbol-name strings. */
static int symbols_cmp_r(const void *a, const void *b)
{
	const char * const *lhs = a;
	const char * const *rhs = b;

	return strcmp(*lhs, *rhs);
}
296+
297+
/* Attach a kprobe_multi link to four scheduler timeout symbols and verify
 * the reported link info, or — with @buffer set — the addrs copy-out
 * corner cases.
 */
static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool buffer)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	const char *syms[KMULTI_CNT] = {
		"schedule_timeout_interruptible",
		"schedule_timeout_uninterruptible",
		"schedule_timeout_idle",
		"schedule_timeout_killable",
	};
	__u64 addrs[KMULTI_CNT];
	int fd, i, ret = 0;

	/* Sort the symbols so the expected order matches what the kernel
	 * reports — presumably addrs come back sorted; confirm if changed.
	 */
	qsort(syms, KMULTI_CNT, sizeof(syms[0]), symbols_cmp_r);
	opts.syms = syms;
	opts.cnt = KMULTI_CNT;
	opts.retprobe = retprobe;
	skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run,
								       NULL, &opts);
	if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi"))
		return;

	fd = bpf_link__fd(skel->links.kmulti_run);
	if (!ASSERT_GE(fd, 0, "link_fd"))
		return;

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = ksym_get_addr(syms[i]);

	if (!buffer)
		ret = verify_kmulti_link_info(fd, addrs, retprobe);
	else
		verify_kmulti_user_buffer(fd, addrs);
	ASSERT_OK(ret, "verify_kmulti_link_info");
	bpf_link__detach(skel->links.kmulti_run);
}
333+
334+
void test_fill_link_info(void)
335+
{
336+
struct test_fill_link_info *skel;
337+
ssize_t offset;
338+
339+
skel = test_fill_link_info__open_and_load();
340+
if (!ASSERT_OK_PTR(skel, "skel_open"))
341+
goto cleanup;
342+
343+
/* load kallsyms to compare the addr */
344+
if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
345+
return;
346+
if (test__start_subtest("kprobe_link_info"))
347+
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false, false);
348+
if (test__start_subtest("kretprobe_link_info"))
349+
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, true, false);
350+
if (test__start_subtest("kprobe_fill_invalid_user_buff"))
351+
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false, true);
352+
if (test__start_subtest("tracepoint_link_info"))
353+
test_tp_fill_link_info(skel);
354+
355+
offset = get_uprobe_offset(&uprobe_func);
356+
if (test__start_subtest("uprobe_link_info"))
357+
test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE, offset, false);
358+
if (test__start_subtest("uretprobe_link_info"))
359+
test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE, offset, true);
360+
if (test__start_subtest("kprobe_multi_link_info"))
361+
test_kprobe_multi_fill_link_info(skel, false, false);
362+
if (test__start_subtest("kretprobe_multi_link_info"))
363+
test_kprobe_multi_fill_link_info(skel, true, false);
364+
if (test__start_subtest("kprobe_multi_ubuff"))
365+
test_kprobe_multi_fill_link_info(skel, true, true);
366+
367+
cleanup:
368+
test_fill_link_info__destroy(skel);
369+
}

0 commit comments

Comments
 (0)