@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/btf_ids.h>
 #include <linux/bpf_local_storage.h>
 #include <net/bpf_sk_storage.h>
@@ -378,6 +379,79 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
         .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
 };
 
+static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
+{
+        const struct btf *btf_vmlinux;
+        const struct btf_type *t;
+        const char *tname;
+        u32 btf_id;
+
+        if (prog->aux->dst_prog)
+                return false;
+
+        /* Ensure the tracing program is not tracing
+         * any bpf_sk_storage*() function and also
+         * uses the bpf_sk_storage_(get|delete) helper.
+         */
+        switch (prog->expected_attach_type) {
+        case BPF_TRACE_RAW_TP:
+                /* bpf_sk_storage has no trace point */
+                return true;
+        case BPF_TRACE_FENTRY:
+        case BPF_TRACE_FEXIT:
+                btf_vmlinux = bpf_get_btf_vmlinux();
+                btf_id = prog->aux->attach_btf_id;
+                t = btf_type_by_id(btf_vmlinux, btf_id);
+                tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+                return !!strncmp(tname, "bpf_sk_storage",
+                                 strlen("bpf_sk_storage"));
+        default:
+                return false;
+        }
+
+        return false;
+}
+
+BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
+           void *, value, u64, flags)
+{
+        if (!in_serving_softirq() && !in_task())
+                return (unsigned long)NULL;
+
+        return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
+}
+
+BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
+           struct sock *, sk)
+{
+        if (!in_serving_softirq() && !in_task())
+                return -EPERM;
+
+        return ____bpf_sk_storage_delete(map, sk);
+}
+
+const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
+        .func = bpf_sk_storage_get_tracing,
+        .gpl_only = false,
+        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+        .arg1_type = ARG_CONST_MAP_PTR,
+        .arg2_type = ARG_PTR_TO_BTF_ID,
+        .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+        .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+        .arg4_type = ARG_ANYTHING,
+        .allowed = bpf_sk_storage_tracing_allowed,
+};
+
+const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
+        .func = bpf_sk_storage_delete_tracing,
+        .gpl_only = false,
+        .ret_type = RET_INTEGER,
+        .arg1_type = ARG_CONST_MAP_PTR,
+        .arg2_type = ARG_PTR_TO_BTF_ID,
+        .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+        .allowed = bpf_sk_storage_tracing_allowed,
+};
+
 struct bpf_sk_storage_diag {
         u32 nr_maps;
         struct bpf_map *maps[];
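
For context, a rough sketch of the kind of tracing program these protos enable: a BPF fentry program that attaches to a kernel function taking a struct sock * and keeps per-socket state in a BPF_MAP_TYPE_SK_STORAGE map. The attach target (tcp_close), the map name (sk_stat_map), and the struct sk_stat layout below are made up for illustration and are not part of this patch; the example assumes a vmlinux.h generated from BTF and the libbpf helpers.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only: map name, value struct and attach point are
 * hypothetical, not taken from the patch above.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct sk_stat {
        __u64 close_cnt;
};

struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct sk_stat);
} sk_stat_map SEC(".maps");

SEC("fentry/tcp_close")
int BPF_PROG(trace_tcp_close, struct sock *sk)
{
        struct sk_stat *stat;

        /* With this patch, bpf_sk_storage_get() is callable from
         * fentry/fexit/raw_tp programs. It can still return NULL,
         * e.g. when the helper is invoked from an unsupported
         * context or the per-socket storage cannot be allocated.
         */
        stat = bpf_sk_storage_get(&sk_stat_map, sk, NULL,
                                  BPF_SK_STORAGE_GET_F_CREATE);
        if (!stat)
                return 0;

        stat->close_cnt++;
        return 0;
}

char _license[] SEC("license") = "GPL";

bpf_sk_storage_delete() becomes usable from the same program types through bpf_sk_storage_delete_tracing_proto. Note that bpf_sk_storage_tracing_allowed() would reject such a program if it were attached to one of the bpf_sk_storage*() kernel functions themselves, and the in_serving_softirq()/in_task() guard makes the tracing variants bail out (NULL or -EPERM) when invoked from any other context.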