author     Jakub Kicinski <kuba@kernel.org>    2022-11-02 08:18:26 -0700
committer  Jakub Kicinski <kuba@kernel.org>    2022-11-02 08:18:27 -0700
commit     b54a0d4094f5c94eda1cafad44323306afe8f807 (patch)
tree       8c5b09af5f3d7b96947956e11845edd86b93d4a7 /kernel/trace
parent     ef2dd61af7366e5a42e828fff04932e32eb0eacc (diff)
parent     3a07dcf8f57b9a90b1c07df3e9091fd04baa3036 (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
bpf-next 2022-11-02
We've added 70 non-merge commits during the last 14 day(s) which contain
a total of 96 files changed, 3203 insertions(+), 640 deletions(-).
The main changes are:
1) Make cgroup local storage available to non-cgroup-attached BPF programs
such as tc BPF ones, from Yonghong Song (a usage sketch follows this list).
2) Avoid unnecessary deadlock detection and failures with respect to BPF task
storage helpers, from Martin KaFai Lau.
3) Add LLVM disassembler as default library for dumping JITed code
in bpftool, from Quentin Monnet.
4) Various kprobe_multi_link fixes related to kernel modules,
from Jiri Olsa.
5) Optimize x86-64 JIT with emitting BMI2-based shift instructions,
from Jie Meng.
6) Improve BPF verifier's memory type compatibility for map key/value
arguments, from Dave Marchevsky.
7) Only create mmap-able data section maps in libbpf when data is exposed
via skeletons, from Andrii Nakryiko.
8) Add an autoattach option for bpftool to load all object assets,
from Wang Yufen.
9) Various memory handling fixes for libbpf and BPF selftests,
from Xu Kuohai.
10) Initial support for the BPF selftests' vmtest.sh on arm64,
from Manu Bretelle.
11) Improve libbpf's BTF handling to dedup identical structs,
from Alan Maguire.
12) Add BPF CI and denylist documentation for BPF selftests,
from Daniel Müller.
13) Check BPF cpumap max_entries before doing allocation work,
from Florian Lehner.
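
As a concrete illustration of item 1) above, here is a minimal BPF-side sketch of
the new cgroup local storage, using the helpers this merge wires into
bpf_tracing_func_proto() in the diff below (bpf_cgrp_storage_get /
bpf_cgrp_storage_delete). Map, program and section names are illustrative
assumptions; the selftests added by the series remain the authoritative usage.

/* Illustrative sketch only: map, program and section names are made up;
 * the map type and helpers (BPF_MAP_TYPE_CGRP_STORAGE, bpf_cgrp_storage_get)
 * are the ones introduced by this series.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} cgrp_events SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(count_events, struct pt_regs *regs, long id)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *val;

	/* Get (or create) the storage hanging off the current task's cgroup. */
	val = bpf_cgrp_storage_get(&cgrp_events, task->cgroups->dfl_cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}

User space can then read the per-cgroup value back through the map, as
exercised by the selftests added in this series.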
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (70 commits)
samples/bpf: Fix typo in README
bpf: Remove the obsolte u64_stats_fetch_*_irq() users.
bpf: check max_entries before allocating memory
bpf: Fix a typo in comment for DFS algorithm
bpftool: Fix spelling mistake "disasembler" -> "disassembler"
selftests/bpf: Fix bpftool synctypes checking failure
selftests/bpf: Panic on hard/soft lockup
docs/bpf: Add documentation for new cgroup local storage
selftests/bpf: Add test cgrp_local_storage to DENYLIST.s390x
selftests/bpf: Add selftests for new cgroup local storage
selftests/bpf: Fix test test_libbpf_str/bpf_map_type_str
bpftool: Support new cgroup local storage
libbpf: Support new cgroup local storage
bpf: Implement cgroup storage available to non-cgroup-attached bpf progs
bpf: Refactor some inode/task/sk storage functions for reuse
bpf: Make struct cgroup btf id global
selftests/bpf: Tracing prog can still do lookup under busy lock
selftests/bpf: Ensure no task storage failure for bpf_lsm.s prog due to deadlock detection
bpf: Add new bpf_task_storage_delete proto with no deadlock detection
bpf: bpf_task_storage_delete_recur does lookup first before the deadlock check
...
====================
Link: https://lore.kernel.org/r/20221102062120.5724-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/bpf_trace.c  107
-rw-r--r--  kernel/trace/ftrace.c      16
2 files changed, 115 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 1ed08967fb97..f2d8d070d024 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
 #include <linux/bpf_perf_event.h>
 #include <linux/btf.h>
 #include <linux/filter.h>
@@ -1456,6 +1457,10 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_current_cgroup_id_proto;
 	case BPF_FUNC_get_current_ancestor_cgroup_id:
 		return &bpf_get_current_ancestor_cgroup_id_proto;
+	case BPF_FUNC_cgrp_storage_get:
+		return &bpf_cgrp_storage_get_proto;
+	case BPF_FUNC_cgrp_storage_delete:
+		return &bpf_cgrp_storage_delete_proto;
 #endif
 	case BPF_FUNC_send_signal:
 		return &bpf_send_signal_proto;
@@ -1490,8 +1495,12 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_this_cpu_ptr:
 		return &bpf_this_cpu_ptr_proto;
 	case BPF_FUNC_task_storage_get:
+		if (bpf_prog_check_recur(prog))
+			return &bpf_task_storage_get_recur_proto;
 		return &bpf_task_storage_get_proto;
 	case BPF_FUNC_task_storage_delete:
+		if (bpf_prog_check_recur(prog))
+			return &bpf_task_storage_delete_recur_proto;
 		return &bpf_task_storage_delete_proto;
 	case BPF_FUNC_for_each_map_elem:
 		return &bpf_for_each_map_elem_proto;
@@ -2452,6 +2461,8 @@ struct bpf_kprobe_multi_link {
 	unsigned long *addrs;
 	u64 *cookies;
 	u32 cnt;
+	u32 mods_cnt;
+	struct module **mods;
 };
 
 struct bpf_kprobe_multi_run_ctx {
@@ -2507,6 +2518,14 @@ error:
 	return err;
 }
 
+static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
+{
+	u32 i;
+
+	for (i = 0; i < cnt; i++)
+		module_put(mods[i]);
+}
+
 static void free_user_syms(struct user_syms *us)
 {
 	kvfree(us->syms);
@@ -2519,6 +2538,7 @@ static void bpf_kprobe_multi_link_release(struct bpf_link *link)
 
 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
 	unregister_fprobe(&kmulti_link->fp);
+	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
 }
 
 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
@@ -2528,6 +2548,7 @@ static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
 	kvfree(kmulti_link->addrs);
 	kvfree(kmulti_link->cookies);
+	kfree(kmulti_link->mods);
 	kfree(kmulti_link);
 }
 
@@ -2550,7 +2571,7 @@ static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void
 	swap(*cookie_a, *cookie_b);
 }
 
-static int __bpf_kprobe_multi_cookie_cmp(const void *a, const void *b)
+static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
 {
 	const unsigned long *addr_a = a, *addr_b = b;
 
@@ -2561,7 +2582,7 @@
 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b,
 				       const void *priv)
 {
-	return __bpf_kprobe_multi_cookie_cmp(a, b);
+	return bpf_kprobe_multi_addrs_cmp(a, b);
 }
 
 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
@@ -2579,7 +2600,7 @@ static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
 		return 0;
 	entry_ip = run_ctx->entry_ip;
 	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
-		       __bpf_kprobe_multi_cookie_cmp);
+		       bpf_kprobe_multi_addrs_cmp);
 	if (!addr)
 		return 0;
 	cookie = link->cookies + (addr - link->addrs);
@@ -2663,6 +2684,71 @@ static void symbols_swap_r(void *a, void *b, int size, const void *priv)
 	}
 }
 
+struct module_addr_args {
+	unsigned long *addrs;
+	u32 addrs_cnt;
+	struct module **mods;
+	int mods_cnt;
+	int mods_cap;
+};
+
+static int module_callback(void *data, const char *name,
+			   struct module *mod, unsigned long addr)
+{
+	struct module_addr_args *args = data;
+	struct module **mods;
+
+	/* We iterate all modules symbols and for each we:
+	 * - search for it in provided addresses array
+	 * - if found we check if we already have the module pointer stored
+	 *   (we iterate modules sequentially, so we can check just the last
+	 *   module pointer)
+	 * - take module reference and store it
+	 */
+	if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
+		     bpf_kprobe_multi_addrs_cmp))
+		return 0;
+
+	if (args->mods && args->mods[args->mods_cnt - 1] == mod)
+		return 0;
+
+	if (args->mods_cnt == args->mods_cap) {
+		args->mods_cap = max(16, args->mods_cap * 3 / 2);
+		mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
+		if (!mods)
+			return -ENOMEM;
+		args->mods = mods;
+	}
+
+	if (!try_module_get(mod))
+		return -EINVAL;
+
+	args->mods[args->mods_cnt] = mod;
+	args->mods_cnt++;
+	return 0;
+}
+
+static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
+{
+	struct module_addr_args args = {
+		.addrs = addrs,
+		.addrs_cnt = addrs_cnt,
+	};
+	int err;
+
+	/* We return either err < 0 in case of error, ... */
+	err = module_kallsyms_on_each_symbol(module_callback, &args);
+	if (err) {
+		kprobe_multi_put_modules(args.mods, args.mods_cnt);
+		kfree(args.mods);
+		return err;
+	}
+
+	/* or number of modules found if everything is ok. */
+	*mods = args.mods;
+	return args.mods_cnt;
+}
+
 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
 	struct bpf_kprobe_multi_link *link = NULL;
@@ -2773,10 +2859,25 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 		       bpf_kprobe_multi_cookie_cmp,
 		       bpf_kprobe_multi_cookie_swap,
 		       link);
+	} else {
+		/*
+		 * We need to sort addrs array even if there are no cookies
+		 * provided, to allow bsearch in get_modules_for_addrs.
+		 */
+		sort(addrs, cnt, sizeof(*addrs),
+		     bpf_kprobe_multi_addrs_cmp, NULL);
+	}
+
+	err = get_modules_for_addrs(&link->mods, addrs, cnt);
+	if (err < 0) {
+		bpf_link_cleanup(&link_primer);
+		return err;
 	}
+	link->mods_cnt = err;
 
 	err = register_fprobe_ips(&link->fp, addrs, cnt);
 	if (err) {
+		kprobe_multi_put_modules(link->mods, link->mods_cnt);
 		bpf_link_cleanup(&link_primer);
 		return err;
 	}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fbf2543111c0..72de9009a6a0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -8267,6 +8267,10 @@ struct kallsyms_data {
 	size_t found;
 };
 
+/* This function gets called for all kernel and module symbols
+ * and returns 1 in case we resolved all the requested symbols,
+ * 0 otherwise.
+ */
 static int kallsyms_callback(void *data, const char *name,
 			     struct module *mod, unsigned long addr)
 {
@@ -8309,17 +8313,19 @@ static int kallsyms_callback(void *data, const char *name,
 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
 {
 	struct kallsyms_data args;
-	int err;
+	int found_all;
 
 	memset(addrs, 0, sizeof(*addrs) * cnt);
 	args.addrs = addrs;
 	args.syms = sorted_syms;
 	args.cnt = cnt;
 	args.found = 0;
-	err = kallsyms_on_each_symbol(kallsyms_callback, &args);
-	if (err < 0)
-		return err;
-	return args.found == args.cnt ? 0 : -ESRCH;
+
+	found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
+	if (found_all)
+		return 0;
+	found_all = module_kallsyms_on_each_symbol(kallsyms_callback, &args);
+	return found_all ? 0 : -ESRCH;
 }
 
 #ifdef CONFIG_SYSCTL
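
For context on the kprobe_multi changes above: the module reference handling
matters once a kprobe_multi link resolves symbols that live in kernel modules,
since the module must not be unloaded while the link is attached. Below is a
minimal user-space sketch of setting up such a link with libbpf; the object
file, program name and symbol list are assumptions for illustration, not part
of this merge.

/* Illustrative sketch: attaches an (assumed) kprobe.multi program to two
 * module-owned symbols. While the link exists, the kernel code above holds
 * references on the owning module(s) so they cannot be unloaded underneath
 * the attached program.
 */
#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	const char *syms[] = { "nf_conntrack_alloc", "nf_conntrack_free" };	/* example module symbols */
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = syms,
		.cnt = 2,
	);
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;

	obj = bpf_object__open_file("kprobe_multi.bpf.o", NULL);	/* hypothetical object */
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "trace_syms");	/* hypothetical program */
	if (!prog)
		return 1;

	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
	if (!link) {
		fprintf(stderr, "attach failed: %d\n", -errno);
		return 1;
	}

	getchar();	/* keep the link (and the module references) alive */
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return 0;
}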