author     Alexei Starovoitov <ast@kernel.org>    2020-08-06 16:39:14 -0700
committer  Alexei Starovoitov <ast@kernel.org>    2020-08-06 16:39:19 -0700
commit     0ac10dc1888cd1a8b994f32b51f0eaeba1e803ef (patch)
tree       89c462f3b9f1fa1ea8e5d22f67d2f21fbf2766e1 /kernel/bpf/bpf_iter.c
parent     6bcaf41f9613278cd5897fc80ab93033bda8efaa (diff)
parent     74fc097de327b37e8fe3ff580ce7ffaa7c1740dd (diff)
Merge branch 'bpf_iter-uapi-fix'
Yonghong Song says:

====================
Andrii raised a concern that the current uapi for the bpf iterator map element is a little restrictive and not suitable for future, potentially complex customization. This is a valid suggestion, considering people may indeed add more complex customization to the iterator, e.g., cgroup_id + user_id, etc., for task or task_file. Another example might be map_id plus additional control, so that the bpf iterator may bail out of a bucket earlier if that bucket has too many elements, which may hold the lock too long and impact other parts of the system.

Patch #1 modifies the uapi with kernel changes. Patch #2 adjusts the libbpf api accordingly.

Changelog:
v3 -> v4:
  . add a forward declaration of bpf_iter_link_info in tools/lib/bpf/bpf.h in case libbpf is built against an older uapi bpf.h.
  . target the patch set to "bpf" instead of "bpf-next".
v2 -> v3:
  . undo "not reject iter_info.map.map_fd == 0" from v1. In the future map_fd may become optional, so use map_fd == 0 to indicate that the map_fd is not set by user space.
  . add link_info_len to bpf_iter_attach_opts to ensure a correct link_info_len is always supplied by the user. Otherwise, libbpf may deduce an incorrect link_info_len if it uses a different uapi header than the user app.
v1 -> v2:
  . ensure link_create target_fd/flags == 0 since they are not used. (Andrii)
  . if either the iter_info ptr or iter_info_len is 0, but not both, return an error to user space. (Andrii)
  . do not reject iter_info.map.map_fd == 0; go ahead and use it to try to get a map reference, since the map_fd is required for the map_elem iterator.
  . use bpf_iter_link_info in bpf_iter_attach_opts instead of map_fd. This way, user space is responsible for setting up bpf_iter_link_info and libbpf just passes the data to the kernel, simplifying the libbpf design. (Andrii)
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
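As a concrete illustration of the resulting flow, below is a minimal user-space sketch of what patch #2 enables: the application fills union bpf_iter_link_info itself and hands it to libbpf through bpf_iter_attach_opts, and libbpf just forwards it to the kernel. The helper names used here (DECLARE_LIBBPF_OPTS, bpf_program__attach_iter) and the attach_map_elem_iter wrapper are assumptions based on the libbpf API of that era, not something defined by this merge.

/* Minimal user-space sketch (assumed libbpf API, not part of this diff). */
#include <string.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_map_elem_iter(struct bpf_program *prog, int map_fd)
{
	union bpf_iter_link_info linfo;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;		/* map_fd == 0 means "not set by user space" */

	/* user space sets up bpf_iter_link_info; libbpf only passes it through */
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);	/* always pass the matching length */

	return bpf_program__attach_iter(prog, &opts);
}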
Diffstat (limited to 'kernel/bpf/bpf_iter.c')
-rw-r--r--  kernel/bpf/bpf_iter.c  |  58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 363b9cafc2d8..b6715964b685 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -338,8 +338,8 @@ static void bpf_iter_link_release(struct bpf_link *link)
struct bpf_iter_link *iter_link =
container_of(link, struct bpf_iter_link, link);
- if (iter_link->aux.map)
- bpf_map_put_with_uref(iter_link->aux.map);
+ if (iter_link->tinfo->reg_info->detach_target)
+ iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
}
static void bpf_iter_link_dealloc(struct bpf_link *link)
@@ -390,15 +390,35 @@ bool bpf_link_is_iter(struct bpf_link *link)
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
+ union bpf_iter_link_info __user *ulinfo;
struct bpf_link_primer link_primer;
struct bpf_iter_target_info *tinfo;
- struct bpf_iter_aux_info aux = {};
+ union bpf_iter_link_info linfo;
struct bpf_iter_link *link;
- u32 prog_btf_id, target_fd;
+ u32 prog_btf_id, linfo_len;
bool existed = false;
- struct bpf_map *map;
int err;
+ if (attr->link_create.target_fd || attr->link_create.flags)
+ return -EINVAL;
+
+ memset(&linfo, 0, sizeof(union bpf_iter_link_info));
+
+ ulinfo = u64_to_user_ptr(attr->link_create.iter_info);
+ linfo_len = attr->link_create.iter_info_len;
+ if (!ulinfo ^ !linfo_len)
+ return -EINVAL;
+
+ if (ulinfo) {
+ err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
+ linfo_len);
+ if (err)
+ return err;
+ linfo_len = min_t(u32, linfo_len, sizeof(linfo));
+ if (copy_from_user(&linfo, ulinfo, linfo_len))
+ return -EFAULT;
+ }
+
prog_btf_id = prog->aux->attach_btf_id;
mutex_lock(&targets_mutex);
list_for_each_entry(tinfo, &targets, list) {
@@ -411,13 +431,6 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
if (!existed)
return -ENOENT;
- /* Make sure user supplied flags are target expected. */
- target_fd = attr->link_create.target_fd;
- if (attr->link_create.flags != tinfo->reg_info->req_linfo)
- return -EINVAL;
- if (!attr->link_create.flags && target_fd)
- return -EINVAL;
-
link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
if (!link)
return -ENOMEM;
@@ -431,28 +444,15 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
return err;
}
- if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) {
- map = bpf_map_get_with_uref(target_fd);
- if (IS_ERR(map)) {
- err = PTR_ERR(map);
- goto cleanup_link;
- }
-
- aux.map = map;
- err = tinfo->reg_info->check_target(prog, &aux);
+ if (tinfo->reg_info->attach_target) {
+ err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
if (err) {
- bpf_map_put_with_uref(map);
- goto cleanup_link;
+ bpf_link_cleanup(&link_primer);
+ return err;
}
-
- link->aux.map = map;
}
return bpf_link_settle(&link_primer);
-
-cleanup_link:
- bpf_link_cleanup(&link_primer);
- return err;
}
static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
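For context on the attach_target/detach_target hooks used above, a map-element-style target could plausibly implement them along the following lines; the function names and the elided per-target checks are illustrative assumptions, not part of this commit.

/* Hypothetical target-side callbacks for the new interface; names and
 * per-target validation are illustrative, not taken from this commit. */
static int example_iter_attach_map(struct bpf_prog *prog,
				   union bpf_iter_link_info *linfo,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;

	if (!linfo->map.map_fd)
		return -EBADF;		/* map_fd == 0: not set by user space */

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* target-specific checks (map type, value size, ...) would go here */

	aux->map = map;
	return 0;
}

static void example_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}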