From 0a7c2a84359612e54328aa52030eb202093da6e2 Mon Sep 17 00:00:00 2001 From: Tony Ambardar Date: Fri, 24 Jan 2025 22:52:36 -0800 Subject: libbpf: Fix accessing BTF.ext core_relo header Update btf_ext_parse_info() to ensure the core_relo header is present before reading its fields. This avoids a potential buffer read overflow reported by the OSS Fuzz project. Fixes: cf579164e9ea ("libbpf: Support BTF.ext loading and output in either endianness") Signed-off-by: Tony Ambardar Signed-off-by: Andrii Nakryiko Link: https://issues.oss-fuzz.com/issues/388905046 Link: https://lore.kernel.org/bpf/20250125065236.2603346-1-itugrok@yahoo.com Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/btf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 48c66f3a9200..560b519f820e 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -3015,8 +3015,6 @@ static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native) .desc = "line_info", }; struct btf_ext_sec_info_param core_relo = { - .off = btf_ext->hdr->core_relo_off, - .len = btf_ext->hdr->core_relo_len, .min_rec_size = sizeof(struct bpf_core_relo), .ext_info = &btf_ext->core_relo_info, .desc = "core_relo", @@ -3034,6 +3032,8 @@ static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native) if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) return 0; /* skip core relos parsing */ + core_relo.off = btf_ext->hdr->core_relo_off; + core_relo.len = btf_ext->hdr->core_relo_len; err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native); if (err) return err; -- cgit v1.2.3 From 51d1b1d42841c557dabde5b140ae20774591e6dc Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Thu, 30 Jan 2025 12:12:34 -0800 Subject: libbpf: Introduce kflag for type_tags and decl_tags in BTF Add the following functions to the libbpf API: * btf__add_type_attr() * btf__add_decl_attr() These functions allow adding type tags and decl tags with info->kflag set to 1 to BTF. The kflag indicates that the tag directly encodes an __attribute__ and not a normal tag. See Documentation/bpf/btf.rst changes in the subsequent patch for details on the semantics.
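A minimal usage sketch of the new API (the attribute strings and type layout below are made up for illustration):

    struct btf *btf = btf__new_empty();
    int int_id, tag_id, ptr_id, s_id;

    int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
    /* with kflag=1, btf_dump emits the raw attribute, e.g.
     * int __attribute__((address_space(1))) *
     */
    tag_id = btf__add_type_attr(btf, "address_space(1)", int_id);
    ptr_id = btf__add_ptr(btf, tag_id);
    /* component_idx == -1 tags struct s itself, not a member */
    s_id = btf__add_struct(btf, "s", 4);
    btf__add_decl_attr(btf, "packed", s_id, -1);
    btf__free(btf);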
Suggested-by: Andrii Nakryiko Signed-off-by: Ihor Solodrai Signed-off-by: Andrii Nakryiko Reviewed-by: Alan Maguire Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250130201239.1429648-2-ihor.solodrai@linux.dev --- tools/lib/bpf/btf.c | 86 +++++++++++++++++++++++++++++++++++------------- tools/lib/bpf/btf.h | 3 ++ tools/lib/bpf/libbpf.map | 2 ++ 3 files changed, 68 insertions(+), 23 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 560b519f820e..eea99c766a20 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -2090,7 +2090,7 @@ static int validate_type_id(int id) } /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */ -static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id) +static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id, int kflag) { struct btf_type *t; int sz, name_off = 0; @@ -2113,7 +2113,7 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref } t->name_off = name_off; - t->info = btf_type_info(kind, 0, 0); + t->info = btf_type_info(kind, 0, kflag); t->type = ref_type_id; return btf_commit_type(btf, sz); @@ -2128,7 +2128,7 @@ static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref */ int btf__add_ptr(struct btf *btf, int ref_type_id) { - return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id); + return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0); } /* @@ -2506,7 +2506,7 @@ int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind) struct btf_type *t; int id; - id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0); + id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0); if (id <= 0) return id; t = btf_type_by_id(btf, id); @@ -2536,7 +2536,7 @@ int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id) if (!name || !name[0]) return libbpf_err(-EINVAL); - return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id); + return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id, 0); } /* @@ -2548,7 +2548,7 @@ int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id) */ int btf__add_volatile(struct btf *btf, int ref_type_id) { - return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id); + return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0); } /* @@ -2560,7 +2560,7 @@ int btf__add_volatile(struct btf *btf, int ref_type_id) */ int btf__add_const(struct btf *btf, int ref_type_id) { - return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id); + return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0); } /* @@ -2572,7 +2572,7 @@ int btf__add_const(struct btf *btf, int ref_type_id) */ int btf__add_restrict(struct btf *btf, int ref_type_id) { - return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id); + return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0); } /* @@ -2588,7 +2588,24 @@ int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id) if (!value || !value[0]) return libbpf_err(-EINVAL); - return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id); + return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 0); +} + +/* + * Append new BTF_KIND_TYPE_TAG type with: + * - *value*, non-empty/non-NULL tag value; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Set info->kflag to 1, indicating this tag is an __attribute__ + * Returns: + * - >0, type ID of newly added BTF type; 
+ * - <0, on error. + */ +int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id) +{ + if (!value || !value[0]) + return libbpf_err(-EINVAL); + + return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id, 1); } /* @@ -2610,7 +2627,7 @@ int btf__add_func(struct btf *btf, const char *name, linkage != BTF_FUNC_EXTERN) return libbpf_err(-EINVAL); - id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id); + id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id, 0); if (id > 0) { struct btf_type *t = btf_type_by_id(btf, id); @@ -2845,18 +2862,8 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __ return 0; } -/* - * Append new BTF_KIND_DECL_TAG type with: - * - *value* - non-empty/non-NULL string; - * - *ref_type_id* - referenced type ID, it might not exist yet; - * - *component_idx* - -1 for tagging reference type, otherwise struct/union - * member or function argument index; - * Returns: - * - >0, type ID of newly added BTF type; - * - <0, on error. - */ -int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, - int component_idx) +static int btf_add_decl_tag(struct btf *btf, const char *value, int ref_type_id, + int component_idx, int kflag) { struct btf_type *t; int sz, value_off; @@ -2880,13 +2887,46 @@ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, return value_off; t->name_off = value_off; - t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false); + t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, kflag); t->type = ref_type_id; btf_decl_tag(t)->component_idx = component_idx; return btf_commit_type(btf, sz); } +/* + * Append new BTF_KIND_DECL_TAG type with: + * - *value* - non-empty/non-NULL string; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * - *component_idx* - -1 for tagging reference type, otherwise struct/union + * member or function argument index; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, + int component_idx) +{ + return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 0); +} + +/* + * Append new BTF_KIND_DECL_TAG type with: + * - *value* - non-empty/non-NULL string; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * - *component_idx* - -1 for tagging reference type, otherwise struct/union + * member or function argument index; + * Set info->kflag to 1, indicating this tag is an __attribute__ + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
+ */ +int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id, + int component_idx) +{ + return btf_add_decl_tag(btf, value, ref_type_id, component_idx, 1); +} + struct btf_ext_sec_info_param { __u32 off; __u32 len; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 47ee8f6ac489..4392451d634b 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -227,6 +227,7 @@ LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id); +LIBBPF_API int btf__add_type_attr(struct btf *btf, const char *value, int ref_type_id); /* func and func_proto construction APIs */ LIBBPF_API int btf__add_func(struct btf *btf, const char *name, @@ -243,6 +244,8 @@ LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id, /* tag construction API */ LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, int component_idx); +LIBBPF_API int btf__add_decl_attr(struct btf *btf, const char *value, int ref_type_id, + int component_idx); struct btf_dedup_opts { size_t sz; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index a8b2936a1646..b5a838de6f47 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -436,4 +436,6 @@ LIBBPF_1.6.0 { bpf_linker__add_buf; bpf_linker__add_fd; bpf_linker__new_fd; + btf__add_decl_attr; + btf__add_type_attr; } LIBBPF_1.5.0; -- cgit v1.2.3 From 2019c58318b886bb1df523b7a063164943b87785 Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Thu, 30 Jan 2025 12:12:36 -0800 Subject: libbpf: Check the kflag of type tags in btf_dump If the kflag is set for a BTF type tag, then the tag represents an arbitrary __attribute__. Change btf_dump accordingly. Signed-off-by: Ihor Solodrai Signed-off-by: Andrii Nakryiko Reviewed-by: Alan Maguire Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250130201239.1429648-4-ihor.solodrai@linux.dev --- tools/lib/bpf/btf_dump.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index a3fc6908f6c9..460c3e57fadb 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1494,7 +1494,10 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, case BTF_KIND_TYPE_TAG: btf_dump_emit_mods(d, decls); name = btf_name_of(d, t->name_off); - btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name); + if (btf_kflag(t)) + btf_dump_printf(d, " __attribute__((%s))", name); + else + btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name); break; case BTF_KIND_ARRAY: { const struct btf_array *a = btf_array(t); -- cgit v1.2.3 From 06096d19ee3897a7e70922580159607fe315da7a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Feb 2025 17:48:08 -0800 Subject: libbpf: fix LDX/STX/ST CO-RE relocation size adjustment logic Libbpf has a somewhat obscure feature of automatically adjusting the "size" of LDX/STX/ST instructions (memory load and store instructions), based on the originally recorded access size (u8, u16, u32, or u64) and the actual size of the field on the target kernel.
This is meant to facilitate using BPF CO-RE on 32-bit architectures (pointers are always 64-bit in BPF, but the host kernel's BTF will have it as a 32-bit type), as well as generally supporting safe type changes (unsigned integer type changes can be transparently "relocated"). One issue that surfaced only now, 5 years after this logic was implemented, is how this all works when dealing with fields that are arrays. This isn't all that easy and straightforward to hit (see selftests that reproduce this condition), but one of the sched_ext BPF programs did hit it with an innocent-looking loop. Long story short, libbpf used to calculate the entire array size, instead of making sure to only calculate the array's element size. But it's the element that is loaded by LDX/STX/ST instructions (1, 2, 4, or 8 bytes), so that's what libbpf should check. This patch adjusts the logic for arrays and fixes the issue. Reported-by: Emil Tsalapatis Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/r/20250207014809.1573841-1-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/relo_core.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index 7632e9d41827..2b83c98a1137 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -683,7 +683,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, { const struct bpf_core_accessor *acc; const struct btf_type *t; - __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id; + __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id, elem_id; const struct btf_member *m; const struct btf_type *mt; bool bitfield; @@ -706,8 +706,14 @@ static int bpf_core_calc_field_relo(const char *prog_name, if (!acc->name) { if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) { *val = spec->bit_offset / 8; - /* remember field size for load/store mem size */ - sz = btf__resolve_size(spec->btf, acc->type_id); + /* remember field size for load/store mem size; + * note, for arrays we care about individual element + * sizes, not the overall array size + */ + t = skip_mods_and_typedefs(spec->btf, acc->type_id, &elem_id); + while (btf_is_array(t)) + t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id); + sz = btf__resolve_size(spec->btf, elem_id); if (sz < 0) return -EINVAL; *field_sz = sz; @@ -767,7 +773,17 @@ static int bpf_core_calc_field_relo(const char *prog_name, case BPF_CORE_FIELD_BYTE_OFFSET: *val = byte_off; if (!bitfield) { - *field_sz = byte_sz; + /* remember field size for load/store mem size; + * note, for arrays we care about individual element + * sizes, not the overall array size + */ + t = skip_mods_and_typedefs(spec->btf, field_type_id, &elem_id); + while (btf_is_array(t)) + t = skip_mods_and_typedefs(spec->btf, btf_array(t)->type, &elem_id); + sz = btf__resolve_size(spec->btf, elem_id); + if (sz < 0) + return -EINVAL; + *field_sz = sz; *type_id = field_type_id; } break; -- cgit v1.2.3 From e8af068239ca735a5b915e454f6298988d833755 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Wed, 19 Feb 2025 23:37:11 +0800 Subject: libbpf: Wrap libbpf API direct err with libbpf_err Just wrap the direct err with libbpf_err to keep consistency with other APIs.
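For context, libbpf_err() is roughly the following (a simplified sketch of the internal helper), so wrapped returns also record the error code in errno:

    static inline int libbpf_err(int ret)
    {
        /* on error, mirror the error code into errno */
        if (ret < 0)
            errno = -ret;
        return ret;
    }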
Signed-off-by: Tao Chen Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20250219153711.29651-1-chen.dylane@linux.dev --- tools/lib/bpf/libbpf.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 194809da5172..6df258912e1e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -9145,12 +9145,12 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) struct bpf_gen *gen; if (!opts) - return -EFAULT; + return libbpf_err(-EFAULT); if (!OPTS_VALID(opts, gen_loader_opts)) - return -EINVAL; + return libbpf_err(-EINVAL); gen = calloc(sizeof(*gen), 1); if (!gen) - return -ENOMEM; + return libbpf_err(-ENOMEM); gen->opts = opts; gen->swapped_endian = !is_native_endianness(obj); obj->gen_loader = gen; @@ -9262,13 +9262,13 @@ int bpf_program__set_insns(struct bpf_program *prog, struct bpf_insn *insns; if (prog->obj->loaded) - return -EBUSY; + return libbpf_err(-EBUSY); insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); /* NULL is a valid return from reallocarray if the new count is zero */ if (!insns && new_insn_cnt) { pr_warn("prog '%s': failed to realloc prog code\n", prog->name); - return -ENOMEM; + return libbpf_err(-ENOMEM); } memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns)); @@ -9379,11 +9379,11 @@ const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_siz int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) { if (log_size && !log_buf) - return -EINVAL; + return libbpf_err(-EINVAL); if (prog->log_size > UINT_MAX) - return -EINVAL; + return libbpf_err(-EINVAL); if (prog->obj->loaded) - return -EBUSY; + return libbpf_err(-EBUSY); prog->log_buf = log_buf; prog->log_size = log_size; @@ -10307,7 +10307,7 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size) int err; if (map->def.type != BPF_MAP_TYPE_ARRAY) - return -EOPNOTSUPP; + return libbpf_err(-EOPNOTSUPP); mmap_old_sz = bpf_map_mmap_sz(map); mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries); @@ -10315,7 +10315,7 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size) if (err) { pr_warn("map '%s': failed to resize memory-mapped region: %s\n", bpf_map__name(map), errstr(err)); - return err; + return libbpf_err(err); } err = map_btf_datasec_resize(map, size); if (err && err != -ENOENT) { @@ -13070,17 +13070,17 @@ int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map) int err; if (!bpf_map__is_struct_ops(map)) - return -EINVAL; + return libbpf_err(-EINVAL); if (map->fd < 0) { pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name); - return -EINVAL; + return libbpf_err(-EINVAL); } st_ops_link = container_of(link, struct bpf_link_struct_ops, link); /* Ensure the type of a link is correct */ if (st_ops_link->map_fd < 0) - return -EINVAL; + return libbpf_err(-EINVAL); err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0); /* It can be EBUSY if the map has been used to create or -- cgit v1.2.3 From e0525cd72b5979d8089fe524a071ea93fd011dc9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 19 Feb 2025 16:28:21 -0800 Subject: libbpf: Fix hypothetical STT_SECTION extern NULL deref case Fix theoretical NULL dereference in linker when resolving *extern* STT_SECTION symbol against not-yet-existing ELF section. 
Not sure if it's possible in practice for valid ELF object files (this would require embedded assembly manipulations, at which point BTF will be missing), but fix the s/dst_sym/dst_sec/ typo guarding this condition anyway. Fixes: faf6ed321cf6 ("libbpf: Add BPF static linker APIs") Fixes: a46349227cd8 ("libbpf: Add linker extern resolution support for functions and global variables") Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20250220002821.834400-1-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/linker.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index b52f71c59616..800e0ef09c37 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -2163,7 +2163,7 @@ add_sym: obj->sym_map[src_sym_idx] = dst_sym_idx; - if (sym_type == STT_SECTION && dst_sym) { + if (sym_type == STT_SECTION && dst_sec) { dst_sec->sec_sym_idx = dst_sym_idx; dst_sym->st_value = 0; } -- cgit v1.2.3 From 236d3910117e9f97ebf75e511d8bcc950f1a4e5f Mon Sep 17 00:00:00 2001 From: Nandakumar Edamana Date: Sat, 22 Feb 2025 02:31:11 +0530 Subject: libbpf: Fix out-of-bound read In `set_kcfg_value_str`, an untrusted string is accessed with the assumption that it will be at least two characters long due to the presence of checks for opening and closing quotes. But the check for the closing quote (value[len - 1] != '"') misses the fact that it could be checking the opening quote itself in case of an invalid input that consists of just the opening quote. This commit adds an explicit check to make sure the string is at least two characters long. Signed-off-by: Nandakumar Edamana Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250221210110.3182084-1-nandakumar@nandakumar.co.in --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6df258912e1e..899e98225f3b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2106,7 +2106,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, } len = strlen(value); - if (value[len - 1] != '"') { + if (len < 2 || value[len - 1] != '"') { pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", ext->name, value); return -EINVAL; -- cgit v1.2.3 From b62dff14402a80962fd83a40f73f230fff78b18f Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Mon, 24 Feb 2025 15:57:55 -0800 Subject: libbpf: Implement bpf_usdt_arg_size BPF function Information about USDT argument size is implicitly stored in __bpf_usdt_arg_spec, but currently it's not accessible to BPF programs that use USDT. Implement bpf_usdt_arg_size() that returns the size of a USDT argument in bytes.
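A minimal BPF-side sketch of the new helper combined with the existing bpf_usdt_arg() (program name and manual attachment are assumed, as are the usual vmlinux.h/bpf_helpers.h/usdt.bpf.h includes):

    SEC("usdt")
    int handle_usdt(struct pt_regs *ctx)
    {
        long val;
        int sz;

        sz = bpf_usdt_arg_size(ctx, 0);
        if (sz < 0)
            return 0;
        if (bpf_usdt_arg(ctx, 0, &val))
            return 0;
        /* val holds argument #0; sz is its size in bytes */
        return 0;
    }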
v1->v2: * do not add __bpf_usdt_arg_spec() helper v1: https://lore.kernel.org/bpf/20250220215904.3362709-1-ihor.solodrai@linux.dev/ Suggested-by: Andrii Nakryiko Signed-off-by: Ihor Solodrai Signed-off-by: Andrii Nakryiko Reviewed-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20250224235756.2612606-1-ihor.solodrai@linux.dev --- tools/lib/bpf/usdt.bpf.h | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h index b811f754939f..2a7865c8e3fe 100644 --- a/tools/lib/bpf/usdt.bpf.h +++ b/tools/lib/bpf/usdt.bpf.h @@ -108,6 +108,38 @@ int bpf_usdt_arg_cnt(struct pt_regs *ctx) return spec->arg_cnt; } +/* Returns the size in bytes of the #*arg_num* (zero-indexed) USDT argument. + * Returns negative error if argument is not found or arg_num is invalid. + */ +static __always_inline +int bpf_usdt_arg_size(struct pt_regs *ctx, __u64 arg_num) +{ + struct __bpf_usdt_arg_spec *arg_spec; + struct __bpf_usdt_spec *spec; + int spec_id; + + spec_id = __bpf_usdt_spec_id(ctx); + if (spec_id < 0) + return -ESRCH; + + spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); + if (!spec) + return -ESRCH; + + if (arg_num >= BPF_USDT_MAX_ARG_CNT) + return -ENOENT; + barrier_var(arg_num); + if (arg_num >= spec->arg_cnt) + return -ENOENT; + + arg_spec = &spec->args[arg_num]; + + /* arg_spec->arg_bitshift = 64 - arg_sz * 8 + * so: arg_sz = (64 - arg_spec->arg_bitshift) / 8 + */ + return (unsigned int)(64 - arg_spec->arg_bitshift) / 8; +} + /* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res. * Returns 0 on success; negative error, otherwise. * On error *res is guaranteed to be set to zero. -- cgit v1.2.3 From 6ef78c41911d34f02e26529a3ce3183589db4b82 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Mon, 3 Mar 2025 13:57:49 +0000 Subject: libbpf: Use map_is_created helper in map setters Refactoring: use the map_is_created helper in map setters that need to check the state of the map. This helps to reduce the number of places that depend explicitly on the loaded flag, simplifying refactoring in the next patch of this set.
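For reference, the pattern these setters enforce, sketched with made-up object and map names (error handling elided):

    struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
    struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
    int err;

    err = bpf_map__set_max_entries(map, 1024); /* OK: map not created yet */
    err = bpf_object__load(obj);
    err = bpf_map__set_max_entries(map, 2048); /* -EBUSY: map already created */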
Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250303135752.158343-2-mykyta.yatsenko5@gmail.com Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 899e98225f3b..4895c7ae6422 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4845,6 +4845,11 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) return 0; } +static bool map_is_created(const struct bpf_map *map) +{ + return map->obj->loaded || map->reused; +} + bool bpf_map__autocreate(const struct bpf_map *map) { return map->autocreate; @@ -4852,7 +4857,7 @@ bool bpf_map__autocreate(const struct bpf_map *map) int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate) { - if (map->obj->loaded) + if (map_is_created(map)) return libbpf_err(-EBUSY); map->autocreate = autocreate; @@ -4946,7 +4951,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map) int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) { - if (map->obj->loaded) + if (map_is_created(map)) return libbpf_err(-EBUSY); map->def.max_entries = max_entries; @@ -5191,11 +5196,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) static void bpf_map__destroy(struct bpf_map *map); -static bool map_is_created(const struct bpf_map *map) -{ - return map->obj->loaded || map->reused; -} - static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) { LIBBPF_OPTS(bpf_map_create_opts, create_attr); @@ -10299,7 +10299,7 @@ static int map_btf_datasec_resize(struct bpf_map *map, __u32 size) int bpf_map__set_value_size(struct bpf_map *map, __u32 size) { - if (map->obj->loaded || map->reused) + if (map_is_created(map)) return libbpf_err(-EBUSY); if (map->mmaped) { @@ -10345,7 +10345,7 @@ int bpf_map__set_initial_value(struct bpf_map *map, { size_t actual_sz; - if (map->obj->loaded || map->reused) + if (map_is_created(map)) return libbpf_err(-EBUSY); if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG) -- cgit v1.2.3 From 9a9e347835d007f279274d175b9fcd9b47d9ee50 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Mon, 3 Mar 2025 13:57:50 +0000 Subject: libbpf: Introduce more granular state for bpf_object We are going to split bpf_object loading into 2 stages: preparation and loading. This will increase flexibility when working with bpf_object and unlock some optimizations and use cases. This patch replaces a boolean flag (loaded) with a more finely-grained state for bpf_object.
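The states are ordered, so the old boolean checks become range comparisons; an annotated sketch (the prepare step itself lands in a later patch of this set):

    enum bpf_object_state {
        OBJ_OPEN,     /* after bpf_object__open*() */
        OBJ_PREPARED, /* reserved for the upcoming prepare step */
        OBJ_LOADED,   /* after bpf_object__load() */
    };

    /* e.g. the old "if (map->obj->loaded || map->reused)" becomes: */
    if (map->obj->state >= OBJ_PREPARED || map->reused)
        ...;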
Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250303135752.158343-3-mykyta.yatsenko5@gmail.com Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.c | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4895c7ae6422..7210278ecdcf 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -670,11 +670,18 @@ struct elf_state { struct usdt_manager; +enum bpf_object_state { + OBJ_OPEN, + OBJ_PREPARED, + OBJ_LOADED, +}; + struct bpf_object { char name[BPF_OBJ_NAME_LEN]; char license[64]; __u32 kern_version; + enum bpf_object_state state; struct bpf_program *programs; size_t nr_programs; struct bpf_map *maps; @@ -686,7 +693,6 @@ struct bpf_object { int nr_extern; int kconfig_map_idx; - bool loaded; bool has_subcalls; bool has_rodata; @@ -1511,7 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path, obj->kconfig_map_idx = -1; obj->kern_version = get_kernel_version(); - obj->loaded = false; + obj->state = OBJ_OPEN; return obj; } @@ -4847,7 +4853,7 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) static bool map_is_created(const struct bpf_map *map) { - return map->obj->loaded || map->reused; + return map->obj->state >= OBJ_PREPARED || map->reused; } bool bpf_map__autocreate(const struct bpf_map *map) @@ -8550,7 +8556,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch if (!obj) return libbpf_err(-EINVAL); - if (obj->loaded) { + if (obj->state >= OBJ_LOADED) { pr_warn("object '%s': load can't be attempted twice\n", obj->name); return libbpf_err(-EINVAL); } @@ -8602,8 +8608,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch btf__free(obj->btf_vmlinux); obj->btf_vmlinux = NULL; - obj->loaded = true; /* doesn't matter if successfully or not */ - + obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */ if (err) goto out; @@ -8866,7 +8871,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) if (!obj) return libbpf_err(-ENOENT); - if (!obj->loaded) { + if (obj->state < OBJ_PREPARED) { pr_warn("object not yet loaded; load it first\n"); return libbpf_err(-ENOENT); } @@ -8945,7 +8950,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path) if (!obj) return libbpf_err(-ENOENT); - if (!obj->loaded) { + if (obj->state < OBJ_LOADED) { pr_warn("object not yet loaded; load it first\n"); return libbpf_err(-ENOENT); } @@ -9132,7 +9137,7 @@ int bpf_object__btf_fd(const struct bpf_object *obj) int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) { - if (obj->loaded) + if (obj->state >= OBJ_LOADED) return libbpf_err(-EINVAL); obj->kern_version = kern_version; @@ -9229,7 +9234,7 @@ bool bpf_program__autoload(const struct bpf_program *prog) int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) { - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EINVAL); prog->autoload = autoload; @@ -9261,7 +9266,7 @@ int bpf_program__set_insns(struct bpf_program *prog, { struct bpf_insn *insns; - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EBUSY); insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); @@ -9304,7 +9309,7 @@ static int last_custom_sec_def_handler_id; int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) { - if 
(prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EBUSY); /* if type is not changed, do nothing */ @@ -9335,7 +9340,7 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program int bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type) { - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EBUSY); prog->expected_attach_type = type; @@ -9349,7 +9354,7 @@ __u32 bpf_program__flags(const struct bpf_program *prog) int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) { - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EBUSY); prog->prog_flags = flags; @@ -9363,7 +9368,7 @@ __u32 bpf_program__log_level(const struct bpf_program *prog) int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) { - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EBUSY); prog->log_level = log_level; @@ -9382,7 +9387,7 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log return libbpf_err(-EINVAL); if (prog->log_size > UINT_MAX) return libbpf_err(-EINVAL); - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EBUSY); prog->log_buf = log_buf; @@ -13666,7 +13671,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog, if (!prog || attach_prog_fd < 0) return libbpf_err(-EINVAL); - if (prog->obj->loaded) + if (prog->obj->state >= OBJ_LOADED) return libbpf_err(-EINVAL); if (attach_prog_fd && !attach_func_name) { -- cgit v1.2.3 From 1315c28ed8095108f90dfe882e9e1e68a101fc75 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Mon, 3 Mar 2025 13:57:51 +0000 Subject: libbpf: Split bpf object load into prepare/load Introduce the bpf_object__prepare API: an additional intermediate preparation step that performs ELF processing, relocations, prepares the final state of BPF program instructions (accessible with bpf_program__insns()), creates and (potentially) pins maps, and stops short of loading BPF programs. We anticipate a few use cases for this API, such as: * Use prepare to initialize bpf_token, without loading freplace programs, unlocking the possibility to look up BTF of other programs. * Execute prepare to obtain finalized BPF program instructions without loading programs, enabling tools like veristat to process one program at a time, without incurring the cost of ELF parsing and processing.
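A sketch of the veristat-style flow this unlocks (error handling elided; the object file name is hypothetical):

    struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
    struct bpf_program *prog;

    bpf_object__prepare(obj); /* ELF processing, relocations, map creation */
    bpf_object__for_each_program(prog, obj) {
        const struct bpf_insn *insns = bpf_program__insns(prog);
        size_t cnt = bpf_program__insn_cnt(prog);

        /* inspect finalized instructions without loading any program */
    }
    bpf_object__load(obj); /* optionally finish loading later */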
Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250303135752.158343-4-mykyta.yatsenko5@gmail.com Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.c | 146 +++++++++++++++++++++++++++++++++-------------- tools/lib/bpf/libbpf.h | 13 +++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 117 insertions(+), 43 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7210278ecdcf..8e32286854ef 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7901,13 +7901,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) size_t i; int err; - for (i = 0; i < obj->nr_programs; i++) { - prog = &obj->programs[i]; - err = bpf_object__sanitize_prog(obj, prog); - if (err) - return err; - } - for (i = 0; i < obj->nr_programs; i++) { prog = &obj->programs[i]; if (prog_is_subprog(obj, prog)) @@ -7933,6 +7926,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) return 0; } +static int bpf_object_prepare_progs(struct bpf_object *obj) +{ + struct bpf_program *prog; + size_t i; + int err; + + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + err = bpf_object__sanitize_prog(obj, prog); + if (err) + return err; + } + return 0; +} + static const struct bpf_sec_def *find_sec_def(const char *sec_name); static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) @@ -8549,9 +8557,72 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj) return 0; } +static void bpf_object_unpin(struct bpf_object *obj) +{ + int i; + + /* unpin any maps that were auto-pinned during load */ + for (i = 0; i < obj->nr_maps; i++) + if (obj->maps[i].pinned && !obj->maps[i].reused) + bpf_map__unpin(&obj->maps[i], NULL); +} + +static void bpf_object_post_load_cleanup(struct bpf_object *obj) +{ + int i; + + /* clean up fd_array */ + zfree(&obj->fd_array); + + /* clean up module BTFs */ + for (i = 0; i < obj->btf_module_cnt; i++) { + close(obj->btf_modules[i].fd); + btf__free(obj->btf_modules[i].btf); + free(obj->btf_modules[i].name); + } + obj->btf_module_cnt = 0; + zfree(&obj->btf_modules); + + /* clean up vmlinux BTF */ + btf__free(obj->btf_vmlinux); + obj->btf_vmlinux = NULL; +} + +static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path) +{ + int err; + + if (obj->state >= OBJ_PREPARED) { + pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name); + return -EINVAL; + } + + err = bpf_object_prepare_token(obj); + err = err ? : bpf_object__probe_loading(obj); + err = err ? : bpf_object__load_vmlinux_btf(obj, false); + err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); + err = err ? : bpf_object__sanitize_maps(obj); + err = err ? : bpf_object__init_kern_struct_ops_maps(obj); + err = err ? : bpf_object_adjust_struct_ops_autoload(obj); + err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); + err = err ? : bpf_object__sanitize_and_load_btf(obj); + err = err ? : bpf_object__create_maps(obj); + err = err ? 
: bpf_object_prepare_progs(obj); + + if (err) { + bpf_object_unpin(obj); + bpf_object_unload(obj); + obj->state = OBJ_LOADED; + return err; + } + + obj->state = OBJ_PREPARED; + return 0; +} + static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) { - int err, i; + int err; if (!obj) return libbpf_err(-EINVAL); @@ -8571,17 +8642,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch return libbpf_err(-LIBBPF_ERRNO__ENDIAN); } - err = bpf_object_prepare_token(obj); - err = err ? : bpf_object__probe_loading(obj); - err = err ? : bpf_object__load_vmlinux_btf(obj, false); - err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); - err = err ? : bpf_object__sanitize_maps(obj); - err = err ? : bpf_object__init_kern_struct_ops_maps(obj); - err = err ? : bpf_object_adjust_struct_ops_autoload(obj); - err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); - err = err ? : bpf_object__sanitize_and_load_btf(obj); - err = err ? : bpf_object__create_maps(obj); - err = err ? : bpf_object__load_progs(obj, extra_log_level); + if (obj->state < OBJ_PREPARED) { + err = bpf_object_prepare(obj, target_btf_path); + if (err) + return libbpf_err(err); + } + err = bpf_object__load_progs(obj, extra_log_level); err = err ? : bpf_object_init_prog_arrays(obj); err = err ? : bpf_object_prepare_struct_ops(obj); @@ -8593,35 +8659,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); } - /* clean up fd_array */ - zfree(&obj->fd_array); + bpf_object_post_load_cleanup(obj); + obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */ - /* clean up module BTFs */ - for (i = 0; i < obj->btf_module_cnt; i++) { - close(obj->btf_modules[i].fd); - btf__free(obj->btf_modules[i].btf); - free(obj->btf_modules[i].name); + if (err) { + bpf_object_unpin(obj); + bpf_object_unload(obj); + pr_warn("failed to load object '%s'\n", obj->path); + return libbpf_err(err); } - free(obj->btf_modules); - - /* clean up vmlinux BTF */ - btf__free(obj->btf_vmlinux); - obj->btf_vmlinux = NULL; - - obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */ - if (err) - goto out; return 0; -out: - /* unpin any maps that were auto-pinned during load */ - for (i = 0; i < obj->nr_maps; i++) - if (obj->maps[i].pinned && !obj->maps[i].reused) - bpf_map__unpin(&obj->maps[i], NULL); +} - bpf_object_unload(obj); - pr_warn("failed to load object '%s'\n", obj->path); - return libbpf_err(err); +int bpf_object__prepare(struct bpf_object *obj) +{ + return libbpf_err(bpf_object_prepare(obj, NULL)); } int bpf_object__load(struct bpf_object *obj) @@ -9069,6 +9122,13 @@ void bpf_object__close(struct bpf_object *obj) if (IS_ERR_OR_NULL(obj)) return; + /* + * if user called bpf_object__prepare() without ever getting to + * bpf_object__load(), we need to clean up stuff that is normally + * cleaned up at the end of loading step + */ + bpf_object_post_load_cleanup(obj); + usdt_manager_free(obj->usdt_man); obj->usdt_man = NULL; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 3020ee45303a..e0605403f977 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -241,6 +241,19 @@ LIBBPF_API struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts); +/** + * @brief **bpf_object__prepare()** prepares BPF object for loading: + * performs ELF processing, 
relocations, prepares final state of BPF program + * instructions (accessible with bpf_program__insns()), creates and + * (potentially) pins maps. Leaves BPF object in the state ready for program + * loading. + * @param obj Pointer to a valid BPF object instance returned by + * **bpf_object__open*()** API + * @return 0, on success; negative error code, otherwise, error code is + * stored in errno + */ +int bpf_object__prepare(struct bpf_object *obj); + /** * @brief **bpf_object__load()** loads BPF object into kernel. * @param obj Pointer to a valid BPF object instance returned by diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index b5a838de6f47..d8b71f22f197 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -436,6 +436,7 @@ LIBBPF_1.6.0 { bpf_linker__add_buf; bpf_linker__add_fd; bpf_linker__new_fd; + bpf_object__prepare; btf__add_decl_attr; btf__add_type_attr; } LIBBPF_1.5.0; -- cgit v1.2.3 From 974ef9f0d23edc1a802691c585b84514b414a96d Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Mon, 17 Mar 2025 17:40:38 +0000 Subject: libbpf: Pass BPF token from find_prog_btf_id to BPF_BTF_GET_FD_BY_ID Pass the BPF token from bpf_program__set_attach_target to the BPF_BTF_GET_FD_BY_ID bpf command. When an freplace program attaches to a target program, it needs to look up the BTF of the target; this may require a BPF token if, for example, running from a user namespace. Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20250317174039.161275-4-mykyta.yatsenko5@gmail.com --- tools/lib/bpf/bpf.c | 3 ++- tools/lib/bpf/bpf.h | 3 ++- tools/lib/bpf/btf.c | 15 +++++++++++++-- tools/lib/bpf/libbpf.c | 10 +++++----- tools/lib/bpf/libbpf_internal.h | 1 + 5 files changed, 23 insertions(+), 9 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 359f73ead613..a9c3e33d0f8a 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -1097,7 +1097,7 @@ int bpf_map_get_fd_by_id(__u32 id) int bpf_btf_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts) { - const size_t attr_sz = offsetofend(union bpf_attr, open_flags); + const size_t attr_sz = offsetofend(union bpf_attr, fd_by_id_token_fd); union bpf_attr attr; int fd; @@ -1107,6 +1107,7 @@ int bpf_btf_get_fd_by_id_opts(__u32 id, memset(&attr, 0, attr_sz); attr.btf_id = id; attr.open_flags = OPTS_GET(opts, open_flags, 0); + attr.fd_by_id_token_fd = OPTS_GET(opts, token_fd, 0); fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz); return libbpf_err_errno(fd); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 435da95d2058..777627d33d25 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -487,9 +487,10 @@ LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id); struct bpf_get_fd_by_id_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 open_flags; /* permissions requested for the operation on fd */ + __u32 token_fd; size_t :0; }; -#define bpf_get_fd_by_id_opts__last_field open_flags +#define bpf_get_fd_by_id_opts__last_field token_fd LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id, diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index eea99c766a20..38bc6b14b066 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1619,12 +1619,18 @@ exit_free: return btf; } -struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf) +struct btf *btf_load_from_kernel(__u32 id,
struct btf *base_btf, int token_fd) { struct btf *btf; int btf_fd; + LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts); + + if (token_fd) { + opts.open_flags |= BPF_F_TOKEN_FD; + opts.token_fd = token_fd; + } - btf_fd = bpf_btf_get_fd_by_id(id); + btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts); if (btf_fd < 0) return libbpf_err_ptr(-errno); @@ -1634,6 +1640,11 @@ struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf) return libbpf_ptr(btf); } +struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf) +{ + return btf_load_from_kernel(id, base_btf, 0); +} + struct btf *btf__load_from_kernel_by_id(__u32 id) { return btf__load_from_kernel_by_id_split(id, NULL); } diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 8e32286854ef..6b85060f07b3 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10024,7 +10024,7 @@ int libbpf_find_vmlinux_btf_id(const char *name, return libbpf_err(err); } -static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) +static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd, int token_fd) { struct bpf_prog_info info; __u32 info_len = sizeof(info); @@ -10044,7 +10044,7 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) pr_warn("The target program doesn't have BTF\n"); goto out; } - btf = btf__load_from_kernel_by_id(info.btf_id); + btf = btf_load_from_kernel(info.btf_id, NULL, token_fd); err = libbpf_get_error(btf); if (err) { pr_warn("Failed to get BTF %d of the program: %s\n", info.btf_id, errstr(err)); @@ -10127,7 +10127,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac pr_warn("prog '%s': attach program FD is not set\n", prog->name); return -EINVAL; } - err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd); + err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd, prog->obj->token_fd); if (err < 0) { pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %s\n", prog->name, attach_prog_fd, attach_name, errstr(err)); @@ -12923,7 +12923,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, if (target_fd) { LIBBPF_OPTS(bpf_link_create_opts, target_opts); - btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd); + btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd, prog->obj->token_fd); if (btf_id < 0) return libbpf_err_ptr(btf_id); @@ -13744,7 +13744,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog, if (attach_prog_fd) { btf_id = libbpf_find_prog_btf_id(attach_func_name, - attach_prog_fd); + attach_prog_fd, prog->obj->token_fd); if (btf_id < 0) return libbpf_err(btf_id); } else { diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index de498e2dd6b0..76669c73dcd1 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -409,6 +409,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len, int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level, int token_fd); +struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd); struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, -- cgit v1.2.3 From 307ef667e94530c2f2f77797bfe9ea85c22bec7d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 20 Mar 2025 15:24:39 -0700 Subject: libbpf: Add namespace for errstr making it libbpf_errstr When statically linking, symbols can be replaced with those
from other statically linked libraries depending on the link order, and the hoped-for "multiple definition" error may not appear. To avoid conflicts it is good practice to namespace symbols; this change renames errstr to libbpf_errstr. To avoid churn, a #define is used to turn uses of errstr(err) into libbpf_errstr(err). Fixes: 1633a83bf993 ("libbpf: Introduce errstr() for stringifying errno") Signed-off-by: Ian Rogers Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250320222439.1350187-1-irogers@google.com --- tools/lib/bpf/str_error.c | 2 +- tools/lib/bpf/str_error.h | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c index 8743049e32b7..9a541762f54c 100644 --- a/tools/lib/bpf/str_error.c +++ b/tools/lib/bpf/str_error.c @@ -36,7 +36,7 @@ char *libbpf_strerror_r(int err, char *dst, int len) return dst; } -const char *errstr(int err) +const char *libbpf_errstr(int err) { static __thread char buf[12]; diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h index 66ffebde0684..53e7fbffc13e 100644 --- a/tools/lib/bpf/str_error.h +++ b/tools/lib/bpf/str_error.h @@ -7,10 +7,13 @@ char *libbpf_strerror_r(int err, char *dst, int len); /** - * @brief **errstr()** returns string corresponding to numeric errno + * @brief **libbpf_errstr()** returns string corresponding to numeric errno * @param err negative numeric errno * @return pointer to string representation of the errno, that is invalidated * upon the next call. */ -const char *errstr(int err); +const char *libbpf_errstr(int err); + +#define errstr(err) libbpf_errstr(err) + #endif /* __LIBBPF_STR_ERROR_H */ -- cgit v1.2.3