author    Linus Torvalds <torvalds@linux-foundation.org> 2026-02-28 19:54:28 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2026-02-28 19:54:28 -0800
commit    eb71ab2bf72260054677e348498ba995a057c463 (patch)
tree      425776573c762c80fd1e173f92537741300fb8d1 /tools/testing/selftests/bpf/prog_tests/map_kptr_race.c
parent    63a43faf6a68ce0045c874b32e60acac2089a41a (diff)
parent    b9c0a5c48396aea4cde25fc701027ebbc5d78de1 (diff)
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix alignment of arm64 JIT buffer to prevent atomic tearing (Fuad Tabba)

 - Fix invariant violation for single value tnums in the verifier (Harishankar Vishwanathan, Paul Chaignon)

 - Fix a bunch of issues found by ASAN in selftests/bpf (Ihor Solodrai)

 - Fix race in devmap and cpumap on PREEMPT_RT (Jiayuan Chen)

 - Fix show_fdinfo of kprobe_multi when cookies are not present (Jiri Olsa)

 - Fix race in freeing special fields in BPF maps to prevent memory leaks (Kumar Kartikeya Dwivedi)

 - Fix OOB read in dmabuf_collector (T.J. Mercier)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (36 commits)
  selftests/bpf: Avoid simplification of crafted bounds test
  selftests/bpf: Test refinement of single-value tnum
  bpf: Improve bounds when tnum has a single possible value
  bpf: Introduce tnum_step to step through tnum's members
  bpf: Fix race in devmap on PREEMPT_RT
  bpf: Fix race in cpumap on PREEMPT_RT
  selftests/bpf: Add tests for special fields races
  bpf: Retire rcu_trace_implies_rcu_gp() from local storage
  bpf: Delay freeing fields in local storage
  bpf: Lose const-ness of map in map_check_btf()
  bpf: Register dtor for freeing special fields
  selftests/bpf: Fix OOB read in dmabuf_collector
  selftests/bpf: Fix a memory leak in xdp_flowtable test
  bpf: Fix stack-out-of-bounds write in devmap
  bpf: Fix kprobe_multi cookies access in show_fdinfo callback
  bpf, arm64: Force 8-byte alignment for JIT buffer to prevent atomic tearing
  selftests/bpf: Don't override SIGSEGV handler with ASAN
  selftests/bpf: Check BPFTOOL env var in detect_bpftool_path()
  selftests/bpf: Fix out-of-bounds array access bugs reported by ASAN
  selftests/bpf: Fix array bounds warning in jit_disasm_helpers
  ...
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/map_kptr_race.c')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/map_kptr_race.c  218
1 file changed, 218 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr_race.c b/tools/testing/selftests/bpf/prog_tests/map_kptr_race.c
new file mode 100644
index 000000000000..506ed55e8528
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr_race.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
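+/*
+ * Regression tests for races when freeing special fields (kptrs) in BPF
+ * map values (see "selftests/bpf: Add tests for special fields races" in
+ * the merge above): each subtest fills a map with kptr-bearing values,
+ * then a second "watcher" skeleton observes the map's deferred teardown
+ * via fentry/fexit probes and checks that no references are leaked.
+ */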
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "map_kptr_race.skel.h"
+
+static int get_map_id(int map_fd)
+{
+	struct bpf_map_info info = {};
+	__u32 len = sizeof(info);
+
+	if (!ASSERT_OK(bpf_map_get_info_by_fd(map_fd, &info, &len), "get_map_info"))
+		return -1;
+	return info.id;
+}
+
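+/* Run the watcher's count_ref program and return the number of references
+ * it observed, reported back through the skeleton's bss. */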
+static int read_refs(struct map_kptr_race *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts);
+	int ret;
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.count_ref), &opts);
+	if (!ASSERT_OK(ret, "count_ref run"))
+		return -1;
+	if (!ASSERT_OK(opts.retval, "count_ref retval"))
+		return -1;
+	return skel->bss->num_of_refs;
+}
+
+static void test_htab_leak(void)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);
+	struct map_kptr_race *skel, *watcher;
+	int ret, map_id;
+
+	skel = map_kptr_race__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		return;
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_htab_leak), &opts);
+	if (!ASSERT_OK(ret, "test_htab_leak run"))
+		goto out_skel;
+	if (!ASSERT_OK(opts.retval, "test_htab_leak retval"))
+		goto out_skel;
+
+	map_id = get_map_id(bpf_map__fd(skel->maps.race_hash_map));
+	if (!ASSERT_GE(map_id, 0, "map_id"))
+		goto out_skel;
+
+	watcher = map_kptr_race__open_and_load();
+	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
+		goto out_skel;
+
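+	/* Point the watcher at the map under test and hook the map release
+	 * path (fentry on the put side, fexit on the htab free side) so it
+	 * can flag when the map is actually torn down. */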
+	watcher->bss->target_map_id = map_id;
+	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
+	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
+		goto out_watcher;
+	watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free);
+	if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit"))
+		goto out_watcher;
+
+	map_kptr_race__destroy(skel);
+	skel = NULL;
+
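+	/* The map is freed asynchronously after the skeleton is destroyed;
+	 * sync RCU and then spin until the watcher's fexit probe reports
+	 * that the free actually ran. */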
+	kern_sync_rcu();
+
+	while (!READ_ONCE(watcher->bss->map_freed))
+		sched_yield();
+
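+	/* The map must have been freed exactly once, and the reference
+	 * count seen by the BPF side must match the expected value, i.e.
+	 * nothing was leaked. */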
+	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
+	ASSERT_EQ(read_refs(watcher), 2, "htab refcount");
+
+out_watcher:
+	map_kptr_race__destroy(watcher);
+out_skel:
+	map_kptr_race__destroy(skel);
+}
+
+static void test_percpu_htab_leak(void)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);
+	struct map_kptr_race *skel, *watcher;
+	int ret, map_id;
+
+	skel = map_kptr_race__open();
+	if (!ASSERT_OK_PTR(skel, "open"))
+		return;
+
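+	/* Bound how many CPUs the BPF side walks; rodata must be set
+	 * before the skeleton is loaded. */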
+	skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+	if (skel->rodata->nr_cpus > 16)
+		skel->rodata->nr_cpus = 16;
+
+	ret = map_kptr_race__load(skel);
+	if (!ASSERT_OK(ret, "load"))
+		goto out_skel;
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_percpu_htab_leak), &opts);
+	if (!ASSERT_OK(ret, "test_percpu_htab_leak run"))
+		goto out_skel;
+	if (!ASSERT_OK(opts.retval, "test_percpu_htab_leak retval"))
+		goto out_skel;
+
+	map_id = get_map_id(bpf_map__fd(skel->maps.race_percpu_hash_map));
+	if (!ASSERT_GE(map_id, 0, "map_id"))
+		goto out_skel;
+
+	watcher = map_kptr_race__open_and_load();
+	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
+		goto out_skel;
+
+	watcher->bss->target_map_id = map_id;
+	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
+	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
+		goto out_watcher;
+	watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free);
+	if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit"))
+		goto out_watcher;
+
+	map_kptr_race__destroy(skel);
+	skel = NULL;
+
+	kern_sync_rcu();
+
+	while (!READ_ONCE(watcher->bss->map_freed))
+		sched_yield();
+
+	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
+	ASSERT_EQ(read_refs(watcher), 2, "percpu_htab refcount");
+
+out_watcher:
+	map_kptr_race__destroy(watcher);
+out_skel:
+	map_kptr_race__destroy(skel);
+}
+
+static void test_sk_ls_leak(void)
+{
+	struct map_kptr_race *skel, *watcher;
+	int listen_fd = -1, client_fd = -1, map_id;
+
+	skel = map_kptr_race__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		return;
+
+	if (!ASSERT_OK(map_kptr_race__attach(skel), "attach"))
+		goto out_skel;
+
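+	/* Set up a loopback TCP connection to trigger the attached
+	 * programs, which are expected to set sk_ls_leak_done once they
+	 * have populated the socket local storage. */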
+	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+	if (!ASSERT_GE(listen_fd, 0, "start_server"))
+		goto out_skel;
+
+	client_fd = connect_to_fd(listen_fd, 0);
+	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+		goto out_skel;
+
+	if (!ASSERT_EQ(skel->bss->sk_ls_leak_done, 1, "sk_ls_leak_done"))
+		goto out_skel;
+
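+	/* Drop both sockets before the map itself is torn down. */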
+	close(client_fd);
+	client_fd = -1;
+	close(listen_fd);
+	listen_fd = -1;
+
+	map_id = get_map_id(bpf_map__fd(skel->maps.race_sk_ls_map));
+	if (!ASSERT_GE(map_id, 0, "map_id"))
+		goto out_skel;
+
+	watcher = map_kptr_race__open_and_load();
+	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
+		goto out_skel;
+
+	watcher->bss->target_map_id = map_id;
+	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
+	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
+		goto out_watcher;
+	watcher->links.sk_map_free = bpf_program__attach(watcher->progs.sk_map_free);
+	if (!ASSERT_OK_PTR(watcher->links.sk_map_free, "attach fexit"))
+		goto out_watcher;
+
+	map_kptr_race__destroy(skel);
+	skel = NULL;
+
+	kern_sync_rcu();
+
+	while (!READ_ONCE(watcher->bss->map_freed))
+		sched_yield();
+
+	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
+	ASSERT_EQ(read_refs(watcher), 2, "sk_ls refcount");
+
+out_watcher:
+	map_kptr_race__destroy(watcher);
+out_skel:
+	if (client_fd >= 0)
+		close(client_fd);
+	if (listen_fd >= 0)
+		close(listen_fd);
+	map_kptr_race__destroy(skel);
+}
+
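+/* The watcher attaches fentry/fexit probes on shared kernel functions, so
+ * keep these subtests serial to avoid interference from parallel tests. */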
+void serial_test_map_kptr_race(void)
+{
+	if (test__start_subtest("htab_leak"))
+		test_htab_leak();
+	if (test__start_subtest("percpu_htab_leak"))
+		test_percpu_htab_leak();
+	if (test__start_subtest("sk_ls_leak"))
+		test_sk_ls_leak();
+}