author	Alexei Starovoitov <ast@kernel.org>	2025-03-22 06:15:27 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2025-03-22 06:19:09 -0700
commit	9aa8fe29f624610b4694d5b5695e1017c4753f31 (patch)
tree	993a4903cf9b49124c6cea0c33739ce65d8f1072 /tools/testing
parent	libbpf: Add namespace for errstr making it libbpf_errstr (diff)
parent	selftests/bpf: Add selftests for load-acquire/store-release when register num... (diff)
Merge branch 'bpf-fix-oob-read-and-add-tests-for-load-acquire-store-release'
Kohei Enju says:

====================
bpf: Fix OOB read and add tests for load-acquire/store-release

This patch series addresses an out-of-bounds read issue in
check_atomic_load/store() reported by syzkaller when an invalid register
number (MAX_BPF_REG or greater) is used.

The first patch fixes the actual bug by changing the order of validity
checks, ensuring register validity is checked before atomic_ptr_type_ok()
is called. It also updates some tests that were assuming the previous
order of checks.

The second patch adds new tests specifically for the invalid register
number case to prevent regression in the future.

Changes:
v3:
  - Change invalid register from R11 to R15 in new tests
v2: https://lore.kernel.org/all/20250321110010.95217-4-enjuk@amazon.com/
  - Just swap atomic_ptr_type_ok() and check_load_mem()/check_store_reg()
  - Update some tests that were assuming the previous order of checks
  - Add new tests specifically for the invalid register number
v1: https://lore.kernel.org/bpf/20250314195619.23772-2-enjuk@amazon.com/

Reported-by: syzbot+a5964227adc0f904549c@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=a5964227adc0f904549c
====================

Link: https://patch.msgid.link/20250322045340.18010-4-enjuk@amazon.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
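For context, the bug pattern described above is an array index (the register
number encoded in the BPF instruction) being used to look up register state
in atomic_ptr_type_ok() before its range is validated, which only happens
inside check_load_mem()/check_store_reg(). The following is a minimal
standalone C sketch of that pattern and of the reordered checks; it is not
the kernel/bpf/verifier.c code, and the helper names check_reg_num(),
ptr_type_ok() and check_atomic_access() are illustrative stand-ins.

```c
#include <stdio.h>
#include <errno.h>

#define MAX_BPF_REG 11		/* r0..r10; anything >= this is invalid */

struct reg_state { int type; };

static struct reg_state regs[MAX_BPF_REG];

/* Stand-in for atomic_ptr_type_ok(): it dereferences the register state,
 * so it must only run once the register number is known to be in range.
 */
static int ptr_type_ok(int regno)
{
	return regs[regno].type != 0;	/* OOB read if regno >= MAX_BPF_REG */
}

/* Stand-in for the range check performed via check_load_mem()/check_store_reg(). */
static int check_reg_num(int regno)
{
	if (regno >= MAX_BPF_REG) {
		fprintf(stderr, "R%d is invalid\n", regno);
		return -EINVAL;
	}
	return 0;
}

/* Fixed ordering: validate the register number first, then inspect its type.
 * The buggy ordering called ptr_type_ok() before this range check.
 */
static int check_atomic_access(int regno)
{
	int err = check_reg_num(regno);

	if (err)
		return err;
	if (!ptr_type_ok(regno))
		return -EACCES;
	return 0;
}

int main(void)
{
	regs[2].type = 1;
	printf("valid R2:    %d\n", check_atomic_access(2));	/* 0 */
	printf("invalid R15: %d\n", check_atomic_access(15));	/* -EINVAL, no OOB read */
	return 0;
}
```

The new selftests below exercise exactly the second case: an instruction
encoding register number 15, which the verifier must reject with
"R15 is invalid" before ever touching the (nonexistent) register state.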
Diffstat (limited to 'tools/testing')
-rw-r--r--	tools/testing/selftests/bpf/progs/verifier_load_acquire.c	26
-rw-r--r--	tools/testing/selftests/bpf/progs/verifier_store_release.c	28
2 files changed, 49 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c
index 608097453832..77698d5a19e4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c
+++ b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c
@@ -139,10 +139,16 @@ __naked void load_acquire_from_pkt_pointer(void)
{
asm volatile (
"r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+ "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);"
+ "r1 = r2;"
+ "r1 += 8;"
+ "if r1 >= r3 goto l0_%=;"
".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+"l0_%=: r0 = 0;"
"exit;"
:
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_insn(load_acquire_insn,
BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
: __clobber_all);
@@ -172,12 +178,28 @@ __naked void load_acquire_from_sock_pointer(void)
{
asm volatile (
"r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);"
- ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+ // w0 = load_acquire((u8 *)(r2 + offsetof(struct bpf_sock, family)));
+ ".8byte %[load_acquire_insn];"
"exit;"
:
: __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)),
__imm_insn(load_acquire_insn,
- BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+ BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2,
+ offsetof(struct bpf_sock, family)))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("load-acquire with invalid register R15")
+__failure __failure_unpriv __msg("R15 is invalid")
+__naked void load_acquire_with_invalid_reg(void)
+{
+ asm volatile (
+ ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r15 + 0));
+ "exit;"
+ :
+ : __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, 15 /* invalid reg */, 0))
: __clobber_all);
}
diff --git a/tools/testing/selftests/bpf/progs/verifier_store_release.c b/tools/testing/selftests/bpf/progs/verifier_store_release.c
index f1c64c08f25f..c0442d5bb049 100644
--- a/tools/testing/selftests/bpf/progs/verifier_store_release.c
+++ b/tools/testing/selftests/bpf/progs/verifier_store_release.c
@@ -140,11 +140,13 @@ __naked void store_release_to_ctx_pointer(void)
{
asm volatile (
"w0 = 0;"
- ".8byte %[store_release_insn];" // store_release((u8 *)(r1 + 0), w0);
+ // store_release((u8 *)(r1 + offsetof(struct __sk_buff, cb[0])), w0);
+ ".8byte %[store_release_insn];"
"exit;"
:
: __imm_insn(store_release_insn,
- BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_1, BPF_REG_0, 0))
+ BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])))
: __clobber_all);
}
@@ -156,10 +158,16 @@ __naked void store_release_to_pkt_pointer(void)
asm volatile (
"w0 = 0;"
"r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+ "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);"
+ "r1 = r2;"
+ "r1 += 8;"
+ "if r1 >= r3 goto l0_%=;"
".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+"l0_%=: r0 = 0;"
"exit;"
:
: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+ __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
__imm_insn(store_release_insn,
BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
: __clobber_all);
@@ -185,7 +193,7 @@ __naked void store_release_to_flow_keys_pointer(void)
SEC("sk_reuseport")
__description("store-release to sock pointer")
-__failure __msg("BPF_ATOMIC stores into R2 sock is not allowed")
+__failure __msg("R2 cannot write into sock")
__naked void store_release_to_sock_pointer(void)
{
asm volatile (
@@ -249,6 +257,20 @@ __naked void store_release_leak_pointer_to_map(void)
: __clobber_all);
}
+SEC("socket")
+__description("store-release with invalid register R15")
+__failure __failure_unpriv __msg("R15 is invalid")
+__naked void store_release_with_invalid_reg(void)
+{
+ asm volatile (
+ ".8byte %[store_release_insn];" // store_release((u64 *)(r15 + 0), r1);
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, 15 /* invalid reg */, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
#else
SEC("socket")