author     Alexei Starovoitov <ast@kernel.org>    2026-03-13 19:09:35 -0700
committer  Alexei Starovoitov <ast@kernel.org>    2026-03-13 19:09:35 -0700
commit     bb41fcef5c7932e61ce87f573497ab0472cfe496 (patch)
tree       631e074d556a9837683969324576bd03987428c3
parent     2af3aa702c05ecd05850db9d9e110be9ffa3cf47 (diff)
parent     0a753d8cd61e31cc438a4fc414cc01655d3f3b72 (diff)
Merge branch 'optimize-bounds-refinement-by-reordering-deductions'
Paul Chaignon says:
====================
Optimize bounds refinement by reordering deductions
This patchset optimizes bounds refinement (reg_bounds_sync) by
reordering the deductions in __reg_deduce_bounds. The reordering
slightly improves precision and lets us drop one of the three calls to
__reg_deduce_bounds in reg_bounds_sync.
The first patch from Eduard refactors the __reg_deduce_bounds
subfunctions, the second patch implements the reordering, and the last
one adds a selftest.
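To make the new ordering concrete, here is a minimal standalone sketch
(not the kernel implementation): the toy_reg struct, the MAX/MIN macros
and the reduced helper bodies are illustrative assumptions; only the
helper names and the order in which they are called mirror the second
patch.

```c
/* Standalone sketch (not the kernel code): a toy register state and
 * simplified deductions, called in the order the second patch introduces.
 * Only the helper names and their ordering mirror the patch; the struct
 * and bodies are reduced stand-ins for struct bpf_reg_state and the real
 * kernel logic.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct toy_reg {
        uint64_t umin, umax;       /* unsigned 64-bit bounds */
        int64_t smin, smax;        /* signed 64-bit bounds */
        uint32_t u32_min, u32_max; /* unsigned 32-bit bounds */
        int32_t s32_min, s32_max;  /* signed 32-bit bounds */
};

/* A u64 range that doesn't cross the sign boundary is also a valid s64
 * range; a fully non-negative s64 range is also a valid u64 range. */
static void deduce_bounds_64_from_64(struct toy_reg *r)
{
        if ((int64_t)r->umin <= (int64_t)r->umax) {
                r->smin = MAX(r->smin, (int64_t)r->umin);
                r->smax = MIN(r->smax, (int64_t)r->umax);
        }
        if (r->smin >= 0) {
                r->umin = MAX(r->umin, (uint64_t)r->smin);
                r->umax = MIN(r->umax, (uint64_t)r->smax);
        }
}

/* If the upper 32 bits are constant across the u64 range, the lower 32
 * bits of umin/umax bound the 32-bit view as well. */
static void deduce_bounds_32_from_64(struct toy_reg *r)
{
        if ((r->umin >> 32) == (r->umax >> 32)) {
                r->u32_min = MAX(r->u32_min, (uint32_t)r->umin);
                r->u32_max = MIN(r->u32_max, (uint32_t)r->umax);
        }
}

/* Same sign-boundary argument as the 64-bit case, applied at 32 bits. */
static void deduce_bounds_32_from_32(struct toy_reg *r)
{
        if ((int32_t)r->u32_min <= (int32_t)r->u32_max) {
                r->s32_min = MAX(r->s32_min, (int32_t)r->u32_min);
                r->s32_max = MIN(r->s32_max, (int32_t)r->u32_max);
        }
}

/* With constant upper 32 bits, tighter 32-bit bounds translate directly
 * into tighter 64-bit bounds. */
static void deduce_bounds_64_from_32(struct toy_reg *r)
{
        if ((r->umin >> 32) == (r->umax >> 32)) {
                uint64_t hi = r->umin & 0xffffffff00000000ULL;

                r->umin = MAX(r->umin, hi | r->u32_min);
                r->umax = MIN(r->umax, hi | r->u32_max);
        }
}

int main(void)
{
        /* r0 in [2, 19] unsigned; nothing known about the other views yet. */
        struct toy_reg r = {
                .umin = 2, .umax = 19,
                .smin = INT64_MIN, .smax = INT64_MAX,
                .u32_min = 0, .u32_max = UINT32_MAX,
                .s32_min = INT32_MIN, .s32_max = INT32_MAX,
        };

        /* New order: 64-bit facts first, push them down to 32 bits, refine
         * there, then push the result back up to 64 bits. */
        deduce_bounds_64_from_64(&r);
        deduce_bounds_32_from_64(&r);
        deduce_bounds_32_from_32(&r);
        deduce_bounds_64_from_32(&r);

        /* Prints: s64=[2, 19] u32=[2, 19] s32=[2, 19] */
        printf("s64=[%lld, %lld] u32=[%u, %u] s32=[%d, %d]\n",
               (long long)r.smin, (long long)r.smax,
               r.u32_min, r.u32_max, r.s32_min, r.s32_max);
        return 0;
}
```

The point of the order is that 64-bit facts are established first, pushed
down to the 32-bit views, refined there, and only then pushed back up,
which is what lets reg_bounds_sync get by with two calls to
__reg_deduce_bounds instead of three.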
Changes in v3:
- Added a first commit from Eduard that significantly improves the
  readability of the second commit.
- Reshuffled the functions in the second commit a bit more to improve
  precision (Eduard).
- Rebased.
Changes in v2:
- Updated description to mention potential precision improvement and
to clarify the sequence of refinements (Shung-Hsi).
- Added the second patch.
- Rebased.
====================
Link: https://patch.msgid.link/cover.1773401138.git.paul.chaignon@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--  kernel/bpf/verifier.c                                | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bounds.c  | 33
2 files changed, 44 insertions, 7 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4fbacd2149cd..e29f15419fcb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2448,7 +2448,7 @@ static void __update_reg_bounds(struct bpf_reg_state *reg)
 }
 
 /* Uses signed min/max values to inform unsigned, and vice-versa */
-static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
+static void deduce_bounds_32_from_64(struct bpf_reg_state *reg)
 {
         /* If upper 32 bits of u64/s64 range don't change, we can use lower 32
          * bits to improve our u32/s32 boundaries.
@@ -2518,6 +2518,10 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
                 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
                 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
         }
+}
+
+static void deduce_bounds_32_from_32(struct bpf_reg_state *reg)
+{
         /* if u32 range forms a valid s32 range (due to matching sign bit),
          * try to learn from that
          */
@@ -2559,7 +2563,7 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
         }
 }
 
-static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
+static void deduce_bounds_64_from_64(struct bpf_reg_state *reg)
 {
         /* If u64 range forms a valid s64 range (due to matching sign bit),
          * try to learn from that. Let's do a bit of ASCII art to see when
@@ -2694,7 +2698,7 @@ static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
         }
 }
 
-static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
+static void deduce_bounds_64_from_32(struct bpf_reg_state *reg)
 {
         /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit
          * values on both sides of 64-bit range in hope to have tighter range.
@@ -2763,9 +2767,10 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
 
 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
 {
-        __reg32_deduce_bounds(reg);
-        __reg64_deduce_bounds(reg);
-        __reg_deduce_mixed_bounds(reg);
+        deduce_bounds_64_from_64(reg);
+        deduce_bounds_32_from_64(reg);
+        deduce_bounds_32_from_32(reg);
+        deduce_bounds_64_from_32(reg);
 }
 
 /* Attempts to improve var_off based on unsigned min/max information */
@@ -2788,7 +2793,6 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
         /* We might have learned something about the sign bit. */
         __reg_deduce_bounds(reg);
         __reg_deduce_bounds(reg);
-        __reg_deduce_bounds(reg);
         /* We might have learned some bits from the bounds. */
         __reg_bound_offset(reg);
         /* Intersecting with the old var_off might have improved our bounds
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index ce09379130aa..3724d5e5bcb3 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -2037,4 +2037,37 @@ __naked void signed_unsigned_intersection32_case2(void *ctx)
                       : __clobber_all);
 }
 
+/* After instruction 3, the u64 and s64 ranges look as follows:
+ * 0  umin=2                               umax=0xff..ff00..03  U64_MAX
+ * |  [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xx]                          [xxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ * 0  smax=2                     smin=0x800..02               -1
+ *
+ * The two ranges can't be refined because they overlap in two places. Once we
+ * add an upper-bound to u64 at instruction 4, the refinement can happen. This
+ * test validates that this refinement does happen and is not overwritten by
+ * the less-precise 32bits ranges.
+ */
+SEC("socket")
+__description("bounds refinement: 64bits ranges not overwritten by 32bits ranges")
+__msg("3: (65) if r0 s> 0x2 {{.*}} R0=scalar(smin=0x8000000000000002,smax=2,umin=smin32=umin32=2,umax=0xffffffff00000003,smax32=umax32=3")
+__msg("4: (25) if r0 > 0x13 {{.*}} R0=2")
+__success __log_level(2)
+__naked void refinement_32bounds_not_overwriting_64bounds(void *ctx)
+{
+        asm volatile("                                  \
+        call %[bpf_get_prandom_u32];                    \
+        if w0 < 2 goto +5;                              \
+        if w0 > 3 goto +4;                              \
+        if r0 s> 2 goto +3;                             \
+        if r0 > 19 goto +2;                             \
+        if r0 == 2 goto +1;                             \
+        r10 = 0;                                        \
+        exit;                                           \
+"       :
+        : __imm(bpf_get_prandom_u32)
+        : __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
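The arithmetic in the selftest comment can also be checked in isolation.
The snippet below is an illustration only, not part of the patch: once
instruction 4 caps umax at 0x13, the u64 range no longer reaches the
negative half of the s64 range, and intersecting the two views leaves
only the value 2.

```c
/* Illustration only (not part of the patch): check that capping umax at
 * 0x13 lets the signed and unsigned 64-bit views be intersected to R0=2. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t umin = 2, umax = 0x13;   /* u64 view after instruction 4 */
        int64_t smin = INT64_MIN + 2;     /* s64 view after instruction 3, */
        int64_t smax = 2;                 /* i.e. [0x8000000000000002, 2]  */

        /* [umin, umax] no longer crosses the sign boundary, so it is also
         * a valid s64 range and can be intersected with [smin, smax]. */
        assert((int64_t)umin <= (int64_t)umax);
        if ((int64_t)umin > smin)
                smin = (int64_t)umin;
        if ((int64_t)umax < smax)
                smax = (int64_t)umax;

        assert(smin == 2 && smax == 2);   /* the verifier can conclude R0=2 */
        return 0;
}
```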
