author	Leon Hwang <leon.hwang@linux.dev>	2025-09-03 22:04:37 +0800
committer	Alexei Starovoitov <ast@kernel.org>	2025-09-04 09:05:58 -0700
commit	4b69e31329b6ef22dc80b45d4a865f57c2346d44 (patch)
tree	516c35ec9a9c6513a74ddc2ef6f1726f8dbcfa51 /tools/testing
parent	bpf, arm64: Remove duplicated bpf_flush_icache() (diff)
selftests/bpf: Introduce experimental bpf_in_interrupt()
Filtering on pid_tgid is meaningless when the current task has been preempted by an interrupt. To address this, introduce the 'bpf_in_interrupt()' helper, which allows BPF programs to determine whether they are executing in interrupt context.

'get_preempt_count()' is:

* On x86, '*(int *) bpf_this_cpu_ptr(&__preempt_count)'.
* On arm64, 'bpf_get_current_task_btf()->thread_info.preempt.count'.

Then 'bpf_in_interrupt()' is:

* If !PREEMPT_RT, 'get_preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK)'.
* If PREEMPT_RT, '(get_preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | (bpf_get_current_task_btf()->softirq_disable_cnt & SOFTIRQ_MASK)'.

Support for other archs can be added by extending 'get_preempt_count()'.

Suggested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
Link: https://lore.kernel.org/r/20250903140438.59517-2-leon.hwang@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
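As a usage sketch (not part of this patch; the attach point and 'my_pid' are hypothetical), a tracing program could bail out before its pid_tgid filter whenever it happens to fire in interrupt context:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    const volatile __u32 my_pid;

    SEC("kprobe/do_nanosleep")
    int handle(struct pt_regs *ctx)
    {
    	/* The interrupted task's pid_tgid says nothing about the IRQ work,
    	 * so filtering on it here would be meaningless.
    	 */
    	if (bpf_in_interrupt())
    		return 0;

    	if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
    		return 0;

    	bpf_printk("do_nanosleep() hit in task context of my_pid");
    	return 0;
    }

    char _license[] SEC("license") = "GPL";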
Diffstat (limited to 'tools/testing')
-rw-r--r--	tools/testing/selftests/bpf/bpf_experimental.h	| 54
1 file changed, 54 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index da7e230f2781..d89eda3fd8a3 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -599,4 +599,58 @@ extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym;
extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
struct bpf_dynptr *value_p) __weak __ksym;
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
+#define NMI_BITS 4
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+extern bool CONFIG_PREEMPT_RT __kconfig __weak;
+#ifdef bpf_target_x86
+extern const int __preempt_count __ksym;
+#endif
+
+struct task_struct___preempt_rt {
+ int softirq_disable_cnt;
+} __attribute__((preserve_access_index));
+
+static inline int get_preempt_count(void)
+{
+#if defined(bpf_target_x86)
+ return *(int *) bpf_this_cpu_ptr(&__preempt_count);
+#elif defined(bpf_target_arm64)
+ return bpf_get_current_task_btf()->thread_info.preempt.count;
+#endif
+ return 0;
+}
+
+/* Description
+ * Report whether it is in interrupt context. Only works on the following archs:
+ * * x86
+ * * arm64
+ */
+static inline int bpf_in_interrupt(void)
+{
+ struct task_struct___preempt_rt *tsk;
+ int pcnt;
+
+ pcnt = get_preempt_count();
+ if (!CONFIG_PREEMPT_RT)
+ return pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);
+
+ tsk = (void *) bpf_get_current_task_btf();
+ return (pcnt & (NMI_MASK | HARDIRQ_MASK)) |
+ (tsk->softirq_disable_cnt & SOFTIRQ_MASK);
+}
+
#endif
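With the bit widths above (mirroring include/linux/preempt.h), preempt_count is laid out as bits 0-7 preemption count, bits 8-15 softirq count, bits 16-19 hardirq count and bits 20-23 NMI count, so SOFTIRQ_MASK is 0x0000ff00, HARDIRQ_MASK is 0x000f0000 and NMI_MASK is 0x00f00000. A minimal standalone check of that arithmetic (plain C11, not part of the patch):

    #include <assert.h>

    #define PREEMPT_BITS	8
    #define SOFTIRQ_BITS	8
    #define HARDIRQ_BITS	4
    #define NMI_BITS		4

    #define PREEMPT_SHIFT	0
    #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
    #define NMI_SHIFT		(HARDIRQ_SHIFT + HARDIRQ_BITS)

    #define __IRQ_MASK(x)	((1UL << (x)) - 1)

    #define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
    #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
    #define NMI_MASK		(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

    /* A nonzero result of the !PREEMPT_RT expression in bpf_in_interrupt()
     * means at least one of these nesting counts in preempt_count is nonzero.
     */
    static_assert(SOFTIRQ_MASK == 0x0000ff00UL, "softirq count in bits 8-15");
    static_assert(HARDIRQ_MASK == 0x000f0000UL, "hardirq count in bits 16-19");
    static_assert(NMI_MASK == 0x00f00000UL, "NMI count in bits 20-23");

    int main(void)
    {
    	return 0;
    }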