summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCheng-Yang Chou <yphbchou0911@gmail.com>2026-03-15 16:24:41 +0800
committerTejun Heo <tj@kernel.org>2026-03-14 22:54:05 -1000
commitf96bc0fa92be8dc0ec97bbe5bec6d5df26f9585b (patch)
treea87bb879c9b1aea16e82500f400c2abfbd6aed42
parent6712c4fefca0422851b71d1a58a32ea03f69310f (diff)
downloadlinux-f96bc0fa92be8dc0ec97bbe5bec6d5df26f9585b.tar.gz
linux-f96bc0fa92be8dc0ec97bbe5bec6d5df26f9585b.zip
sched_ext: Update selftests to drop ops.cpu_acquire/release()
ops.cpu_acquire/release() are deprecated by commit a3f5d4822253 ("sched_ext: Allow scx_bpf_reenqueue_local() to be called from anywhere") in favor of handling CPU preemption via the sched_switch tracepoint.

In the maximal selftest, replace the cpu_acquire/release stubs with a minimal sched_switch TP program. Attach all non-struct_ops programs (including the new TP) via maximal__attach() after disabling auto-attach for the maximal_ops struct_ops map, which is managed manually in run().

Apply the same fix to reload_loop, which also uses the maximal skeleton.

Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--tools/testing/selftests/sched_ext/maximal.bpf.c15
-rw-r--r--tools/testing/selftests/sched_ext/maximal.c3
-rw-r--r--tools/testing/selftests/sched_ext/reload_loop.c3
3 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index a3aabeb82e6b..04a369078aac 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -67,13 +67,12 @@ void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p,
void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle)
{}
-void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu,
- struct scx_cpu_acquire_args *args)
-{}
-
-void BPF_STRUCT_OPS(maximal_cpu_release, s32 cpu,
- struct scx_cpu_release_args *args)
-{}
+SEC("tp_btf/sched_switch")
+int BPF_PROG(maximal_sched_switch, bool preempt, struct task_struct *prev,
+ struct task_struct *next, unsigned int prev_state)
+{
+ return 0;
+}
void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu)
{}
@@ -150,8 +149,6 @@ struct sched_ext_ops maximal_ops = {
.set_weight = (void *) maximal_set_weight,
.set_cpumask = (void *) maximal_set_cpumask,
.update_idle = (void *) maximal_update_idle,
- .cpu_acquire = (void *) maximal_cpu_acquire,
- .cpu_release = (void *) maximal_cpu_release,
.cpu_online = (void *) maximal_cpu_online,
.cpu_offline = (void *) maximal_cpu_offline,
.init_task = (void *) maximal_init_task,
diff --git a/tools/testing/selftests/sched_ext/maximal.c b/tools/testing/selftests/sched_ext/maximal.c
index c6be50a9941d..1dc369224670 100644
--- a/tools/testing/selftests/sched_ext/maximal.c
+++ b/tools/testing/selftests/sched_ext/maximal.c
@@ -19,6 +19,9 @@ static enum scx_test_status setup(void **ctx)
SCX_ENUM_INIT(skel);
SCX_FAIL_IF(maximal__load(skel), "Failed to load skel");
+ bpf_map__set_autoattach(skel->maps.maximal_ops, false);
+ SCX_FAIL_IF(maximal__attach(skel), "Failed to attach skel");
+
*ctx = skel;
return SCX_TEST_PASS;
diff --git a/tools/testing/selftests/sched_ext/reload_loop.c b/tools/testing/selftests/sched_ext/reload_loop.c
index 308211d80436..49297b83d748 100644
--- a/tools/testing/selftests/sched_ext/reload_loop.c
+++ b/tools/testing/selftests/sched_ext/reload_loop.c
@@ -23,6 +23,9 @@ static enum scx_test_status setup(void **ctx)
SCX_ENUM_INIT(skel);
SCX_FAIL_IF(maximal__load(skel), "Failed to load skel");
+ bpf_map__set_autoattach(skel->maps.maximal_ops, false);
+ SCX_FAIL_IF(maximal__attach(skel), "Failed to attach skel");
+
return SCX_TEST_PASS;
}