author	Marco Crivellari <marco.crivellari@suse.com>	2025-09-19 16:50:37 +0200
committer	Paul E. McKenney <paulmck@kernel.org>	2025-09-23 02:01:18 -0700
commit	143ddfa169bbb733275b1a720bafd5a4366b3d89 (patch)
tree	a752e568795850c99f3ff04686f9736c467cdc3b /kernel
parent	rculist: move list_for_each_rcu() to where it belongs (diff)
rcu: replace use of system_wq with system_percpu_wq
Currently, if a user enqueues a work item using schedule_delayed_work(), the wq used is "system_wq" (a per-CPU wq), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again makes use of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API.

system_wq is a per-CPU workqueue, yet nothing in its name tells about that CPU affinity constraint, which is very often not required by users. Make it clear by adding a new system_percpu_wq. The old wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
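For context, a minimal sketch of the relationship the message describes, written as a toy kernel module; the demo_* names are hypothetical, and system_percpu_wq is the new alias for the per-CPU system workqueue that this series introduces:

#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Hypothetical work handler, for illustration only. */
static void demo_fn(struct work_struct *unused)
{
	pr_info("demo work ran on CPU %d\n", raw_smp_processor_id());
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/*
	 * schedule_work(&demo_work) is shorthand for queuing on the
	 * per-CPU system workqueue, i.e. this call...
	 */
	queue_work(system_percpu_wq, &demo_work);
	/*
	 * ...while queue_work() itself expands to
	 * queue_work_on(WORK_CPU_UNBOUND, wq, work), which is the
	 * naming inconsistency the message points out.
	 */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("sketch: system_percpu_wq naming");

Only queue_work_on() with an explicit CPU, as in the hunk below, actually pins the work item to a particular CPU; the per-CPU nature of system_wq is an implementation detail its name never advertised.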
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tasks.h	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index f92443561d36..2dc044fd126e 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -553,13 +553,13 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
rtpcp_next = rtp->rtpcp_array[index];
if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
- queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ queue_work_on(cpuwq, system_percpu_wq, &rtpcp_next->rtp_work);
index++;
if (index < num_possible_cpus()) {
rtpcp_next = rtp->rtpcp_array[index];
if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
- queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ queue_work_on(cpuwq, system_percpu_wq, &rtpcp_next->rtp_work);
}
}
}