author    Chen Ridong <chenridong@huawei.com>  2025-08-25 03:23:52 +0000
committer Tejun Heo <tj@kernel.org>            2025-08-25 08:20:22 -1000
commit    2c98144fc832b35c4e9293a3bfc518608d6f5145 (patch)
tree      d02542f35c04d301578f62b1efe4771bc0426a57 /kernel
parent    cpuset: separate tmpmasks and cpuset allocation logic (diff)
cpuset: add helpers for cpus_read_lock and cpuset_mutex locks
cpuset: add helpers for cpus_read_lock and cpuset_mutex locks.

Replace repetitive locking patterns with new helpers:
 - cpuset_full_lock()
 - cpuset_full_unlock()

This makes the code cleaner and ensures consistent lock ordering.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
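The pattern being consolidated is simple: every writer must take the CPU
hotplug read lock before cpuset_mutex and release the two in the opposite
order. As a rough illustration of why capturing that ordering in a helper
pair helps, here is a minimal user-space sketch of the same idea; the
pthread locks and the names full_lock()/full_unlock() are stand-ins for
cpus_read_lock()/cpuset_mutex and are not part of the patch.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for cpus_read_lock() and cpuset_mutex in this sketch. */
static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  config_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Take both locks in the one ordering every call site must follow. */
static void full_lock(void)
{
	pthread_rwlock_rdlock(&hotplug_lock);
	pthread_mutex_lock(&config_mutex);
}

/* Release in reverse order of acquisition. */
static void full_unlock(void)
{
	pthread_mutex_unlock(&config_mutex);
	pthread_rwlock_unlock(&hotplug_lock);
}

int main(void)
{
	full_lock();
	printf("both locks held; configuration can be modified safely\n");
	full_unlock();
	return 0;
}

With the ordering defined in one place, a call site cannot invert it or
forget to drop one of the two locks on an error path, which is the
motivation for cpuset_full_lock()/cpuset_full_unlock() in the diff below.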
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup/cpuset-internal.h |  2
-rw-r--r--	kernel/cgroup/cpuset-v1.c       | 12
-rw-r--r--	kernel/cgroup/cpuset.c          | 60
3 files changed, 40 insertions, 34 deletions
diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h
index 75b3aef39231..337608f408ce 100644
--- a/kernel/cgroup/cpuset-internal.h
+++ b/kernel/cgroup/cpuset-internal.h
@@ -276,6 +276,8 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on)
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int cpuset_common_seq_show(struct seq_file *sf, void *v);
+void cpuset_full_lock(void);
+void cpuset_full_unlock(void);
/*
* cpuset-v1.c
diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c
index b69a7db67090..12e76774c75b 100644
--- a/kernel/cgroup/cpuset-v1.c
+++ b/kernel/cgroup/cpuset-v1.c
@@ -169,8 +169,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
cpuset_filetype_t type = cft->private;
int retval = -ENODEV;
- cpus_read_lock();
- cpuset_lock();
+ cpuset_full_lock();
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -184,8 +183,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
- cpuset_unlock();
- cpus_read_unlock();
+ cpuset_full_unlock();
return retval;
}
@@ -454,8 +452,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
cpuset_filetype_t type = cft->private;
int retval = 0;
- cpus_read_lock();
- cpuset_lock();
+ cpuset_full_lock();
if (!is_cpuset_online(cs)) {
retval = -ENODEV;
goto out_unlock;
@@ -498,8 +495,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
- cpuset_unlock();
- cpus_read_unlock();
+ cpuset_full_unlock();
return retval;
}
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 7b0b81c835bf..a78ccd11ce9b 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -250,6 +250,12 @@ static struct cpuset top_cpuset = {
static DEFINE_MUTEX(cpuset_mutex);
+/**
+ * cpuset_lock - Acquire the global cpuset mutex
+ *
+ * This locks the global cpuset mutex to prevent modifications to cpuset
+ * hierarchy and configurations. Holding this lock alone is not sufficient
+ * to modify cpusets; see cpuset_full_lock().
+ */
void cpuset_lock(void)
{
mutex_lock(&cpuset_mutex);
@@ -260,6 +266,24 @@ void cpuset_unlock(void)
mutex_unlock(&cpuset_mutex);
}
+/**
+ * cpuset_full_lock - Acquire full protection for cpuset modification
+ *
+ * Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
+ * to safely modify cpuset data.
+ */
+void cpuset_full_lock(void)
+{
+ cpus_read_lock();
+ mutex_lock(&cpuset_mutex);
+}
+
+void cpuset_full_unlock(void)
+{
+ mutex_unlock(&cpuset_mutex);
+ cpus_read_unlock();
+}
+
static DEFINE_SPINLOCK(callback_lock);
void cpuset_callback_lock_irq(void)
@@ -3234,8 +3258,7 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
int retval = -ENODEV;
buf = strstrip(buf);
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
+ cpuset_full_lock();
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -3264,8 +3287,7 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
if (force_sd_rebuild)
rebuild_sched_domains_locked();
out_unlock:
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
flush_workqueue(cpuset_migrate_mm_wq);
return retval ?: nbytes;
}
@@ -3368,12 +3390,10 @@ static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
else
return -EINVAL;
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
+ cpuset_full_lock();
if (is_cpuset_online(cs))
retval = update_prstate(cs, val);
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
return retval ?: nbytes;
}
@@ -3498,9 +3518,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
if (!parent)
return 0;
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
-
+ cpuset_full_lock();
if (is_spread_page(parent))
set_bit(CS_SPREAD_PAGE, &cs->flags);
if (is_spread_slab(parent))
@@ -3552,8 +3570,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
return 0;
}
@@ -3568,16 +3585,12 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
-
+ cpuset_full_lock();
if (!cpuset_v2() && is_sched_load_balance(cs))
cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
cpuset_dec();
-
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
}
/*
@@ -3589,16 +3602,11 @@ static void cpuset_css_killed(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
-
+ cpuset_full_lock();
/* Reset valid partition back to member */
if (is_partition_valid(cs))
update_prstate(cs, PRS_MEMBER);
-
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
-
+ cpuset_full_unlock();
}
static void cpuset_css_free(struct cgroup_subsys_state *css)