path: root/kernel/events
author		Peter Zijlstra <peterz@infradead.org>	2024-11-04 14:39:15 +0100
committer	Ingo Molnar <mingo@kernel.org>	2025-03-04 09:42:29 +0100
commit		6c8b0b835f003647e593c08331a4dd2150d5eb0e (patch)
tree		f91dc078e1f3e14c042142bbe9555e438cd9c8a6 /kernel/events
parent		perf/core: Simplify the perf_pmu_register() error path (diff)
perf/core: Simplify perf_pmu_register()
Using the previously introduced perf_pmu_free() and a new IDR helper,
simplify the perf_pmu_register() error paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135518.198937277@infradead.org
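[Editor's note] The rewrite leans on the scope-based resource management
helpers from <linux/cleanup.h>: DEFINE_FREE() declares how to dispose of a
pointer type, __free() arms that disposal on a local variable, no_free_ptr()
disarms it on the success path, and guard(mutex) ties a lock to the enclosing
scope. Below is a minimal sketch of the pattern on a hypothetical
struct widget; it is illustrative only, not perf code:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	/* Hypothetical example type, not part of the patch. */
	struct widget {
		int id;
	};

	static DEFINE_MUTEX(widget_lock);

	static void widget_destroy(struct widget *w)
	{
		kfree(w);
	}

	/* Teach the cleanup machinery how to dispose of a struct widget *. */
	DEFINE_FREE(widget_destroy, struct widget *, if (_T) widget_destroy(_T))

	/* Hypothetical setup step that can fail. */
	static int widget_setup(struct widget *w)
	{
		w->id = 1;
		return 0;
	}

	static struct widget *widget_create(void)
	{
		/* Disposal is armed at declaration and fires on scope exit... */
		struct widget *w __free(widget_destroy) =
			kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return NULL;

		/* ...and the mutex is likewise dropped on every return below. */
		guard(mutex)(&widget_lock);

		if (widget_setup(w))
			return NULL;	/* widget_destroy(w) runs implicitly here */

		/* Success: take ownership so the disposal does not fire. */
		return no_free_ptr(w);
	}

This mirrors the shape of the new perf_pmu_register() in the diff below:
every error path becomes a bare return, and the single success path hands
the pmu back out with no_free_ptr().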
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c | 71
1 file changed, 29 insertions(+), 42 deletions(-)
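[Editor's note] The "new IDR helper" the changelog mentions shows up below as
CLASS(idr_alloc, pmu_type)(...) paired with take_idr_id(). Its real
definition comes from a companion patch in the series and is not part of this
diff. The following is only a hedged sketch of how such a class could be
built with DEFINE_CLASS() from <linux/cleanup.h>; the struct name
idr_alloc_state and all details here are assumptions, not the actual helper:

	#include <linux/cleanup.h>
	#include <linux/idr.h>

	/* Illustrative only; the real helper is defined elsewhere. */
	struct idr_alloc_state {
		struct idr	*idr;
		int		id;	/* negative errno on allocation failure */
	};

	/*
	 * Constructor allocates an ID; the destructor removes it again
	 * unless it was claimed, so every early return in the scope
	 * automatically undoes the allocation.
	 */
	DEFINE_CLASS(idr_alloc, struct idr_alloc_state,
		     if (_T.id >= 0) idr_remove(_T.idr, _T.id),	/* exit */
		     ((struct idr_alloc_state){			/* init */
			.idr = idr,
			.id  = idr_alloc(idr, ptr, start, end, gfp),
		     }),
		     struct idr *idr, void *ptr, int start, int end, gfp_t gfp)

	/* Claim the ID on success so the destructor leaves it alone. */
	#define take_idr_id(_s)	((_s).id = -1)

With a class like this, the success path in the diff stores pmu_type.id into
pmu->type, then calls take_idr_id() so the ID survives the scope exit.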
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ee5cdd692383..215dad53aa1b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11914,52 +11914,49 @@ static void perf_pmu_free(struct pmu *pmu)
free_percpu(pmu->cpu_pmu_context);
}
-int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
+
+int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
{
- int cpu, ret, max = PERF_TYPE_MAX;
+ int cpu, max = PERF_TYPE_MAX;
- pmu->type = -1;
+ struct pmu *pmu __free(pmu_unregister) = _pmu;
+ guard(mutex)(&pmus_lock);
- mutex_lock(&pmus_lock);
- ret = -ENOMEM;
pmu->pmu_disable_count = alloc_percpu(int);
if (!pmu->pmu_disable_count)
- goto unlock;
+ return -ENOMEM;
- if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
- ret = -EINVAL;
- goto free;
- }
+ if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
+ return -EINVAL;
- if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE, "Can not register a pmu with an invalid scope.\n")) {
- ret = -EINVAL;
- goto free;
- }
+ if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE,
+ "Can not register a pmu with an invalid scope.\n"))
+ return -EINVAL;
pmu->name = name;
if (type >= 0)
max = type;
- ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
- if (ret < 0)
- goto free;
+ CLASS(idr_alloc, pmu_type)(&pmu_idr, NULL, max, 0, GFP_KERNEL);
+ if (pmu_type.id < 0)
+ return pmu_type.id;
- WARN_ON(type >= 0 && ret != type);
+ WARN_ON(type >= 0 && pmu_type.id != type);
- pmu->type = ret;
+ pmu->type = pmu_type.id;
atomic_set(&pmu->exclusive_cnt, 0);
if (pmu_bus_running && !pmu->dev) {
- ret = pmu_dev_alloc(pmu);
+ int ret = pmu_dev_alloc(pmu);
if (ret)
- goto free;
+ return ret;
}
- ret = -ENOMEM;
pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
if (!pmu->cpu_pmu_context)
- goto free;
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
struct perf_cpu_pmu_context *cpc;
@@ -12000,32 +11997,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
/*
* Now that the PMU is complete, make it visible to perf_try_init_event().
*/
- if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) {
- ret = -EINVAL;
- goto free;
- }
+ if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
+ return -EINVAL;
list_add_rcu(&pmu->entry, &pmus);
- ret = 0;
-unlock:
- mutex_unlock(&pmus_lock);
-
- return ret;
-
-free:
- if (pmu->type >= 0)
- idr_remove(&pmu_idr, pmu->type);
- perf_pmu_free(pmu);
- goto unlock;
+ take_idr_id(pmu_type);
+ _pmu = no_free_ptr(pmu); // let it rip
+ return 0;
}
EXPORT_SYMBOL_GPL(perf_pmu_register);
void perf_pmu_unregister(struct pmu *pmu)
{
- mutex_lock(&pmus_lock);
- list_del_rcu(&pmu->entry);
- idr_remove(&pmu_idr, pmu->type);
- mutex_unlock(&pmus_lock);
+ scoped_guard (mutex, &pmus_lock) {
+ list_del_rcu(&pmu->entry);
+ idr_remove(&pmu_idr, pmu->type);
+ }
/*
* We dereference the pmu list under both SRCU and regular RCU, so