diff options
| author | Thomas Gleixner <tglx@kernel.org> | 2026-02-24 17:37:04 +0100 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2026-02-27 16:40:09 +0100 |
| commit | 6abfc2bd5b0cff70db99a273f2a161e2273eae6d (patch) | |
| tree | 1d275013704ad808e33d239b1e8b2bbdd58114d1 /kernel/time/hrtimer.c | |
| parent | f2e388a019e4cf83a15883a3d1f1384298e9a6aa (diff) | |
| download | linux-6abfc2bd5b0cff70db99a273f2a161e2273eae6d.tar.gz linux-6abfc2bd5b0cff70db99a273f2a161e2273eae6d.zip | |
hrtimer: Use guards where appropriate
Simplify and tidy up the code where possible.
Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260224163430.275551488@kernel.org
Diffstat (limited to 'kernel/time/hrtimer.c')
| -rw-r--r-- | kernel/time/hrtimer.c | 48 |
1 file changed, 15 insertions, 33 deletions
```diff
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 6e4ac8dea312..a5df3c42e323 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -838,13 +838,12 @@ static void retrigger_next_event(void *arg)
	 * In periodic low resolution mode, the next softirq expiration
	 * must also be updated.
	 */
-	raw_spin_lock(&base->lock);
+	guard(raw_spinlock)(&base->lock);
	hrtimer_update_base(base);
	if (hrtimer_hres_active(base))
		hrtimer_force_reprogram(base, 0);
	else
		hrtimer_update_next_event(base);
-	raw_spin_unlock(&base->lock);
 }
 
 /*
@@ -994,7 +993,6 @@ static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base,
 void clock_was_set(unsigned int bases)
 {
 	cpumask_var_t mask;
-	int cpu;
 
 	if (!hrtimer_highres_enabled() && !tick_nohz_is_active())
 		goto out_timerfd;
@@ -1005,24 +1003,19 @@ void clock_was_set(unsigned int bases)
 	}
 
 	/* Avoid interrupting CPUs if possible */
-	cpus_read_lock();
-	for_each_online_cpu(cpu) {
-		struct hrtimer_cpu_base *cpu_base;
-		unsigned long flags;
+	scoped_guard(cpus_read_lock) {
+		int cpu;
 
-		cpu_base = &per_cpu(hrtimer_bases, cpu);
-		raw_spin_lock_irqsave(&cpu_base->lock, flags);
+		for_each_online_cpu(cpu) {
+			struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 
-		if (update_needs_ipi(cpu_base, bases))
-			cpumask_set_cpu(cpu, mask);
-
-		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+			guard(raw_spinlock_irqsave)(&cpu_base->lock);
+			if (update_needs_ipi(cpu_base, bases))
+				cpumask_set_cpu(cpu, mask);
+		}
+		scoped_guard(preempt)
+			smp_call_function_many(mask, retrigger_next_event, NULL, 1);
 	}
-
-	preempt_disable();
-	smp_call_function_many(mask, retrigger_next_event, NULL, 1);
-	preempt_enable();
-	cpus_read_unlock();
 
 	free_cpumask_var(mask);
 out_timerfd:
@@ -1600,15 +1593,11 @@ u64 hrtimer_get_next_event(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	u64 expires = KTIME_MAX;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+	guard(raw_spinlock_irqsave)(&cpu_base->lock);
 	if (!hrtimer_hres_active(cpu_base))
 		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
-	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
-
 	return expires;
 }
 
@@ -1623,25 +1612,18 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	u64 expires = KTIME_MAX;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&cpu_base->lock, flags);
+	guard(raw_spinlock_irqsave)(&cpu_base->lock);
 	if (hrtimer_hres_active(cpu_base)) {
 		unsigned int active;
 
 		if (!cpu_base->softirq_activated) {
 			active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
-			expires = __hrtimer_next_event_base(cpu_base, exclude,
-							    active, KTIME_MAX);
+			expires = __hrtimer_next_event_base(cpu_base, exclude, active, KTIME_MAX);
 		}
 		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
-		expires = __hrtimer_next_event_base(cpu_base, exclude, active,
-						    expires);
+		expires = __hrtimer_next_event_base(cpu_base, exclude, active, expires);
 	}
-
-	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
-
 	return expires;
 }
 #endif
```
