author		Miquel Raynal <miquel.raynal@bootlin.com>	2025-01-24 10:52:35 +0100
committer	Miquel Raynal <miquel.raynal@bootlin.com>	2025-01-24 10:52:35 +0100
commit		0ddeb4fe9d3b501c2c6a3522325d88ee166e02ea (patch)
tree		c7eb9bcfdd32baf6ef11e330c0f72807e3c0116d /kernel/locking
parent		Merge tag 'spi-nor/for-6.14' into mtd/next (diff)
parent		mtd: spinand: skyhigh: Align with recent read from cache variant changes (diff)
Merge tag 'nand/for-6.14' into mtd/next
* Raw NAND changes

A new controller driver, from Nuvoton, has been merged. Bastien Curutchet has contributed a series improving the Davinci controller driver, both in the organization of the code and on the performance side. The binding has also been converted to yaml, received a new OOB layout, and now supports on-die ECC engines. The Qualcomm controller driver has been deeply cleaned to extract some parts of the code into a file shared with the Qualcomm SPI memory controller.

Aside from these main changes, the Cadence binding has been converted to yaml, the brcmnand controller driver has received a small fix, and some more minor changes have also made their way in.

* SPI NAND changes

The SPI NAND subsystem has seen a great improvement with the advent of DTR operations (DDR operations, which may be extended to the address cycles). The first vendor driver to benefit from these improvements is the Winbond driver.

A new manufacturer driver has been added, SkyHigh, which comes with a new constraint for the core: it is impossible to disable the on-die ECC engine. A Foresee device is also now supported.
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/rtmutex.c	21
-rw-r--r--	kernel/locking/rtmutex_api.c	2
2 files changed, 17 insertions, 6 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index ac1365afcc4a..697a56d3d949 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1248,10 +1248,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 
 	/* Check whether the waiter should back out immediately */
 	rtm = container_of(lock, struct rt_mutex, rtmutex);
-	preempt_disable();
 	res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
-	wake_up_q(wake_q);
-	preempt_enable();
 	if (res) {
 		raw_spin_lock(&task->pi_lock);
 		rt_mutex_dequeue(lock, waiter);
@@ -1295,7 +1292,13 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 */
 	get_task_struct(owner);
 
+	preempt_disable();
 	raw_spin_unlock_irq(&lock->wait_lock);
+	/* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
+	wake_up_q(wake_q);
+	wake_q_init(wake_q);
+	preempt_enable();
+
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
@@ -1599,6 +1602,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  *			 or TASK_UNINTERRUPTIBLE)
  * @timeout:	 the pre-initialized and started timer, or NULL for none
  * @waiter:	 the pre-initialized rt_mutex_waiter
+ * @wake_q:	 wake_q of tasks to wake when we drop the lock->wait_lock
  *
  * Must be called with lock->wait_lock held and interrupts disabled
  */
@@ -1606,7 +1610,8 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 					   struct ww_acquire_ctx *ww_ctx,
 					   unsigned int state,
 					   struct hrtimer_sleeper *timeout,
-					   struct rt_mutex_waiter *waiter)
+					   struct rt_mutex_waiter *waiter,
+					   struct wake_q_head *wake_q)
 	__releases(&lock->wait_lock) __acquires(&lock->wait_lock)
 {
 	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
@@ -1637,7 +1642,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
+		preempt_disable();
 		raw_spin_unlock_irq(&lock->wait_lock);
+		if (wake_q) {
+			wake_up_q(wake_q);
+			wake_q_init(wake_q);
+		}
+		preempt_enable();
 
 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
 			rt_mutex_schedule();
@@ -1711,7 +1722,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q);
 
 	if (likely(!ret))
-		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
+		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q);
 
 	if (likely(!ret)) {
 		/* acquired the lock */
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 33ea31d6a7b3..191e4720e546 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -383,7 +383,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter, NULL);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
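
For readers skimming the hunks above: they all apply the kernel's wake_q deferred-wakeup idiom. Tasks to be woken are queued with wake_q_add() while lock->wait_lock is still held, and the actual wakeups are issued with wake_up_q() only once the lock has been dropped, with preemption disabled across the unlock-and-wake window so the freshly woken task cannot preempt the waker half-way through. Below is a minimal, illustrative sketch of that idiom, not the rtmutex code itself; example_release() and its parameters are invented for the example, while DEFINE_WAKE_Q(), wake_q_add(), wake_up_q(), and the raw spinlock/preemption calls are the real kernel APIs.

#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

/* Hypothetical release path, assuming a raw spinlock protecting a wait list. */
static void example_release(raw_spinlock_t *wait_lock,
			    struct task_struct *waiter_task)
{
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irq(wait_lock);
	/* Pick the task(s) to wake while the lock is held: queue only, no wakeup yet. */
	wake_q_add(&wake_q, waiter_task);

	/*
	 * Drop the lock and issue the wakeups as one non-preemptible unit,
	 * mirroring the preempt_disable()/preempt_enable() pairs the patch adds.
	 */
	preempt_disable();
	raw_spin_unlock_irq(wait_lock);
	wake_up_q(&wake_q);	/* wakeups happen with no locks held */
	preempt_enable();
}

The wake_q_init() calls in the patch serve queue reuse: wake_up_q() drains the queue, so it must be re-initialized before the same wake_q_head can collect tasks again on a later pass through the locking loop.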