author		Thomas Gleixner <tglx@linutronix.de>	2021-08-15 23:28:25 +0200
committer	Ingo Molnar <mingo@kernel.org>		2021-08-17 17:45:37 +0200
commit		1c143c4b65da09081d644110e619decc49c9dee4
tree		4183119f2daea02c3d6617a996603a0af020cd4e
parent		342a93247e0837101f27bbcca26f402902df98dc
locking/rtmutex: Provide the spin/rwlock core lock function
Provide a simplified version of the rtmutex slowlock function which neither
handles signals nor timeouts, and which is careful about preserving the state
of the blocked task across the lock operation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.770228446@linutronix.de
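The interesting part is the task-state handling: a task that blocks on an
RT-substituted spin/rwlock may already have a sleeping state such as
TASK_INTERRUPTIBLE set by an outer wait loop, so the slow path saves that
state before setting TASK_RTLOCK_WAIT and restores it once the lock is
acquired. A rough sketch of the idea behind the save/restore helpers, using
the saved_state field of the RT substitutions (illustrative only; the real
helpers also serialize against current->pi_lock):

/* Sketch only: not the actual scheduler helpers */
static inline void sketch_save_and_set_rtlock_wait_state(void)
{
	/* Preserve e.g. TASK_INTERRUPTIBLE set by an outer wait loop */
	current->saved_state = READ_ONCE(current->__state);
	WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);
}

static inline void sketch_restore_rtlock_saved_state(void)
{
	/* Put back whatever state the task had before blocking */
	WRITE_ONCE(current->__state, current->saved_state);
	current->saved_state = TASK_RUNNING;
}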
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/rtmutex.c	60
-rw-r--r--	kernel/locking/rtmutex_common.h	2
2 files changed, 61 insertions(+), 1 deletion(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 949781aa54b1..951bef073891 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1416,3 +1416,63 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
return rt_mutex_slowlock(lock, state);
}
#endif /* RT_MUTEX_BUILD_MUTEX */
+
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
+/*
+ * Functions required for spin/rw_lock substitution on RT kernels
+ */
+
+/**
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock: The underlying RT mutex
+ */
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+{
+ struct rt_mutex_waiter waiter;
+
+ lockdep_assert_held(&lock->wait_lock);
+
+ if (try_to_take_rt_mutex(lock, current, NULL))
+ return;
+
+ rt_mutex_init_rtlock_waiter(&waiter);
+
+ /* Save current state and set state to TASK_RTLOCK_WAIT */
+ current_save_and_set_rtlock_wait_state();
+
+ task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+
+ for (;;) {
+ /* Try to acquire the lock again */
+ if (try_to_take_rt_mutex(lock, current, &waiter))
+ break;
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ schedule_rtlock();
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ set_current_state(TASK_RTLOCK_WAIT);
+ }
+
+ /* Restore the task state */
+ current_restore_rtlock_saved_state();
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+ * We might have to fix that up:
+ */
+ fixup_rt_mutex_waiters(lock);
+ debug_rt_mutex_free_waiter(&waiter);
+}
+
+static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ rtlock_slowlock_locked(lock);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+}
+
+#endif /* RT_MUTEX_BUILD_SPINLOCKS */
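
For context, rtlock_slowlock() is only the contended half: lock-side fast
paths are expected to try a cmpxchg of the owner field first and fall back
to the slow path on failure. A minimal sketch of such a caller, assuming the
rt_mutex_cmpxchg_acquire() helper already used elsewhere in rtmutex.c (the
function name below is illustrative):

/* Sketch of a fast/slow path split for an RT-substituted spinlock */
static __always_inline void rtlock_lock_sketch(struct rt_mutex_base *rtm)
{
	/* Uncontended case: atomically install current as owner (NULL -> current) */
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}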
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 424ee0f5e5a4..ccf0e36d6c31 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -181,7 +181,7 @@ static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
waiter->task = NULL;
}

-static inline void rtlock_init_rtmutex_waiter(struct rt_mutex_waiter *waiter)
+static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
rt_mutex_init_waiter(waiter);
waiter->wake_state = TASK_RTLOCK_WAIT;
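
The rename makes clear what the rtlock waiter initializer changes relative to
rt_mutex_init_waiter(): only the wake_state. The wake-up side can then issue
a wakeup that matches the state the waiter blocked in, roughly along these
lines (illustrative sketch, assuming the generic wake_up_state() API):

/* Sketch: waking a waiter according to its recorded wake_state */
static inline void sketch_wake_rt_waiter(struct rt_mutex_waiter *waiter)
{
	/* Rtlock waiters block in TASK_RTLOCK_WAIT, mutex waiters in TASK_NORMAL */
	wake_up_state(waiter->task, waiter->wake_state);
}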