author     Mark Brown <broonie@kernel.org>  2020-12-10 13:30:11 +0000
committer  Mark Brown <broonie@kernel.org>  2020-12-10 13:30:11 +0000
commit     49ab19a4a51a31cb06992386cec4be82ebca5a2d (patch)
tree       de7d31ec7ded2c8ab8dbdfe5a55fa283068023d0 /kernel/locking/lockdep.c
parent     b0dfd948379c79b8754e224e29b99d30ce0d79b8 (diff)
parent     3b25f337929e73232f0aa990cd68a129f53652e2 (diff)
Merge series "spi: spi-geni-qcom: Use gpio descriptors for CS" from Stephen Boyd <swboyd@chromium.org>:

Collected patches from the two series below and associated tags so they
can be merged in one pile through the spi tree. Merry December!

SPI: https://lore.kernel.org/r/20201202214935.1114381-1-swboyd@chromium.org
cros-ec: https://lore.kernel.org/r/20201203011649.1405292-1-swboyd@chromium.org

Cc: Akash Asthana <akashast@codeaurora.org>
Cc: Simon Glass <sjg@chromium.org>
Cc: Gwendal Grignou <gwendal@chromium.org>
Cc: Douglas Anderson <dianders@chromium.org>
Cc: Alexandru M Stan <amstan@chromium.org>

Stephen Boyd (3):
  platform/chrome: cros_ec_spi: Don't overwrite spi::mode
  platform/chrome: cros_ec_spi: Drop bits_per_word assignment
  spi: spi-geni-qcom: Use the new method of gpio CS control

 drivers/platform/chrome/cros_ec_spi.c | 2 --
 drivers/spi/spi-geni-qcom.c           | 1 +
 2 files changed, 1 insertion(+), 2 deletions(-)

base-commit: b65054597872ce3aefbc6a666385eabdf9e288da
--
https://chromeos.dev
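The spi-geni-qcom diffstat above shows a single added line, consistent with opting the controller into the SPI core's descriptor-based chip-select handling. A minimal sketch of that pattern, assuming a typical probe path (the function name and devm helpers here are illustrative, not the driver's actual code):

        #include <linux/platform_device.h>
        #include <linux/spi/spi.h>

        static int geni_spi_probe_sketch(struct platform_device *pdev)
        {
                struct spi_master *spi;

                spi = devm_spi_alloc_master(&pdev->dev, 0);
                if (!spi)
                        return -ENOMEM;

                /*
                 * Let the SPI core claim the cs-gpios descriptors and
                 * toggle chip select itself, instead of the driver
                 * banging the CS line in a set_cs() callback.
                 */
                spi->use_gpio_descriptors = true;

                return devm_spi_register_master(&pdev->dev, spi);
        }

With use_gpio_descriptors set, the driver drops its hand-rolled CS GPIO handling; the core looks up "cs-gpios" from the device tree on its own.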
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--  kernel/locking/lockdep.c  45
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3e99dfef8408..c1418b47f625 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,7 +84,7 @@ static inline bool lockdep_enabled(void)
if (!debug_locks)
return false;
- if (raw_cpu_read(lockdep_recursion))
+ if (this_cpu_read(lockdep_recursion))
return false;
if (current->lockdep_recursion)
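The raw_cpu_read() to this_cpu_read() change above matters on configurations using the generic per-cpu fallbacks: the raw accessor computes this CPU's slot address and loads from it as two separable steps, so preemption in between can return another CPU's lockdep_recursion and let lockdep re-enter itself. A condensed sketch of the two accessors, loosely following include/linux/percpu-defs.h (simplified, not the exact macros):

        /* unchecked: address generation and load may straddle a preemption */
        #define raw_cpu_read_sketch(pcp)        (*raw_cpu_ptr(&(pcp)))

        /* checked: the CPU is pinned for the duration of the access */
        #define this_cpu_read_sketch(pcp)                               \
        ({                                                              \
                typeof(pcp) __val;                                      \
                preempt_disable_notrace();                              \
                __val = READ_ONCE(*raw_cpu_ptr(&(pcp)));                \
                preempt_enable_notrace();                               \
                __val;                                                  \
        })

The same substitution appears again in the lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on() hunks further down.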
@@ -108,19 +108,21 @@ static inline void lockdep_lock(void)
{
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ __this_cpu_inc(lockdep_recursion);
arch_spin_lock(&__lock);
__owner = current;
- __this_cpu_inc(lockdep_recursion);
}
static inline void lockdep_unlock(void)
{
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
return;
- __this_cpu_dec(lockdep_recursion);
__owner = NULL;
arch_spin_unlock(&__lock);
+ __this_cpu_dec(lockdep_recursion);
}
static inline bool lockdep_assert_locked(void)
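The reordering in lockdep_lock()/lockdep_unlock() makes the per-cpu recursion count bracket the entire graph-lock critical section, so any instrumentation that fires while the lock is being spun on or handed back already sees lockdep as busy. The invariant after this hunk, restated as a sketch:

        __this_cpu_inc(lockdep_recursion);      /* block lockdep re-entry first */
        arch_spin_lock(&__lock);                /* ...then contend for the graph lock */
        /* graph updates run with re-entry blocked on this CPU */
        arch_spin_unlock(&__lock);
        __this_cpu_dec(lockdep_recursion);      /* reopen only once fully unlocked */

Previously the increment happened after arch_spin_lock() and the decrement before arch_spin_unlock(), leaving windows in which a tracepoint or IRQ hook could recurse into lockdep while the graph lock was held.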
@@ -2765,7 +2767,9 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
*
- * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
+ * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same
+ * lock class is held but nest_lock is also held, i.e. we rely on the
+ * nest_lock to avoid the deadlock.
*/
static int
check_deadlock(struct task_struct *curr, struct held_lock *next)
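The reworked comment spells out the nest_lock case: holding several locks of one class is tolerated when an outer lock serializes everyone who nests that way. A hedged illustration, loosely modeled on mm_take_all_locks(); the structure and function names here are made up:

        #include <linux/mutex.h>

        struct bucket {
                struct mutex lock;      /* every instance shares one lock class */
        };

        static void take_all_buckets(struct mutex *outer, struct bucket *b, int n)
        {
                int i;

                mutex_lock(outer);      /* the nest_lock: serializes all nesters */
                for (i = 0; i < n; i++)
                        /* same class n times: check_deadlock() returns 2 */
                        mutex_lock_nest_lock(&b[i].lock, outer);
        }

Because every path that nests bucket locks must take outer first, two tasks can never interleave their bucket acquisitions, which is exactly why returning 2 here is safe.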
@@ -2788,7 +2792,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
if ((next->read == 2) && prev->read)
- return 2;
+ continue;
/*
* We're holding the nest_lock, which serializes this lock's
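Replacing return 2 with continue keeps the scan going over the remaining held locks instead of ending early: a recursive reader atop another reader of the same class is fine, but the other held locks still need checking. For reference, the pattern this clause admits, as lockdep sees it (a sketch; runtime fairness of the underlying lock implementation is a separate question):

        #include <linux/spinlock.h>

        static DEFINE_RWLOCK(sketch_rwlock);

        static void nested_readers(void)
        {
                read_lock(&sketch_rwlock);
                read_lock(&sketch_rwlock);      /* read==2 over prev->read: allowed */
                read_unlock(&sketch_rwlock);
                read_unlock(&sketch_rwlock);
        }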
@@ -3593,15 +3597,12 @@ static int validate_chain(struct task_struct *curr,
if (!ret)
return 0;
/*
- * Mark recursive read, as we jump over it when
- * building dependencies (just like we jump over
- * trylock entries):
- */
- if (ret == 2)
- hlock->read = 2;
- /*
* Add dependency only if this lock is not the head
- * of the chain, and if it's not a secondary read-lock:
+ * of the chain, and if the new lock introduces no more
+ * lock dependency (because we already hold a lock with the
+ * same lock class) nor deadlock (because the nest_lock
+ * serializes nesting locks), see the comments for
+ * check_deadlock().
*/
if (!chain_head && ret != 2) {
if (!check_prevs_add(curr, hlock))
@@ -4057,7 +4058,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
if (unlikely(in_nmi()))
return;
- if (unlikely(__this_cpu_read(lockdep_recursion)))
+ if (unlikely(this_cpu_read(lockdep_recursion)))
return;
if (unlikely(lockdep_hardirqs_enabled())) {
@@ -4126,7 +4127,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
goto skip_checks;
}
- if (unlikely(__this_cpu_read(lockdep_recursion)))
+ if (unlikely(this_cpu_read(lockdep_recursion)))
return;
if (lockdep_hardirqs_enabled()) {
@@ -4396,6 +4397,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
if (unlikely(hlock_class(this)->usage_mask & new_mask))
goto unlock;
+ if (!hlock_class(this)->usage_mask)
+ debug_atomic_dec(nr_unused_locks);
+
hlock_class(this)->usage_mask |= new_mask;
if (new_bit < LOCK_TRACE_STATES) {
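The two added lines move the nr_unused_locks bookkeeping to the moment a class gains its first usage bit of any kind. Presumably this fixes the counter drifting when a class is first marked with a read-only usage bit and so never passes through the LOCK_USED arm of the old switch below. The rule after this hunk, as a sketch:

        /* a class stops being "unused" on its first usage bit, read or write */
        if (!hlock_class(this)->usage_mask)
                debug_atomic_dec(nr_unused_locks);
        hlock_class(this)->usage_mask |= new_mask;

With the decrement handled here, the LOCK_USED case of the switch becomes empty, which is what lets the next hunk collapse it into a simple range check.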
@@ -4403,19 +4407,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
}
- switch (new_bit) {
- case 0 ... LOCK_USED-1:
+ if (new_bit < LOCK_USED) {
ret = mark_lock_irq(curr, this, new_bit);
if (!ret)
return 0;
- break;
-
- case LOCK_USED:
- debug_atomic_dec(nr_unused_locks);
- break;
-
- default:
- break;
}
unlock: