author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-17 11:53:53 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-17 11:53:53 -0700
commit    57ab5f740202babe93c8796013c5cfdfd6d08d7d (patch)
tree      77ad80291bdaa8e95f2a26956a2d7d8625f049fa /include
parent    fdcec00405fae0befdd7bbcbe738b7325e5746fb (diff)
parent    360aa640a59f269b784848c0b2d6d462952750d9 (diff)
Merge tag 'hwlock-v5.3' of git://github.com/andersson/remoteproc
Pull hwspinlock updates from Bjorn Andersson:
 "This contains support for the hardware spinlock on TI K3 AM65x and
  J721E family of SoCs, support for using hwspinlocks from atomic
  context, and better error reporting when dealing with hardware
  disabled in DeviceTree"

* tag 'hwlock-v5.3' of git://github.com/andersson/remoteproc:
  hwspinlock: add the 'in_atomic' API
  hwspinlock: document the hwspinlock 'raw' API
  hwspinlock: stm32: implement the relax() ops
  hwspinlock: ignore disabled device
  hwspinlock/omap: Add a trace during probe
  hwspinlock/omap: Add support for TI K3 SoCs
  dt-bindings: hwlock: Update OMAP binding for TI K3 SoCs
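The headline change is the 'in_atomic' API: unlike the existing variants,
these calls never sleep and do not disable interrupts or preemption
themselves, so they may be issued from a context that is already atomic.
A minimal usage sketch, assuming a driver that has already requested its
struct hwspinlock handle (the function and macro names below are
illustrative, not part of this patch):

#include <linux/hwspinlock.h>

/* Keep this short: in atomic mode the core busy-waits between retries. */
#define DEMO_HWLOCK_TIMEOUT_MS	3

/*
 * Hypothetical helper: touch state shared with a remote processor while
 * running in atomic context (e.g. under a spinlock or in an interrupt
 * handler), using the new HWLOCK_IN_ATOMIC variants.
 */
static int demo_touch_shared(struct hwspinlock *hwlock)
{
	int ret;

	ret = hwspin_lock_timeout_in_atomic(hwlock, DEMO_HWLOCK_TIMEOUT_MS);
	if (ret)
		return ret;	/* most notably -ETIMEDOUT */

	/* ... critical section shared with the other processor ... */

	hwspin_unlock_in_atomic(hwlock);
	return 0;
}

In this mode the core retries with short udelay() spins rather than
sleeping, which is why the new kerneldoc below asks that the timeout not
exceed a few msecs.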
Diffstat (limited to 'include')
-rw-r--r--  include/linux/hwspinlock.h | 61
1 file changed, 58 insertions(+), 3 deletions(-)
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 0afe693be5f4..bfe7c1f1ac6d 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -14,9 +14,10 @@
#include <linux/sched.h>
/* hwspinlock mode argument */
-#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
-#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
-#define HWLOCK_RAW 0x03
+#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
+#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+#define HWLOCK_RAW 0x03
+#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */
struct device;
struct device_node;
@@ -223,6 +224,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
}
/**
+ * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * This function shall be called only from an atomic context.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
@@ -313,6 +331,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
}
/**
+ * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * This function shall be called only from an atomic context and the timeout
+ * value shall not exceed a few msecs.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
@@ -387,6 +427,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
}
/**
+ * hwspin_unlock_in_atomic() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*