author     Mark Brown <broonie@kernel.org>   2016-07-29 21:38:36 +0100
committer  Mark Brown <broonie@kernel.org>   2016-07-29 21:38:36 +0100
commit     b0ba6b0a5eb2b51037a07dbf5a7470ca804d575c (patch)
tree       0a4c5f048afa1125d8bc242477965cec699f94ee /include/asm-generic/qspinlock.h
parent     ac248a8b81109d6b41a9338a09538e103f7e31c0 (diff)
parent     b05965f284db3e086022f4e318e46cb5bffb1376 (diff)
Merge tag 'v4.4.16' into linux-linaro-lsk-v4.4  (tag: lsk-v4.4-16.07)

This is the 4.4.16 stable release

# gpg: Signature made Wed 27 Jul 2016 17:48:38 BST using RSA key ID 6092693E
# gpg: requesting key 6092693E from hkp server the.earth.li
# gpg: key 6092693E: public key "Greg Kroah-Hartman (Linux kernel stable release signing key) <greg@kroah.com>" imported
# gpg: public key of ultimately trusted key B4B0BED6 not found
# gpg: 2 marginal(s) needed, 1 complete(s) needed, PGP trust model
# gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
# gpg: Total number processed: 1
# gpg: imported: 1 (RSA: 1)
# gpg: Good signature from "Greg Kroah-Hartman (Linux kernel stable release signing key) <greg@kroah.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 647F 2865 4894 E3BD 4571 99BE 38DB BDC8 6092 693E
Diffstat (limited to 'include/asm-generic/qspinlock.h')
-rw-r--r--  include/asm-generic/qspinlock.h | 53 +++++++++++++++++++------------------------------------
 1 file changed, 17 insertions(+), 36 deletions(-)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 7d633f19e38a..1885fc44b1bc 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -21,37 +21,33 @@
#include <asm-generic/qspinlock_types.h>
/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
+#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
- * queued_spin_lock_slowpath() can ACQUIRE the lock before
- * issuing the unordered store that sets _Q_LOCKED_VAL.
- *
- * See both smp_cond_acquire() sites for more detail.
- *
- * This however means that in code like:
- *
- * spin_lock(A) spin_lock(B)
- * spin_unlock_wait(B) spin_is_locked(A)
- * do_something() do_something()
- *
- * Both CPUs can end up running do_something() because the store
- * setting _Q_LOCKED_VAL will pass through the loads in
- * spin_unlock_wait() and/or spin_is_locked().
+ * See queued_spin_unlock_wait().
*
- * Avoid this by issuing a full memory barrier between the spin_lock()
- * and the loads in spin_unlock_wait() and spin_is_locked().
- *
- * Note that regular mutual exclusion doesn't care about this
- * delayed store.
+ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+ * isn't immediately observable.
*/
- smp_mb();
- return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+ return atomic_read(&lock->val);
}
+#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
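
The hunk above is the heart of the change: queued_spin_is_locked() now treats any nonzero lock word as locked instead of testing only the _Q_LOCKED_MASK byte, which is what allows the smp_mb() to be dropped from this path. Below is a minimal user-space sketch of why that works, with masks modeled on asm-generic/qspinlock_types.h for the NR_CPUS < 16K layout; the constants and helper names here are illustrative assumptions, not kernel code.

/* Sketch only: a user-space model of the qspinlock word.
 * Assumed layout (NR_CPUS < 16K): bits 0-7 locked byte,
 * bits 8-15 pending byte, bits 16-31 MCS queue tail. */
#include <stdatomic.h>
#include <stdio.h>

#define _Q_LOCKED_MASK  0x000000ffU
#define _Q_PENDING_MASK 0x0000ff00U
#define _Q_TAIL_MASK    0xffff0000U

struct qspinlock { atomic_uint val; };

/* Old check: looks only at the locked byte. A locker in the slowpath
 * can already be visible in the tail bits while its unordered store of
 * _Q_LOCKED_VAL is still in flight, so a preceding smp_mb() was needed. */
static int is_locked_old(struct qspinlock *lock)
{
	return atomic_load(&lock->val) & _Q_LOCKED_MASK;
}

/* New check: any nonzero state (locked, pending, or queued waiters)
 * counts as locked, so the delayed _Q_LOCKED_VAL store cannot be missed. */
static int is_locked_new(struct qspinlock *lock)
{
	return atomic_load(&lock->val) != 0;
}

int main(void)
{
	/* A waiter is queued (tail set) but _Q_LOCKED_VAL is not yet visible. */
	struct qspinlock lock = { .val = 0x00020000U };
	printf("old=%d new=%d\n", is_locked_old(&lock), is_locked_new(&lock));
	return 0;
}

With the lock word at 0x00020000, the old check reports 0 while the new one reports 1; that is exactly the window described in the comment block deleted by this hunk.
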
@@ -121,21 +117,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
}
#endif
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
- /* See queued_spin_is_locked() */
- smp_mb();
- while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
- cpu_relax();
-}
-
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
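
For reference, the extern queued_spin_unlock_wait() declared in the first hunk is defined out of line in kernel/locking/qspinlock.c by the upstream fix this stable backport carries. The following is a condensed sketch of that definition, reconstructed from my reading of the upstream commit rather than from this diff, so treat the details (including smp_acquire__after_ctrl_dep()) as an approximation; u32, atomic_read(), cpu_relax() and smp_mb() are the usual kernel primitives.

/* Approximate sketch of the out-of-line queued_spin_unlock_wait(). */
void queued_spin_unlock_wait(struct qspinlock *lock)
{
	u32 val;

	/* Order the caller's prior accesses against the reads below;
	 * this is the barrier that used to live in the inline copy. */
	smp_mb();

	for (;;) {
		val = atomic_read(&lock->val);

		if (!val)                  /* unlocked and uncontended: done */
			goto done;

		if (val & _Q_LOCKED_MASK)  /* an owner holds it: wait below */
			break;

		/* Pending bit set but lock byte clear: the pending locker
		 * will take the lock next, so wait until it does. */
		cpu_relax();
	}

	/* Wait for the current owner to release the lock byte. */
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();

done:
	smp_acquire__after_ctrl_dep();
}

Relative to the inline version deleted in the second hunk, the initial barrier stays, but the loop additionally handles the pending-but-not-yet-locked state, closing the store-reordering window spelled out in the comment removed from queued_spin_is_locked().
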