author    Daniel Thompson <daniel.thompson@linaro.org>  2014-06-12 09:51:33 +0100
committer Daniel Thompson <daniel.thompson@linaro.org>  2014-06-13 09:18:04 +0100
commit    75c8b4a3c9d994ab61050b5f2574d6165575af53 (patch)
tree      83bf337be7b41ad792f20db566649bae339b81bd
parent    294c3b985bcc1ff7ed05870082aebe20b02ab896 (diff)
arm: Make spin locks timeout and dump stack (dev/out-of-tree)
Losing the WFE will badly damage scalability because the spin lock will hammer the memory bus during contention. Thus I don't think this code is suitable to go upstream. On the other hand a quad core system still boots OK and even pretty nasty wedges (for example, in the FIQ handler) should result in a back trace.

The timeout is currently 10 giga-loops, which takes some time to reach (about a minute on a 1GHz test system). The constant can shrink quite a lot without triggering false warnings, but this should only be needed if you *expect* to get stuck continually.

Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
-rw-r--r--  arch/arm/include/asm/spinlock.h | 4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ac4bfae26702..2d9703f7806c 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -60,6 +60,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	unsigned long tmp;
 	u32 newval;
 	arch_spinlock_t lockval;
+	u64 counter = 0;
 
 	prefetchw(&lock->slock);
 	__asm__ __volatile__(
@@ -73,7 +74,8 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
: "cc");
while (lockval.tickets.next != lockval.tickets.owner) {
- wfe();
+ WARN_ON(counter++ == 10000000000);
+ /*wfe();*/
lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
}
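
The pattern in the patch (spin on a ticket lock, count iterations, warn once a threshold is crossed) can be sketched outside the kernel. Below is a minimal, hypothetical userspace analogue for illustration only: C11 atomics stand in for the kernel's ticket lock, fprintf stands in for WARN_ON, the threshold is made deliberately small so the warning is easy to see, and the demo stops spinning after warning so it terminates (the kernel patch keeps spinning). None of these names or choices come from the patch itself.

/*
 * Userspace sketch of a bounded-spin ticket lock that warns when it
 * appears to be stuck, loosely modelled on the patch above.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SPIN_WARN_THRESHOLD 1000000ULL   /* the kernel patch uses 10^10 */

struct ticket_lock {
	atomic_uint next;   /* next ticket to hand out */
	atomic_uint owner;  /* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *lock)
{
	unsigned int ticket = atomic_fetch_add(&lock->next, 1);
	uint64_t counter = 0;

	while (atomic_load(&lock->owner) != ticket) {
		/* Busy-poll with no wfe()/pause, matching the patch. */
		if (++counter == SPIN_WARN_THRESHOLD) {
			fprintf(stderr,
				"ticket_lock: stuck for %llu spins (ticket %u, owner %u)\n",
				(unsigned long long)counter, ticket,
				atomic_load(&lock->owner));
			break;   /* demo only: the kernel code keeps spinning */
		}
	}
}

static void ticket_unlock(struct ticket_lock *lock)
{
	atomic_fetch_add(&lock->owner, 1);
}

int main(void)
{
	struct ticket_lock lock = { 0 };

	ticket_lock(&lock);    /* uncontended: acquires immediately */
	ticket_unlock(&lock);

	/* Simulate a wedge: a phantom holder takes a ticket and never unlocks. */
	atomic_fetch_add(&lock.next, 1);
	ticket_lock(&lock);    /* spins, then warns once the threshold is hit */
	return 0;
}

Compiled with a C11 compiler (e.g. gcc -std=c11), the second ticket_lock() call prints the warning after about a million iterations, which mirrors why the commit message notes the 10-giga-loop constant can be shrunk considerably when a wedge is actually expected.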