aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDaniel Thompson <daniel.thompson@linaro.org>2014-06-12 09:51:33 +0100
committerDaniel Thompson <daniel.thompson@linaro.org>2014-06-20 10:19:48 +0100
commit520d39037a8042284cbb7352c1e88e45c309297e (patch)
tree3b5f6dc03481bbd882ea1f734d9da9a3ef3fff22
parent87df2325c801c179db8f64d7beb325a92a9a1d8d (diff)
arm: Make spin locks timeout and dump stack
Losing the WFE will badly damage scalability because the spin lock will hammer the memory bus during contention. Thus I don't think this code is suitable to go upstream. On the other hand a quad core system still boots OK and even pretty nasty wedges (for example, in the FIQ handler) should result in a back trace. The timeout is currently 10 giga loops which takes some time to reach (about a minute on a 1GHz test system). The constant can shrink quite a lot without triggering false warnings but this should only be needed if you *expect* to get stuck continually. Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
-rw-r--r--arch/arm/include/asm/spinlock.h4
1 files changed, 3 insertions, 1 deletions
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ac4bfae26702..2d9703f7806c 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -60,6 +60,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
unsigned long tmp;
u32 newval;
arch_spinlock_t lockval;
+ u64 counter = 0;
prefetchw(&lock->slock);
__asm__ __volatile__(
@@ -73,7 +74,8 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
: "cc");
while (lockval.tickets.next != lockval.tickets.owner) {
- wfe();
+ WARN_ON(counter++ == 10000000000);
+ /*wfe();*/
lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
}