author     Kirill Tkhai <tkhai@yandex.ru>                       2014-04-17 00:45:24 +0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>      2014-08-14 09:38:24 +0800
commit     7f6073dcb4268b6a848894c8dd07832b39992375 (patch)
tree       ae1c810d67cc73c55c4fd068f9f29658e219cb4f /arch/sparc/kernel
parent     4e81df2a60624359b40ed435804e4b7ea31b545c (diff)
sparc64: Make itc_sync_lock raw
[ Upstream commit 49b6c01f4c1de3b5e5427ac5aba80f9f6d27837a ]

One more place where we must not be able to be preempted or to be
interrupted in RT.

Always actually disable interrupts during synchronization cycle.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--   arch/sparc/kernel/smp_64.c   6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b085311dcd0e..8416d7fadcce 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -151,7 +151,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS	64	/* magic value */
 #define NUM_ITERS	5	/* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC	0
@@ -259,7 +259,7 @@ static void smp_synchronize_one_tick(int cpu)
 	go[MASTER] = 0;
 	membar_safe("#StoreLoad");
 
-	spin_lock_irqsave(&itc_sync_lock, flags);
+	raw_spin_lock_irqsave(&itc_sync_lock, flags);
 	{
 		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 			while (!go[MASTER])
@@ -270,7 +270,7 @@ static void smp_synchronize_one_tick(int cpu)
 			membar_safe("#StoreLoad");
 		}
 	}
-	spin_unlock_irqrestore(&itc_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
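
Editor's note: for readers less familiar with PREEMPT_RT locking, below is a minimal sketch (not part of the patch; lock and function names are hypothetical) of the pattern the change above adopts. On an RT kernel a plain spinlock_t becomes a sleeping lock and does not keep interrupts hard-disabled, whereas a raw_spinlock_t taken with raw_spin_lock_irqsave() does, which is what the tick-synchronization loop requires.

#include <linux/spinlock.h>

/* Hypothetical lock, for illustration only. */
static DEFINE_RAW_SPINLOCK(example_sync_lock);

static void example_timing_critical_section(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() hard-disables local interrupts and is
	 * never converted to a sleeping lock, even on PREEMPT_RT, so the
	 * code between lock and unlock runs without being preempted or
	 * interrupted.
	 */
	raw_spin_lock_irqsave(&example_sync_lock, flags);
	/* ... timing-sensitive work, e.g. a tick synchronization loop ... */
	raw_spin_unlock_irqrestore(&example_sync_lock, flags);
}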