Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                   1
-rw-r--r--  arch/arm64/include/asm/assembler.h  23
-rw-r--r--  arch/arm64/include/asm/smp.h         2
-rw-r--r--  arch/arm64/kernel/entry.S           77
-rw-r--r--  arch/arm64/kernel/smp.c              7
5 files changed, 95 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0983de767cf8..da4c654e1511 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -88,6 +88,7 @@ config ARM64
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
+ select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index f7b6c11cb4c2..a704eaa8bf1d 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -33,6 +33,29 @@
#include <asm/thread_info.h>
/*
+ * Enable and disable pseudo NMI.
+ */
+ .macro disable_nmi
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ nop
+alternative_else
+ msr daifset, #2
+alternative_endif
+#endif
+ .endm
+
+ .macro enable_nmi
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ nop
+alternative_else
+ msr daifclr, #2
+alternative_endif
+#endif
+ .endm
+
+/*
* Enable and disable interrupts.
*/
.macro disable_irq, tmp
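
Some context on what these macros are doing: with CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS, normal interrupts are masked by raising the GIC priority mask (ICC_PMR_EL1), and PSTATE.I is repurposed to mask pseudo-NMIs, which are simply interrupts configured at a priority above the mask. Without the GIC system-register capability the macros become no-ops, since plain DAIF masking already covers everything. The following self-contained C sketch models this two-level masking scheme; the priority values, structure and helper names are illustrative assumptions, not kernel code:

/*
 * Illustrative model only (not kernel code): with the GIC sysreg
 * interface, "normal" IRQs are masked by raising ICC_PMR_EL1, while
 * PSTATE.I is reserved for masking pseudo-NMIs, whose priority is
 * above the mask.  All names and values here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define PMR_UNMASKED 0xf0 /* assumed "allow everything" value */
#define PMR_MASKED   0x70 /* assumed "block normal IRQs" value */

struct cpu_state {
	unsigned int pmr;   /* models ICC_PMR_EL1 */
	bool daif_i;        /* models PSTATE.I */
};

/* An interrupt is signalled only if its priority beats the mask
 * (numerically lower is higher priority) and PSTATE.I is clear. */
static bool delivered(const struct cpu_state *c, unsigned int prio)
{
	if (c->daif_i)
		return false;       /* disable_nmi: blocks even pseudo-NMIs */
	return prio < c->pmr;       /* PMR only blocks normal interrupts */
}

int main(void)
{
	struct cpu_state c = { .pmr = PMR_MASKED, .daif_i = false };

	printf("normal IRQ (prio 0xa0): %d\n", delivered(&c, 0xa0)); /* 0: masked by PMR */
	printf("pseudo-NMI (prio 0x20): %d\n", delivered(&c, 0x20)); /* 1: bypasses PMR */

	c.daif_i = true;            /* what disable_nmi achieves */
	printf("pseudo-NMI with PSTATE.I set: %d\n", delivered(&c, 0x20)); /* 0 */
	return 0;
}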
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index d050d720a1b4..dc00c0056868 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -35,6 +35,8 @@
#include <linux/cpumask.h>
#include <linux/thread_info.h>
+#define SMP_IPI_NMI_MASK (1 << 6)
+
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
/*
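
The new SMP_IPI_NMI_MASK selects which IPIs should be delivered at pseudo-NMI priority; bit 6 corresponds to IPI_CPU_BACKTRACE, and the BUILD_BUG_ON added in smp.c below ties the two together at compile time. A minimal standalone sketch of that check follows; the IPI_CPU_BACKTRACE value and the BUILD_BUG_ON stand-in are assumptions for illustration, not the kernel's definitions:

#include <stdio.h>

#define BIT(n)                 (1UL << (n))
#define IPI_CPU_BACKTRACE      6          /* assumed value mirroring bit 6 of the mask */
#define SMP_IPI_NMI_MASK       (1 << 6)

/* Simplified stand-in for the kernel's BUILD_BUG_ON(): a negative
 * array size makes the build fail if the condition is true. */
#define BUILD_BUG_ON(cond)     ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	BUILD_BUG_ON(SMP_IPI_NMI_MASK != BIT(IPI_CPU_BACKTRACE));
	printf("SMP_IPI_NMI_MASK = 0x%x\n", SMP_IPI_NMI_MASK);
	return 0;
}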
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 84eda478f810..96a36b849980 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -321,6 +321,40 @@ alternative_endif
mov sp, x19
.endm
+ .macro trace_hardirqs_off, pstate
+#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ bl trace_hardirqs_off
+ nop
+alternative_else
+ tbnz \pstate, #PSR_G_SHIFT, 1f // PSR_G_BIT
+ bl trace_hardirqs_off
+1:
+alternative_endif
+#else
+ bl trace_hardirqs_off
+#endif
+#endif
+ .endm
+
+ .macro trace_hardirqs_on, pstate
+#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ bl trace_hardirqs_on
+ nop
+alternative_else
+ tbnz \pstate, #PSR_G_SHIFT, 1f // PSR_G_BIT
+ bl trace_hardirqs_on
+1:
+alternative_endif
+#else
+ bl trace_hardirqs_on
+#endif
+#endif
+ .endm
+
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - x0 to x6.
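
The two macros above gate the lockdep tracing calls on the saved PSTATE of the interrupted context: when the GIC sysreg interface is in use, the trace_hardirqs_off/on call is skipped if the G bit was already set there, i.e. interrupts were already traced as disabled. A standalone sketch of that gating, using an assumed PSR_G_SHIFT value purely for illustration:

/* Standalone sketch (not the kernel implementation) of the tbnz
 * gating in the macros above.  PSR_G_SHIFT and the helper names are
 * assumptions for illustration. */
#include <stdio.h>

#define PSR_G_SHIFT 22                 /* assumed bit position */
#define PSR_G_BIT   (1u << PSR_G_SHIFT)

static void trace_hardirqs_off(void) { puts("hardirqs: off"); }
static void trace_hardirqs_on(void)  { puts("hardirqs: on"); }

/* Only emit a tracing event if the saved pstate says interrupts
 * were enabled (G bit clear) in the interrupted context. */
static void entry_trace_off(unsigned int saved_pstate)
{
	if (!(saved_pstate & PSR_G_BIT))
		trace_hardirqs_off();
}

static void exit_trace_on(unsigned int saved_pstate)
{
	if (!(saved_pstate & PSR_G_BIT))
		trace_hardirqs_on();
}

int main(void)
{
	entry_trace_off(0);          /* interrupted context had IRQs enabled */
	exit_trace_on(0);
	entry_trace_off(PSR_G_BIT);  /* already disabled: no event emitted */
	exit_trace_on(PSR_G_BIT);
	return 0;
}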
@@ -466,20 +500,19 @@ el1_da:
* Data abort handling
*/
mrs x0, far_el1
+ enable_nmi
enable_dbg
// re-enable interrupts if they were enabled in the aborted context
#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
tbnz x23, #7, 1f // PSR_I_BIT
nop
- nop
msr daifclr, #2
1:
alternative_else
tbnz x23, #PSR_G_SHIFT, 1f // PSR_G_BIT
mov x2, #ICC_PMR_EL1_UNMASKED
msr_s ICC_PMR_EL1, x2
- msr daifclr, #2
1:
alternative_endif
#else
@@ -492,6 +525,7 @@ alternative_endif
// disable interrupts before pulling preserved data off the stack
disable_irq x21
+ disable_nmi
kernel_exit 1
el1_sp_pc:
/*
@@ -532,10 +566,14 @@ ENDPROC(el1_sync)
el1_irq:
kernel_entry 1
enable_dbg
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
+ trace_hardirqs_off x23
+ /*
+ * On systems with CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS we do not
+ * yet know whether this IRQ is a pseudo-NMI or a normal interrupt.
+ * For that reason we must rely on the irq_handler to enable NMIs
+ * once the interrupt type has been determined.
+ */
irq_handler
#ifdef CONFIG_PREEMPT
@@ -546,9 +584,9 @@ el1_irq:
bl el1_preempt
1:
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-#endif
+
+ disable_nmi
+ trace_hardirqs_on x23
kernel_exit 1
ENDPROC(el1_irq)
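
Note that the entry code deliberately stops short of calling enable_nmi here: as the comment above explains, at this point it is not yet known whether the interrupt being taken is a pseudo-NMI. A conceptual C sketch of the classification the handler is expected to perform; all names and the priority threshold are placeholders, not the kernel's GIC driver API:

/* Conceptual sketch only; gic-style names and priorities are
 * placeholders, not the kernel's implementation. */
#include <stdbool.h>
#include <stdio.h>

#define NMI_PRIO 0x20   /* assumed priority reserved for pseudo-NMIs */

struct irq { unsigned int num; unsigned int prio; };

static bool is_nmi_prio(unsigned int prio) { return prio <= NMI_PRIO; }
static void local_nmi_enable(void) { puts("PSTATE.I cleared: pseudo-NMIs unmasked"); }
static void handle_nmi(struct irq *i)    { printf("NMI %u handled with NMIs masked\n", i->num); }
static void handle_normal(struct irq *i) { printf("IRQ %u handled normally\n", i->num); }

/* What the el1_irq comment asks of the handler: classify first,
 * and only unmask pseudo-NMIs on the non-NMI path. */
static void irq_handler(struct irq *i)
{
	if (is_nmi_prio(i->prio)) {
		handle_nmi(i);          /* stay masked for the whole NMI */
		return;
	}
	local_nmi_enable();             /* safe now: it is a normal interrupt */
	handle_normal(i);
}

int main(void)
{
	struct irq nmi  = { .num = 6,  .prio = 0x10 };
	struct irq tick = { .num = 30, .prio = 0xa0 };
	irq_handler(&nmi);
	irq_handler(&tick);
	return 0;
}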
@@ -645,6 +683,7 @@ el0_da:
*/
mrs x26, far_el1
// enable interrupts before calling the main handler
+ enable_nmi
enable_dbg_and_irq x0
ct_user_exit
bic x0, x26, #(0xff << 56)
@@ -658,6 +697,7 @@ el0_ia:
*/
mrs x26, far_el1
// enable interrupts before calling the main handler
+ enable_nmi
enable_dbg_and_irq x0
ct_user_exit
mov x0, x26
@@ -691,6 +731,7 @@ el0_sp_pc:
*/
mrs x26, far_el1
// enable interrupts before calling the main handler
+ enable_nmi
enable_dbg_and_irq x0
ct_user_exit
mov x0, x26
@@ -703,6 +744,7 @@ el0_undef:
* Undefined instruction
*/
// enable interrupts before calling the main handler
+ enable_nmi
enable_dbg_and_irq x0
ct_user_exit
mov x0, sp
@@ -745,16 +787,18 @@ el0_irq:
kernel_entry 0
el0_irq_naked:
enable_dbg
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
-
+ trace_hardirqs_off x23
ct_user_exit
+
+ /*
+ * On systems with CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS we do not
+ * yet know whether this IRQ is a pseudo-NMI or a normal interrupt.
+ * For that reason we must rely on the irq_handler to enable NMIs
+ * once the interrupt type has been determined.
+ */
irq_handler
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-#endif
+ trace_hardirqs_on x23
b ret_to_user
ENDPROC(el0_irq)
@@ -803,6 +847,7 @@ ret_fast_syscall:
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
enable_step_tsk x1, x2
+ disable_nmi
kernel_exit 0
ret_fast_syscall_trace:
enable_irq x0 // enable interrupts
@@ -829,6 +874,7 @@ ret_to_user:
cbnz x2, work_pending
finish_ret_to_user:
enable_step_tsk x1, x2
+ disable_nmi
kernel_exit 0
ENDPROC(ret_to_user)
@@ -854,6 +900,7 @@ el0_svc:
mov sc_nr, #__NR_syscalls
el0_svc_naked: // compat entry point
stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
+ enable_nmi
enable_dbg_and_irq x16
ct_user_exit 1
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a0a06580bcef..1f87a44d0847 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -899,6 +899,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#endif
case IPI_CPU_BACKTRACE:
+ BUILD_BUG_ON(SMP_IPI_NMI_MASK != BIT(IPI_CPU_BACKTRACE));
+
+ if (in_nmi()) {
+ nmi_cpu_backtrace(regs);
+ break;
+ }
+
printk_nmi_enter();
irq_enter();
nmi_cpu_backtrace(regs);
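
The in_nmi() early-out matters because, when the backtrace IPI arrives as a pseudo-NMI, it may have interrupted arbitrary kernel code, so the handler must dump the trace directly rather than re-entering the usual irq_enter()/printk bookkeeping. A standalone sketch of that control flow with stand-in functions (not the kernel implementations):

/* Illustration of the control flow only; every function here is a
 * stand-in, not the kernel's implementation. */
#include <stdbool.h>
#include <stdio.h>

static bool nmi_context;                    /* models in_nmi() */

static bool in_nmi(void)            { return nmi_context; }
static void printk_nmi_enter(void)  { puts("printk: switch to NMI-safe buffer"); }
static void printk_nmi_exit(void)   { puts("printk: back to normal buffer"); }
static void irq_enter(void)         { puts("irq_enter"); }
static void irq_exit(void)          { puts("irq_exit"); }
static void nmi_cpu_backtrace(void) { puts("dump this CPU's backtrace"); }

static void handle_backtrace_ipi(void)
{
	/* Delivered as a pseudo-NMI: we may have interrupted the
	 * bookkeeping itself, so just dump the trace and return. */
	if (in_nmi()) {
		nmi_cpu_backtrace();
		return;
	}

	/* Delivered as a normal IRQ: wrap the dump in the usual
	 * entry/exit accounting. */
	printk_nmi_enter();
	irq_enter();
	nmi_cpu_backtrace();
	irq_exit();
	printk_nmi_exit();
}

int main(void)
{
	nmi_context = true;
	handle_backtrace_ipi();
	nmi_context = false;
	handle_backtrace_ipi();
	return 0;
}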