diff options
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r-- | arch/arm64/kernel/entry.S | 79 | ||||
-rw-r--r-- | arch/arm64/kernel/smp.c | 32 |
2 files changed, 91 insertions, 20 deletions
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 128a2f11ec9a..a00ff5417cd2 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -252,6 +252,40 @@ alternative_endif mov sp, x19 .endm + .macro trace_hardirqs_off, pstate +#ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS +alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF + bl trace_hardirqs_off + nop +alternative_else + tbnz \pstate, #PSR_G_SHIFT, 1f // PSR_G_BIT + bl trace_hardirqs_off +1: +alternative_endif +#else + bl trace_hardirqs_off +#endif +#endif + .endm + + .macro trace_hardirqs_on, pstate +#ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS +alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF + bl trace_hardirqs_on + nop +alternative_else + tbnz \pstate, #PSR_G_SHIFT, 1f // PSR_G_BIT + bl trace_hardirqs_on +1: +alternative_endif +#else + bl trace_hardirqs_on +#endif +#endif + .endm + /* * These are the registers used in the syscall handler, and allow us to * have in theory up to 7 arguments to a function - x0 to x6. 
@@ -389,20 +423,19 @@ el1_da: * Data abort handling */ mrs x0, far_el1 + enable_nmi enable_dbg // re-enable interrupts if they were enabled in the aborted context #ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF tbnz x23, #7, 1f // PSR_I_BIT nop - nop msr daifclr, #2 1: alternative_else tbnz x23, #PSR_G_SHIFT, 1f // PSR_G_BIT mov x2, #ICC_PMR_EL1_UNMASKED msr_s ICC_PMR_EL1, x2 - msr daifclr, #2 1: alternative_endif #else @@ -415,6 +448,7 @@ alternative_endif // disable interrupts before pulling preserved data off the stack disable_irq x21 + disable_nmi kernel_exit 1 el1_sp_pc: /* @@ -455,11 +489,16 @@ ENDPROC(el1_sync) el1_irq: kernel_entry 1 enable_dbg -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif + trace_hardirqs_off x23 get_thread_info tsk + + /* + * On systems with CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS then + * we do not yet know if this IRQ is a pseudo-NMI or a normal + * interrupt. For that reason we must rely on the irq_handler to + * enable the NMI once the interrupt type is determined. 
+ */ irq_handler #ifdef CONFIG_PREEMPT @@ -470,9 +509,9 @@ el1_irq: bl el1_preempt 1: #endif -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on -#endif + + disable_nmi + trace_hardirqs_on x23 kernel_exit 1 ENDPROC(el1_irq) @@ -569,6 +608,7 @@ el0_da: */ mrs x26, far_el1 // enable interrupts before calling the main handler + enable_nmi enable_dbg_and_irq x0 ct_user_exit bic x0, x26, #(0xff << 56) @@ -582,6 +622,7 @@ el0_ia: */ mrs x26, far_el1 // enable interrupts before calling the main handler + enable_nmi enable_dbg_and_irq x0 ct_user_exit mov x0, x26 @@ -615,6 +656,7 @@ el0_sp_pc: */ mrs x26, far_el1 // enable interrupts before calling the main handler + enable_nmi enable_dbg_and_irq x0 ct_user_exit mov x0, x26 @@ -627,6 +669,7 @@ el0_undef: * Undefined instruction */ // enable interrupts before calling the main handler + enable_nmi enable_dbg_and_irq x0 ct_user_exit mov x0, sp @@ -659,16 +702,18 @@ el0_irq: kernel_entry 0 el0_irq_naked: enable_dbg -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif - + trace_hardirqs_off x23 ct_user_exit + + /* + * On systems with CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS then + * we do not yet know if this IRQ is a pseudo-NMI or a normal + * interrupt. For that reason we must rely on the irq_handler to + * enable the NMI once the interrupt type is determined. 
+ */ irq_handler -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on -#endif + trace_hardirqs_on x23 b ret_to_user ENDPROC(el0_irq) @@ -718,6 +763,7 @@ ret_fast_syscall: and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending enable_step_tsk x1, x2 + disable_nmi kernel_exit 0 ret_fast_syscall_trace: enable_irq x0 // enable interrupts @@ -730,6 +776,7 @@ work_pending: tbnz x1, #TIF_NEED_RESCHED, work_resched /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ mov x0, sp // 'regs' + enable_nmi enable_irq x21 // enable interrupts for do_notify_resume() bl do_notify_resume b ret_to_user @@ -748,6 +795,7 @@ ret_to_user: and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending enable_step_tsk x1, x2 + disable_nmi kernel_exit 0 ENDPROC(ret_to_user) @@ -773,6 +821,7 @@ el0_svc: mov sc_nr, #__NR_syscalls el0_svc_naked: // compat entry point stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number + enable_nmi enable_dbg_and_irq x16 ct_user_exit 1 diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 206dceb725fc..103731f3d74f 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -863,11 +863,15 @@ void handle_IPI(int ipinr, struct pt_regs *regs) #endif case IPI_CPU_BACKTRACE: - printk_nmi_enter(); - irq_enter(); + if (in_nmi()) { + printk_nmi_enter(); + irq_enter(); + } nmi_cpu_backtrace(regs); - irq_exit(); - printk_nmi_exit(); + if (in_nmi()) { + irq_exit(); + printk_nmi_exit(); + } break; #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL @@ -934,13 +938,31 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; } +/* + * IPI_CPU_BACKTRACE is implemented either as a normal IRQ or, + * if the hardware supports it, using a pseudo-NMI. + * + * The mechanism used to implement pseudo-NMI means that in both cases + * testing if the backtrace IPI is disabled requires us to check the + * PSR I bit. 
However in the latter case we cannot use irqs_disabled() + * to check the I bit because, when the pseudo-NMI is active, that + * function examines the GIC PMR instead. + */ +static unsigned long nmi_disabled(void) +{ + unsigned long flags; + + asm volatile("mrs %0, daif" : "=r"(flags) :: "memory"); + return flags & PSR_I_BIT; +} + static void raise_nmi(cpumask_t *mask) { /* * Generate the backtrace directly if we are running in a * calling context that is not preemptible by the backtrace IPI. */ - if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled()) + if (cpumask_test_cpu(smp_processor_id(), mask) && nmi_disabled()) nmi_cpu_backtrace(NULL); smp_cross_call(mask, IPI_CPU_BACKTRACE); |