diff options
Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r-- | arch/arm64/kernel/entry.S | 75 |
1 file changed, 64 insertions, 11 deletions
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 12e8d2bcb3f9..128a2f11ec9a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <linux/irqchip/arm-gic-v3.h> #include <asm/alternative.h> #include <asm/assembler.h> @@ -100,6 +101,26 @@ .endif mrs x22, elr_el1 mrs x23, spsr_el1 +#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS + /* + * Save the context held in the PMR register and copy the current + * I bit state to the PMR. Re-enable of the I bit happens in later + * code that knows what type of trap we are handling. + */ +alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF + b 1f +alternative_else + mrs_s x20, ICC_PMR_EL1 // Get PMR +alternative_endif + and x20, x20, #ICC_PMR_EL1_G_BIT // Extract mask bit + lsl x20, x20, #PSR_G_PMR_G_SHIFT // Shift to a PSTATE RES0 bit + eor x20, x20, #PSR_G_BIT // Invert bit + orr x23, x20, x23 // Store PMR within PSTATE + mov x20, #ICC_PMR_EL1_MASKED + msr_s ICC_PMR_EL1, x20 // Mask normal interrupts at PMR +1: +#endif + stp lr, x21, [sp, #S_LR] stp x22, x23, [sp, #S_PC] @@ -152,6 +173,22 @@ alternative_else alternative_endif #endif .endif +#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS + /* + * Restore the context to the PMR (and ensure the reserved bit is + * restored to zero before being copied back to the PSR). 
+ */ +alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF + b 1f +alternative_else + and x20, x22, #PSR_G_BIT // Get stolen PSTATE bit +alternative_endif + and x22, x22, #~PSR_G_BIT // Clear stolen bit + lsr x20, x20, #PSR_G_PMR_G_SHIFT // Shift back to PMR mask + eor x20, x20, #ICC_PMR_EL1_UNMASKED // x20 gets 0xf0 or 0xb0 + msr_s ICC_PMR_EL1, x20 // Write to PMR +1: +#endif msr elr_el1, x21 // set up the return data msr spsr_el1, x22 ldp x0, x1, [sp, #16 * 0] @@ -354,14 +391,30 @@ el1_da: mrs x0, far_el1 enable_dbg // re-enable interrupts if they were enabled in the aborted context +#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS +alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF tbnz x23, #7, 1f // PSR_I_BIT - enable_irq + nop + nop + msr daifclr, #2 +1: +alternative_else + tbnz x23, #PSR_G_SHIFT, 1f // PSR_G_BIT + mov x2, #ICC_PMR_EL1_UNMASKED + msr_s ICC_PMR_EL1, x2 + msr daifclr, #2 1: +alternative_endif +#else + tbnz x23, #7, 1f // PSR_I_BIT + enable_irq x2 +1: +#endif mov x2, sp // struct pt_regs bl do_mem_abort // disable interrupts before pulling preserved data off the stack - disable_irq + disable_irq x21 kernel_exit 1 el1_sp_pc: /* @@ -516,7 +569,7 @@ el0_da: */ mrs x26, far_el1 // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg_and_irq x0 ct_user_exit bic x0, x26, #(0xff << 56) mov x1, x25 @@ -529,7 +582,7 @@ el0_ia: */ mrs x26, far_el1 // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg_and_irq x0 ct_user_exit mov x0, x26 orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts @@ -562,7 +615,7 @@ el0_sp_pc: */ mrs x26, far_el1 // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg_and_irq x0 ct_user_exit mov x0, x26 mov x1, x25 @@ -574,7 +627,7 @@ el0_undef: * Undefined instruction */ // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg_and_irq x0 ct_user_exit mov x0, sp bl do_undefinstr @@ -657,7 +710,7 @@ 
ENDPROC(cpu_switch_to) * and this includes saving x0 back into the kernel stack. */ ret_fast_syscall: - disable_irq // disable interrupts + disable_irq x21 // disable interrupts str x0, [sp, #S_X0] // returned x0 ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing and x2, x1, #_TIF_SYSCALL_WORK @@ -667,7 +720,7 @@ ret_fast_syscall: enable_step_tsk x1, x2 kernel_exit 0 ret_fast_syscall_trace: - enable_irq // enable interrupts + enable_irq x0 // enable interrupts b __sys_trace_return_skipped // we already saved x0 /* @@ -677,7 +730,7 @@ work_pending: tbnz x1, #TIF_NEED_RESCHED, work_resched /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ mov x0, sp // 'regs' - enable_irq // enable interrupts for do_notify_resume() + enable_irq x21 // enable interrupts for do_notify_resume() bl do_notify_resume b ret_to_user work_resched: @@ -690,7 +743,7 @@ work_resched: * "slow" syscall return path. */ ret_to_user: - disable_irq // disable interrupts + disable_irq x21 // disable interrupts ldr x1, [tsk, #TI_FLAGS] and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending @@ -720,7 +773,7 @@ el0_svc: mov sc_nr, #__NR_syscalls el0_svc_naked: // compat entry point stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number - enable_dbg_and_irq + enable_dbg_and_irq x16 ct_user_exit 1 ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks |