From caa84939a4d8b1189dea8619ccc57bdb3026b125 Mon Sep 17 00:00:00 2001 From: Jeenu Viswambharan Date: Thu, 6 Feb 2014 10:36:15 +0000 Subject: Add support for handling runtime service requests This patch uses the reworked exception handling support to handle runtime service requests through SMCs following the SMC calling convention. This is a giant commit since all the changes are inter-related. It does the following: 1. Replace the old exception handling mechanism with the new one 2. Enforce that SP_EL0 is used C runtime stacks. 3. Ensures that the cold and warm boot paths use the 'cpu_context' structure to program an ERET into the next lower EL. 4. Ensures that SP_EL3 always points to the next 'cpu_context' structure prior to an ERET into the next lower EL 5. Introduces a PSCI SMC handler which completes the use of PSCI as a runtime service Change-Id: I661797f834c0803d2c674d20f504df1b04c2b852 Co-authored-by: Achin Gupta --- bl31/aarch64/bl31_entrypoint.S | 39 +-- bl31/aarch64/exception_handlers.c | 184 -------------- bl31/aarch64/runtime_exceptions.S | 432 +++++++++++++++++++++++++-------- bl31/aarch64/runtime_exceptions_next.S | 432 --------------------------------- bl31/bl31.mk | 1 - bl31/bl31_main.c | 36 ++- bl31/context_mgmt.c | 50 +++- common/psci/psci_common.c | 49 ++-- common/psci/psci_entry.S | 34 ++- common/psci/psci_main.c | 70 ++++++ common/psci/psci_private.h | 10 + common/psci/psci_setup.c | 5 +- common/runtime_svc.c | 3 +- include/aarch64/arch.h | 2 +- include/asm_macros.S | 12 - include/context.h | 4 +- include/context_mgmt.h | 3 + include/runtime_svc.h | 90 +------ lib/arch/aarch64/misc_helpers.S | 48 ---- 19 files changed, 561 insertions(+), 943 deletions(-) delete mode 100644 bl31/aarch64/exception_handlers.c delete mode 100644 bl31/aarch64/runtime_exceptions_next.S diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S index cd0c023..1b8488d 100644 --- a/bl31/aarch64/bl31_entrypoint.S +++ b/bl31/aarch64/bl31_entrypoint.S @@ -31,6 +31,7 @@ #include #include #include +#include "cm_macros.S" .globl bl31_entrypoint @@ -129,6 +130,12 @@ bl31_entrypoint: ; .type bl31_entrypoint, %function ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__ bl zeromem16 + /* --------------------------------------------- + * Use SP_EL0 for the C runtime stack. + * --------------------------------------------- + */ + msr spsel, #0 + /* -------------------------------------------- * Give ourselves a small coherent stack to * ease the pain of initializing the MMU @@ -155,32 +162,26 @@ bl31_entrypoint: ; .type bl31_entrypoint, %function bl platform_set_stack /* --------------------------------------------- - * Use the more complex exception vectors now - * the stacks are setup. + * Jump to main function. * --------------------------------------------- */ - adr x1, runtime_exceptions - msr vbar_el3, x1 + bl bl31_main /* --------------------------------------------- - * Use SP_EL0 to initialize BL31. It allows us - * to jump to the next image without having to - * come back here to ensure all of the stack's - * been popped out. run_image() is not nice - * enough to reset the stack pointer before - * handing control to the next stage. + * Use the more complex exception vectors now + * that context management is setup. SP_EL3 is + * pointing to a 'cpu_context' structure which + * has an exception stack allocated. 
Since + * we're just about to leave this EL with ERET, + * we don't need an ISB here * --------------------------------------------- */ - mov x0, sp - msr sp_el0, x0 - msr spsel, #0 - isb + adr x1, runtime_exceptions + msr vbar_el3, x1 - /* --------------------------------------------- - * Jump to main function. - * --------------------------------------------- - */ - bl bl31_main + zero_callee_saved_regs + b el3_exit _panic: + wfi b _panic diff --git a/bl31/aarch64/exception_handlers.c b/bl31/aarch64/exception_handlers.c deleted file mode 100644 index 3151294..0000000 --- a/bl31/aarch64/exception_handlers.c +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include - -/******************************************************************************* - * This function checks whether this is a valid smc e.g. - * the function id is correct, top word of args are zeroed - * when aarch64 makes an aarch32 call etc. - ******************************************************************************/ -int validate_smc(gp_regs *regs) -{ - unsigned int rw = GET_RW(regs->spsr); - unsigned int cc = GET_SMC_CC(regs->x0); - - /* Check if there is a difference in the caller RW and SMC CC */ - if (rw == cc) { - - /* Check whether the caller has chosen the right func. id */ - if (cc == SMC_64) { - regs->x0 = SMC_UNK; - return SMC_UNK; - } - - /* - * Paranoid check to zero the top word of passed args - * irrespective of caller's register width. - * - * TODO: Check if this needed if the caller is aarch32 - */ - regs->x0 &= (unsigned int) 0xFFFFFFFF; - regs->x1 &= (unsigned int) 0xFFFFFFFF; - regs->x2 &= (unsigned int) 0xFFFFFFFF; - regs->x3 &= (unsigned int) 0xFFFFFFFF; - regs->x4 &= (unsigned int) 0xFFFFFFFF; - regs->x5 &= (unsigned int) 0xFFFFFFFF; - regs->x6 &= (unsigned int) 0xFFFFFFFF; - } - - return 0; -} - -/* TODO: Break down the SMC handler into fast and standard SMC handlers. 
*/ -void smc_handler(unsigned type, unsigned long esr, gp_regs *regs) -{ - /* Check if the SMC has been correctly called */ - if (validate_smc(regs) != 0) - return; - - switch (regs->x0) { - case PSCI_VERSION: - regs->x0 = psci_version(); - break; - - case PSCI_CPU_OFF: - regs->x0 = __psci_cpu_off(); - break; - - case PSCI_CPU_SUSPEND_AARCH64: - case PSCI_CPU_SUSPEND_AARCH32: - regs->x0 = __psci_cpu_suspend(regs->x1, regs->x2, regs->x3); - break; - - case PSCI_CPU_ON_AARCH64: - case PSCI_CPU_ON_AARCH32: - regs->x0 = psci_cpu_on(regs->x1, regs->x2, regs->x3); - break; - - case PSCI_AFFINITY_INFO_AARCH32: - case PSCI_AFFINITY_INFO_AARCH64: - regs->x0 = psci_affinity_info(regs->x1, regs->x2); - break; - - default: - regs->x0 = SMC_UNK; - } - - return; -} - -void irq_handler(unsigned type, unsigned long esr, gp_regs *regs) -{ - plat_report_exception(type); - assert(0); -} - -void fiq_handler(unsigned type, unsigned long esr, gp_regs *regs) -{ - plat_report_exception(type); - assert(0); -} - -void serror_handler(unsigned type, unsigned long esr, gp_regs *regs) -{ - plat_report_exception(type); - assert(0); -} - -void sync_exception_handler(unsigned type, gp_regs *regs) -{ - unsigned long esr = read_esr(); - unsigned int ec = EC_BITS(esr); - - switch (ec) { - - case EC_AARCH32_SMC: - case EC_AARCH64_SMC: - smc_handler(type, esr, regs); - break; - - default: - plat_report_exception(type); - assert(0); - } - return; -} - -void async_exception_handler(unsigned type, gp_regs *regs) -{ - unsigned long esr = read_esr(); - - switch (type) { - - case IRQ_SP_EL0: - case IRQ_SP_ELX: - case IRQ_AARCH64: - case IRQ_AARCH32: - irq_handler(type, esr, regs); - break; - - case FIQ_SP_EL0: - case FIQ_SP_ELX: - case FIQ_AARCH64: - case FIQ_AARCH32: - fiq_handler(type, esr, regs); - break; - - case SERROR_SP_EL0: - case SERROR_SP_ELX: - case SERROR_AARCH64: - case SERROR_AARCH32: - serror_handler(type, esr, regs); - break; - - default: - plat_report_exception(type); - assert(0); - } - - return; -} diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S index 92835dc..10e65dc 100644 --- a/bl31/aarch64/runtime_exceptions.S +++ b/bl31/aarch64/runtime_exceptions.S @@ -30,12 +30,13 @@ #include #include +#include +#include +#include "cm_macros.S" .globl runtime_exceptions - - -#include - + .globl el3_exit + .globl get_exception_stack .section .vectors, "ax"; .align 11 @@ -46,39 +47,32 @@ runtime_exceptions: * ----------------------------------------------------- */ sync_exception_sp_el0: - exception_entry save_regs - mov x0, #SYNC_EXCEPTION_SP_EL0 - mov x1, sp - bl sync_exception_handler - exception_exit restore_regs - eret + /* ----------------------------------------------------- + * We don't expect any synchronous exceptions from EL3 + * ----------------------------------------------------- + */ + wfi + b sync_exception_sp_el0 .align 7 + /* ----------------------------------------------------- + * EL3 code is non-reentrant. Any asynchronous exception + * is a serious error. Loop infinitely. 
+ * ----------------------------------------------------- + */ irq_sp_el0: - exception_entry save_regs - mov x0, #IRQ_SP_EL0 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception IRQ_SP_EL0 + b irq_sp_el0 .align 7 fiq_sp_el0: - exception_entry save_regs - mov x0, #FIQ_SP_EL0 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception FIQ_SP_EL0 + b fiq_sp_el0 .align 7 serror_sp_el0: - exception_entry save_regs - mov x0, #SERROR_SP_EL0 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception SERROR_SP_EL0 + b serror_sp_el0 /* ----------------------------------------------------- * Current EL with SPx: 0x200 - 0x380 @@ -86,39 +80,35 @@ serror_sp_el0: */ .align 7 sync_exception_sp_elx: - exception_entry save_regs - mov x0, #SYNC_EXCEPTION_SP_ELX - mov x1, sp - bl sync_exception_handler - exception_exit restore_regs - eret + /* ----------------------------------------------------- + * This exception will trigger if anything went wrong + * during a previous exception entry or exit or while + * handling an earlier unexpected synchronous exception. + * In any case we cannot rely on SP_EL3. Switching to a + * known safe area of memory will corrupt at least a + * single register. It is best to enter wfi in loop as + * that will preserve the system state for analysis + * through a debugger later. + * ----------------------------------------------------- + */ + wfi + b sync_exception_sp_elx + /* ----------------------------------------------------- + * As mentioned in the previous comment, all bets are + * off if SP_EL3 cannot be relied upon. Report their + * occurrence. + * ----------------------------------------------------- + */ .align 7 irq_sp_elx: - exception_entry save_regs - mov x0, #IRQ_SP_ELX - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret - + b irq_sp_elx .align 7 fiq_sp_elx: - exception_entry save_regs - mov x0, #FIQ_SP_ELX - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret - + b fiq_sp_elx .align 7 serror_sp_elx: - exception_entry save_regs - mov x0, #SERROR_SP_ELX - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + b serror_sp_elx /* ----------------------------------------------------- * Lower EL using AArch64 : 0x400 - 0x580 @@ -126,39 +116,35 @@ serror_sp_elx: */ .align 7 sync_exception_aarch64: - exception_entry save_regs - mov x0, #SYNC_EXCEPTION_AARCH64 - mov x1, sp - bl sync_exception_handler - exception_exit restore_regs - eret + /* ----------------------------------------------------- + * This exception vector will be the entry point for + * SMCs and traps that are unhandled at lower ELs most + * commonly. SP_EL3 should point to a valid cpu context + * where the general purpose and system register state + * can be saved. + * ----------------------------------------------------- + */ + handle_sync_exception .align 7 + /* ----------------------------------------------------- + * Asynchronous exceptions from lower ELs are not + * currently supported. Report their occurrence. 
+ * ----------------------------------------------------- + */ irq_aarch64: - exception_entry save_regs - mov x0, #IRQ_AARCH64 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception IRQ_AARCH64 + b irq_aarch64 .align 7 fiq_aarch64: - exception_entry save_regs - mov x0, #FIQ_AARCH64 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception FIQ_AARCH64 + b fiq_aarch64 .align 7 serror_aarch64: - exception_entry save_regs - mov x0, #IRQ_AARCH32 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception SERROR_AARCH64 + b serror_aarch64 /* ----------------------------------------------------- * Lower EL using AArch32 : 0x600 - 0x780 @@ -166,39 +152,281 @@ serror_aarch64: */ .align 7 sync_exception_aarch32: - exception_entry save_regs - mov x0, #SYNC_EXCEPTION_AARCH32 - mov x1, sp - bl sync_exception_handler - exception_exit restore_regs - eret + /* ----------------------------------------------------- + * This exception vector will be the entry point for + * SMCs and traps that are unhandled at lower ELs most + * commonly. SP_EL3 should point to a valid cpu context + * where the general purpose and system register state + * can be saved. + * ----------------------------------------------------- + */ + handle_sync_exception .align 7 + /* ----------------------------------------------------- + * Asynchronous exceptions from lower ELs are not + * currently supported. Report their occurrence. + * ----------------------------------------------------- + */ irq_aarch32: - exception_entry save_regs - mov x0, #IRQ_AARCH32 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception IRQ_AARCH32 + b irq_aarch32 .align 7 fiq_aarch32: - exception_entry save_regs - mov x0, #FIQ_AARCH32 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs - eret + handle_async_exception FIQ_AARCH32 + b fiq_aarch32 .align 7 serror_aarch32: - exception_entry save_regs - mov x0, #SERROR_AARCH32 - mov x1, sp - bl async_exception_handler - exception_exit restore_regs + handle_async_exception SERROR_AARCH32 + b serror_aarch32 + .align 7 + + .section .text, "ax" + /* ----------------------------------------------------- + * The following code handles secure monitor calls. + * Depending upon the execution state from where the SMC + * has been invoked, it frees some general purpose + * registers to perform the remaining tasks. They + * involve finding the runtime service handler that is + * the target of the SMC & switching to runtime stacks + * (SP_EL0) before calling the handler. + * + * Note that x30 has been explicitly saved and can be + * used here + * ----------------------------------------------------- + */ +smc_handler32: + /* Check whether aarch32 issued an SMC64 */ + tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited + + /* ----------------------------------------------------- + * Since we're are coming from aarch32, x8-x18 need to + * be saved as per SMC32 calling convention. If a lower + * EL in aarch64 is making an SMC32 call then it must + * have saved x8-x17 already therein. 
+ * ----------------------------------------------------- + */ + stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] + stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] + stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] + stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] + stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + + /* x4-x7, x18, sp_el0 are saved below */ + +smc_handler64: + /* ----------------------------------------------------- + * Populate the parameters for the SMC handler. We + * already have x0-x4 in place. x5 will point to a + * cookie (not used now). x6 will point to the context + * structure (SP_EL3) and x7 will contain flags we need + * to pass to the handler Hence save x5-x7. Note that x4 + * only needs to be preserved for AArch32 callers but we + * do it for AArch64 callers as well for convenience + * ----------------------------------------------------- + */ + stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] + stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] + + mov x5, xzr + mov x6, sp + + /* Get the unique owning entity number */ + ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH + ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH + orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH + + adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE) + + /* Load descriptor index from array of indices */ + adr x14, rt_svc_descs_indices + ldrb w15, [x14, x16] + + /* Save x18 and SP_EL0 */ + mrs x17, sp_el0 + stp x18, x17, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] + + /* ----------------------------------------------------- + * Restore the saved C runtime stack value which will + * become the new SP_EL0 i.e. EL3 runtime stack. It was + * saved in the 'cpu_context' structure prior to the last + * ERET from EL3. + * ----------------------------------------------------- + */ + ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] + + /* + * Any index greater than 127 is invalid. Check bit 7 for + * a valid index + */ + tbnz w15, 7, smc_unknown + + /* Switch to SP_EL0 */ + msr spsel, #0 + + /* ----------------------------------------------------- + * Get the descriptor using the index + * x11 = (base + off), x15 = index + * + * handler = (base + off) + (index << log2(size)) + * ----------------------------------------------------- + */ + lsl w10, w15, #RT_SVC_SIZE_LOG2 + ldr x15, [x11, w10, uxtw] + + /* ----------------------------------------------------- + * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there + * is a world switch during SMC handling. + * TODO: Revisit if all system registers can be saved + * later. + * ----------------------------------------------------- + */ + mrs x16, spsr_el3 + mrs x17, elr_el3 + mrs x18, scr_el3 + stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] + stp x18, xzr, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] + + /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */ + bfi x7, x18, #0, #1 + + mov sp, x12 + + /* ----------------------------------------------------- + * Call the Secure Monitor Call handler and then drop + * directly into el3_exit() which will program any + * remaining architectural state prior to issuing the + * ERET to the desired lower EL. + * ----------------------------------------------------- + */ +#if DEBUG + cbz x15, rt_svc_fw_critical_error +#endif + blr x15 + + /* ----------------------------------------------------- + * This routine assumes that the SP_EL3 is pointing to + * a valid context structure from where the gp regs and + * other special registers can be retrieved. 
+ * ----------------------------------------------------- + */ +el3_exit: ; .type el3_exit, %function + /* ----------------------------------------------------- + * Save the current SP_EL0 i.e. the EL3 runtime stack + * which will be used for handling the next SMC. Then + * switch to SP_EL3 + * ----------------------------------------------------- + */ + mov x17, sp + msr spsel, #1 + str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] + + /* ----------------------------------------------------- + * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET + * ----------------------------------------------------- + */ + ldp x18, xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] + ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] + msr scr_el3, x18 + msr spsr_el3, x16 + msr elr_el3, x17 + + /* Restore saved general purpose registers and return */ + bl restore_scratch_registers + ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] eret - .align 7 +smc_unknown: + /* + * Here we restore x4-x18 regardless of where we came from. AArch32 + * callers will find the registers contents unchanged, but AArch64 + * callers will find the registers modified (with stale earlier NS + * content). Either way, we aren't leaking any secure information + * through them + */ + bl restore_scratch_registers_callee + +smc_prohibited: + ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] + mov w0, #SMC_UNK + eret + +rt_svc_fw_critical_error: + b rt_svc_fw_critical_error + + /* ----------------------------------------------------- + * The following functions are used to saved and restore + * all the caller saved registers as per the aapcs_64. + * These are not macros to ensure their invocation fits + * within the 32 instructions per exception vector. + * ----------------------------------------------------- + */ +save_scratch_registers: ; .type save_scratch_registers, %function + stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] + stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] + stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] + stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] + stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] + stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] + stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + mrs x17, sp_el0 + stp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] + ret + +restore_scratch_registers: ; .type restore_scratch_registers, %function + ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + +restore_scratch_registers_callee: + ldp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] + + ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] + ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] + ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] + ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] + ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] + ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] + + msr sp_el0, x17 + ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + ret + + /* ----------------------------------------------------- + * 256 bytes of exception stack for each cpu + * ----------------------------------------------------- + */ +#if DEBUG +#define PCPU_EXCEPTION_STACK_SIZE 0x300 +#else +#define PCPU_EXCEPTION_STACK_SIZE 0x100 +#endif + /* ----------------------------------------------------- + * void get_exception_stack (uint64_t mpidr) : This + * 
function is used to allocate a small stack for + * reporting unhandled exceptions + * ----------------------------------------------------- + */ +get_exception_stack: ; .type get_exception_stack, %function + mov x10, x30 // lr + bl platform_get_core_pos + add x0, x0, #1 + mov x1, #PCPU_EXCEPTION_STACK_SIZE + mul x0, x0, x1 + ldr x1, =pcpu_exception_stack + add x0, x1, x0 + ret x10 + + /* ----------------------------------------------------- + * Per-cpu exception stacks in normal memory. + * ----------------------------------------------------- + */ + .section data, "aw", %nobits; .align 6 + +pcpu_exception_stack: + /* Zero fill */ + .space (PLATFORM_CORE_COUNT * PCPU_EXCEPTION_STACK_SIZE), 0 diff --git a/bl31/aarch64/runtime_exceptions_next.S b/bl31/aarch64/runtime_exceptions_next.S deleted file mode 100644 index 10e65dc..0000000 --- a/bl31/aarch64/runtime_exceptions_next.S +++ /dev/null @@ -1,432 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include "cm_macros.S" - - .globl runtime_exceptions - .globl el3_exit - .globl get_exception_stack - - .section .vectors, "ax"; .align 11 - - .align 7 -runtime_exceptions: - /* ----------------------------------------------------- - * Current EL with _sp_el0 : 0x0 - 0x180 - * ----------------------------------------------------- - */ -sync_exception_sp_el0: - /* ----------------------------------------------------- - * We don't expect any synchronous exceptions from EL3 - * ----------------------------------------------------- - */ - wfi - b sync_exception_sp_el0 - - .align 7 - /* ----------------------------------------------------- - * EL3 code is non-reentrant. Any asynchronous exception - * is a serious error. Loop infinitely. 
- * ----------------------------------------------------- - */ -irq_sp_el0: - handle_async_exception IRQ_SP_EL0 - b irq_sp_el0 - - .align 7 -fiq_sp_el0: - handle_async_exception FIQ_SP_EL0 - b fiq_sp_el0 - - .align 7 -serror_sp_el0: - handle_async_exception SERROR_SP_EL0 - b serror_sp_el0 - - /* ----------------------------------------------------- - * Current EL with SPx: 0x200 - 0x380 - * ----------------------------------------------------- - */ - .align 7 -sync_exception_sp_elx: - /* ----------------------------------------------------- - * This exception will trigger if anything went wrong - * during a previous exception entry or exit or while - * handling an earlier unexpected synchronous exception. - * In any case we cannot rely on SP_EL3. Switching to a - * known safe area of memory will corrupt at least a - * single register. It is best to enter wfi in loop as - * that will preserve the system state for analysis - * through a debugger later. - * ----------------------------------------------------- - */ - wfi - b sync_exception_sp_elx - - /* ----------------------------------------------------- - * As mentioned in the previous comment, all bets are - * off if SP_EL3 cannot be relied upon. Report their - * occurrence. - * ----------------------------------------------------- - */ - .align 7 -irq_sp_elx: - b irq_sp_elx - .align 7 -fiq_sp_elx: - b fiq_sp_elx - .align 7 -serror_sp_elx: - b serror_sp_elx - - /* ----------------------------------------------------- - * Lower EL using AArch64 : 0x400 - 0x580 - * ----------------------------------------------------- - */ - .align 7 -sync_exception_aarch64: - /* ----------------------------------------------------- - * This exception vector will be the entry point for - * SMCs and traps that are unhandled at lower ELs most - * commonly. SP_EL3 should point to a valid cpu context - * where the general purpose and system register state - * can be saved. - * ----------------------------------------------------- - */ - handle_sync_exception - - .align 7 - /* ----------------------------------------------------- - * Asynchronous exceptions from lower ELs are not - * currently supported. Report their occurrence. - * ----------------------------------------------------- - */ -irq_aarch64: - handle_async_exception IRQ_AARCH64 - b irq_aarch64 - - .align 7 -fiq_aarch64: - handle_async_exception FIQ_AARCH64 - b fiq_aarch64 - - .align 7 -serror_aarch64: - handle_async_exception SERROR_AARCH64 - b serror_aarch64 - - /* ----------------------------------------------------- - * Lower EL using AArch32 : 0x600 - 0x780 - * ----------------------------------------------------- - */ - .align 7 -sync_exception_aarch32: - /* ----------------------------------------------------- - * This exception vector will be the entry point for - * SMCs and traps that are unhandled at lower ELs most - * commonly. SP_EL3 should point to a valid cpu context - * where the general purpose and system register state - * can be saved. - * ----------------------------------------------------- - */ - handle_sync_exception - - .align 7 - /* ----------------------------------------------------- - * Asynchronous exceptions from lower ELs are not - * currently supported. Report their occurrence. 
- * ----------------------------------------------------- - */ -irq_aarch32: - handle_async_exception IRQ_AARCH32 - b irq_aarch32 - - .align 7 -fiq_aarch32: - handle_async_exception FIQ_AARCH32 - b fiq_aarch32 - - .align 7 -serror_aarch32: - handle_async_exception SERROR_AARCH32 - b serror_aarch32 - .align 7 - - .section .text, "ax" - /* ----------------------------------------------------- - * The following code handles secure monitor calls. - * Depending upon the execution state from where the SMC - * has been invoked, it frees some general purpose - * registers to perform the remaining tasks. They - * involve finding the runtime service handler that is - * the target of the SMC & switching to runtime stacks - * (SP_EL0) before calling the handler. - * - * Note that x30 has been explicitly saved and can be - * used here - * ----------------------------------------------------- - */ -smc_handler32: - /* Check whether aarch32 issued an SMC64 */ - tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited - - /* ----------------------------------------------------- - * Since we're are coming from aarch32, x8-x18 need to - * be saved as per SMC32 calling convention. If a lower - * EL in aarch64 is making an SMC32 call then it must - * have saved x8-x17 already therein. - * ----------------------------------------------------- - */ - stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] - stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] - stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] - stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] - stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] - - /* x4-x7, x18, sp_el0 are saved below */ - -smc_handler64: - /* ----------------------------------------------------- - * Populate the parameters for the SMC handler. We - * already have x0-x4 in place. x5 will point to a - * cookie (not used now). x6 will point to the context - * structure (SP_EL3) and x7 will contain flags we need - * to pass to the handler Hence save x5-x7. Note that x4 - * only needs to be preserved for AArch32 callers but we - * do it for AArch64 callers as well for convenience - * ----------------------------------------------------- - */ - stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] - stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] - - mov x5, xzr - mov x6, sp - - /* Get the unique owning entity number */ - ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH - ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH - orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH - - adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE) - - /* Load descriptor index from array of indices */ - adr x14, rt_svc_descs_indices - ldrb w15, [x14, x16] - - /* Save x18 and SP_EL0 */ - mrs x17, sp_el0 - stp x18, x17, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] - - /* ----------------------------------------------------- - * Restore the saved C runtime stack value which will - * become the new SP_EL0 i.e. EL3 runtime stack. It was - * saved in the 'cpu_context' structure prior to the last - * ERET from EL3. - * ----------------------------------------------------- - */ - ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] - - /* - * Any index greater than 127 is invalid. 
Check bit 7 for - * a valid index - */ - tbnz w15, 7, smc_unknown - - /* Switch to SP_EL0 */ - msr spsel, #0 - - /* ----------------------------------------------------- - * Get the descriptor using the index - * x11 = (base + off), x15 = index - * - * handler = (base + off) + (index << log2(size)) - * ----------------------------------------------------- - */ - lsl w10, w15, #RT_SVC_SIZE_LOG2 - ldr x15, [x11, w10, uxtw] - - /* ----------------------------------------------------- - * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there - * is a world switch during SMC handling. - * TODO: Revisit if all system registers can be saved - * later. - * ----------------------------------------------------- - */ - mrs x16, spsr_el3 - mrs x17, elr_el3 - mrs x18, scr_el3 - stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] - stp x18, xzr, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] - - /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */ - bfi x7, x18, #0, #1 - - mov sp, x12 - - /* ----------------------------------------------------- - * Call the Secure Monitor Call handler and then drop - * directly into el3_exit() which will program any - * remaining architectural state prior to issuing the - * ERET to the desired lower EL. - * ----------------------------------------------------- - */ -#if DEBUG - cbz x15, rt_svc_fw_critical_error -#endif - blr x15 - - /* ----------------------------------------------------- - * This routine assumes that the SP_EL3 is pointing to - * a valid context structure from where the gp regs and - * other special registers can be retrieved. - * ----------------------------------------------------- - */ -el3_exit: ; .type el3_exit, %function - /* ----------------------------------------------------- - * Save the current SP_EL0 i.e. the EL3 runtime stack - * which will be used for handling the next SMC. Then - * switch to SP_EL3 - * ----------------------------------------------------- - */ - mov x17, sp - msr spsel, #1 - str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] - - /* ----------------------------------------------------- - * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET - * ----------------------------------------------------- - */ - ldp x18, xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] - ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] - msr scr_el3, x18 - msr spsr_el3, x16 - msr elr_el3, x17 - - /* Restore saved general purpose registers and return */ - bl restore_scratch_registers - ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] - eret - -smc_unknown: - /* - * Here we restore x4-x18 regardless of where we came from. AArch32 - * callers will find the registers contents unchanged, but AArch64 - * callers will find the registers modified (with stale earlier NS - * content). Either way, we aren't leaking any secure information - * through them - */ - bl restore_scratch_registers_callee - -smc_prohibited: - ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] - mov w0, #SMC_UNK - eret - -rt_svc_fw_critical_error: - b rt_svc_fw_critical_error - - /* ----------------------------------------------------- - * The following functions are used to saved and restore - * all the caller saved registers as per the aapcs_64. - * These are not macros to ensure their invocation fits - * within the 32 instructions per exception vector. 
- * ----------------------------------------------------- - */ -save_scratch_registers: ; .type save_scratch_registers, %function - stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] - stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] - stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] - stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] - stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] - stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] - stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] - mrs x17, sp_el0 - stp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] - ret - -restore_scratch_registers: ; .type restore_scratch_registers, %function - ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - -restore_scratch_registers_callee: - ldp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] - - ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] - ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] - ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] - ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] - ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] - ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] - - msr sp_el0, x17 - ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] - ret - - /* ----------------------------------------------------- - * 256 bytes of exception stack for each cpu - * ----------------------------------------------------- - */ -#if DEBUG -#define PCPU_EXCEPTION_STACK_SIZE 0x300 -#else -#define PCPU_EXCEPTION_STACK_SIZE 0x100 -#endif - /* ----------------------------------------------------- - * void get_exception_stack (uint64_t mpidr) : This - * function is used to allocate a small stack for - * reporting unhandled exceptions - * ----------------------------------------------------- - */ -get_exception_stack: ; .type get_exception_stack, %function - mov x10, x30 // lr - bl platform_get_core_pos - add x0, x0, #1 - mov x1, #PCPU_EXCEPTION_STACK_SIZE - mul x0, x0, x1 - ldr x1, =pcpu_exception_stack - add x0, x1, x0 - ret x10 - - /* ----------------------------------------------------- - * Per-cpu exception stacks in normal memory. - * ----------------------------------------------------- - */ - .section data, "aw", %nobits; .align 6 - -pcpu_exception_stack: - /* Zero fill */ - .space (PLATFORM_CORE_COUNT * PCPU_EXCEPTION_STACK_SIZE), 0 - diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 280aa88..fec8ebe 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -49,7 +49,6 @@ vpath %.S lib/arch/${ARCH} \ BL31_OBJS += bl31_arch_setup.o \ bl31_entrypoint.o \ - exception_handlers.o \ runtime_exceptions.o \ bl31_main.o \ psci_entry.o \ diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c index dc65b60..9d2dc29 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -48,15 +48,18 @@ void bl31_lib_init() cm_init(); } -void bl31_arch_next_el_setup(void); - /******************************************************************************* * BL31 is responsible for setting up the runtime services for the primary cpu - * before passing control to the bootloader (UEFI) or Linux. + * before passing control to the bootloader (UEFI) or Linux. This function calls + * runtime_svc_init() which initializes all registered runtime services. The run + * time services would setup enough context for the core to swtich to the next + * exception level. 
When this function returns, the core will switch to the
+ * programmed exception level via an ERET.
 ******************************************************************************/
 void bl31_main(void)
 {
-	el_change_info *image_info;
+	el_change_info *next_image_info;
+	uint32_t scr;
 
 	/* Perform remaining generic architectural setup from EL3 */
 	bl31_arch_setup();
@@ -76,10 +79,27 @@ void bl31_main(void)
 	/* Clean caches before re-entering normal world */
 	dcsw_op_all(DCCSW);
 
-	image_info = bl31_get_next_image_info();
+	/*
+	 * Setup minimal architectural state of the next highest EL to
+	 * allow execution in it immediately upon entering it.
+	 */
 	bl31_arch_next_el_setup();
-	change_el(image_info);
 
-	/* There is no valid reason for change_el() to return */
-	assert(0);
+	/* Program EL3 registers to enable entry into the next EL */
+	next_image_info = bl31_get_next_image_info();
+	scr = read_scr();
+	if (next_image_info->security_state == NON_SECURE)
+		scr |= SCR_NS_BIT;
+
+	/*
+	 * Tell the context mgmt. library to ensure that SP_EL3 points to
+	 * the right context to exit from EL3 correctly.
+	 */
+	cm_set_el3_eret_context(next_image_info->security_state,
+				next_image_info->entrypoint,
+				next_image_info->spsr,
+				scr);
+
+	/* Finally set the next context */
+	cm_set_next_eret_context(next_image_info->security_state);
 }
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
index bbdbad2..44ef0d9 100644
--- a/bl31/context_mgmt.c
+++ b/bl31/context_mgmt.c
@@ -143,11 +143,31 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
 }
 
 /*******************************************************************************
- * This function is used to program SP_EL3 to point to the 'cpu_context'
- * structure which will be used for programming the EL3 architectural state to
- * enable an ERET into a lower EL e.g. general purpose registers and system
- * registers like SCR_EL3, SPSR_EL3, SCR_EL3 etc. The same structure will be
- * used to save the same registers after an exception entry from the lower EL.
+ * This function populates the 'cpu_context' pertaining to the given
+ * security state with the entrypoint, SPSR and SCR values so that an ERET from
+ * this security state correctly restores corresponding values to drop the CPU to
+ * the next exception level
+ ******************************************************************************/
+void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
+		uint32_t spsr, uint32_t scr)
+{
+	cpu_context *ctx;
+	el3_state *state;
+
+	ctx = cm_get_context(read_mpidr(), security_state);
+	assert(ctx);
+
+	/* Populate EL3 state so that we have the right context before doing ERET */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
+	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+	write_ctx_reg(state, CTX_SCR_EL3, scr);
+}
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return.
This initializes the SP_EL3 to a pointer to a 'cpu_context' set for + * the required security state ******************************************************************************/ void cm_set_next_eret_context(uint32_t security_state) { @@ -155,6 +175,7 @@ void cm_set_next_eret_context(uint32_t security_state) #if DEBUG uint64_t sp_mode; #endif + ctx = cm_get_context(read_mpidr(), security_state); assert(ctx); @@ -174,3 +195,22 @@ void cm_set_next_eret_context(uint32_t security_state) "msr spsel, #0\n" : : "r" (ctx)); } + +/******************************************************************************* + * This function is used to program exception stack in the 'cpu_context' + * structure. This is the initial stack used for taking and handling exceptions + * at EL3. This stack is expected to be initialized once by each security state + ******************************************************************************/ +void cm_init_exception_stack(uint64_t mpidr, uint32_t security_state) +{ + cpu_context *ctx; + el3_state *state; + + ctx = cm_get_context(mpidr, security_state); + assert(ctx); + + /* Set exception stack in the context */ + state = get_el3state_ctx(ctx); + + write_ctx_reg(state, CTX_EXCEPTION_SP, get_exception_stack(mpidr)); +} diff --git a/common/psci/psci_common.c b/common/psci/psci_common.c index 193655d..214db78 100644 --- a/common/psci/psci_common.c +++ b/common/psci/psci_common.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include "debug.h" @@ -86,7 +87,8 @@ int get_power_on_target_afflvl(unsigned long mpidr) unsigned int state; /* Retrieve our node from the topology tree */ - node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK, MPIDR_AFFLVL0); + node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK, + MPIDR_AFFLVL0); assert(node); /* @@ -222,13 +224,12 @@ int psci_validate_mpidr(unsigned long mpidr, int level) void psci_get_ns_entry_info(unsigned int index) { unsigned long sctlr = 0, scr, el_status, id_aa64pfr0; - gp_regs *ns_gp_regs; + uint64_t mpidr = read_mpidr(); + cpu_context *ns_entry_context; + gp_regs *ns_entry_gpregs; scr = read_scr(); - /* Switch to the non-secure view of the registers */ - write_scr(scr | SCR_NS_BIT); - /* Find out which EL we are going to */ id_aa64pfr0 = read_id_aa64pfr0_el1(); el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & @@ -257,23 +258,29 @@ void psci_get_ns_entry_info(unsigned int index) write_sctlr_el1(sctlr); /* Fulfill the cpu_on entry reqs. as per the psci spec */ - write_scr(scr); - write_elr(psci_ns_entry_info[index].eret_info.entrypoint); + ns_entry_context = (cpu_context *) cm_get_context(mpidr, NON_SECURE); + assert(ns_entry_context); /* - * Set the general purpose registers to ~0 upon entry into the - * non-secure world except for x0 which should contain the - * context id & spsr. This is done directly on the "would be" - * stack pointer. Prior to entry into the non-secure world, an - * offset equivalent to the size of the 'gp_regs' structure is - * added to the sp. This general purpose register context is - * retrieved then. + * Setup general purpose registers to return the context id and + * prevent leakage of secure information into the normal world. 
*/ - ns_gp_regs = (gp_regs *) platform_get_stack(read_mpidr()); - ns_gp_regs--; - memset(ns_gp_regs, ~0, sizeof(*ns_gp_regs)); - ns_gp_regs->x0 = psci_ns_entry_info[index].context_id; - ns_gp_regs->spsr = psci_ns_entry_info[index].eret_info.spsr; + ns_entry_gpregs = get_gpregs_ctx(ns_entry_context); + write_ctx_reg(ns_entry_gpregs, + CTX_GPREG_X0, + psci_ns_entry_info[index].context_id); + + /* + * Tell the context management library to setup EL3 system registers to + * be able to ERET into the ns state, and SP_EL3 points to the right + * context to exit from EL3 correctly. + */ + cm_set_el3_eret_context(NON_SECURE, + psci_ns_entry_info[index].eret_info.entrypoint, + psci_ns_entry_info[index].eret_info.spsr, + scr); + + cm_set_next_eret_context(NON_SECURE); } /******************************************************************************* @@ -344,7 +351,7 @@ int psci_set_ns_entry_info(unsigned int index, */ spsr |= DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT; spsr <<= PSR_DAIF_SHIFT; - if(ee) + if (ee) spsr |= SPSR32_EE_BIT; spsr |= mode; @@ -500,7 +507,7 @@ void psci_afflvl_power_on_finish(unsigned long mpidr, mpidr_aff_map_nodes mpidr_nodes; int rc; - mpidr &= MPIDR_AFFINITY_MASK;; + mpidr &= MPIDR_AFFINITY_MASK; /* * Collect the pointers to the nodes in the topology tree for diff --git a/common/psci/psci_entry.S b/common/psci/psci_entry.S index 28a4143..15e074c 100644 --- a/common/psci/psci_entry.S +++ b/common/psci/psci_entry.S @@ -34,6 +34,7 @@ #include #include #include +#include .globl psci_aff_on_finish_entry .globl psci_aff_suspend_finish_entry @@ -74,6 +75,13 @@ psci_aff_common_finish_entry: msr vbar_el3, x0 isb + /* --------------------------------------------- + * Use SP_EL0 for the C runtime stack. + * --------------------------------------------- + */ + msr spsel, #0 + isb + bl read_mpidr mov x19, x0 bl platform_set_coherent_stack @@ -102,31 +110,19 @@ psci_aff_common_finish_entry: bl platform_set_stack /* --------------------------------------------- - * Now that the execution stack has been set + * Now that the context management has been set * up, enable full runtime exception handling. - * Since we're just about to leave this EL with - * ERET, we don't need an ISB here + * SP_EL3 is pointing to a 'cpu_context' + * structure which has an exception stack + * allocated. Since we're just about to leave + * this EL with ERET, we don't need an ISB here * --------------------------------------------- */ adr x0, runtime_exceptions msr vbar_el3, x0 - /* -------------------------------------------- - * Use the size of the general purpose register - * context to restore the register state - * stashed by earlier code - * -------------------------------------------- - */ - sub sp, sp, #SIZEOF_GPREGS - exception_exit restore_regs - - /* -------------------------------------------- - * Jump back to the non-secure world assuming - * that the elr and spsr setup has been done - * by the finishers - * -------------------------------------------- - */ - eret + zero_callee_saved_regs + b el3_exit _panic: b _panic diff --git a/common/psci/psci_main.c b/common/psci/psci_main.c index a70a21a..67f189d 100644 --- a/common/psci/psci_main.c +++ b/common/psci/psci_main.c @@ -35,6 +35,9 @@ #include #include #include +#include +#include +#include /******************************************************************************* * PSCI frontend api for servicing SMCs. Described in the PSCI spec. 
@@ -199,3 +202,70 @@ void psci_system_reset(void) assert(0); } +/******************************************************************************* + * PSCI top level handler for servicing SMCs. + ******************************************************************************/ +uint64_t psci_smc_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags) +{ + uint64_t rc; + + switch (smc_fid) { + case PSCI_VERSION: + rc = psci_version(); + break; + + case PSCI_CPU_OFF: + rc = __psci_cpu_off(); + break; + + case PSCI_CPU_SUSPEND_AARCH64: + case PSCI_CPU_SUSPEND_AARCH32: + rc = __psci_cpu_suspend(x1, x2, x3); + break; + + case PSCI_CPU_ON_AARCH64: + case PSCI_CPU_ON_AARCH32: + rc = psci_cpu_on(x1, x2, x3); + break; + + case PSCI_AFFINITY_INFO_AARCH32: + case PSCI_AFFINITY_INFO_AARCH64: + rc = psci_affinity_info(x1, x2); + break; + + case PSCI_MIG_AARCH32: + case PSCI_MIG_AARCH64: + rc = psci_migrate(x1); + break; + + case PSCI_MIG_INFO_TYPE: + rc = psci_migrate_info_type(); + break; + + case PSCI_MIG_INFO_UP_CPU_AARCH32: + case PSCI_MIG_INFO_UP_CPU_AARCH64: + rc = psci_migrate_info_up_cpu(); + break; + + case PSCI_SYSTEM_OFF: + psci_system_off(); + assert(0); + + case PSCI_SYSTEM_RESET: + psci_system_reset(); + assert(0); + + default: + rc = SMC_UNK; + WARN("Unimplemented psci call -> 0x%x \n", smc_fid); + } + + SMC_RET1(handle, rc); +} diff --git a/common/psci/psci_private.h b/common/psci/psci_private.h index de9c291..3d7ae74 100644 --- a/common/psci/psci_private.h +++ b/common/psci/psci_private.h @@ -151,6 +151,16 @@ extern int psci_afflvl_suspend(unsigned long, int, int); extern unsigned int psci_afflvl_suspend_finish(unsigned long, int, int); + +/* Private exported functions from psci_main.c */ +extern uint64_t psci_smc_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); #endif /*__ASSEMBLY__*/ #endif /* __PSCI_PRIVATE_H__ */ diff --git a/common/psci/psci_setup.c b/common/psci/psci_setup.c index c471d1f..8d7903c 100644 --- a/common/psci/psci_setup.c +++ b/common/psci/psci_setup.c @@ -195,6 +195,9 @@ static void psci_init_aff_map_node(unsigned long mpidr, cm_set_context(mpidr, (void *) &psci_ns_context[linear_id], NON_SECURE); + + /* Initialize exception stack in the context */ + cm_init_exception_stack(mpidr, NON_SECURE); } return; @@ -348,5 +351,5 @@ DECLARE_RT_SVC( OEN_STD_END, SMC_TYPE_FAST, psci_setup, - NULL + psci_smc_handler ); diff --git a/common/runtime_svc.c b/common/runtime_svc.c index 0ea1bf5..6e8a1bb 100644 --- a/common/runtime_svc.c +++ b/common/runtime_svc.c @@ -41,6 +41,7 @@ #include #include #include +#include /******************************************************************************* * The 'rt_svc_descs' array holds the runtime service descriptors exported by @@ -146,7 +147,7 @@ error: void fault_handler(void *handle) { - gp_regs_next *gpregs_ctx = get_gpregs_ctx(handle); + gp_regs *gpregs_ctx = get_gpregs_ctx(handle); ERROR("Unhandled synchronous fault. 
Register dump @ 0x%x \n", gpregs_ctx); panic(); diff --git a/include/aarch64/arch.h b/include/aarch64/arch.h index e8773d9..69926a3 100644 --- a/include/aarch64/arch.h +++ b/include/aarch64/arch.h @@ -320,7 +320,7 @@ extern void runtime_exceptions(void); extern void bl1_arch_setup(void); extern void bl2_arch_setup(void); extern void bl31_arch_setup(void); - +extern void bl31_arch_next_el_setup(void); #endif /*__ASSEMBLY__*/ #endif /* __ARCH_H__ */ diff --git a/include/asm_macros.S b/include/asm_macros.S index 9de135c..decef0b 100644 --- a/include/asm_macros.S +++ b/include/asm_macros.S @@ -54,18 +54,6 @@ .endm - .macro exception_entry func - stp x29, x30, [sp, #-(SIZEOF_GPREGS - GPREGS_FP_OFF)]! - bl \func - .endm - - - .macro exception_exit func - bl \func - ldp x29, x30, [sp], #(SIZEOF_GPREGS - GPREGS_FP_OFF) - .endm - - .macro smc_check label bl read_esr ubfx x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH diff --git a/include/context.h b/include/context.h index 1c1a3a2..082e511 100644 --- a/include/context.h +++ b/include/context.h @@ -193,7 +193,7 @@ * callee saved registers if used by the C runtime and the assembler * does not touch the remaining. */ -DEFINE_REG_STRUCT(gp_regs_next, CTX_GPREG_ALL); +DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); /* * AArch64 EL1 system register context structure for preserving the @@ -233,7 +233,7 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL); * correspond to either the secure or the non-secure state. */ typedef struct { - gp_regs_next gpregs_ctx; + gp_regs gpregs_ctx; el3_state el3state_ctx; el1_sys_regs sysregs_ctx; fp_regs fpregs_ctx; diff --git a/include/context_mgmt.h b/include/context_mgmt.h index 2150a55..b8c8077 100644 --- a/include/context_mgmt.h +++ b/include/context_mgmt.h @@ -46,6 +46,9 @@ extern void cm_el3_sysregs_context_save(uint32_t security_state); extern void cm_el3_sysregs_context_restore(uint32_t security_state); extern void cm_el1_sysregs_context_save(uint32_t security_state); extern void cm_el1_sysregs_context_restore(uint32_t security_state); +extern void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, + uint32_t spsr, uint32_t scr); extern void cm_set_next_eret_context(uint32_t security_state); +extern void cm_init_exception_stack(uint64_t mpidr, uint32_t security_state); #endif /*__ASSEMBLY__*/ #endif /* __CM_H__ */ diff --git a/include/runtime_svc.h b/include/runtime_svc.h index 3ee043e..ad202a9 100644 --- a/include/runtime_svc.h +++ b/include/runtime_svc.h @@ -101,43 +101,8 @@ #define SERROR_AARCH32 0xf /******************************************************************************* - * Constants that allow assembler code to access members of the 'gp_regs' - * structure at their correct offsets. 
+ * Structure definition, typedefs & constants for the runtime service framework ******************************************************************************/ -#define SIZEOF_GPREGS 0x110 -#define GPREGS_X0_OFF 0x0 -#define GPREGS_X1_OFF 0x8 -#define GPREGS_X2_OFF 0x10 -#define GPREGS_X3_OFF 0x18 -#define GPREGS_X4_OFF 0x20 -#define GPREGS_X5_OFF 0x28 -#define GPREGS_X6_OFF 0x30 -#define GPREGS_X7_OFF 0x38 -#define GPREGS_X8_OFF 0x40 -#define GPREGS_X9_OFF 0x48 -#define GPREGS_X10_OFF 0x50 -#define GPREGS_X11_OFF 0x58 -#define GPREGS_X12_OFF 0x60 -#define GPREGS_X13_OFF 0x68 -#define GPREGS_X14_OFF 0x70 -#define GPREGS_X15_OFF 0x78 -#define GPREGS_X16_OFF 0x80 -#define GPREGS_X17_OFF 0x88 -#define GPREGS_X18_OFF 0x90 -#define GPREGS_X19_OFF 0x98 -#define GPREGS_X20_OFF 0xA0 -#define GPREGS_X21_OFF 0xA8 -#define GPREGS_X22_OFF 0xB0 -#define GPREGS_X23_OFF 0xB8 -#define GPREGS_X24_OFF 0xC0 -#define GPREGS_X25_OFF 0xC8 -#define GPREGS_X26_OFF 0xD0 -#define GPREGS_X27_OFF 0xD8 -#define GPREGS_X28_OFF 0xE0 -#define GPREGS_SP_EL0_OFF 0xE8 -#define GPREGS_SPSR_OFF 0xF0 -#define GPREGS_FP_OFF 0x100 -#define GPREGS_LR_OFF 0x108 /* * Constants to allow the assembler access a runtime service @@ -218,46 +183,6 @@ typedef uint64_t (*rt_svc_handle)(uint32_t smc_fid, void *cookie, void *handle, uint64_t flags); -typedef struct { - uint64_t x0; - uint64_t x1; - uint64_t x2; - uint64_t x3; - uint64_t x4; - uint64_t x5; - uint64_t x6; - uint64_t x7; - uint64_t x8; - uint64_t x9; - uint64_t x10; - uint64_t x11; - uint64_t x12; - uint64_t x13; - uint64_t x14; - uint64_t x15; - uint64_t x16; - uint64_t x17; - uint64_t x18; - uint64_t x19; - uint64_t x20; - uint64_t x21; - uint64_t x22; - uint64_t x23; - uint64_t x24; - uint64_t x25; - uint64_t x26; - uint64_t x27; - uint64_t x28; - uint64_t sp_el0; - uint32_t spsr; - /* - * Alignment constraint which allows save & restore of fp & lr on the - * stack during exception handling - */ - uint64_t fp __aligned(16); - uint64_t lr; -} __aligned(16) gp_regs; - typedef struct { uint8_t start_oen; uint8_t end_oen; @@ -280,16 +205,6 @@ typedef struct { _setup, \ _smch } -/******************************************************************************* - * Compile time assertions to ensure that: - * 1) the assembler code's view of the size of the 'gp_regs' data structure is - * the same as the actual size of this data structure. - * 2) the assembler code's view of the offset of the frame pointer member of - * the 'gp_regs' structure is the same as the actual offset of this member. - ******************************************************************************/ -CASSERT((sizeof(gp_regs) == SIZEOF_GPREGS), assert_sizeof_gpregs_mismatch); -CASSERT(GPREGS_FP_OFF == __builtin_offsetof(gp_regs, fp), \ - assert_gpregs_fp_offset_mismatch); /* * Compile time assertions related to the 'rt_svc_desc' structure to: * 1. 
ensure that the assembler and the compiler view of the size @@ -323,6 +238,7 @@ CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc, handle), \ extern void runtime_svc_init(); extern uint64_t __RT_SVC_DESCS_START__; extern uint64_t __RT_SVC_DESCS_END__; - +extern uint64_t get_exception_stack(uint64_t mpidr); +extern void fault_handler(void *handle); #endif /*__ASSEMBLY__*/ #endif /* __RUNTIME_SVC_H__ */ diff --git a/lib/arch/aarch64/misc_helpers.S b/lib/arch/aarch64/misc_helpers.S index 37258e1..324be76 100644 --- a/lib/arch/aarch64/misc_helpers.S +++ b/lib/arch/aarch64/misc_helpers.S @@ -31,9 +31,6 @@ #include #include - .globl save_regs - .globl restore_regs - .globl enable_irq .globl disable_irq @@ -84,51 +81,6 @@ .section .text, "ax" -save_regs: ; .type save_regs, %function - sub sp, sp, #GPREGS_FP_OFF - stp x0, x1, [sp, #GPREGS_X0_OFF] - stp x2, x3, [sp, #GPREGS_X2_OFF] - stp x4, x5, [sp, #GPREGS_X4_OFF] - stp x6, x7, [sp, #GPREGS_X6_OFF] - stp x8, x9, [sp, #GPREGS_X8_OFF] - stp x10, x11, [sp, #GPREGS_X10_OFF] - stp x12, x13, [sp, #GPREGS_X12_OFF] - stp x14, x15, [sp, #GPREGS_X14_OFF] - stp x16, x17, [sp, #GPREGS_X16_OFF] - stp x18, x19, [sp, #GPREGS_X18_OFF] - stp x20, x21, [sp, #GPREGS_X20_OFF] - stp x22, x23, [sp, #GPREGS_X22_OFF] - stp x24, x25, [sp, #GPREGS_X24_OFF] - stp x26, x27, [sp, #GPREGS_X26_OFF] - mrs x0, sp_el0 - stp x28, x0, [sp, #GPREGS_X28_OFF] - mrs x0, spsr_el3 - str w0, [sp, #GPREGS_SPSR_OFF] - ret - - -restore_regs: ; .type restore_regs, %function - ldr w9, [sp, #GPREGS_SPSR_OFF] - msr spsr_el3, x9 - ldp x28, x9, [sp, #GPREGS_X28_OFF] - msr sp_el0, x9 - ldp x26, x27, [sp, #GPREGS_X26_OFF] - ldp x24, x25, [sp, #GPREGS_X24_OFF] - ldp x22, x23, [sp, #GPREGS_X22_OFF] - ldp x20, x21, [sp, #GPREGS_X20_OFF] - ldp x18, x19, [sp, #GPREGS_X18_OFF] - ldp x16, x17, [sp, #GPREGS_X16_OFF] - ldp x14, x15, [sp, #GPREGS_X14_OFF] - ldp x12, x13, [sp, #GPREGS_X12_OFF] - ldp x10, x11, [sp, #GPREGS_X10_OFF] - ldp x8, x9, [sp, #GPREGS_X8_OFF] - ldp x6, x7, [sp, #GPREGS_X6_OFF] - ldp x4, x5, [sp, #GPREGS_X4_OFF] - ldp x2, x3, [sp, #GPREGS_X2_OFF] - ldp x0, x1, [sp, #GPREGS_X0_OFF] - add sp, sp, #GPREGS_FP_OFF - ret - get_afflvl_shift: ; .type get_afflvl_shift, %function cmp x0, #3 cinc x0, x0, eq -- cgit v1.2.3
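
Note for readers: the sketch below illustrates how a service would plug into the runtime services framework that this patch completes, modelled on the PSCI registration in common/psci/psci_setup.c above. The service name 'demo_svc', the OEN_SIP_* bounds and the int32_t (void) prototype of the setup hook are illustrative assumptions and not part of the patch; only DECLARE_RT_SVC, the rt_svc_handle signature, SMC_RET1 and SMC_UNK are taken from the patch itself.

#include <stdint.h>
#include <runtime_svc.h>

/* One-time setup hook, invoked from runtime_svc_init() during bl31_main() */
static int32_t demo_svc_setup(void)
{
	return 0;
}

/*
 * SMC handler with the rt_svc_handle signature introduced by this patch.
 * 'handle' is the 'cpu_context' that SP_EL3 pointed to when the SMC was
 * taken; SMC_RET1 writes the return value into the saved x0 so that
 * el3_exit restores it to the caller.
 */
static uint64_t demo_svc_smc_handler(uint32_t smc_fid,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	/* Nothing implemented yet: report an unknown SMC to the caller */
	SMC_RET1(handle, SMC_UNK);
}

/*
 * Registration. runtime_svc_init() validates the descriptor and fills
 * rt_svc_descs_indices so that the SMC dispatcher in runtime_exceptions.S
 * can locate this handler from the OEN and call type in the function id.
 */
DECLARE_RT_SVC(
	demo_svc,
	OEN_SIP_START,
	OEN_SIP_END,
	SMC_TYPE_FAST,
	demo_svc_setup,
	demo_svc_smc_handler
);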