-rw-r--r--  bl31/aarch64/runtime_exceptions_next.S | 432
-rw-r--r--  common/runtime_svc.c                   |   9
-rw-r--r--  include/asm_macros.S                   |  14
-rw-r--r--  include/cm_macros.S                    | 115
-rw-r--r--  include/context.h                      |  49
-rw-r--r--  include/debug.h                        |   5
-rw-r--r--  include/runtime_svc.h                  |  12
7 files changed, 622 insertions, 14 deletions
diff --git a/bl31/aarch64/runtime_exceptions_next.S b/bl31/aarch64/runtime_exceptions_next.S
new file mode 100644
index 0000000..10e65dc
--- /dev/null
+++ b/bl31/aarch64/runtime_exceptions_next.S
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <runtime_svc.h>
+#include <platform.h>
+#include <context.h>
+#include "cm_macros.S"
+
+ .globl runtime_exceptions
+ .globl el3_exit
+ .globl get_exception_stack
+
+ .section .vectors, "ax"; .align 11
+
+ .align 7
+runtime_exceptions:
+ /* -----------------------------------------------------
+ * Current EL with _sp_el0 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+sync_exception_sp_el0:
+ /* -----------------------------------------------------
+ * We don't expect any synchronous exceptions from EL3
+ * -----------------------------------------------------
+ */
+ wfi
+ b sync_exception_sp_el0
+
+ .align 7
+ /* -----------------------------------------------------
+ * EL3 code is non-reentrant. Any asynchronous exception
+ * is a serious error. Loop infinitely.
+ * -----------------------------------------------------
+ */
+irq_sp_el0:
+ handle_async_exception IRQ_SP_EL0
+ b irq_sp_el0
+
+ .align 7
+fiq_sp_el0:
+ handle_async_exception FIQ_SP_EL0
+ b fiq_sp_el0
+
+ .align 7
+serror_sp_el0:
+ handle_async_exception SERROR_SP_EL0
+ b serror_sp_el0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x380
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_exception_sp_elx:
+ /* -----------------------------------------------------
+ * This exception will trigger if anything went wrong
+ * during a previous exception entry or exit or while
+ * handling an earlier unexpected synchronous exception.
+ * In any case we cannot rely on SP_EL3. Switching to a
+ * known safe area of memory will corrupt at least a
+ * single register. It is best to spin in a wfi loop as
+ * that will preserve the system state for later analysis
+ * through a debugger.
+ * -----------------------------------------------------
+ */
+ wfi
+ b sync_exception_sp_elx
+
+ /* -----------------------------------------------------
+ * As mentioned in the previous comment, all bets are
+ * off if SP_EL3 cannot be relied upon. Loop in place
+ * so that the state is preserved for a debugger.
+ * -----------------------------------------------------
+ */
+ .align 7
+irq_sp_elx:
+ b irq_sp_elx
+ .align 7
+fiq_sp_elx:
+ b fiq_sp_elx
+ .align 7
+serror_sp_elx:
+ b serror_sp_elx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x580
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_exception_aarch64:
+ /* -----------------------------------------------------
+ * This exception vector will most commonly be the entry
+ * point for SMCs and for traps that are unhandled at
+ * lower ELs. SP_EL3 should point to a valid cpu context
+ * where the general purpose and system register state
+ * can be saved.
+ * -----------------------------------------------------
+ */
+ handle_sync_exception
+
+ .align 7
+ /* -----------------------------------------------------
+ * Asynchronous exceptions from lower ELs are not
+ * currently supported. Report their occurrence.
+ * -----------------------------------------------------
+ */
+irq_aarch64:
+ handle_async_exception IRQ_AARCH64
+ b irq_aarch64
+
+ .align 7
+fiq_aarch64:
+ handle_async_exception FIQ_AARCH64
+ b fiq_aarch64
+
+ .align 7
+serror_aarch64:
+ handle_async_exception SERROR_AARCH64
+ b serror_aarch64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x780
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_exception_aarch32:
+ /* -----------------------------------------------------
+ * This exception vector will most commonly be the entry
+ * point for SMCs and for traps that are unhandled at
+ * lower ELs. SP_EL3 should point to a valid cpu context
+ * where the general purpose and system register state
+ * can be saved.
+ * -----------------------------------------------------
+ */
+ handle_sync_exception
+
+ .align 7
+ /* -----------------------------------------------------
+ * Asynchronous exceptions from lower ELs are not
+ * currently supported. Report their occurrence.
+ * -----------------------------------------------------
+ */
+irq_aarch32:
+ handle_async_exception IRQ_AARCH32
+ b irq_aarch32
+
+ .align 7
+fiq_aarch32:
+ handle_async_exception FIQ_AARCH32
+ b fiq_aarch32
+
+ .align 7
+serror_aarch32:
+ handle_async_exception SERROR_AARCH32
+ b serror_aarch32
+ .align 7
+
+ .section .text, "ax"
+ /* -----------------------------------------------------
+ * The following code handles secure monitor calls.
+ * Depending upon the execution state from which the SMC
+ * has been invoked, it saves some general purpose
+ * registers to free them up for the remaining tasks:
+ * finding the runtime service handler that is the
+ * target of the SMC and switching to the runtime stack
+ * (SP_EL0) before calling the handler.
+ *
+ * Note that x30 has been explicitly saved and can be
+ * used here
+ * -----------------------------------------------------
+ */
+smc_handler32:
+ /* Check whether aarch32 issued an SMC64 */
+ tbnz x0, #FUNCID_CC_SHIFT, smc_prohibited
+
+ /* -----------------------------------------------------
+ * Since we are coming from aarch32, x8-x18 need to
+ * be saved as per the SMC32 calling convention. If a
+ * lower EL in aarch64 is making an SMC32 call then it
+ * must have saved x8-x17 itself already.
+ * -----------------------------------------------------
+ */
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+
+ /* x4-x7, x18, sp_el0 are saved below */
+
+smc_handler64:
+ /* -----------------------------------------------------
+ * Populate the parameters for the SMC handler. We
+ * already have x0-x4 in place. x5 will point to a
+ * cookie (not used now). x6 will point to the context
+ * structure (SP_EL3) and x7 will contain flags we need
+ * to pass to the handler. Hence save x5-x7. Note that x4
+ * only needs to be preserved for AArch32 callers but we
+ * do it for AArch64 callers as well for convenience.
+ * -----------------------------------------------------
+ */
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+
+ mov x5, xzr
+ mov x6, sp
+
+ /* Get the unique owning entity number */
+ ubfx x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
+ ubfx x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
+ orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH
+
+ adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
+
+ /* Load descriptor index from array of indices */
+ adr x14, rt_svc_descs_indices
+ ldrb w15, [x14, x16]
+
+ /* Save x18 and SP_EL0 */
+ mrs x17, sp_el0
+ stp x18, x17, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+
+ /* -----------------------------------------------------
+ * Restore the saved C runtime stack value which will
+ * become the new SP_EL0 i.e. EL3 runtime stack. It was
+ * saved in the 'cpu_context' structure prior to the last
+ * ERET from EL3.
+ * -----------------------------------------------------
+ */
+ ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+ /*
+ * Any index greater than 127 is invalid. Check bit 7 for
+ * a valid index
+ */
+ tbnz w15, 7, smc_unknown
+
+ /* Switch to SP_EL0 */
+ msr spsel, #0
+
+ /* -----------------------------------------------------
+ * Get the descriptor using the index
+ * x11 = (base + off), x15 = index
+ *
+ * handler = (base + off) + (index << log2(size))
+ * -----------------------------------------------------
+ */
+ lsl w10, w15, #RT_SVC_SIZE_LOG2
+ ldr x15, [x11, w10, uxtw]
+
+ /* -----------------------------------------------------
+ * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there
+ * is a world switch during SMC handling.
+ * TODO: Revisit if all system registers can be saved
+ * later.
+ * -----------------------------------------------------
+ */
+ mrs x16, spsr_el3
+ mrs x17, elr_el3
+ mrs x18, scr_el3
+ stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+ stp x18, xzr, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+
+ /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
+ bfi x7, x18, #0, #1
+
+ mov sp, x12
+
+ /* -----------------------------------------------------
+ * Call the Secure Monitor Call handler and then drop
+ * directly into el3_exit() which will program any
+ * remaining architectural state prior to issuing the
+ * ERET to the desired lower EL.
+ * -----------------------------------------------------
+ */
+#if DEBUG
+ cbz x15, rt_svc_fw_critical_error
+#endif
+ blr x15
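
The handler lookup performed by the instruction sequence above can be restated in C. This is only an illustrative sketch: the FUNCID_* and RT_SVC_* constants, the rt_svc_descs_indices array and the __RT_SVC_DESCS_START__ linker symbol come from runtime_svc.h/runtime_svc.c, while the function name, handler typedef and local variables below are hypothetical.

    #include <stdint.h>
    #include <runtime_svc.h>   /* FUNCID_*, RT_SVC_SIZE_LOG2, RT_SVC_DESC_HANDLE */

    extern uint8_t rt_svc_descs_indices[];     /* filled in by runtime_svc_init() */
    extern uintptr_t __RT_SVC_DESCS_START__;   /* start of the descriptor array */

    /* Illustrative handler type: x0-x4, cookie (x5), handle (x6), flags (x7) */
    typedef uint64_t (*smc_handler_fn)(uint32_t smc_fid, uint64_t x1, uint64_t x2,
                                       uint64_t x3, uint64_t x4, void *cookie,
                                       void *handle, uint64_t flags);

    static smc_handler_fn find_smc_handler(uint32_t smc_fid)
    {
            /* Unique owning entity number = OEN plus the fast/standard type bit */
            uint32_t oen = (smc_fid >> FUNCID_OEN_SHIFT) &
                           ((1U << FUNCID_OEN_WIDTH) - 1U);
            uint32_t type = (smc_fid >> FUNCID_TYPE_SHIFT) &
                            ((1U << FUNCID_TYPE_WIDTH) - 1U);
            uint32_t unique_oen = oen | (type << FUNCID_OEN_WIDTH);

            /* Bit 7 set in the index array marks an unregistered service */
            uint8_t index = rt_svc_descs_indices[unique_oen];
            if (index & 0x80U)
                    return NULL;        /* the assembly branches to smc_unknown */

            /* handler = descs_start + handle_offset + index * sizeof(descriptor) */
            uintptr_t handler_slot = (uintptr_t)&__RT_SVC_DESCS_START__ +
                                     RT_SVC_DESC_HANDLE +
                                     ((uintptr_t)index << RT_SVC_SIZE_LOG2);
            return *(smc_handler_fn *)handler_slot;
    }
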
+
+ /* -----------------------------------------------------
+ * This routine assumes that the SP_EL3 is pointing to
+ * a valid context structure from where the gp regs and
+ * other special registers can be retrieved.
+ * -----------------------------------------------------
+ */
+el3_exit: ; .type el3_exit, %function
+ /* -----------------------------------------------------
+ * Save the current SP_EL0 i.e. the EL3 runtime stack
+ * which will be used for handling the next SMC. Then
+ * switch to SP_EL3
+ * -----------------------------------------------------
+ */
+ mov x17, sp
+ msr spsel, #1
+ str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+ /* -----------------------------------------------------
+ * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
+ * -----------------------------------------------------
+ */
+ ldp x18, xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+ ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+ msr scr_el3, x18
+ msr spsr_el3, x16
+ msr elr_el3, x17
+
+ /* Restore saved general purpose registers and return */
+ bl restore_scratch_registers
+ ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ eret
+
+smc_unknown:
+ /*
+ * Here we restore x4-x18 regardless of where we came from. AArch32
+ * callers will find the register contents unchanged, but AArch64
+ * callers will find the registers modified (with stale earlier NS
+ * content). Either way, we aren't leaking any secure information
+ * through them.
+ */
+ bl restore_scratch_registers_callee
+
+smc_prohibited:
+ ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ mov w0, #SMC_UNK
+ eret
+
+rt_svc_fw_critical_error:
+ b rt_svc_fw_critical_error
+
+ /* -----------------------------------------------------
+ * The following functions are used to save and restore
+ * all the caller-saved registers as per AAPCS64.
+ * These are not macros to ensure their invocation fits
+ * within the 32 instructions per exception vector.
+ * -----------------------------------------------------
+ */
+save_scratch_registers: ; .type save_scratch_registers, %function
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ mrs x17, sp_el0
+ stp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ret
+
+restore_scratch_registers: ; .type restore_scratch_registers, %function
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+restore_scratch_registers_callee:
+ ldp x18, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+
+ msr sp_el0, x17
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ret
+
+ /* -----------------------------------------------------
+ * Per-cpu exception stack: 0x100 bytes (0x300 in DEBUG builds)
+ * -----------------------------------------------------
+ */
+#if DEBUG
+#define PCPU_EXCEPTION_STACK_SIZE 0x300
+#else
+#define PCPU_EXCEPTION_STACK_SIZE 0x100
+#endif
+ /* -----------------------------------------------------
+ * uint64_t get_exception_stack(uint64_t mpidr): This
+ * function returns the top of a small per-cpu stack
+ * used for reporting unhandled exceptions
+ * -----------------------------------------------------
+ */
+get_exception_stack: ; .type get_exception_stack, %function
+ mov x10, x30 // lr
+ bl platform_get_core_pos
+ add x0, x0, #1
+ mov x1, #PCPU_EXCEPTION_STACK_SIZE
+ mul x0, x0, x1
+ ldr x1, =pcpu_exception_stack
+ add x0, x1, x0
+ ret x10
+
+ /* -----------------------------------------------------
+ * Per-cpu exception stacks in normal memory.
+ * -----------------------------------------------------
+ */
+ .section data, "aw", %nobits; .align 6
+
+pcpu_exception_stack:
+ /* Zero fill */
+ .space (PLATFORM_CORE_COUNT * PCPU_EXCEPTION_STACK_SIZE), 0
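
In C terms, the lookup done by get_exception_stack() above reduces to the arithmetic below. platform_get_core_pos() and pcpu_exception_stack are the symbols used by the assembly; the C prototypes, the function name and the local define are assumptions for illustration. Stacks grow down, so the routine hands back the exclusive top of the core's slot.

    #define PCPU_EXCEPTION_STACK_SIZE 0x100   /* 0x300 in DEBUG builds, as above */

    extern unsigned int platform_get_core_pos(unsigned long mpidr);
    extern unsigned char pcpu_exception_stack[];

    unsigned long get_exception_stack_c(unsigned long mpidr)
    {
            unsigned int core_pos = platform_get_core_pos(mpidr);

            /* Top of this core's slot: base + (core_pos + 1) * slot size */
            return (unsigned long)pcpu_exception_stack +
                   ((core_pos + 1UL) * PCPU_EXCEPTION_STACK_SIZE);
    }
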
+
diff --git a/common/runtime_svc.c b/common/runtime_svc.c
index c059078..0ea1bf5 100644
--- a/common/runtime_svc.c
+++ b/common/runtime_svc.c
@@ -39,6 +39,7 @@
#include <bl_common.h>
#include <psci.h>
#include <runtime_svc.h>
+#include <context.h>
#include <debug.h>
/*******************************************************************************
@@ -142,3 +143,11 @@ void runtime_svc_init()
error:
panic();
}
+
+void fault_handler(void *handle)
+{
+ gp_regs_next *gpregs_ctx = get_gpregs_ctx(handle);
+ ERROR("Unhandled synchronous fault. Register dump @ 0x%x\n",
+ gpregs_ctx);
+ panic();
+}
diff --git a/include/asm_macros.S b/include/asm_macros.S
index 4294be4..9de135c 100644
--- a/include/asm_macros.S
+++ b/include/asm_macros.S
@@ -39,18 +39,18 @@
.macro dcache_line_size reg, tmp
- mrs \tmp, ctr_el0
+ mrs \tmp, ctr_el0
ubfx \tmp, \tmp, #16, #4
- mov \reg, #4
- lsl \reg, \reg, \tmp
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
.endm
.macro icache_line_size reg, tmp
- mrs \tmp, ctr_el0
- and \tmp, \tmp, #0xf
- mov \reg, #4
- lsl \reg, \reg, \tmp
+ mrs \tmp, ctr_el0
+ and \tmp, \tmp, #0xf
+ mov \reg, #4
+ lsl \reg, \reg, \tmp
.endm
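
The two macros above read CTR_EL0 and scale the DminLine/IminLine fields, which encode the minimum line length as log2 of the number of words. A C restatement, with a hypothetical read_ctr_el0() accessor standing in for the MRS:

    #include <stdint.h>

    extern uint64_t read_ctr_el0(void);   /* assumed wrapper around 'mrs ..., ctr_el0' */

    static inline uint64_t dcache_line_size_bytes(void)
    {
            uint64_t dminline = (read_ctr_el0() >> 16) & 0xfU;   /* CTR_EL0[19:16] */
            return 4U << dminline;                               /* words -> bytes */
    }

    static inline uint64_t icache_line_size_bytes(void)
    {
            uint64_t iminline = read_ctr_el0() & 0xfU;           /* CTR_EL0[3:0] */
            return 4U << iminline;
    }
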
diff --git a/include/cm_macros.S b/include/cm_macros.S
new file mode 100644
index 0000000..9eeec18
--- /dev/null
+++ b/include/cm_macros.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+ /* ---------------------------------------------
+ * Zero out the callee saved registers to prevent
+ * leakage of secure state into the normal world
+ * during the first ERET after a cold/warm boot.
+ * ---------------------------------------------
+ */
+ .macro zero_callee_saved_regs
+ mov x19, xzr
+ mov x20, xzr
+ mov x21, xzr
+ mov x22, xzr
+ mov x23, xzr
+ mov x24, xzr
+ mov x25, xzr
+ mov x26, xzr
+ mov x27, xzr
+ mov x28, xzr
+ mov x29, xzr
+ .endm
+
+ .macro switch_to_exception_stack reg1 reg2
+ mov \reg1, sp
+ ldr \reg2, [\reg1, #CTX_EL3STATE_OFFSET + CTX_EXCEPTION_SP]
+ mov sp, \reg2
+ .endm
+
+ /* -----------------------------------------------------
+ * Handle SMC exceptions separately from other
+ * synchronous exceptions.
+ * -----------------------------------------------------
+ */
+ .macro handle_sync_exception
+ stp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ mrs x30, esr_el3
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ cmp x30, #EC_AARCH32_SMC
+ b.eq smc_handler32
+
+ cmp x30, #EC_AARCH64_SMC
+ b.eq smc_handler64
+
+ /* -----------------------------------------------------
+ * The following code handles any synchronous exception
+ * that is not an SMC. SP_EL3 is pointing to a context
+ * structure where all the scratch registers are saved.
+ * An exception stack is also retrieved from the context.
+ * Currently, a register dump is printed since BL31 does
+ * not expect any such exceptions.
+ * -----------------------------------------------------
+ */
+ bl save_scratch_registers
+ switch_to_exception_stack x0 x1
+
+ /* Save the core_context pointer for handled faults */
+ stp x0, xzr, [sp, #-0x10]!
+ bl fault_handler
+ ldp x0, xzr, [sp], #0x10
+
+ mov sp, x0
+ bl restore_scratch_registers
+ ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ eret
+ .endm
+
+ /* -----------------------------------------------------
+ * Use a platform-defined mechanism to report an
+ * asynchronous exception.
+ * -----------------------------------------------------
+ */
+ .macro handle_async_exception type
+ stp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ bl save_scratch_registers
+ switch_to_exception_stack x0 x1
+
+ /* Save the core_context pointer */
+ stp x0, xzr, [sp, #-0x10]!
+ mov x0, \type
+ bl plat_report_exception
+ ldp x0, xzr, [sp], #0x10
+
+ mov sp, x0
+ bl restore_scratch_registers
+ ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ .endm
+
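
Putting the macro above together with the vector code, the synchronous entry path boils down to the C-like flow below. The ESR_* and EC_* constants come from arch.h; read_esr_el3() and the C-callable views of the assembly labels are purely illustrative, since the real targets are the labels in runtime_exceptions_next.S.

    #include <stdint.h>
    #include <arch.h>   /* ESR_EC_SHIFT, ESR_EC_LENGTH, EC_AARCH32_SMC, EC_AARCH64_SMC */

    extern uint64_t read_esr_el3(void);       /* assumed MRS wrapper */
    extern void smc_handler32(void);          /* assembly labels, shown above */
    extern void smc_handler64(void);
    extern void fault_handler(void *handle);

    /* 'handle' is the cpu_context that SP_EL3 points to on exception entry */
    void handle_sync_exception_c(void *handle)
    {
            uint32_t ec = (read_esr_el3() >> ESR_EC_SHIFT) &
                          ((1U << ESR_EC_LENGTH) - 1U);

            if (ec == EC_AARCH32_SMC)
                    smc_handler32();
            else if (ec == EC_AARCH64_SMC)
                    smc_handler64();
            else
                    /* Unexpected: dump the saved registers and panic() */
                    fault_handler(handle);
    }
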
diff --git a/include/context.h b/include/context.h
index 615f4f4..1c1a3a2 100644
--- a/include/context.h
+++ b/include/context.h
@@ -35,13 +35,42 @@
#include <arch.h>
/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'gp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_GPREGS_OFFSET 0x0
+#define CTX_GPREG_X0 0x0
+#define CTX_GPREG_X1 0x8
+#define CTX_GPREG_X2 0x10
+#define CTX_GPREG_X3 0x18
+#define CTX_GPREG_X4 0x20
+#define CTX_GPREG_X5 0x28
+#define CTX_GPREG_X6 0x30
+#define CTX_GPREG_X7 0x38
+#define CTX_GPREG_X8 0x40
+#define CTX_GPREG_X9 0x48
+#define CTX_GPREG_X10 0x50
+#define CTX_GPREG_X11 0x58
+#define CTX_GPREG_X12 0x60
+#define CTX_GPREG_X13 0x68
+#define CTX_GPREG_X14 0x70
+#define CTX_GPREG_X15 0x78
+#define CTX_GPREG_X16 0x80
+#define CTX_GPREG_X17 0x88
+#define CTX_GPREG_X18 0x90
+#define CTX_GPREG_SP_EL0 0x98
+#define CTX_GPREG_LR 0xa0
+/* Unused space to allow registers to be stored as pairs */
+#define CTX_GPREGS_END 0xb0
+
+/*******************************************************************************
 * Constants that allow assembler code to access members of the 'el3_state'
* structure at their correct offsets. Note that some of the registers are only
* 32-bits wide but are stored as 64-bit values for convenience
******************************************************************************/
-#define CTX_EL3STATE_OFFSET 0x0
-#define CTX_SAVED_SP_EL3 0x0
-#define CTX_SAVED_SP_EL0 0x8
+#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_EXCEPTION_SP 0x0
+#define CTX_RUNTIME_SP 0x8
#define CTX_SPSR_EL3 0x10
#define CTX_ELR_EL3 0x18
#define CTX_SCR_EL3 0x20
@@ -153,11 +182,20 @@
} __aligned(16) name
/* Constants to determine the size of individual context structures */
+#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT)
#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
/*
+ * AArch64 general purpose register context structure. Only x0-x18 and lr
+ * are saved: the compiler is expected to preserve the remaining
+ * callee-saved registers if used by the C runtime, and the assembler
+ * does not touch them.
+ */
+DEFINE_REG_STRUCT(gp_regs_next, CTX_GPREG_ALL);
+
+/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during switches from one security state to
* another in EL1.
@@ -195,6 +233,7 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
* correspond to either the secure or the non-secure state.
*/
typedef struct {
+ gp_regs_next gpregs_ctx;
el3_state el3state_ctx;
el1_sys_regs sysregs_ctx;
fp_regs fpregs_ctx;
@@ -204,12 +243,15 @@ typedef struct {
#define get_el3state_ctx(h) (&((cpu_context *) h)->el3state_ctx)
#define get_fpregs_ctx(h) (&((cpu_context *) h)->fpregs_ctx)
#define get_sysregs_ctx(h) (&((cpu_context *) h)->sysregs_ctx)
+#define get_gpregs_ctx(h) (&((cpu_context *) h)->gpregs_ctx)
/*
* Compile time assertions related to the 'cpu_context' structure to
* ensure that the assembler and the compiler view of the offsets of
* the structure members is the same.
*/
+CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context, gpregs_ctx), \
+ assert_core_context_gp_offset_mismatch);
CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context, sysregs_ctx), \
assert_core_context_sys_offset_mismatch);
CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context, fpregs_ctx), \
@@ -229,6 +271,7 @@ void fpregs_context_restore(fp_regs *regs);
#undef CTX_SYSREG_ALL
#undef CTX_FP_ALL
+#undef CTX_GPREG_ALL
#undef CTX_EL3STATE_ALL
#endif /* __ASSEMBLY__ */
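
With the GP register block now at the start of cpu_context, a service handler can reach the caller's saved registers through the handle it receives. A minimal sketch: the helper below is not part of this patch and relies only on get_gpregs_ctx() and the CTX_GPREG_* byte offsets defined above.

    #include <stdint.h>
    #include <context.h>

    /* Illustrative helper: fetch a caller register saved in the context */
    static inline uint64_t read_saved_gpreg(void *handle, unsigned int ctx_offset)
    {
            gp_regs_next *gpregs = get_gpregs_ctx(handle);

            /* Each CTX_GPREG_Xn is a byte offset to a 64-bit slot */
            return *(uint64_t *)((uintptr_t)gpregs + ctx_offset);
    }

    /* e.g. the caller's x1 (first SMC argument after the function id): */
    /*   uint64_t x1 = read_saved_gpreg(handle, CTX_GPREG_X1);          */
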
diff --git a/include/debug.h b/include/debug.h
index e8ba7e2..f829477 100644
--- a/include/debug.h
+++ b/include/debug.h
@@ -59,10 +59,11 @@
/* For the moment this panic function is very basic: report an error and
* spin. This can be expanded in the future to provide more information.
*/
-static inline void panic(void)
+static inline void __attribute__((noreturn)) panic(void)
{
ERROR("PANIC\n");
- while (1);
+ while (1)
+ ;
}
#endif /* __ASSEMBLY__ */
diff --git a/include/runtime_svc.h b/include/runtime_svc.h
index db8fd29..3ee043e 100644
--- a/include/runtime_svc.h
+++ b/include/runtime_svc.h
@@ -143,7 +143,8 @@
* Constants to allow the assembler access a runtime service
* descriptor
*/
-#define SIZEOF_RT_SVC_DESC 32
+#define RT_SVC_SIZE_LOG2 5
+#define SIZEOF_RT_SVC_DESC (1 << RT_SVC_SIZE_LOG2)
#define RT_SVC_DESC_INIT 16
#define RT_SVC_DESC_HANDLE 24
@@ -156,6 +157,13 @@
#ifndef __ASSEMBLY__
+/* Various flags passed to SMC handlers */
+#define SMC_FROM_SECURE (0 << 0)
+#define SMC_FROM_NON_SECURE (1 << 0)
+
+#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
+#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
+
/* Prototype for runtime service initializing function */
typedef int32_t (*rt_svc_init)(void);
@@ -288,7 +296,7 @@ CASSERT(GPREGS_FP_OFF == __builtin_offsetof(gp_regs, fp), \
* of the structure are the same.
* 2. ensure that the assembler and the compiler see the initialisation
* routine at the same offset.
- * 2. ensure that the assembler and the compiler see the handler
+ * 3. ensure that the assembler and the compiler see the handler
* routine at the same offset.
*/
CASSERT((sizeof(rt_svc_desc) == SIZEOF_RT_SVC_DESC), \
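
For reference, a handler built on these definitions might use the new flags as sketched below. The parameter layout mirrors what smc_handler64 sets up (x0-x4, cookie, handle, flags); the service function name is hypothetical and the return path is simplified, since a real handler would write its results back into the saved context before el3_exit restores it.

    #include <stdint.h>
    #include <runtime_svc.h>

    uint64_t example_svc_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
                                     uint64_t x3, uint64_t x4, void *cookie,
                                     void *handle, uint64_t flags)
    {
            /* Bit 0 of 'flags' carries SCR_EL3.NS for the interrupted world */
            if (is_caller_secure(flags)) {
                    /* e.g. a service that only the normal world may invoke */
                    return SMC_UNK;
            }

            /* ... service-specific work, using 'handle' to reach saved state ... */
            return 0;
    }
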