Diffstat (limited to 'bl31')
-rw-r--r--  bl31/aarch64/bl31_arch_setup.c      100
-rw-r--r--  bl31/aarch64/bl31_entrypoint.S      121
-rw-r--r--  bl31/aarch64/exception_handlers.c   184
-rw-r--r--  bl31/aarch64/runtime_exceptions.S   248
-rw-r--r--  bl31/bl31.ld.S                       88
-rw-r--r--  bl31/bl31.mk                         55
-rw-r--r--  bl31/bl31_main.c                     76
7 files changed, 872 insertions(+), 0 deletions(-)
diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c
new file mode 100644
index 0000000..f6fa088
--- /dev/null
+++ b/bl31/aarch64/bl31_arch_setup.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <platform.h>
+#include <assert.h>
+
+/*******************************************************************************
+ * This duplicates what the primary cpu did after a cold boot in BL1. The same
+ * needs to be done when a cpu is hotplugged in. This function could also
+ * override any EL3 setup done by BL1 as this code resides in rw memory.
+ ******************************************************************************/
+void bl31_arch_setup(void)
+{
+ unsigned long tmp_reg = 0;
+ unsigned int counter_base_frequency;
+
+ /* Enable alignment checks and set the exception endianness to LE */
+ tmp_reg = read_sctlr();
+ tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
+ tmp_reg &= ~SCTLR_EE_BIT;
+ write_sctlr(tmp_reg);
+
+ /*
+ * Enable HVCs, allow NS to mask CPSR.A, route FIQs to EL3, set the
+ * next EL to be aarch64
+ */
+ tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_FIQ_BIT;
+ write_scr(tmp_reg);
+
+ /* Do not trap coprocessor accesses from lower ELs to EL3 */
+ write_cptr_el3(0);
+
+ /* Read the frequency from Frequency modes table */
+ counter_base_frequency = mmio_read_32(SYS_CNTCTL_BASE + CNTFID_OFF);
+ /* The first entry of the frequency modes table must not be 0 */
+ assert(counter_base_frequency != 0);
+
+ /* Program the counter frequency */
+ write_cntfrq_el0(counter_base_frequency);
+ return;
+}
+
+/*******************************************************************************
+ * Detect what is the next Non-Secure EL and setup the required architectural
+ * state
+ ******************************************************************************/
+void bl31_arch_next_el_setup(void) {
+ unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1();
+ unsigned long current_sctlr, next_sctlr;
+ unsigned long el_status;
+ unsigned long scr = read_scr();
+
+ /* Use the same endianness as the current BL */
+ current_sctlr = read_sctlr();
+ next_sctlr = (current_sctlr & SCTLR_EE_BIT);
+
+ /* Find out which EL we are going to */
+ el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK;
+
+ /* Check if EL2 is supported */
+ if (el_status && (scr & SCR_HCE_BIT)) {
+ /* Set SCTLR EL2 */
+ next_sctlr |= SCTLR_EL2_RES1;
+
+ write_sctlr_el2(next_sctlr);
+ } else {
+ /* Set SCTLR Non-Secure EL1 */
+ next_sctlr |= SCTLR_EL1_RES1;
+
+ write_sctlr_el1(next_sctlr);
+ }
+}
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
new file mode 100644
index 0000000..3a850e6
--- /dev/null
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <bl1.h>
+#include <bl_common.h>
+#include <platform.h>
+
+
+ .globl bl31_entrypoint
+
+
+ .section entry_code, "ax"; .align 3
+
+ /* -----------------------------------------------------
+ * bl31_entrypoint() is the cold boot entrypoint,
+ * executed only by the primary cpu.
+ * -----------------------------------------------------
+ */
+
+bl31_entrypoint:; .type bl31_entrypoint, %function
+ /* ---------------------------------------------
+ * BL2 has populated x0,x3,x4 with the opcode
+ * indicating BL31 should be run, memory layout
+ * of the trusted SRAM available to BL31 and
+ * information about running the non-trusted
+ * software already loaded by BL2. Check the
+ * opcode out of paranoia.
+ * ---------------------------------------------
+ */
+ mov x19, #RUN_IMAGE
+ cmp x0, x19
+ b.ne _panic
+ mov x20, x3
+ mov x21, x4
+
+ /* ---------------------------------------------
+ * This is BL31 which is expected to be executed
+ * only by the primary cpu (at least for now).
+ * So, make sure no secondary has lost its way.
+ * ---------------------------------------------
+ */
+ bl read_mpidr
+ mov x19, x0
+ bl platform_is_primary_cpu
+ cbz x0, _panic
+
+ /* --------------------------------------------
+ * Give ourselves a small coherent stack to
+ * ease the pain of initializing the MMU
+ * --------------------------------------------
+ */
+ mov x0, x19
+ bl platform_set_coherent_stack
+
+ /* ---------------------------------------------
+ * Perform platform specific early arch. setup
+ * ---------------------------------------------
+ */
+ mov x0, x20
+ mov x1, x21
+ mov x2, x19
+ bl bl31_early_platform_setup
+ bl bl31_plat_arch_setup
+
+ /* ---------------------------------------------
+ * Give ourselves a stack allocated in Normal
+ * IS-WBWA memory
+ * ---------------------------------------------
+ */
+ mov x0, x19
+ bl platform_set_stack
+
+ /* ---------------------------------------------
+ * Use SP_EL0 to initialize BL31. It allows us
+ * to jump to the next image without having to
+ * come back here to ensure all of the stack's
+ * been popped out. run_image() is not nice
+ * enough to reset the stack pointer before
+ * handing control to the next stage.
+ * ---------------------------------------------
+ */
+ mov x0, sp
+ msr sp_el0, x0
+ msr spsel, #0
+ isb
+
+ /* ---------------------------------------------
+ * Jump to main function.
+ * ---------------------------------------------
+ */
+ bl bl31_main
+
+_panic:
+ b _panic
diff --git a/bl31/aarch64/exception_handlers.c b/bl31/aarch64/exception_handlers.c
new file mode 100644
index 0000000..860d8eb
--- /dev/null
+++ b/bl31/aarch64/exception_handlers.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <platform.h>
+#include <bl_common.h>
+#include <bl31.h>
+#include <psci.h>
+#include <assert.h>
+#include <runtime_svc.h>
+
+/*******************************************************************************
+ * This function checks whether this is a valid smc, e.g. the function id is
+ * correct and the top words of the arguments are zeroed when an aarch64
+ * caller makes an aarch32 call, etc.
+ ******************************************************************************/
+int validate_smc(gp_regs *regs)
+{
+ unsigned int rw = GET_RW(regs->spsr);
+ unsigned int cc = GET_SMC_CC(regs->x0);
+
+ /* Check if there is a difference in the caller RW and SMC CC */
+ if (rw != cc) {
+
+ /* Check whether the caller has chosen the right func. id */
+ if (cc == SMC_64) {
+ regs->x0 = SMC_UNK;
+ return SMC_UNK;
+ }
+
+ /*
+ * Paranoid check to zero the top word of passed args
+ * irrespective of caller's register width.
+ *
+ * TODO: Check if this needed if the caller is aarch32
+ */
+ regs->x0 &= (unsigned int) 0xFFFFFFFF;
+ regs->x1 &= (unsigned int) 0xFFFFFFFF;
+ regs->x2 &= (unsigned int) 0xFFFFFFFF;
+ regs->x3 &= (unsigned int) 0xFFFFFFFF;
+ regs->x4 &= (unsigned int) 0xFFFFFFFF;
+ regs->x5 &= (unsigned int) 0xFFFFFFFF;
+ regs->x6 &= (unsigned int) 0xFFFFFFFF;
+ }
+
+ return 0;
+}
+
+/* TODO: Break down the SMC handler into fast and standard SMC handlers. */
+void smc_handler(unsigned type, unsigned long esr, gp_regs *regs)
+{
+ /* Check if the SMC has been correctly called */
+ if (validate_smc(regs) != 0)
+ return;
+
+ switch (regs->x0) {
+ case PSCI_VERSION:
+ regs->x0 = psci_version();
+ break;
+
+ case PSCI_CPU_OFF:
+ regs->x0 = __psci_cpu_off();
+ break;
+
+ case PSCI_CPU_SUSPEND_AARCH64:
+ case PSCI_CPU_SUSPEND_AARCH32:
+ regs->x0 = __psci_cpu_suspend(regs->x1, regs->x2, regs->x3);
+ break;
+
+ case PSCI_CPU_ON_AARCH64:
+ case PSCI_CPU_ON_AARCH32:
+ regs->x0 = psci_cpu_on(regs->x1, regs->x2, regs->x3);
+ break;
+
+ case PSCI_AFFINITY_INFO_AARCH32:
+ case PSCI_AFFINITY_INFO_AARCH64:
+ regs->x0 = psci_affinity_info(regs->x1, regs->x2);
+ break;
+
+ default:
+ regs->x0 = SMC_UNK;
+ }
+
+ return;
+}
+
+void irq_handler(unsigned type, unsigned long esr, gp_regs *regs)
+{
+ plat_report_exception(type);
+ assert(0);
+}
+
+void fiq_handler(unsigned type, unsigned long esr, gp_regs *regs)
+{
+ plat_report_exception(type);
+ assert(0);
+}
+
+void serror_handler(unsigned type, unsigned long esr, gp_regs *regs)
+{
+ plat_report_exception(type);
+ assert(0);
+}
+
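+/*******************************************************************************
+ * Top level handler for synchronous exceptions. Only SMCs are expected here;
+ * any other exception class is reported to the platform and treated as fatal.
+ ******************************************************************************/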
+void sync_exception_handler(unsigned type, gp_regs *regs)
+{
+ unsigned long esr = read_esr();
+ unsigned int ec = EC_BITS(esr);
+
+ switch (ec) {
+
+ case EC_AARCH32_SMC:
+ case EC_AARCH64_SMC:
+ smc_handler(type, esr, regs);
+ break;
+
+ default:
+ plat_report_exception(type);
+ assert(0);
+ }
+ return;
+}
+
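+/*******************************************************************************
+ * Top level handler for asynchronous exceptions. Routes IRQs, FIQs and
+ * SErrors, as identified by the vector that was taken, to the corresponding
+ * handler above.
+ ******************************************************************************/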
+void async_exception_handler(unsigned type, gp_regs *regs)
+{
+ unsigned long esr = read_esr();
+
+ switch (type) {
+
+ case IRQ_SP_EL0:
+ case IRQ_SP_ELX:
+ case IRQ_AARCH64:
+ case IRQ_AARCH32:
+ irq_handler(type, esr, regs);
+ break;
+
+ case FIQ_SP_EL0:
+ case FIQ_SP_ELX:
+ case FIQ_AARCH64:
+ case FIQ_AARCH32:
+ fiq_handler(type, esr, regs);
+ break;
+
+ case SERROR_SP_EL0:
+ case SERROR_SP_ELX:
+ case SERROR_AARCH64:
+ case SERROR_AARCH32:
+ serror_handler(type, esr, regs);
+ break;
+
+ default:
+ plat_report_exception(type);
+ assert(0);
+ }
+
+ return;
+}
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
new file mode 100644
index 0000000..21976ad
--- /dev/null
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <runtime_svc.h>
+
+ .globl runtime_exceptions
+
+
+#include <asm_macros.S>
+
+
+ .section aarch64_code, "ax"; .align 11
+
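+ /* -----------------------------------------------------
+ * The vector table below must be 2KB aligned (hence the
+ * .align 11 above). Each of its 16 entries spans at most
+ * 32 instructions i.e. 128 bytes, hence the .align 7
+ * before every vector.
+ * -----------------------------------------------------
+ */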
+ .align 7
+runtime_exceptions:
+ /* -----------------------------------------------------
+ * Current EL with _sp_el0 : 0x0 - 0x180
+ * -----------------------------------------------------
+ */
+sync_exception_sp_el0:
+ exception_entry save_regs
+ mov x0, #SYNC_EXCEPTION_SP_EL0
+ mov x1, sp
+ bl sync_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+irq_sp_el0:
+ exception_entry save_regs
+ mov x0, #IRQ_SP_EL0
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+fiq_sp_el0:
+ exception_entry save_regs
+ mov x0, #FIQ_SP_EL0
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+serror_sp_el0:
+ exception_entry save_regs
+ mov x0, #SERROR_SP_EL0
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x380
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_exception_sp_elx:
+ exception_entry save_regs
+ mov x0, #SYNC_EXCEPTION_SP_ELX
+ mov x1, sp
+ bl sync_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+irq_sp_elx:
+ exception_entry save_regs
+ mov x0, #IRQ_SP_ELX
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+fiq_sp_elx:
+ exception_entry save_regs
+ mov x0, #FIQ_SP_ELX
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+serror_sp_elx:
+ exception_entry save_regs
+ mov x0, #SERROR_SP_ELX
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x580
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_exception_aarch64:
+ exception_entry save_regs
+ mov x0, #SYNC_EXCEPTION_AARCH64
+ mov x1, sp
+ bl sync_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+irq_aarch64:
+ exception_entry save_regs
+ mov x0, #IRQ_AARCH64
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+fiq_aarch64:
+ exception_entry save_regs
+ mov x0, #FIQ_AARCH64
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+serror_aarch64:
+ exception_entry save_regs
+ mov x0, #SERROR_AARCH64
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x780
+ * -----------------------------------------------------
+ */
+ .align 7
+sync_exception_aarch32:
+ exception_entry save_regs
+ mov x0, #SYNC_EXCEPTION_AARCH32
+ mov x1, sp
+ bl sync_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+irq_aarch32:
+ exception_entry save_regs
+ mov x0, #IRQ_AARCH32
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+fiq_aarch32:
+ exception_entry save_regs
+ mov x0, #FIQ_AARCH32
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+serror_aarch32:
+ exception_entry save_regs
+ mov x0, #SERROR_AARCH32
+ mov x1, sp
+ bl async_exception_handler
+ exception_exit restore_regs
+ eret
+
+ .align 7
+
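+ /* -----------------------------------------------------
+ * save_regs creates a 0x100 byte frame on the current
+ * stack and stores x0-x28, sp_el0 and spsr_el3 in it.
+ * This is the register context the C exception handlers
+ * receive through their gp_regs pointer.
+ * -----------------------------------------------------
+ */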
+save_regs:; .type save_regs, %function
+ sub sp, sp, #0x100
+ stp x0, x1, [sp, #0x0]
+ stp x2, x3, [sp, #0x10]
+ stp x4, x5, [sp, #0x20]
+ stp x6, x7, [sp, #0x30]
+ stp x8, x9, [sp, #0x40]
+ stp x10, x11, [sp, #0x50]
+ stp x12, x13, [sp, #0x60]
+ stp x14, x15, [sp, #0x70]
+ stp x16, x17, [sp, #0x80]
+ stp x18, x19, [sp, #0x90]
+ stp x20, x21, [sp, #0xa0]
+ stp x22, x23, [sp, #0xb0]
+ stp x24, x25, [sp, #0xc0]
+ stp x26, x27, [sp, #0xd0]
+ mrs x0, sp_el0
+ stp x28, x0, [sp, #0xe0]
+ mrs x0, spsr_el3
+ str x0, [sp, #0xf0]
+ ret
+
+
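+ /* -----------------------------------------------------
+ * restore_regs reloads spsr_el3, sp_el0 and x0-x28 from
+ * the frame built by save_regs and then releases the
+ * 0x100 byte stack frame.
+ * -----------------------------------------------------
+ */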
+restore_regs:; .type restore_regs, %function
+ ldr x9, [sp, #0xf0]
+ msr spsr_el3, x9
+ ldp x28, x9, [sp, #0xe0]
+ msr sp_el0, x9
+ ldp x26, x27, [sp, #0xd0]
+ ldp x24, x25, [sp, #0xc0]
+ ldp x22, x23, [sp, #0xb0]
+ ldp x20, x21, [sp, #0xa0]
+ ldp x18, x19, [sp, #0x90]
+ ldp x16, x17, [sp, #0x80]
+ ldp x14, x15, [sp, #0x70]
+ ldp x12, x13, [sp, #0x60]
+ ldp x10, x11, [sp, #0x50]
+ ldp x8, x9, [sp, #0x40]
+ ldp x6, x7, [sp, #0x30]
+ ldp x4, x5, [sp, #0x20]
+ ldp x2, x3, [sp, #0x10]
+ ldp x0, x1, [sp, #0x0]
+ add sp, sp, #0x100
+ ret
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
new file mode 100644
index 0000000..5ad8648
--- /dev/null
+++ b/bl31/bl31.ld.S
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+
+
+MEMORY {
+ /* RAM is read/write and Initialised */
+ RAM (rwx): ORIGIN = TZRAM_BASE, LENGTH = TZRAM_SIZE
+}
+
+
+SECTIONS
+{
+ . = BL31_BASE;
+
+ BL31_RO ALIGN (4096): {
+ *(entry_code)
+ *(.text)
+ *(.rodata)
+ } >RAM
+
+ BL31_STACKS ALIGN (4096): {
+ . += 0x1000;
+ *(tzfw_normal_stacks)
+ } >RAM
+
+ BL31_COHERENT_RAM ALIGN (4096): {
+ *(tzfw_coherent_mem)
+ /* . += 0x1000;*/
+ /* Do we need to ensure at least 4k here? */
+ . = ALIGN(4096);
+ } >RAM
+
+ __BL31_DATA_START__ = .;
+ .bss ALIGN (4096): {
+ *(.bss)
+ *(COMMON)
+ } >RAM
+
+ .data : {
+ *(.data)
+ } >RAM
+ __BL31_DATA_STOP__ = .;
+
+
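+ /* Base address and size of each of the sections defined above */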
+ __BL31_RO_BASE__ = LOADADDR(BL31_RO);
+ __BL31_RO_SIZE__ = SIZEOF(BL31_RO);
+
+ __BL31_STACKS_BASE__ = LOADADDR(BL31_STACKS);
+ __BL31_STACKS_SIZE__ = SIZEOF(BL31_STACKS);
+
+ __BL31_COHERENT_RAM_BASE__ = LOADADDR(BL31_COHERENT_RAM);
+ __BL31_COHERENT_RAM_SIZE__ = SIZEOF(BL31_COHERENT_RAM);
+
+ __BL31_RW_BASE__ = __BL31_DATA_START__;
+ __BL31_RW_SIZE__ = __BL31_DATA_STOP__ - __BL31_DATA_START__;
+}
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
new file mode 100644
index 0000000..dcf78bc
--- /dev/null
+++ b/bl31/bl31.mk
@@ -0,0 +1,55 @@
+#
+# Copyright (c) 2013, ARM Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
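+# Search paths for the C and assembler sources that make up BL31.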
+vpath %.c drivers/arm/interconnect/cci-400/ common/ lib/ \
+ drivers/arm/peripherals/pl011 plat/fvp common/psci \
+ lib/semihosting arch/aarch64/ lib/non-semihosting \
+ lib/sync/locks/bakery/ drivers/power/ arch/system/gic/ \
+ plat/fvp/aarch64/
+
+vpath %.S lib/arch/aarch64 common/psci \
+ lib/semihosting/aarch64 include/ plat/fvp/${ARCH} \
+ lib/sync/locks/exclusive plat/common/aarch64/ \
+ arch/system/gic/${ARCH}
+
+BL31_ASM_OBJS := bl31_entrypoint.o runtime_exceptions.o psci_entry.o \
+ spinlock.o gic_v3_sysregs.o fvp_helpers.o
+BL31_C_OBJS := bl31_main.o bl31_plat_setup.o bl31_arch_setup.o \
+ exception_handlers.o bakery_lock.o cci400.o \
+ fvp_common.o fvp_pm.o fvp_pwrc.o fvp_topology.o \
+ runtime_svc.o gic_v3.o gic_v2.o psci_setup.o \
+ psci_common.o psci_afflvl_on.o psci_main.o \
+ psci_afflvl_off.o psci_afflvl_suspend.o
+
+BL31_ENTRY_POINT := bl31_entrypoint
+BL31_MAPFILE := bl31.map
+BL31_LINKERFILE := bl31.ld
+
+BL31_OBJS := $(BL31_C_OBJS) $(BL31_ASM_OBJS)
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
new file mode 100644
index 0000000..e8fa2f8
--- /dev/null
+++ b/bl31/bl31_main.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2013, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <semihosting.h>
+#include <bl_common.h>
+#include <bl31.h>
+#include <runtime_svc.h>
+
+void bl31_arch_next_el_setup(void);
+
+/*******************************************************************************
+ * BL31 is responsible for setting up the runtime services for the primary cpu
+ * before passing control to the bootloader (UEFI) or Linux.
+ ******************************************************************************/
+void bl31_main(void)
+{
+ el_change_info *image_info;
+ unsigned long mpidr = read_mpidr();
+
+ /* Perform remaining generic architectural setup from EL3 */
+ bl31_arch_setup();
+
+ /* Perform platform setup in BL31 */
+ bl31_platform_setup();
+
+#if defined (__GNUC__)
+ printf("BL31 Built : %s, %s\n\r", __TIME__, __DATE__);
+#endif
+
+
+ /* Initialize the runtime services e.g. psci */
+ runtime_svc_init(mpidr);
+
+ /* Clean caches before re-entering normal world */
+ dcsw_op_all(DCCSW);
+
+ image_info = bl31_get_next_image_info(mpidr);
+ bl31_arch_next_el_setup();
+ change_el(image_info);
+
+ /* There is no valid reason for change_el() to return */
+ assert(0);
+}