aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGreg Bellows <greg.bellows@linaro.org>2015-02-26 15:45:52 -0600
committerGreg Bellows <greg.bellows@linaro.org>2015-02-26 15:45:52 -0600
commitece391a6a8a5241f9802e3f4ac9981e74860d063 (patch)
treebb293ac6cac0f6fb090d9b38d1b94da1e0e78124
parentabaaa0cd989cb3a6b20231c8fd98c2bc5ee73ba7 (diff)
Add EL1 initialization
Signed-off-by: Greg Bellows <greg.bellows@linaro.org>
-rw-r--r--.gdbinit6413
-rw-r--r--aarch64/Makefile9
-rw-r--r--aarch64/el1.c136
-rw-r--r--aarch64/el1_exception.S33
-rw-r--r--aarch64/el1_loader.h13
-rw-r--r--aarch64/el1_sec_init.S247
-rw-r--r--aarch64/el3_exception.S2
-rw-r--r--aarch64/el3_init.S8
-rw-r--r--platform/virt/platform.h2
9 files changed, 340 insertions, 123 deletions
diff --git a/.gdbinit64 b/.gdbinit64
index a479e0a..540dc60 100644
--- a/.gdbinit64
+++ b/.gdbinit64
@@ -2,6 +2,15 @@ set arch aarch64
target remote :1234
file aarch64/el3.elf
add-symbol-file aarch64/el3.elf &_EL3_TEXT_BASE
-add-symbol-file aarch64/el1_secure.elf 0x10000
-add-symbol-file aarch64/el1_secure.elf &_EL1_S_TEXT_BASE
set print pretty
+
+define load_el3
+file aarch64/el3.elf
+add-symbol-file aarch64/el3.elf &_EL3_TEXT_BASE
+end
+
+define load_el1s
+file aarch64/el1_sec.elf
+add-symbol-file aarch64/el1_sec.elf &_EL1_S_TEXT_BASE
+end
+
diff --git a/aarch64/Makefile b/aarch64/Makefile
index 856e94b..67d6c3e 100644
--- a/aarch64/Makefile
+++ b/aarch64/Makefile
@@ -10,7 +10,10 @@ EL3_ELF = el3.elf
EL3_IMAGE = el3.bin
EL3_LOAD = el3.lds
EL1_S_LOAD = el1_sec.lds
-EL1_S_OBJS = el1_sec_init.o #\
+EL1_S_OBJS = el1_sec_init.o \
+ el1_exception.o \
+ el1.o \
+ #\
secure_svc.o \
secure_asm.o \
sm_asm.o \
@@ -28,8 +31,8 @@ $(EL3_ELF): $(EL3_OBJS) $(EL3_LOAD)
$(EL3_IMAGE): $(EL3_ELF)
$(OBJCOPY) -O binary $< $@
-$(EL1_S_ELF): $(EL1_S_OBJS) $(TZOBJS) $(EL1_S_LOAD)
- $(LD) -o $@ $(EL1_S_OBJS) $(TZOBJS) $(FLATLIBS) --script=$(EL1_S_LOAD)
+$(EL1_S_ELF): $(EL1_S_OBJS) $(EL1_S_LOAD)
+ $(LD) -o $@ $(EL1_S_OBJS) $(FLATLIBS) --script=$(EL1_S_LOAD)
$(EL1_S_IMAGE): $(EL1_S_ELF)
$(OBJCOPY) -O binary $< $@
diff --git a/aarch64/el1.c b/aarch64/el1.c
new file mode 100644
index 0000000..8fced60
--- /dev/null
+++ b/aarch64/el1.c
@@ -0,0 +1,136 @@
+#include "platform.h"
+#include "common_svc.h"
+#include "common_defs.h"
+#include "common_mmu.h"
+#include "el1_loader.h"
+#include "string.h"
+
+typedef union {
+ struct {
+ uint32_t dfsc : 6;
+ uint32_t wnr : 1;
+ uint32_t s1ptw : 1;
+ uint32_t cm : 1;
+ uint32_t ea : 1;
+ uint32_t fnv : 1;
+ uint32_t res0 : 3;
+ uint32_t ar : 1;
+ uint32_t sf : 1;
+ uint32_t srt : 5;
+ uint32_t sse: 1;
+ uint32_t sas: 2;
+ uint32_t isv: 1;
+ };
+ uint32_t raw;
+} armv8_data_abort_iss_t;
+
+typedef union {
+ struct {
+ uint64_t type : 2;
+ uint64_t attridx : 3;
+ uint64_t ns : 1;
+ uint64_t ap2_1 : 2;
+ uint64_t sh1_0 : 2;
+ uint64_t af : 1;
+ uint64_t ng : 1;
+ uint64_t pa : 36;
+ uint64_t res0 : 4;
+ uint64_t contig : 1;
+ uint64_t pxn : 1;
+ uint64_t xn : 1;
+ };
+ uint64_t raw;
+} armv8_4k_pg_pte_t;
+
+typedef union {
+ struct {
+ uint64_t type : 2;
+ uint64_t ignored0 : 10;
+ uint64_t pa : 36;
+ uint64_t res0 : 4;
+ uint64_t ignored1 : 7;
+ uint64_t pxn : 1;
+ uint64_t xn : 1;
+ uint64_t ap : 2;
+ uint64_t ns : 1;
+ };
+ uint64_t raw;
+} armv8_4k_tbl_pte_t;
+
+uint64_t el1_next_pa = 0;
+uint64_t el1_allocate_pa() {
+ uint64_t next = el1_next_pa;
+ el1_next_pa += 0x1000;
+ return next;
+}
+
+void el1_map_va(uintptr_t addr)
+{
+ uint64_t pa = EL3_PGTBL_BASE;
+ uint32_t i;
+ armv8_4k_tbl_pte_t *pte;
+ armv8_4k_pg_pte_t *l3pte;
+
+ for (i = 0; i < 4; i++) {
+ /* Each successive level uses the next lower 9 VA bits in a 48-bit
+ * address, hence the i*9.
+ */
+ uint64_t off = ((addr >> (39-(i*9))) & 0x1FF) << 3;
+ pte = (armv8_4k_tbl_pte_t *)(pa | off);
+ if (!pte->type) {
+ pa = el1_allocate_pa();
+ pte->pa = pa >> 12;
+ pte->type = 3;
+ } else {
+ pa = pte->pa << 12;
+ }
+ }
+
+ l3pte = (armv8_4k_pg_pte_t *)pte;
+ l3pte->af = 1;
+}
+
+void el1_handle_exception(uint64_t ec, uint64_t iss, uint64_t addr)
+{
+ armv8_data_abort_iss_t dai = {.raw = iss};
+ switch (ec) {
+ case 0x17: /* SMC from aarch64 */
+ switch (iss) {
+ /*
+ case SMC_YIELD:
+ DEBUG_MSG("took an SMC(SMC_YIELD) exception\n");
+ monitor_switch();
+ break;
+ case SMC_DISPATCH_MONITOR:
+            DEBUG_MSG("took an SMC(SMC_DISPATCH_MONITOR) exception\n");
+ el1_dispatch(NULL);
+ break;
+ case SMC_NOOP:
+ DEBUG_MSG("took an SMC(SMC_NOOP) exception\n");
+ break;
+ */
+ default:
+ printf("Unrecognized AArch64 SMC opcode: iss = %d\n", iss);
+ }
+ break;
+ case 0x24:
+ printf("Data abort (%s) at lower level: address = %0lx\n",
+ dai.wnr ? "write" : "read", addr);
+ break;
+ case 0x25:
+        printf("Data abort (%s) at current level (EL1): address = %0lx\n",
+ dai.wnr ? "write" : "read", addr);
+ el1_map_va(addr);
+ break;
+
+ default:
+        printf("Unhandled EL1 exception: EC = %d ISS = %d\n", ec, iss);
+ break;
+ }
+}
+
+void el1_start()
+{
+ printf("Entered el1_start\n");
+ return;
+}
diff --git a/aarch64/el1_exception.S b/aarch64/el1_exception.S
new file mode 100644
index 0000000..87ccbc5
--- /dev/null
+++ b/aarch64/el1_exception.S
@@ -0,0 +1,33 @@
+.section .vectors
+.align 12 // Align to 0x1000 (exceeds the 0x800 vector table alignment requirement)
+.globl el1_vectors
+el1_vectors:
+.word 0 // Add padding to force the below alignment
+.align 9 // Force these vectors to the 0x200 (current EL, SPx) offset
+el1_sync_exception_current:
+ mrs x0, esr_el1
+ mov x1, #0xffffff
+ and x1, x1, x0
+ lsr x0, x0, #26
+ mrs x2, far_el1
+ bl el1_handle_exception
+ eret
+.align 10 // Force these vectors to 0x400 alignment
+el1_sync_exception_lower64:
+ mrs x0, esr_el1
+ mov x1, #0xffffff
+ and x1, x1, x0
+ lsr x0, x0, #26
+ bl el1_handle_exception
+ eret
+.align 7
+el1_serr_exception:
+ b el1_serr_exception
+.align 7
+el1_irq_exception:
+ b el1_irq_exception
+.align 7
+el1_fiq_exception:
+ b el1_fiq_exception
+
+.end
diff --git a/aarch64/el1_loader.h b/aarch64/el1_loader.h
new file mode 100644
index 0000000..4bf0bc5
--- /dev/null
+++ b/aarch64/el1_loader.h
@@ -0,0 +1,13 @@
+#ifndef _EL1_S_LOADER_H
+#define _EL1_S_LOADER_H
+
+extern uintptr_t _EL1_S_TEXT_BASE;
+uintptr_t EL1_S_TEXT_BASE = (uintptr_t)&_EL1_S_TEXT_BASE;
+extern uintptr_t _EL1_S_DATA_BASE;
+uintptr_t EL1_S_DATA_BASE = (uintptr_t)&_EL1_S_DATA_BASE;
+extern uintptr_t _EL1_S_TEXT_SIZE;
+uint64_t EL1_S_TEXT_SIZE = (uint64_t)&_EL1_S_TEXT_SIZE;
+extern uintptr_t _EL1_S_DATA_SIZE;
+uint64_t EL1_S_DATA_SIZE = (uint64_t)&_EL1_S_DATA_SIZE;
+
+#endif
diff --git a/aarch64/el1_sec_init.S b/aarch64/el1_sec_init.S
index 29172fe..e59cf2b 100644
--- a/aarch64/el1_sec_init.S
+++ b/aarch64/el1_sec_init.S
@@ -1,127 +1,150 @@
#include "common_defs.h"
-#include "platform.h"
-
-.section .vectors
-secure_vectors:
- .word 0 /* reset */
- b secure_undef_vec /* undef */
- b secure_svc_vec /* svc */
- b secure_pabort_vec /* pabt */
- b secure_dabort_vec /* dabt */
- .word 0 /* hmc */
- .word 0 /* irq */
- .word 0 /* fiq */
-
-secure_undef_vec:
-// ldr x10, =secure_undef_handler
-// blr x10
- eret
-
-secure_pabort_vec:
-// bl secure_pabort_handler
-// b end
-
-secure_dabort_vec:
-// bl secure_dabort_handler
-// b end
-
-secure_svc_vec:
- /* Check if this is a return from USR mode and pop the return address off
- * the stack. If so, we got here through the dispatch mechanism that
- * pushed the return on the stack which should be secure svc loop. For
- * this reason we want to return to SVC mode and not a return from
- * exception.
- * Otherwise, route handling to the secure svc_handler. This is the case
- * where we came from secure usr mode.
- */
- cmp x0, #SVC_RETURN_FROM_SECURE_USR
- bne 1f
- mov x0, x1
- ret
-1:
-// ldr x10, =secure_svc_handler
-// blr x10
- eret
-
-.section .init
-secure_init:
- /* We enter with R0 pointing to the nonsecure entry point. Put it in R11
- * for now to avoid overwriting it on calls. */
- mov x11, x0
- /* Disable interrupts for now */
- mrs x10, daif
- orr x10, x10, #0xc0 /* Mask IRQ and FIQ */
- msr daif, x10
+#define PT_BASE EL1_S_PGTBL_BASE
- /* Setup the secure EL1 vectors
- */
- ldr x10, =secure_vectors
+.section .init
+.align 12
+.global el1_init
+/* el1_init() */
+el1_init:
+ /* Set-up the EL1 vbar */
+ ldr x10, =el1_vectors
msr vbar_el1, x10
- isb
-
-secure_stack_init:
- /* Set-up the secure SVC stack */
- ldr x10, =SEC_STACK_BASE
+ /* The stack still needs to be allocated and mapped so we set up a
+ * temporary stack for the time being.
+ */
+ ldr x10, =RAM_BASE+0x2000
mov sp, x10
- smc #SMC_YIELD
+ /* Enable floating point register usage as printf uses it */
+ mrs x10, cpacr_el1
+ orr x10, x10, #3<<20 /* Set CPACR.FPEN to enable FP */
+ msr cpacr_el1, x10
-secure_mmu_init:
+el1_init_mmu:
/* Disable data and instruction caches */
- mrs x10, sctlr_el3
+ mrs x10, sctlr_el1
bic x10, x10, #0x0004
bic x10, x10, #0x1000
- msr sctlr_el3, x10
-
- /* Set-up the initial secure page tables */
-// bl secure_pagetable_init
-
- /* Set TTBR0 to the initialized address plus enable shareable write-back
- * write-allocate.
+ msr sctlr_el1, x10
+
+ ldr x10, =PT_BASE /* Base of L0 page table */
+ lsr x11, x10, #TnSZ /* Shift the invalid bits out */
+ lsl x11, x11, #TnSZ
+ msr ttbr0_el1, x11
+ mov x11, #TnSZ
+ msr tcr_el1, x11 /* PS = 32bit, TG0 = 4k, TnSZ */
+
+ /* Use the top of the stack to track our PA pool pointer */
+ ldr x10, =PT_BASE+0x1000
+ str x10, [sp]
+
+el1_map_init:
+ /* Direct map the init code */
+ ldr x0, =_EL1_S_INIT_BASE
+ ldr x1, =_EL1_S_INIT_BASE
+ ldr x2, =_EL1_S_INIT_SIZE
+ bl map_va_to_pa_range
+
+el1_map_flash:
+ /* Direct map the EL1 flash sections so we can copy from them once
+ * the MMU has been enabled.
*/
- ldr x10, =SEC_PGTBL_BASE
- orr x10, x10, #0x8
- orr x10, x10, #0x2
- orr x10, x10, #0x1
- msr ttbr0_el3, x10
-
- /* Enable the mmu */
-// tlbi alle3
- mrs x10, sctlr_el3
- orr x10, x10, #0x1
- msr sctlr_el3, x10
+ ldr x0, =_EL1_S_FLASH_TEXT
+ ldr x1, =_EL1_S_FLASH_TEXT
+ ldr x2, =_EL1_S_TEXT_SIZE
+ bl map_va_to_pa_range
+
+ ldr x0, =_EL1_S_FLASH_DATA
+ ldr x1, =_EL1_S_FLASH_DATA
+ ldr x2, =_EL1_S_DATA_SIZE
+ bl map_va_to_pa_range
+
+el1_map_text:
+ /* Map the EL1 text address range */
+ ldr x0, =_EL1_S_TEXT_BASE
+ ldr x1, =_EL1_S_TEXT_SIZE
+ bl map_va_range
+
+el1_map_data:
+ /* Map the EL1 data address range */
+ ldr x0, =_EL1_S_DATA_BASE
+ ldr x1, =_EL1_S_DATA_SIZE
+ bl map_va_range
+
+el1_map_stack:
+ /* Map the first page of the stack so we can get off the ground */
+ ldr x0, =EL1_S_STACK_BASE-0x1000
+ bl map_va
+
+el1_map_pt:
+ /* Direct map the page table pool */
+ ldr x0, =EL1_S_PGTBL_BASE
+ ldr x1, =EL1_S_PGTBL_BASE
+ ldr x2, =EL1_S_PGTBL_SIZE
+ bl map_va_to_pa_range
+
+el1_map_uart:
+ ldr x0, =UART0_BASE
+ ldr x1, =UART0_BASE
+ bl map_va_to_pa
+
+save_last_pa:
+ ldr x17, =RAM_BASE+0x2000
+ ldr x17, [x17]
+
+/* We should have all the critical address regions mapped at this point.
+ * Anything that has not already been mapped will be handled on demand. If it
+ * needs to be copied out of flash, it needs to be mapped.
+ * The mapped regions are:
+ * - EL1 init code - So we can keep executing after the MMU is enabled
+ * - EL1 flash region - So we can copy the code and data
+ * - EL1 code and text segments - Need to be copied from flash
+ * - EL1 initial stack page - Needed by the exception handler
+ * - EL1 PT PA pages - Needed so we don't fault on demand paging
+ * - UART - So we can use it to print
+ */
+el1_enable_mmu:
+ mrs x10, sctlr_el1
+ orr x10, x10, #0x1 // Enable MMU
+ msr sctlr_el1, x10
isb
dsb sy
-check_secure:
- /* Check that are entry state makes sense before initializing the monitor
- * mode.
- */
-// bl secure_check_init
-
-secure_init_monitor:
- /* Call monitor mode initialization and pass to it the non-secure execution
- * entry point.
- */
- mov x0, x11
-// bl monitor_init
-
- /* Once monitor mode is set-up, we yield to non-secure execution. The
- * non-secure entrypoint was established in monitor init.
- */
- mov x0, #SMC_YIELD
- smc #1
-
- /* The first return to the secure world will set us off into our
- * secure-side monitor loop. The only way out is to issue an EXIT SMC call
- * to the secure world.
- */
-// bl secure_svc_loop
-
- /* If we get here we are on the way out, poweroff the device */
-end:
-// b secure_shutdown
-b end
-
+/* The EL1 address space is set-up and the MMU is started so it is safe to copy
+ * the text and data sections in.
+ */
+el1_copy_text:
+ ldr x0, =_EL1_S_TEXT_BASE
+ ldr x1, =_EL1_S_FLASH_TEXT
+ ldr x2, =_EL1_S_TEXT_SIZE
+ bl memcpy
+
+el1_copy_data:
+ ldr x0, =_EL1_S_DATA_BASE
+ ldr x1, =_EL1_S_FLASH_DATA
+ ldr x2, =_EL1_S_DATA_SIZE
+ bl memcpy
+
+/* Now that the MMU is enabled and the initial stack page is mapped we can
+ * safely set the stack pointer.
+ */
+el1_init_stack:
+ ldr x10, =EL1_S_STACK_BASE
+ mov sp, x10
+
+/* Migrate the next PA to the non-init code */
+ ldr x10, =el1_next_pa
+ str x17, [x10]
+
+el1_init_monitor:
+// ldr x2, =el1_start
+ b el1_start
+
+/* We should never get here */
+el1_init_end:
+ b el1_init_end
+
+#include "init_util.S"
+.end
diff --git a/aarch64/el3_exception.S b/aarch64/el3_exception.S
index 2ee09cf..80388c0 100644
--- a/aarch64/el3_exception.S
+++ b/aarch64/el3_exception.S
@@ -1,5 +1,5 @@
-.align 11 // Align to vector table size (0x800)
.section .vectors
+.align 12 // Align to 0x1000 (exceeds the 0x800 vector table alignment requirement)
.globl el3_vectors
el3_vectors:
.word 0 // Add padding to force the below alignment
diff --git a/aarch64/el3_init.S b/aarch64/el3_init.S
index cb7fc65..f7e3cad 100644
--- a/aarch64/el3_init.S
+++ b/aarch64/el3_init.S
@@ -2,10 +2,8 @@
#define PT_BASE EL3_PGTBL_BASE
-#include "init_util.S"
-
-.align 12
.section .init
+.align 12
.global el3_init
/* el3_init(sec_elr, nsec_elr) */
el3_init:
@@ -25,7 +23,7 @@ el3_init:
/* Enable floating point register usage as printf uses it */
mrs x10, cptr_el3
- and x10, x10, #~0x400 /* CPTR.TFP */
+ and x10, x10, #~0x400 /* Clear CPTR.TFP to enable FP */
msr cptr_el3, x10
el3_init_mmu:
@@ -153,4 +151,6 @@ el3_init_monitor:
el3_init_end:
b el3_init_end
+#include "init_util.S"
+
.end
diff --git a/platform/virt/platform.h b/platform/virt/platform.h
index b7b9191..43d6a04 100644
--- a/platform/virt/platform.h
+++ b/platform/virt/platform.h
@@ -18,7 +18,7 @@
#define EL3_RAM_BASE RAM_BASE
#define EL3_RAM_SIZE (512*1024)
#define EL1_S_RAM_BASE (RAM_BASE+0x1000000)
-#define EL1_S_RAM_SIZE ((RAM_SIZE>>1)-EL3_RAM_SIZE)
+#define EL1_S_RAM_SIZE (512*1024)
#define EL1_NS_RAM_BASE (RAM_BASE+0x8000000)
#define EL1_NS_RAM_SIZE (RAM_SIZE/2)
#define VA_SIZE 48