aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGreg Bellows <greg.bellows@linaro.org>2015-03-03 13:43:02 -0600
committerGreg Bellows <greg.bellows@linaro.org>2015-03-03 13:43:02 -0600
commit03b3a6961248a0d42b3463e60d81b994358e37bd (patch)
tree0427ecd76bdcd1b5a20201184b71a775aecdf734
parente9bc256b7fafccd92ef48209285af41c22de73da (diff)
Add unmapping of init code
Signed-off-by: Greg Bellows <greg.bellows@linaro.org>
-rw-r--r--aarch64/common/init_util.S48
-rw-r--r--aarch64/common/smc.h2
-rw-r--r--aarch64/el1_common/el1.c49
-rw-r--r--aarch64/el1_common/el1_init.S39
-rw-r--r--aarch64/el1_s/el1_loader.h6
-rw-r--r--aarch64/el3/el3.c28
-rw-r--r--aarch64/el3/el3_init.S21
-rw-r--r--aarch64/el3/el3_monitor_asm.S2
8 files changed, 167 insertions, 28 deletions
diff --git a/aarch64/common/init_util.S b/aarch64/common/init_util.S
index 3942ccd..0f9e393 100644
--- a/aarch64/common/init_util.S
+++ b/aarch64/common/init_util.S
@@ -49,6 +49,37 @@ map_done:
ldp x30, x10, [sp], #16
ret
+.globl unmap_va
+/* unmap_va(VA) */
+unmap_va:
+ stp x30, x10, [sp, #-16]!
+ stp x11, x12, [sp, #-16]!
+ stp x13, x14, [sp, #-16]!
+ ldr x12, =PT_BASE
+ mov x13, #0x4
+ mov x14, #39
+unmap_loop:
+ and x11, x12, #~0xFFF /* Strip off descriptor non-address bits */
+ lsr x10, x0, x14 /* Shift out VA bits for the level */
+ sub x14, x14, #9 /* Update shift amount for next level */
+ and x10, x10, #0x1FF /* Filter top VA bits for PT offset */
+ lsl x10, x10, #3 /* Shift PT offset to bytes */
+ orr x10, x10, x11 /* Compute descriptor address */
+ sub x13, x13, #1 /* Decrease level */
+ cbz x13, unmap_page /* If we reached level 0 then finalize */
+ ldr x12, [x10] /* Otherwise, fetch the descriptor */
+ and x11, x12, #0x1 /* Filter valid bit */
+ cbz x11, unmap_done /* Assume an invalid PT page means done */
+ b unmap_loop /* Next level */
+unmap_page:
+ mov x12, #0 /* Clear the page PTE */
+ str x12, [x10] /* Fill in PT entry */
+unmap_done:
+ ldp x13, x14, [sp], #16
+ ldp x11, x12, [sp], #16
+ ldp x30, x10, [sp], #16
+ ret
+
.globl map_va
/* map_va(VA, pgprop) */
map_va:
@@ -125,6 +156,23 @@ map_va_done:
ldr x30, [sp], #8
ret
+/* unmap_va_range(VA, len) */
+unmap_va_range:
+ str x30, [sp, #-8]!
+ stp x0, x1, [sp, #-16]!
+ add x1, x1, #0xFFF
+ and x1, x1, #~0xFFF
+unmap_va_loop:
+ cbz x1, unmap_va_done
+ bl unmap_va
+ add x0, x0, #0x1000
+ sub x1, x1, #0x1000
+ b unmap_va_loop
+unmap_va_done:
+ ldp x0, x1, [sp], #16
+ ldr x30, [sp], #8
+ ret
+
/* memcpy(dest, src) */
memcpy:
cbz x2, memcpy_done
diff --git a/aarch64/common/smc.h b/aarch64/common/smc.h
index bcc8b26..d4a293c 100644
--- a/aarch64/common/smc.h
+++ b/aarch64/common/smc.h
@@ -9,6 +9,7 @@
#define SMC_ALLOCATE_SECURE_MEMORY 4
#define SMC_EXIT 5
+#ifndef __ASSEMBLY__
typedef struct {
uint32_t (*func)(uint32_t);
uint32_t arg;
@@ -20,5 +21,6 @@ typedef struct {
tztest_dispatch_t dispatch;
};
} tztest_smc_desc_t;
+#endif
#endif
diff --git a/aarch64/el1_common/el1.c b/aarch64/el1_common/el1.c
index 8f696ce..aee6857 100644
--- a/aarch64/el1_common/el1.c
+++ b/aarch64/el1_common/el1.c
@@ -2,9 +2,9 @@
#include "platform.h"
#include "smc.h"
#include "svc.h"
-#include "el1_loader.h"
#include "string.h"
#include "el1.h"
+#include "el1_loader.h"
#include "armv8_exception.h"
#include "armv8_vmsa.h"
#include "arm_builtins.h"
@@ -18,7 +18,7 @@ uint64_t el1_allocate_pa() {
void el1_map_va(uintptr_t addr)
{
- uint64_t pa = EL1_S_PGTBL_BASE;
+ uint64_t pa = EL1_PGTBL_BASE;
uint32_t i;
uint64_t *pte;
@@ -40,11 +40,41 @@ void el1_map_va(uintptr_t addr)
*pte |= PTE_ACCESS;
}
+int el1_unmap_va(uint64_t addr)
+{
+ uint64_t pa = EL1_PGTBL_BASE;
+ uint32_t i;
+ uint64_t *pte;
+
+ for (i = 0; i < 4; i++) {
+ /* Each successive level uses the next lower 9 VA bits in a 48-bit
+ * address, hence the i*9.
+ */
+ uint64_t off = ((addr >> (39-(i*9))) & 0x1FF) << 3;
+ pte = (uint64_t *)(pa | off);
+ if (!(*pte & 0x1)) {
+ /* This is not a valid page, return an error */
+ return -1;
+ } else {
+ pa = *pte & 0x000FFFFFF000;
+ }
+ }
+
+ /* Clear the page descriptor */
+ *pte = 0;
+
+ return 0;
+}
+
void el1_handle_exception(uint64_t ec, uint64_t iss, uint64_t addr)
{
armv8_data_abort_iss_t dai = {.raw = iss};
// armv8_inst_abort_iss_t iai = {.raw = iss};
switch (ec) {
+ case EC_SVC32:
+ case EC_SVC64:
+ printf("Took and SVC exception\n");
+ break;
case EC_IABORT_LOWER:
printf("Instruction abort at lower level: address = %0lx\n",
addr);
@@ -63,16 +93,27 @@ void el1_handle_exception(uint64_t ec, uint64_t iss, uint64_t addr)
dai.wnr ? "write" : "read", addr);
el1_map_va(addr);
break;
-
default:
printf("Unhandled EL3 exception: EC = %d ISS = %d\n", ec, iss);
break;
}
}
-void el1_start()
+void el1_start(uint64_t base, uint64_t size)
{
int i = 0;
+ uint64_t addr = base;
+ size_t len;
+
+ /* Unmap the init segment so we don't accidentally use it */
+ for (len = 0; len < ((size + 0xFFF) & ~0xFFF);
+ len += 0x1000, addr += 0x1000) {
+ if (el1_unmap_va(addr)) {
+ printf("Failed to unmap va 0x%x\n", addr);
+ } else {
+ printf("Unmapped va 0x%x\n", addr);
+ }
+ }
while (1) {
printf("%d: Entered %s el1_start\n", i, SECURE_STATE);
diff --git a/aarch64/el1_common/el1_init.S b/aarch64/el1_common/el1_init.S
index b68550f..ee44d4e 100644
--- a/aarch64/el1_common/el1_init.S
+++ b/aarch64/el1_common/el1_init.S
@@ -92,6 +92,7 @@ el1_map_uart:
mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
bl map_pa
+/* Save the next PA pointer until after we have enabled the MMU */
save_last_pa:
ldr x17, =RAM_BASE+0x2000
ldr x17, [x17]
@@ -114,8 +115,19 @@ el1_enable_mmu:
isb
dsb sy
+/* Now that the MMU is enabled and the initial stack page is mapped we can
+ * safely set the stack pointer.
+ */
+el1_init_stack:
+ ldr x10, =EL1_STACK_BASE
+ mov sp, x10
+
+/* Migrate the next PA to the non-init code */
+ ldr x10, =el1_next_pa
+ str x17, [x10]
+
/* The EL1 address space is set-up and the MMU is started so it is safe to copy
- * the text and data sections in.
+ * the text and data sections in and unmap the respective flash.
*/
el1_copy_text:
ldr x0, =_EL1_TEXT_BASE
@@ -123,24 +135,27 @@ el1_copy_text:
ldr x2, =_EL1_TEXT_SIZE
bl memcpy
+ ldr x0, =_EL1_FLASH_TEXT
+ ldr x1, =_EL1_TEXT_SIZE
+ bl unmap_va_range
+
el1_copy_data:
ldr x0, =_EL1_DATA_BASE
ldr x1, =_EL1_FLASH_DATA
ldr x2, =_EL1_DATA_SIZE
bl memcpy
-/* Now that the MMU is enabled and the inital stack page is mapped we can
- * safely set the stack pointer.
- */
-el1_init_stack:
- ldr x10, =EL1_STACK_BASE
- mov sp, x10
-
-/* Migrate the next PA to the non-init code */
- ldr x10, =el1_next_pa
- str x17, [x10]
+ ldr x0, =_EL1_FLASH_DATA
+ ldr x1, =_EL1_DATA_SIZE
+ bl unmap_va_range
-el1_init_monitor:
+el1_init_start:
+ /* Pass the address and size of the init section to start so it
+ * can unmap it. We must wait until we are in start because we are still
+ * running out of the init segment.
+ */
+ ldr x0, =_EL1_INIT_BASE
+ ldr x1, =_EL1_INIT_SIZE
b el1_start
/* We should never get here */
diff --git a/aarch64/el1_s/el1_loader.h b/aarch64/el1_s/el1_loader.h
index 4bf0bc5..10d283d 100644
--- a/aarch64/el1_s/el1_loader.h
+++ b/aarch64/el1_s/el1_loader.h
@@ -1,6 +1,12 @@
#ifndef _EL1_S_LOADER_H
#define _EL1_S_LOADER_H
+extern uintptr_t _EL1_S_INIT_BASE;
+uintptr_t EL1_S_INIT_BASE = (uintptr_t)&_EL1_S_INIT_BASE;
+extern uintptr_t _EL1_S_INIT_SIZE;
+uintptr_t EL1_S_INIT_SIZE = (uintptr_t)&_EL1_S_INIT_SIZE;
+extern uintptr_t _EL1_S_FLASH_TEXT;
+uintptr_t EL1_S_FLASH_TEXT = (uintptr_t)&_EL1_S_FLASH_TEXT;
extern uintptr_t _EL1_S_TEXT_BASE;
uintptr_t EL1_S_TEXT_BASE = (uintptr_t)&_EL1_S_TEXT_BASE;
extern uintptr_t _EL1_S_DATA_BASE;
diff --git a/aarch64/el3/el3.c b/aarch64/el3/el3.c
index cf3b38b..7b5ec36 100644
--- a/aarch64/el3/el3.c
+++ b/aarch64/el3/el3.c
@@ -54,6 +54,32 @@ void el3_map_va(uintptr_t addr)
*pte |= PTE_ACCESS;
}
+int el3_unmap_va(uint64_t addr)
+{
+ uint64_t pa = EL3_PGTBL_BASE;
+ uint32_t i;
+ uint64_t *pte;
+
+ for (i = 0; i < 4; i++) {
+ /* Each successive level uses the next lower 9 VA bits in a 48-bit
+ * address, hence the i*9.
+ */
+ uint64_t off = ((addr >> (39-(i*9))) & 0x1FF) << 3;
+ pte = (uint64_t *)(pa | off);
+ if (!(*pte & 0x1)) {
+ /* This is not a valid page, return an error */
+ return -1;
+ } else {
+ pa = *pte & 0x000FFFFFF000;
+ }
+ }
+
+ /* Clear the page descriptor */
+ *pte = 0;
+
+ return 0;
+}
+
void el3_handle_exception(uint64_t ec, uint64_t iss, uint64_t addr)
{
armv8_data_abort_iss_t dai = {.raw = iss};
@@ -90,5 +116,3 @@ void el3_handle_exception(uint64_t ec, uint64_t iss, uint64_t addr)
break;
}
}
-
-
diff --git a/aarch64/el3/el3_init.S b/aarch64/el3/el3_init.S
index bb17582..3908277 100644
--- a/aarch64/el3/el3_init.S
+++ b/aarch64/el3/el3_init.S
@@ -122,6 +122,17 @@ el3_enable_mmu:
isb
dsb sy
+/* Now that the MMU is enabled and the initial stack page is mapped we can
+ * safely set the stack pointer.
+ */
+el3_init_stack:
+ ldr x10, =EL3_STACK_BASE
+ mov sp, x10
+
+/* Migrate the next PA to the non-init code */
+ ldr x10, =el3_next_pa
+ str x17, [x10]
+
/* The EL3 address space is set-up and the MMU is started so it is safe to copy
* the text and data sections in.
*/
@@ -137,16 +148,6 @@ el3_copy_data:
ldr x2, =_EL3_DATA_SIZE
bl memcpy
-/* Now that the MMU is enabled and the inital stack page is mapped we can
- * safely set the stack pointer.
- */
-el3_init_stack:
- mov sp, EL3_STACK_BASE
-
-/* Migrate the next PA to the non-init code */
- ldr x10, =el3_next_pa
- str x17, [x10]
-
el3_init_monitor:
mov x0, x15
mov x1, x16
diff --git a/aarch64/el3/el3_monitor_asm.S b/aarch64/el3/el3_monitor_asm.S
index 62df320..c5301d4 100644
--- a/aarch64/el3/el3_monitor_asm.S
+++ b/aarch64/el3/el3_monitor_asm.S
@@ -1,4 +1,6 @@
+#define __ASSEMBLY__
#include "platform.h"
+#include "smc.h"
.section .text
.globl monitor_init