author    Jens Wiklander <jens.wiklander@linaro.org>    2015-01-02 22:49:57 +0100
committer Jens Wiklander <jens.wiklander@linaro.org>    2015-02-23 13:27:35 +0100
commit    0eff3e9bf016399fce96b1f495b4e9605d6e5bdc (patch)
tree      7bb61879534b68d7d4d575e28eaaf54428447883
parent    94122a3537298dd008fdb4e85e45ae2d9b15c454 (diff)
arm32: Adds LPAE support
Enables use of Large Physical Address Extension (LPAE) with CFG_WITH_LPAE=y.

To facilitate both the old V7 and the new LPAE translation tables, an abstraction layer has been added to avoid direct manipulation of the translation tables.

Both the V7 and the LPAE implementation use TEX remapping (SCTLR.TRE = 1) and enable access flags (SCTLR.AFE = 1). This is a change in the V7 code base as those bits were 0 before. There are two reasons for this change:
1. "From the introduction of the Large Physical Address Extension, ARM deprecates any use of the AP[2:0] scheme for defining MMU access permissions. This deprecation applies to software for all ARMv7-A implementations, regardless of whether they include the Large Physical Address Extension."
2. With SCTLR.TRE = 1 and SCTLR.AFE = 1 the V7 and LPAE implementations use the same way of specifying access permissions and memory attributes.

Currently only supported on plat-vexpress.

Bugfix v7 config: sets NOS bit in TTBRx registers

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Tested-by: Jens Wiklander <jens.wiklander@linaro.org> (QEMU, FVP Base model)
Reviewed-by: Pascal Brand <pascal.brand@linaro.org>
Tested-by: Pascal Brand <pascal.brand@linaro.org> (STM platform)
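As a rough illustration of the abstraction layer mentioned above (a sketch, not code from this commit): a caller that needs to change a single translation entry goes through core_mmu_find_table() and core_mmu_set_entry() instead of writing descriptors directly. The signatures are the ones declared in core/arch/arm32/include/mm/core_mmu.h below; remap_one_page() is a hypothetical helper name.

#include <mm/core_mmu.h>

/* Hypothetical helper: update one translation entry through the new API */
static bool remap_one_page(vaddr_t va, paddr_t pa, uint32_t attr)
{
	struct core_mmu_table_info tbl_info;

	/* Walk as deep as the tables allow (level 3 for LPAE, 2 for V7) */
	if (!core_mmu_find_table(va, 3, &tbl_info))
		return false;

	core_mmu_set_entry(&tbl_info, core_mmu_va2idx(&tbl_info, va),
			   pa, attr);
	return true;
}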
-rw-r--r-- core/arch/arm32/include/arm32.h | 157
-rw-r--r-- core/arch/arm32/include/mm/core_mmu.h | 210
-rw-r--r-- core/arch/arm32/include/mm/tee_mm_def.h | 33
-rw-r--r-- core/arch/arm32/include/mm/tee_mmu_defs.h | 61
-rw-r--r-- core/arch/arm32/include/mm/tee_pager.h | 14
-rw-r--r-- core/arch/arm32/kernel/tee_ta_manager.c | 12
-rw-r--r-- core/arch/arm32/kernel/thread.c | 8
-rw-r--r-- core/arch/arm32/kernel/thread_private.h | 4
-rw-r--r-- core/arch/arm32/mm/core_mmu.c | 320
-rw-r--r-- core/arch/arm32/mm/core_mmu_lpae.c | 689
-rw-r--r-- core/arch/arm32/mm/core_mmu_private.h (renamed from core/include/mm/tee_mmu_unpg.h) | 23
-rw-r--r-- core/arch/arm32/mm/core_mmu_v7.c | 688
-rw-r--r-- core/arch/arm32/mm/sub.mk | 6
-rw-r--r-- core/arch/arm32/mm/tee_mm.c | 5
-rw-r--r-- core/arch/arm32/mm/tee_mmu.c | 882
-rw-r--r-- core/arch/arm32/mm/tee_mmu_unpg.c | 75
-rw-r--r-- core/arch/arm32/mm/tee_pager.c | 191
-rw-r--r-- core/arch/arm32/plat-stm/core_bootcfg.c | 8
-rw-r--r-- core/arch/arm32/plat-stm/main.c | 11
-rw-r--r-- core/arch/arm32/plat-stm/platform_config.h | 5
-rw-r--r-- core/arch/arm32/plat-stm/tz_sinit.S | 2
-rw-r--r-- core/arch/arm32/plat-sunxi/core_bootcfg.c | 16
-rw-r--r-- core/arch/arm32/plat-sunxi/entry.S | 2
-rw-r--r-- core/arch/arm32/plat-sunxi/main.c | 11
-rw-r--r-- core/arch/arm32/plat-sunxi/platform_config.h | 7
-rw-r--r-- core/arch/arm32/plat-vexpress/conf.mk | 1
-rw-r--r-- core/arch/arm32/plat-vexpress/core_bootcfg.c | 35
-rw-r--r-- core/arch/arm32/plat-vexpress/entry.S | 2
-rw-r--r-- core/arch/arm32/plat-vexpress/main.c | 42
-rw-r--r-- core/arch/arm32/plat-vexpress/platform_config.h | 34
-rw-r--r-- core/include/kernel/tee_ta_manager_unpg.h | 2
-rw-r--r-- core/include/mm/tee_mmu.h | 2
-rw-r--r-- core/include/mm/tee_mmu_types.h | 53
-rwxr-xr-x scripts/setup_qemu_optee.sh | 2
34 files changed, 2527 insertions, 1086 deletions
diff --git a/core/arch/arm32/include/arm32.h b/core/arch/arm32/include/arm32.h
index a556451..1a37373 100644
--- a/core/arch/arm32/include/arm32.h
+++ b/core/arch/arm32/include/arm32.h
@@ -101,21 +101,68 @@
#define NSACR_NS_L2ERR (1 << 17)
#define NSACR_NS_SMP (1 << 18)
+#define CPACR_CP(co_proc, access) ((access) << ((co_proc) * 2))
+#define CPACR_CP_ACCESS_DENIED 0x0
+#define CPACR_CP_ACCESS_PL1_ONLY 0x1
+#define CPACR_CP_ACCESS_FULL 0x2
+
+
#define DACR_DOMAIN(num, perm) ((perm) << ((num) * 2))
#define DACR_DOMAIN_PERM_NO_ACCESS 0x0
#define DACR_DOMAIN_PERM_CLIENT 0x1
#define DACR_DOMAIN_PERM_MANAGER 0x3
+/*
+ * TTBCR has different register layout if LPAE is enabled or not.
+ * TTBCR.EAE == 0 => LPAE is not enabled
+ * TTBCR.EAE == 1 => LPAE is enabled
+ */
+#define TTBCR_EAE (1 << 31)
+
+/* When TTBCR.EAE == 0 */
#define TTBCR_PD0 (1 << 4)
#define TTBCR_PD1 (1 << 5)
-#define NSACR_CP10 (1 << 10)
-#define NSACR_CP11 (1 << 11)
-
-#define CPACR_CP(co_proc, access) ((access) << ((co_proc) * 2))
-#define CPACR_CP_ACCESS_DENIED 0x0
-#define CPACR_CP_ACCESS_PL1_ONLY 0x1
-#define CPACR_CP_ACCESS_FULL 0x2
+/* When TTBCR.EAE == 1 */
+#define TTBCR_T0SZ_SHIFT 0
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_IRGN0_SHIFT 8
+#define TTBCR_ORGN0_SHIFT 10
+#define TTBCR_SH0_SHIFT 12
+#define TTBCR_T1SZ_SHIFT 16
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_IRGN1_SHIFT 24
+#define TTBCR_ORGN1_SHIFT 26
+#define TTBCR_SH1_SHIFT 28
+
+/* Normal memory, Inner/Outer Non-cacheable */
+#define TTBCR_XRGNX_NC 0x0
+/* Normal memory, Inner/Outer Write-Back Write-Allocate Cacheable */
+#define TTBCR_XRGNX_WB 0x1
+/* Normal memory, Inner/Outer Write-Through Cacheable */
+#define TTBCR_XRGNX_WT 0x2
+/* Normal memory, Inner/Outer Write-Back no Write-Allocate Cacheable */
+#define TTBCR_XRGNX_WBWA 0x3
+
+/* Non-shareable */
+#define TTBCR_SHX_NSH 0x0
+/* Outer Shareable */
+#define TTBCR_SHX_OSH 0x2
+/* Inner Shareable */
+#define TTBCR_SHX_ISH 0x3
+
+#define TTBR_ASID_MASK 0xff
+#define TTBR_ASID_SHIFT 48
+
+
+#define FSR_LPAE (1 << 9)
+
+/* Valid if FSR.LPAE is 1 */
+#define FSR_STATUS_MASK ((1 << 6) - 1)
+
+/* Valid if FSR.LPAE is 0 */
+#define FSR_FS_MASK ((1 << 10) | ((1 << 3) - 1))
#ifndef ASM
static inline uint32_t read_mpidr(void)
@@ -172,6 +219,13 @@ static inline void write_ttbr0(uint32_t ttbr0)
);
}
+static inline void write_ttbr0_64bit(uint64_t ttbr0)
+{
+ asm volatile ("mcrr p15, 0, %Q[ttbr0], %R[ttbr0], c2"
+ : : [ttbr0] "r" (ttbr0)
+ );
+}
+
static inline uint32_t read_ttbr0(void)
{
uint32_t ttbr0;
@@ -183,26 +237,27 @@ static inline uint32_t read_ttbr0(void)
return ttbr0;
}
-static inline void write_ats1cpw(uint32_t va)
+static inline uint64_t read_ttbr0_64bit(void)
{
- asm volatile ("mcr p15, 0, %[va], c7, c8, 1"
- : : [va] "r" (va)
+ uint64_t ttbr0;
+
+ asm volatile ("mrrc p15, 0, %Q[ttbr0], %R[ttbr0], c2"
+ : [ttbr0] "=r" (ttbr0)
);
+
+ return ttbr0;
}
-static inline uint32_t read_par(void)
+static inline void write_ttbr1(uint32_t ttbr1)
{
- uint32_t par;
-
- asm volatile ("mrc p15, 0, %[par], c7, c4, 0"
- : [par] "=r" (par)
+ asm volatile ("mcr p15, 0, %[ttbr1], c2, c0, 1"
+ : : [ttbr1] "r" (ttbr1)
);
- return par;
}
-static inline void write_ttbr1(uint32_t ttbr1)
+static inline void write_ttbr1_64bit(uint64_t ttbr1)
{
- asm volatile ("mcr p15, 0, %[ttbr1], c2, c0, 1"
+ asm volatile ("mcrr p15, 1, %Q[ttbr1], %R[ttbr1], c2"
: : [ttbr1] "r" (ttbr1)
);
}
@@ -226,6 +281,17 @@ static inline void write_ttbcr(uint32_t ttbcr)
);
}
+static inline uint32_t read_ttbcr(void)
+{
+ uint32_t ttbcr;
+
+ asm volatile ("mrc p15, 0, %[ttbcr], c2, c0, 2"
+ : [ttbcr] "=r" (ttbcr)
+ );
+
+ return ttbcr;
+}
+
static inline void write_dacr(uint32_t dacr)
{
asm volatile ("mcr p15, 0, %[dacr], c3, c0, 0"
@@ -289,6 +355,61 @@ static inline void dsb(void)
asm volatile ("dsb");
}
+static inline void write_ats1cpw(uint32_t va)
+{
+ asm volatile ("mcr p15, 0, %[va], c7, c8, 1"
+ : : [va] "r" (va)
+ );
+}
+
+static inline uint32_t read_par(void)
+{
+ uint32_t par;
+
+ asm volatile ("mrc p15, 0, %[par], c7, c4, 0"
+ : [par] "=r" (par)
+ );
+ return par;
+}
+
+static inline void write_mair0(uint32_t mair0)
+{
+ asm volatile ("mcr p15, 0, %[mair0], c10, c2, 0"
+ : : [mair0] "r" (mair0)
+ );
+}
+
+static inline void write_prrr(uint32_t prrr)
+{
+ /*
+ * Same physical register as MAIR0.
+ *
+ * When an implementation includes the Large Physical Address
+ * Extension, and address translation is using the Long-descriptor
+ * translation table formats, MAIR0 replaces the PRRR
+ */
+ write_mair0(prrr);
+}
+
+static inline void write_mair1(uint32_t mair1)
+{
+ asm volatile ("mcr p15, 0, %[mair1], c10, c2, 1"
+ : : [mair1] "r" (mair1)
+ );
+}
+
+static inline void write_nmrr(uint32_t nmrr)
+{
+ /*
+ * Same physical register as MAIR1.
+ *
+ * When an implementation includes the Large Physical Address
+ * Extension, and address translation is using the Long-descriptor
+ * translation table formats, MAIR1 replaces the NMRR
+ */
+ write_mair1(nmrr);
+}
+
static inline uint32_t read_contextidr(void)
{
uint32_t contextidr;
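As a side note on the new 64-bit TTBR accessors and the ASID field defines above, a minimal sketch (assuming LPAE is enabled and TTBCR.A1 == 0 so the ASID lives in TTBR0; set_user_ttbr0() is a hypothetical helper mirroring what core_mmu_create_user_map() in core_mmu_lpae.c does further down):

static void set_user_ttbr0(paddr_t table_pa, uint32_t asid)
{
	uint64_t ttbr0 = table_pa;

	/* ASID is held in TTBR0[55:48] when TTBCR.A1 == 0 */
	ttbr0 |= (uint64_t)(asid & TTBR_ASID_MASK) << TTBR_ASID_SHIFT;
	write_ttbr0_64bit(ttbr0);
	isb();
}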
diff --git a/core/arch/arm32/include/mm/core_mmu.h b/core/arch/arm32/include/mm/core_mmu.h
index ff1b3a1..0d21dd5 100644
--- a/core/arch/arm32/include/mm/core_mmu.h
+++ b/core/arch/arm32/include/mm/core_mmu.h
@@ -30,6 +30,37 @@
#include <types_ext.h>
#include <kernel/tee_common_unpg.h>
#include <mm/core_memprot.h>
+#include <mm/tee_mmu_types.h>
+
+#include <assert.h>
+
+/*
+ * PGDIR is the translation table above the translation table that holds
+ * the pages.
+ */
+#ifdef CFG_WITH_LPAE
+#define CORE_MMU_PGDIR_SHIFT 21
+#else
+#define CORE_MMU_PGDIR_SHIFT 20
+#endif
+
+/* Devices are mapped using this granularity */
+#define CORE_MMU_DEVICE_SHIFT CORE_MMU_PGDIR_SHIFT
+#define CORE_MMU_DEVICE_SIZE (1 << CORE_MMU_DEVICE_SHIFT)
+#define CORE_MMU_DEVICE_MASK (CORE_MMU_DEVICE_SIZE - 1)
+
+/* TA user space code, data, stack and heap are mapped using this granularity */
+#define CORE_MMU_USER_CODE_SHIFT CORE_MMU_PGDIR_SHIFT
+#define CORE_MMU_USER_CODE_SIZE (1 << CORE_MMU_USER_CODE_SHIFT)
+#define CORE_MMU_USER_CODE_MASK (CORE_MMU_USER_CODE_SIZE - 1)
+
+/* TA user space parameters are mapped using this granularity */
+#define CORE_MMU_USER_PARAM_SHIFT CORE_MMU_PGDIR_SHIFT
+#define CORE_MMU_USER_PARAM_SIZE (1 << CORE_MMU_USER_PARAM_SHIFT)
+#define CORE_MMU_USER_PARAM_MASK (CORE_MMU_USER_PARAM_SIZE - 1)
+
+/* The maximum VA for user space */
+#define CORE_MMU_USER_MAX_ADDR (32 * 1024 * 1024)
/*
* @type: enumerate: specifiy the purpose of the memory area.
@@ -81,10 +112,179 @@ enum teecore_memtypes {
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
-uint32_t core_map_area_flag(void *p, size_t l);
-void core_init_mmu_tables(void);
+void core_init_mmu_map(void);
void core_init_mmu_regs(void);
+
+#ifdef CFG_WITH_LPAE
+/*
+ * struct core_mmu_user_map - current user mapping register state
+ * @ttbr0: content of ttbr0
+ * @enabled: true if usage of ttbr0 is enabled
+ *
+ * Note that this struct should be treated as an opaque struct since
+ * the content depends on descriptor table format.
+ */
+struct core_mmu_user_map {
+ uint64_t ttbr0;
+ bool enabled;
+};
+#else
+/*
+ * struct core_mmu_user_map - current user mapping register state
+ * @ttbr0: content of ttbr0
+ * @ctxid: content of contextidr
+ *
+ * Note that this struct should be treated as an opaque struct since
+ * the content depends on descriptor table format.
+ */
+struct core_mmu_user_map {
+ uint32_t ttbr0;
+ uint32_t ctxid;
+};
+#endif
+
+/*
+ * enum core_mmu_fault - different kinds of faults
+ * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
+ * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
+ * @CORE_MMU_FAULT_TRANSLATION: translation fault
+ * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
+ * @CORE_MMU_FAULT_OTHER: Other/unknown fault
+ */
+enum core_mmu_fault {
+ CORE_MMU_FAULT_ALIGNMENT,
+ CORE_MMU_FAULT_DEBUG_EVENT,
+ CORE_MMU_FAULT_TRANSLATION,
+ CORE_MMU_FAULT_PERMISSION,
+ CORE_MMU_FAULT_ASYNC_EXTERNAL,
+ CORE_MMU_FAULT_OTHER,
+};
+
+/*
+ * core_mmu_get_fault_type() - get fault type
+ * @fsr: Content of fault status register
+ * @returns an enum describing the content of fault status register.
+ */
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr);
+
+/*
+ * core_mmu_create_user_map() - Create user space mapping
+ * @mmu: Generic representation of user space mapping
+ * @asid: Address space identifier for this mapping
+ * @map: MMU configuration to use when activating this VA space
+ */
+void core_mmu_create_user_map(struct tee_mmu_info *mmu, uint32_t asid,
+ struct core_mmu_user_map *map);
+/*
+ * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
+ * @map: MMU configuration for current user VA space.
+ */
+void core_mmu_get_user_map(struct core_mmu_user_map *map);
+
+/*
+ * core_mmu_set_user_map() - Set new MMU configuration for user VA space
+ * @map: If NULL will disable user VA space, if not NULL the user
+ * VA space to activate.
+ */
+void core_mmu_set_user_map(struct core_mmu_user_map *map);
+
+/*
+ * struct core_mmu_table_info - Properties for a translation table
+ * @table: Pointer to translation table
+ * @va_base: VA base address of the translation table
+ * @level: Translation table level
+ * @shift: The shift of each entry in the table
+ * @num_entries: Number of entries in this table.
+ */
+struct core_mmu_table_info {
+ void *table;
+ vaddr_t va_base;
+ unsigned level;
+ unsigned shift;
+ unsigned num_entries;
+};
+
+/*
+ * core_mmu_find_table() - Locates a translation table
+ * @va: Virtual address for the table to cover
+ * @max_level: Don't traverse beyond this level
+ * @tbl_info: Pointer to where to store properties.
+ * @return true if a translation table was found, false on error
+ */
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info);
+
+/*
+ * core_mmu_set_entry() - Set entry in translation table
+ * @tbl_info: Translation table properties
+ * @idx: Index of entry to update
+ * @pa: Physical address to assign entry
+ * @attr: Attributes to assign entry
+ */
+void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t pa, uint32_t attr);
+
+/*
+ * core_mmu_get_entry() - Get entry from translation table
+ * @tbl_info: Translation table properties
+ * @idx: Index of entry to read
+ * @pa: Physical address is returned here if pa is not NULL
+ * @attr: Attributes are returned here if attr is not NULL
+ */
+void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t *pa, uint32_t *attr);
+
+/*
+ * core_mmu_va2idx() - Translate from virtual address to table index
+ * @tbl_info: Translation table properties
+ * @va: Virtual address to translate
+ * @returns index in translation table
+ */
+static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
+ vaddr_t va)
+{
+ return (va - tbl_info->va_base) >> tbl_info->shift;
+}
+
+/*
+ * core_mmu_idx2va() - Translate from table index to virtual address
+ * @tbl_info: Translation table properties
+ * @idx: Index to translate
+ * @returns Virtual address
+ */
+static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
+ unsigned idx)
+{
+ return (idx << tbl_info->shift) + tbl_info->va_base;
+}
+
+/*
+ * core_mmu_get_block_offset() - Get offset inside a block/page
+ * @tbl_info: Translation table properties
+ * @pa: Physical address
+ * @returns offset within one block of the translation table
+ */
+static inline size_t core_mmu_get_block_offset(
+ struct core_mmu_table_info *tbl_info, paddr_t pa)
+{
+ return pa & ((1 << tbl_info->shift) - 1);
+}
+
+/*
+ * core_mmu_user_mapping_is_active() - Report if user mapping is active
+ * @returns true if a user VA space is active, false if user VA space is
+ * inactive.
+ */
+bool core_mmu_user_mapping_is_active(void);
+
+/*
+ * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
+ * @returns true if the attributes can be used, false if not.
+ */
+bool core_mmu_mattr_is_ok(uint32_t mattr);
+
+#ifndef CFG_WITH_LPAE
paddr_t core_mmu_get_main_ttb_pa(void);
vaddr_t core_mmu_get_main_ttb_va(void);
paddr_t core_mmu_get_ul1_ttb_pa(void);
@@ -98,10 +298,8 @@ vaddr_t core_mmu_get_ul1_ttb_va(void);
* decribed by @map.
* @returns NULL on failure or a pointer to the L2 table(s)
*/
-void *core_mmu_alloc_l2(struct map_area *map);
-
-int core_mmu_map(unsigned long paddr, size_t size, unsigned long flags);
-int core_mmu_unmap(unsigned long paddr, size_t size);
+void *core_mmu_alloc_l2(struct tee_mmap_region *mm);
+#endif
void core_mmu_get_mem_by_type(unsigned int type, unsigned int *s,
unsigned int *e);
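To make the intended call order of the user-mapping interface above concrete, a hedged sketch (activate_user_mapping() is a hypothetical helper; the struct tee_mmu_info and the asid would come from the TA context, as in tee_mmu.c):

static void activate_user_mapping(struct tee_mmu_info *mmu, uint32_t asid)
{
	struct core_mmu_user_map map;

	core_mmu_create_user_map(mmu, asid, &map);	/* build register state */
	core_mmu_set_user_map(&map);	/* switch to the TA's VA space */
	assert(core_mmu_user_mapping_is_active());
}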
diff --git a/core/arch/arm32/include/mm/tee_mm_def.h b/core/arch/arm32/include/mm/tee_mm_def.h
index 996e6ac..8b22e96 100644
--- a/core/arch/arm32/include/mm/tee_mm_def.h
+++ b/core/arch/arm32/include/mm/tee_mm_def.h
@@ -32,42 +32,9 @@
#define SMALL_PAGE_MASK 0x00000fff
#define SMALL_PAGE_SIZE 0x00001000
-#define SECTION_SHIFT 20
-#define SECTION_MASK 0x000fffff
-#define SECTION_SIZE 0x00100000
-
/* define section to load */
#define TEE_DDR_VLOFFSET 0x1
-/* Reset error code */
-#define TEE_RESET_INVALID_PAGE_ERROR 0xBADB7000
-
-/*
- * MMU related values
- */
-#define TEE_MMU_UL1_BASE core_mmu_get_ta_ul1_va()
-#define TEE_MMU_UL1_PA_BASE core_mmu_get_ta_ul1_pa()
-
-#define TEE_MMU_DEFAULT_ATTRS \
- (TEE_MMU_TTB_S | TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
-
-/* Page attributes */
-
-/*
- * Small pages [31:12]PA, not Global, Sharable, Access Permission,
- * Memory region attribute [8:6], Access permissions [5:4],
- * C, B, Small page, Outer and Inner Write-Back, Write-Allocate
- */
-#define TEE_PAGER_PAGE_UNLOADED \
- (TEE_MMU_L2SP_SMALL_PAGE | TEE_MMU_L2SP_WBWA | TEE_MMU_L2SP_S)
-
-#define TEE_PAGER_PAGE_LOADED \
- (TEE_PAGER_PAGE_UNLOADED | TEE_MMU_L2SP_PRIV_ACC)
-
-#define TEE_PAGER_STACKS_ATTRIBUTES \
- (TEE_PAGER_PAGE_LOADED | TEE_MMU_L2SP_XN)
-
-#define TEE_PAGER_NO_ACCESS_ATTRIBUTES 0x00000000
/*
* Register addresses related to time
diff --git a/core/arch/arm32/include/mm/tee_mmu_defs.h b/core/arch/arm32/include/mm/tee_mmu_defs.h
index efa66e8..7d6645f 100644
--- a/core/arch/arm32/include/mm/tee_mmu_defs.h
+++ b/core/arch/arm32/include/mm/tee_mmu_defs.h
@@ -27,6 +27,7 @@
#ifndef TEE_MMU_DEFS_H
#define TEE_MMU_DEFS_H
+#ifndef CFG_WITH_LPAE
/* Defined to the smallest possible secondary L1 MMU table */
#define TEE_MMU_TTBCR_N_VALUE 7
@@ -35,18 +36,15 @@
#define TEE_MMU_UL1_SIZE (TEE_MMU_UL1_NUM_ENTRIES * sizeof(uint32_t))
#define TEE_MMU_UL1_ALIGNMENT TEE_MMU_UL1_SIZE
+#endif
/*
* kmap works in common mapping starting at virtual address just above the
* per CPU user mapping. kmap has 32 MiB of virtual address space.
*/
-#define TEE_MMU_KMAP_OFFS TEE_MMU_UL1_NUM_ENTRIES
-#define TEE_MMU_KMAP_NUM_ENTRIES 32
-#define TEE_MMU_KMAP_START_VA (TEE_MMU_UL1_NUM_ENTRIES << \
- SECTION_SHIFT)
-#define TEE_MMU_KMAP_END_VA ((TEE_MMU_UL1_NUM_ENTRIES + \
- TEE_MMU_KMAP_NUM_ENTRIES) << \
- SECTION_SHIFT)
+#define TEE_MMU_KMAP_START_VA (32 * 1024 * 1024)
+#define TEE_MMU_KMAP_END_VA (64 * 1024 * 1024)
+
#define TEE_MMU_L1_NUM_ENTRIES (TEE_MMU_L1_SIZE / 4)
#define TEE_MMU_L1_SIZE (1 << 14)
@@ -58,58 +56,9 @@
/* TTB attributes */
-/* Mask for all attributes */
-#define TEE_MMU_TTB_ATTR_MASK ((1 << 7) - 1)
/* TTB0 of TTBR0 (depends on TEE_MMU_TTBCR_N_VALUE) */
#define TEE_MMU_TTB_UL1_MASK (~(TEE_MMU_UL1_ALIGNMENT - 1))
/* TTB1 of TTBR1 */
#define TEE_MMU_TTB_L1_MASK (~(TEE_MMU_L1_ALIGNMENT - 1))
-
-/* Sharable */
-#define TEE_MMU_TTB_S (1 << 1)
-
-/* Not Outer Sharable */
-#define TEE_MMU_TTB_NOS (1 << 5)
-
-/* Normal memory, Inner Non-cacheable */
-#define TEE_MMU_TTB_IRGN_NC 0
-
-/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
-#define TEE_MMU_TTB_IRGN_WBWA (1 << 6)
-
-/* Normal memory, Inner Write-Through Cacheable */
-#define TEE_MMU_TTB_IRGN_WT 1
-
-/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
-#define TEE_MMU_TTB_IRGN_WB (1 | (1 << 6))
-
-/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
-#define TEE_MMU_TTB_RNG_WBWA (1 << 3)
-
-/*
- * Second-level descriptor Small page table Attributes
- */
-
-/* Small page */
-#define TEE_MMU_L2SP_SMALL_PAGE (1 << 1)
-
-/* Execute never */
-#define TEE_MMU_L2SP_XN 1
-
-/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
-#define TEE_MMU_L2SP_WBWA ((1 << 6) | (1 << 3) | (1 << 2))
-
-/* Not global */
-#define TEE_MMU_L2SP_NG (1 << 11)
-
-/* Sharable */
-#define TEE_MMU_L2SP_S (1 << 10)
-
-/* Privileged access only */
-#define TEE_MMU_L2SP_PRIV_ACC (1 << 4)
-
-/* Clear access from attribute */
-#define TEE_MMU_L2SP_CLEAR_ACC(attr) ((attr) & ~((1 << 5) | (1 << 4)))
-
#endif /* TEE_MMU_DEFS_H */
diff --git a/core/arch/arm32/include/mm/tee_pager.h b/core/arch/arm32/include/mm/tee_pager.h
index daa720a..6b99804 100644
--- a/core/arch/arm32/include/mm/tee_pager.h
+++ b/core/arch/arm32/include/mm/tee_pager.h
@@ -67,20 +67,6 @@
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
const void *hashes);
-/*
- * tee_pager_init() - Initializes the pager
- * @xlat_table: Address of translation table mapping the region covered
- * by tee_mm_vcore
- * @mm: Memory region with paging activated, should be allocated
- * from tee_mm_vcore
- * @store: Address of backing store of the paged region
- * @hashes: Hashes for the pages in the backing store
- *
- * The pager will use tee_mm_vcore.lo as virtual base address for the
- * tranlation table.
- */
-void tee_pager_init(void *xlat_table);
-
void tee_pager_abort_handler(uint32_t abort_type,
struct thread_abort_regs *regs);
diff --git a/core/arch/arm32/kernel/tee_ta_manager.c b/core/arch/arm32/kernel/tee_ta_manager.c
index 2dadb11..c34aaff 100644
--- a/core/arch/arm32/kernel/tee_ta_manager.c
+++ b/core/arch/arm32/kernel/tee_ta_manager.c
@@ -319,7 +319,8 @@ static void tee_ta_init_heap(struct tee_ta_ctx *const ctx, size_t heap_size)
*/
/* XXX this function shouldn't know this mapping */
- heap_start_addr = ((TEE_DDR_VLOFFSET + 1) << SECTION_SHIFT) - heap_size;
+ heap_start_addr = ((TEE_DDR_VLOFFSET + 1) << CORE_MMU_USER_CODE_SHIFT) -
+ heap_size;
data = (uint32_t *)(tee_ta_get_exec(ctx) + ctx->head->ro_size +
(ctx->head->rel_dyn_got_size & TA_HEAD_GOT_MASK));
@@ -514,7 +515,8 @@ static TEE_Result tee_ta_load_user_ta(struct tee_ta_ctx *ctx,
ctx->mm_heap_stack = tee_mm_alloc(&tee_mm_sec_ddr,
*heap_size + ctx->stack_size);
if (!ctx->mm_heap_stack) {
- EMSG("Failed to allocate %u bytes\n", SECTION_SIZE);
+ EMSG("Failed to allocate %u bytes\n",
+ *heap_size + ctx->stack_size);
EMSG(" of memory for user heap and stack\n");
return TEE_ERROR_OUT_OF_MEMORY;
}
@@ -1493,10 +1495,10 @@ void tee_ta_set_current_session(struct tee_ta_session *sess)
tee_mmu_set_ctx(ctx);
}
/*
- * If sess == NULL we must have kernel mapping,
- * if sess != NULL we must not have kernel mapping.
+ * If sess == NULL we must not have user mapping active,
+ * if sess != NULL we must have user mapping active.
*/
- assert((sess == NULL) == tee_mmu_is_kernel_mapping());
+ assert((sess == NULL) == !core_mmu_user_mapping_is_active());
}
TEE_Result tee_ta_get_client_id(TEE_Identity *id)
diff --git a/core/arch/arm32/kernel/thread.c b/core/arch/arm32/kernel/thread.c
index 35deca3..4968989 100644
--- a/core/arch/arm32/kernel/thread.c
+++ b/core/arch/arm32/kernel/thread.c
@@ -251,7 +251,7 @@ static void thread_resume_from_rpc(struct thread_smc_args *args)
l->curr_thread = n;
if (threads[n].have_user_map)
- tee_mmu_set_map(&threads[n].user_map);
+ core_mmu_set_user_map(&threads[n].user_map);
/*
* Return from RPC to request service of an IRQ must not
@@ -358,10 +358,10 @@ int thread_state_suspend(uint32_t flags, uint32_t cpsr, uint32_t pc)
threads[ct].regs.pc = pc;
threads[ct].state = THREAD_STATE_SUSPENDED;
- threads[ct].have_user_map = !tee_mmu_is_kernel_mapping();
+ threads[ct].have_user_map = core_mmu_user_mapping_is_active();
if (threads[ct].have_user_map) {
- tee_mmu_get_map(&threads[ct].user_map);
- tee_mmu_set_map(NULL);
+ core_mmu_get_user_map(&threads[ct].user_map);
+ core_mmu_set_user_map(NULL);
}
diff --git a/core/arch/arm32/kernel/thread_private.h b/core/arch/arm32/kernel/thread_private.h
index 91c1a19..25156b7 100644
--- a/core/arch/arm32/kernel/thread_private.h
+++ b/core/arch/arm32/kernel/thread_private.h
@@ -28,7 +28,7 @@
#ifndef THREAD_PRIVATE_H
#define THREAD_PRIVATE_H
-#include <mm/tee_mmu_unpg.h>
+#include <mm/core_mmu.h>
#include <kernel/vfp.h>
enum thread_state {
@@ -67,7 +67,7 @@ struct thread_ctx {
uint32_t hyp_clnt_id;
uint32_t flags;
struct thread_ctx_regs regs;
- struct tee_mmu_mapping user_map;
+ struct core_mmu_user_map user_map;
bool have_user_map;
};
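A short sketch of how the new core_mmu_user_map member is meant to be used on the suspend path (suspend_user_mapping() is a hypothetical helper; the real code is the thread_state_suspend() change in thread.c above):

static void suspend_user_mapping(struct thread_ctx *thr)
{
	thr->have_user_map = core_mmu_user_mapping_is_active();
	if (thr->have_user_map) {
		core_mmu_get_user_map(&thr->user_map);
		core_mmu_set_user_map(NULL);	/* disable the user VA space */
	}
}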
diff --git a/core/arch/arm32/mm/core_mmu.c b/core/arch/arm32/mm/core_mmu.c
index 76b9d3c..1be671e 100644
--- a/core/arch/arm32/mm/core_mmu.c
+++ b/core/arch/arm32/mm/core_mmu.c
@@ -36,17 +36,14 @@
#include <assert.h>
#include <kernel/tz_proc.h>
#include <kernel/tz_ssvce.h>
-#include <kernel/thread.h>
-#include <arm32.h>
#include <mm/core_mmu.h>
-#include <mm/core_memprot.h>
-#include <mm/tee_mmu.h>
-#include <mm/tee_mmu_defs.h>
-#include <kernel/misc.h>
#include <trace.h>
#include <kernel/tee_misc.h>
#include <kernel/panic.h>
#include <util.h>
+#include "core_mmu_private.h"
+
+#define MAX_MMAP_REGIONS 10
/* Default NSec shared memory allocated from NSec world */
unsigned long default_nsec_shm_paddr;
@@ -100,183 +97,100 @@ static struct map_area *find_map_by_pa(unsigned long pa)
return NULL;
}
-/* armv7 memory mapping attributes: section mapping */
-#define SECTION_SECURE (0 << 19)
-#define SECTION_NOTSECURE (1 << 19)
-#define SECTION_SHARED (1 << 16)
-#define SECTION_NOTGLOBAL (1 << 17)
-#define SECTION_RW ((0 << 15) | (1 << 10))
-#define SECTION_RO ((1 << 15) | (1 << 10))
-#define SECTION_TEXCB(tex, c, b) ((tex << 12) | (c << 3) | (b << 2))
-#define SECTION_DEVICE SECTION_TEXCB(0, 0, 1)
-#define SECTION_NORMAL SECTION_TEXCB(1, 0, 0)
-#define SECTION_NORMAL_CACHED SECTION_TEXCB(1, 1, 1)
-#define SECTION_NO_EXEC (1 << 4)
-#define SECTION_SECTION (2 << 0)
-
-#define SECTION_PT_NOTSECURE (1 << 3)
-#define SECTION_PT_PT (1 << 0)
-
-#define SMALL_PAGE_SMALL_PAGE (1 << 1)
-#define SMALL_PAGE_SHARED (1 << 10)
-#define SMALL_PAGE_TEXCB(tex, c, b) ((tex << 6) | (c << 3) | (b << 2))
-#define SMALL_PAGE_DEVICE SMALL_PAGE_TEXCB(0, 0, 1)
-#define SMALL_PAGE_NORMAL SMALL_PAGE_TEXCB(1, 0, 0)
-#define SMALL_PAGE_NORMAL_CACHED SMALL_PAGE_TEXCB(1, 1, 1)
-#define SMALL_PAGE_RW ((0 << 9) | (1 << 4))
-#define SMALL_PAGE_RO ((1 << 9) | (1 << 4))
-#define SMALL_PAGE_NO_EXEC (1 << 0)
+static void insert_mmap(struct tee_mmap_region *mm, size_t max_elem,
+ struct tee_mmap_region *mme)
+{
+ size_t n;
+ for (n = 0; n < (max_elem - 1); n++) {
+ if (!mm[n].size) {
+ mm[n] = *mme;
+ return;
+ }
-/*
- * memarea_not_mapped - check memory not already (partially) mapped
- * A finer mapping must be supported. Currently section mapping only!
- */
-static bool memarea_not_mapped(struct map_area *map, void *ttbr0)
-{
- uint32_t m, n;
-
- m = (map->pa >> 20) * 4; /* assumes pa=va */
- n = map->size >> 20;
- while (n--) {
- if (*((uint32_t *)((uint32_t)ttbr0 + m)) != 0) {
- EMSG("m %d [0x%x] map->pa 0x%x map->size 0x%x",
- m, *((uint32_t *)((uint32_t)ttbr0 + m)),
- map->pa, map->size);
- return false;
+ if (core_is_buffer_intersect(mme->va, mme->size, mm[n].va,
+ mm[n].size)) {
+ vaddr_t end_va;
+
+ /* Check that the overlapping maps are compatible */
+ if (mme->attr != mm[n].attr ||
+ (mme->pa - mme->va) != (mm[n].pa - mm[n].va)) {
+ EMSG("Incompatible mmap regions");
+ panic();
+ }
+
+ /* Grow the current map */
+ end_va = MAX(mme->va + mme->size,
+ mm[n].va + mm[n].size);
+ mm[n].va = MIN(mme->va, mm[n].va);
+ mm[n].pa = MIN(mme->pa, mm[n].pa);
+ mm[n].size = end_va - mm[n].va;
+ return;
+ }
+
+ if (mme->va < mm[n].va) {
+ memmove(mm + n + 1, mm + n,
+ (max_elem - n - 1) * sizeof(*mm));
+ mm[n] = *mme;
+ /*
+ * Panics if the terminating element was
+ * overwritten.
+ */
+ if (mm[max_elem - 1].size)
+ break;
+ return;
}
- m += 4;
}
- return true;
+ EMSG("Too many mmap regions");
+ panic();
}
-static paddr_t map_page_memarea(struct map_area *map)
+static void core_mmu_mmap_init(struct tee_mmap_region *mm, size_t max_elem,
+ struct map_area *map)
{
- uint32_t *l2 = core_mmu_alloc_l2(map);
- size_t pg_idx;
- uint32_t attr;
-
- TEE_ASSERT(l2);
-
- attr = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
-
- if (map->device)
- attr |= SMALL_PAGE_DEVICE;
- else if (map->cached)
- attr |= SMALL_PAGE_NORMAL_CACHED;
- else
- attr |= SMALL_PAGE_NORMAL;
-
- if (map->rw)
- attr |= SMALL_PAGE_RW;
- else
- attr |= SMALL_PAGE_RO;
-
- if (!map->exec)
- attr |= SMALL_PAGE_NO_EXEC;
-
- /* Zero fill initial entries */
- pg_idx = 0;
- while ((pg_idx * SMALL_PAGE_SIZE) < (map->pa & SECTION_MASK)) {
- l2[pg_idx] = 0;
- pg_idx++;
- }
+ struct tee_mmap_region mme;
+ size_t n;
- /* Fill in the entries */
- while ((pg_idx * SMALL_PAGE_SIZE) < map->size) {
- l2[pg_idx] = ((map->pa & ~SECTION_MASK) +
- pg_idx * SMALL_PAGE_SIZE) | attr;
- pg_idx++;
- }
+ memset(mm, 0, max_elem * sizeof(struct tee_mmap_region));
- /* Zero fill the rest */
- while (pg_idx < ROUNDUP(map->size, SECTION_SIZE) / SMALL_PAGE_SIZE) {
- l2[pg_idx] = 0;
- pg_idx++;
- }
+ for (n = 0; map[n].type != MEM_AREA_NOTYPE; n++) {
+ mme.pa = map[n].pa;
+ mme.va = map[n].pa;
+ mme.size = map[n].size;
- return (paddr_t)l2;
-}
+ mme.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PR |
+ TEE_MATTR_GLOBAL;
-/*
-* map_memarea - load mapping in target L1 table
-* A finer mapping must be supported. Currently section mapping only!
-*/
-static void map_memarea(struct map_area *map, uint32_t *ttb)
-{
- size_t m, n;
- uint32_t attr;
- paddr_t pa;
- uint32_t region_size;
- uint32_t region_mask;
-
- TEE_ASSERT(map && ttb);
-
- switch (map->region_size) {
- case 0: /* Default to 1MB section mapping */
- case SECTION_SIZE:
- region_size = SECTION_SIZE;
- region_mask = SECTION_MASK;
- break;
- case SMALL_PAGE_SIZE:
- region_size = SMALL_PAGE_SIZE;
- region_mask = SMALL_PAGE_MASK;
- break;
- default:
- panic();
- }
+ if (map[n].device || !map[n].cached)
+ mme.attr |= TEE_MATTR_NONCACHE;
+ else
+ mme.attr |= TEE_MATTR_CACHE_DEFAULT;
- /* invalid area confing */
- if (map->va || ((map->pa + map->size - 1) < map->pa) ||
- !map->size || (map->size & region_mask) ||
- (map->pa & region_mask))
- panic();
+ if (map[n].rw)
+ mme.attr |= TEE_MATTR_PW;
- if (region_size == SECTION_SIZE) {
- attr = SECTION_SHARED | SECTION_NOTGLOBAL | SECTION_SECTION;
+ if (map[n].exec)
+ mme.attr |= TEE_MATTR_PX;
- if (map->device == true)
- attr |= SECTION_DEVICE;
- else if (map->cached == true)
- attr |= SECTION_NORMAL_CACHED;
- else
- attr |= SECTION_NORMAL;
+ if (map[n].secure)
+ mme.attr |= TEE_MATTR_SECURE;
- if (map->rw == true)
- attr |= SECTION_RW;
- else
- attr |= SECTION_RO;
-
- if (map->exec == false)
- attr |= SECTION_NO_EXEC;
- if (map->secure == false)
- attr |= SECTION_NOTSECURE;
-
- pa = map->pa;
- } else {
- attr = SECTION_PT_PT;
- if (!map->secure)
- attr |= SECTION_PT_NOTSECURE;
- pa = map_page_memarea(map);
+ insert_mmap(mm, max_elem, &mme);
}
+}
- map->va = map->pa; /* 1-to-1 pa=va mapping */
- m = (map->pa >> SECTION_SHIFT);
- n = ROUNDUP(map->size, SECTION_SIZE) >> SECTION_SHIFT;
- while (n--) {
- ttb[m] = pa | attr;
- m++;
- if (region_size == SECTION_SIZE)
- pa += SECTION_SIZE;
- else
- pa += TEE_MMU_L2_SIZE;
- }
-}
-/* load_bootcfg_mapping - attempt to map the teecore static mapping */
-static void load_bootcfg_mapping(void *ttb1)
+/*
+ * core_init_mmu_map - init tee core default memory mapping
+ *
+ * this routine sets the static default tee core mapping.
+ *
+ * If an error happens, core_init_mmu_map is expected to reset.
+ */
+void core_init_mmu_map(void)
{
+ struct tee_mmap_region mm[MAX_MMAP_REGIONS + 1];
struct map_area *map, *in;
/* get memory bootcfg from system */
@@ -296,19 +210,13 @@ static void load_bootcfg_mapping(void *ttb1)
map_ta_ram = NULL;
map_nsec_shm = NULL;
- /* reset L1 table */
- memset(ttb1, 0, TEE_MMU_L1_SIZE);
-
/* map what needs to be mapped (non-null size and non INTRAM/EXTRAM) */
map = in;
while (map->type != MEM_AREA_NOTYPE) {
- if (!memarea_not_mapped(map, ttb1)) {
- EMSG("overlapping mapping ! trap CPU");
- TEE_ASSERT(0);
- }
-
- map_memarea(map, ttb1);
+ if (map->va)
+ panic();
+ map->va = map->pa; /* 1-to-1 pa = va mapping */
if (map->type == MEM_AREA_TEE_RAM)
map_tee_ram = map;
else if (map->type == MEM_AREA_TA_RAM)
@@ -326,56 +234,33 @@ static void load_bootcfg_mapping(void *ttb1)
}
static_memory_map = in;
+
+ core_mmu_mmap_init(mm, ARRAY_SIZE(mm), in);
+
+ core_init_mmu_tables(mm);
}
-/*
- * core_init_mmu - init tee core default memory mapping
- *
- * location of target MMU L1 table is provided as argument.
- * this routine sets the static default tee core mapping.
- *
- * If an error happend: core_init_mmu.c is expected to reset.
- */
-void core_init_mmu_tables(void)
+/* routines to retrieve shared mem configuration */
+bool core_mmu_is_shm_cached(void)
{
- load_bootcfg_mapping((void *)core_mmu_get_main_ttb_va());
+ return map_nsec_shm ? map_nsec_shm->cached : false;
}
-void core_init_mmu_regs(void)
+bool core_mmu_mattr_is_ok(uint32_t mattr)
{
- uint32_t sctlr;
- paddr_t ttb_pa = core_mmu_get_main_ttb_pa();
-
- /*
- * Program Domain access control register with two domains:
- * domain 0: teecore
- * domain 1: TA
- */
- write_dacr(DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT) |
- DACR_DOMAIN(1, DACR_DOMAIN_PERM_CLIENT));
-
- /*
- * Disable TEX Remap
- * (This allows TEX field in page table entry take affect)
- */
- sctlr = read_sctlr();
- sctlr &= ~SCTLR_TRE;
- write_sctlr(sctlr);
-
/*
- * Enable lookups using TTBR0 and TTBR1 with the split of addresses
- * defined by TEE_MMU_TTBCR_N_VALUE.
+ * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
+ * core_mmu_v7.c:mattr_to_texcb
*/
- write_ttbcr(TEE_MMU_TTBCR_N_VALUE);
- write_ttbr0(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
- write_ttbr1(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
-}
-
-/* routines to retreive shared mem configuration */
-bool core_mmu_is_shm_cached(void)
-{
- return map_nsec_shm ? map_nsec_shm->cached : false;
+ switch (mattr & (TEE_MATTR_I_WRITE_THR | TEE_MATTR_I_WRITE_BACK |
+ TEE_MATTR_O_WRITE_THR | TEE_MATTR_O_WRITE_BACK)) {
+ case TEE_MATTR_NONCACHE:
+ case TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK:
+ return true;
+ default:
+ return false;
+ }
}
/*
@@ -599,12 +484,3 @@ __weak unsigned int cache_maintenance_l2(int op __unused,
return TEE_ERROR_NOT_IMPLEMENTED;
}
-
-__weak void *core_mmu_alloc_l2(struct map_area *map __unused)
-{
- /*
- * This function should be redefined in platform specific part if
- * needed.
- */
- return NULL;
-}
diff --git a/core/arch/arm32/mm/core_mmu_lpae.c b/core/arch/arm32/mm/core_mmu_lpae.c
new file mode 100644
index 0000000..9ce6d8d
--- /dev/null
+++ b/core/arch/arm32/mm/core_mmu_lpae.c
@@ -0,0 +1,689 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <platform_config.h>
+
+#include <types_ext.h>
+#include <inttypes.h>
+#include <string.h>
+#include <compiler.h>
+#include <assert.h>
+#include <trace.h>
+#include <mm/tee_mmu_defs.h>
+#include <kernel/thread.h>
+#include <kernel/panic.h>
+#include <arm32.h>
+#include "core_mmu_private.h"
+
+#ifndef DEBUG_XLAT_TABLE
+#define DEBUG_XLAT_TABLE 0
+#endif
+
+#if DEBUG_XLAT_TABLE
+#define debug_print(...) DMSG_RAW(__VA_ARGS__)
+#else
+#define debug_print(...) ((void)0)
+#endif
+
+
+/*
+ * Miscellaneous MMU related constants
+ */
+
+/* Defined to the smallest possible secondary L1 MMU table */
+#define TEE_MMU_TTBCR_T0SZ 7
+#define TEE_MMU_TTBCR_T1SZ 0
+
+#define INVALID_DESC 0x0
+#define BLOCK_DESC 0x1
+#define TABLE_DESC 0x3
+
+#define HIDDEN_DESC 0x4
+#define PHYSPAGE_DESC 0x8
+
+
+#define XN (1ull << 2)
+#define PXN (1ull << 1)
+#define CONT_HINT (1ull << 0)
+
+#define UPPER_ATTRS(x) (((x) & 0x7) << 52)
+#define NON_GLOBAL (1ull << 9)
+#define ACCESS_FLAG (1ull << 8)
+#define NSH (0x0 << 6)
+#define OSH (0x2 << 6)
+#define ISH (0x3 << 6)
+
+#define AP_RO (0x1 << 5)
+#define AP_RW (0x0 << 5)
+#define AP_UNPRIV (0x1 << 4)
+
+#define NS (0x1 << 3)
+#define LOWER_ATTRS(x) (((x) & 0xfff) << 2)
+
+#define ATTR_DEVICE_INDEX 0x0
+#define ATTR_IWBWA_OWBWA_NTR_INDEX 0x1
+
+#define ATTR_DEVICE (0x4)
+#define ATTR_IWBWA_OWBWA_NTR (0xff)
+
+#define MAIR_ATTR_SET(attr, index) (attr << ((index) << 3))
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB (0x0)
+#define TCR_PS_BITS_64GB (0x1)
+#define TCR_PS_BITS_1TB (0x2)
+#define TCR_PS_BITS_4TB (0x3)
+#define TCR_PS_BITS_16TB (0x4)
+#define TCR_PS_BITS_256TB (0x5)
+
+#define ADDR_MASK_48_TO_63 0xFFFF000000000000ULL
+#define ADDR_MASK_44_TO_47 0x0000F00000000000ULL
+#define ADDR_MASK_42_TO_43 0x00000C0000000000ULL
+#define ADDR_MASK_40_TO_41 0x0000030000000000ULL
+#define ADDR_MASK_36_TO_39 0x000000F000000000ULL
+#define ADDR_MASK_32_TO_35 0x0000000F00000000ULL
+
+#define UNSET_DESC ((uint64_t)-1)
+
+#define FOUR_KB_SHIFT 12
+#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
+#define PAGE_SIZE (1 << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK (PAGE_SIZE - 1)
+#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
+
+#define XLAT_ENTRY_SIZE_SHIFT 3 /* Each MMU table entry is 8 bytes (1 << 3) */
+#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT
+#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SIZE_SHIFT)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES (1 << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + \
+ XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + \
+ XLAT_TABLE_ENTRIES_SHIFT)
+
+
+
+#define ADDR_SPACE_SIZE (1ull << 32)
+#define MAX_MMAP_REGIONS 16
+#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+
+
+static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
+ __aligned(NUM_L1_ENTRIES * sizeof(uint64_t)) __section(".nozi.mmu.l1");
+
+static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
+ __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
+
+/* MMU L2 table for TAs, one for each Core */
+static uint64_t xlat_tables_ul1[NUM_THREADS][XLAT_TABLE_ENTRIES]
+ __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
+
+
+static unsigned next_xlat __data;
+static uint64_t tcr_ps_bits __data;
+
+static uint32_t desc_to_mattr(uint64_t desc)
+{
+ uint32_t a;
+
+ if (!(desc & 1)) {
+ if (desc & HIDDEN_DESC)
+ return TEE_MATTR_HIDDEN_BLOCK;
+ if (desc & PHYSPAGE_DESC)
+ return TEE_MATTR_PHYS_BLOCK;
+ return 0;
+ }
+
+ a = TEE_MATTR_VALID_BLOCK;
+
+ if (desc & LOWER_ATTRS(ACCESS_FLAG))
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & LOWER_ATTRS(AP_RO)))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
+ a &= ~TEE_MATTR_URWX;
+
+ if (desc & UPPER_ATTRS(XN))
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ if (desc & UPPER_ATTRS(PXN))
+ a &= ~TEE_MATTR_PX;
+
+ switch (desc & LOWER_ATTRS(0x7)) {
+ case LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX):
+ a |= TEE_MATTR_CACHE_DEFAULT;
+ break;
+ case LOWER_ATTRS(ATTR_DEVICE_INDEX):
+ a |= TEE_MATTR_NONCACHE;
+ break;
+ default:
+ a |= TEE_MATTR_CACHE_UNKNOWN;
+ break;
+ }
+
+ if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
+ a |= TEE_MATTR_GLOBAL;
+
+ if (!(desc & LOWER_ATTRS(NS)))
+ a |= TEE_MATTR_SECURE;
+
+ return a;
+}
+
+static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
+{
+ uint64_t desc;
+ uint32_t a = attr;
+
+ if (a & TEE_MATTR_HIDDEN_BLOCK)
+ return INVALID_DESC | HIDDEN_DESC;
+
+ if (a & TEE_MATTR_PHYS_BLOCK)
+ return INVALID_DESC | PHYSPAGE_DESC;
+
+ if (!(a & TEE_MATTR_VALID_BLOCK))
+ return 0;
+
+ if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
+ a |= TEE_MATTR_PR;
+ if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
+ a |= TEE_MATTR_UR;
+ if (a & TEE_MATTR_UR)
+ a |= TEE_MATTR_PR;
+ if (a & TEE_MATTR_UW)
+ a |= TEE_MATTR_PW;
+
+ desc = level == 3 ? TABLE_DESC : BLOCK_DESC;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= UPPER_ATTRS(XN);
+ if (!(a & TEE_MATTR_PX))
+ desc |= UPPER_ATTRS(PXN);
+
+ if (a & TEE_MATTR_UR)
+ desc |= LOWER_ATTRS(AP_UNPRIV);
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= LOWER_ATTRS(AP_RO);
+
+ /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
+ switch (a & (TEE_MATTR_I_WRITE_THR | TEE_MATTR_I_WRITE_BACK |
+ TEE_MATTR_O_WRITE_THR | TEE_MATTR_O_WRITE_BACK)) {
+ case TEE_MATTR_NONCACHE:
+ desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
+ break;
+ case TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK:
+ desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+ break;
+ default:
+ /*
+ * "Can't happen" the attribute is supposed to be checked
+ * with core_mmu_mattr_is_ok() before.
+ */
+ panic();
+ }
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= LOWER_ATTRS(NON_GLOBAL);
+
+ desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);
+
+ return desc;
+}
+
+static uint64_t mmap_desc(uint32_t attr, uint64_t addr_pa,
+ unsigned level)
+{
+ return mattr_to_desc(level, attr) | addr_pa;
+}
+
+static int mmap_region_attr(struct tee_mmap_region *mm, uint64_t base_va,
+ uint64_t size)
+{
+ uint32_t attr = mm->attr;
+
+ for (;;) {
+ mm++;
+
+ if (!mm->size)
+ return attr; /* Reached end of list */
+
+ if (mm->va >= base_va + size)
+ return attr; /* Next region is after area so end */
+
+ if (mm->va + mm->size <= base_va)
+ continue; /* Next region has already been overtaken */
+
+ if (mm->attr == attr)
+ continue; /* Region doesn't override attribs so skip */
+
+ if (mm->va > base_va ||
+ mm->va + mm->size < base_va + size)
+ return -1; /* Region doesn't fully cover our area */
+ }
+}
+
+static struct tee_mmap_region *init_xlation_table(struct tee_mmap_region *mm,
+ uint64_t base_va, uint64_t *table, unsigned level)
+{
+ unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
+ XLAT_TABLE_ENTRIES_SHIFT;
+ unsigned level_size = 1 << level_size_shift;
+ uint64_t level_index_mask = XLAT_TABLE_ENTRIES_MASK << level_size_shift;
+
+ assert(level <= 3);
+
+ debug_print("New xlat table (level %u):", level);
+
+ do {
+ uint64_t desc = UNSET_DESC;
+
+ if (mm->va + mm->size <= base_va) {
+ /* Area now after the region so skip it */
+ mm++;
+ continue;
+ }
+
+
+ if (mm->va >= base_va + level_size) {
+ /* Next region is after area so nothing to map yet */
+ desc = INVALID_DESC;
+ debug_print("%*s%010" PRIx64 " %8x",
+ level * 2, "", base_va, level_size);
+ } else if (mm->va <= base_va && mm->va + mm->size >=
+ base_va + level_size) {
+ /* Next region covers all of area */
+ int attr = mmap_region_attr(mm, base_va, level_size);
+
+ if (attr >= 0) {
+ desc = mmap_desc(attr,
+ base_va - mm->va + mm->pa,
+ level);
+ debug_print("%*s%010" PRIx64 " %8x %s-%s-%s-%s",
+ level * 2, "", base_va, level_size,
+ attr & TEE_MATTR_CACHE_DEFAULT ?
+ "MEM" : "DEV",
+ attr & TEE_MATTR_PW ? "RW" : "RO",
+ attr & TEE_MATTR_PX ? "X" : "XN",
+ attr & TEE_MATTR_SECURE ? "S" : "NS");
+ } else {
+ debug_print("%*s%010" PRIx64 " %8x",
+ level * 2, "", base_va, level_size);
+ }
+ }
+ /* else Next region only partially covers area, so need */
+
+ if (desc == UNSET_DESC) {
+ /* Area not covered by a region so need finer table */
+ uint64_t *new_table = xlat_tables[next_xlat++];
+
+ assert(next_xlat <= MAX_XLAT_TABLES);
+ desc = TABLE_DESC | (uint64_t)(uintptr_t)new_table;
+
+ /* Recurse to fill in new table */
+ mm = init_xlation_table(mm, base_va, new_table,
+ level + 1);
+ }
+
+ *table++ = desc;
+ base_va += level_size;
+ } while (mm->size && (base_va & level_index_mask));
+
+ return mm;
+}
+
+static unsigned int calc_physical_addr_size_bits(uint64_t max_addr)
+{
+ /* Physical address can't exceed 48 bits */
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+
+ /* 48 bits address */
+ if (max_addr & ADDR_MASK_44_TO_47)
+ return TCR_PS_BITS_256TB;
+
+ /* 44 bits address */
+ if (max_addr & ADDR_MASK_42_TO_43)
+ return TCR_PS_BITS_16TB;
+
+ /* 42 bits address */
+ if (max_addr & ADDR_MASK_40_TO_41)
+ return TCR_PS_BITS_4TB;
+
+ /* 40 bits address */
+ if (max_addr & ADDR_MASK_36_TO_39)
+ return TCR_PS_BITS_1TB;
+
+ /* 36 bits address */
+ if (max_addr & ADDR_MASK_32_TO_35)
+ return TCR_PS_BITS_64GB;
+
+ return TCR_PS_BITS_4GB;
+}
+
+void core_init_mmu_tables(struct tee_mmap_region *mm)
+{
+ paddr_t max_pa = 0;
+ uint64_t max_va = 0;
+ size_t n;
+
+ for (n = 0; mm[n].size; n++) {
+ paddr_t pa_end;
+ vaddr_t va_end;
+
+ debug_print(" %010" PRIx32 " %010" PRIx32 " %10" PRIx32 " %x",
+ mm[n].pa, mm[n].pa, mm[n].size, mm[n].attr);
+
+ assert(IS_PAGE_ALIGNED(mm[n].pa));
+ assert(IS_PAGE_ALIGNED(mm[n].size));
+
+ pa_end = mm[n].pa + mm[n].size - 1;
+ va_end = mm[n].va + mm[n].size - 1;
+ if (pa_end > max_pa)
+ max_pa = pa_end;
+ if (va_end > max_va)
+ max_va = va_end;
+ }
+
+ init_xlation_table(mm, 0, l1_xlation_table, 1);
+ tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+ COMPILE_TIME_ASSERT(ADDR_SPACE_SIZE > 0);
+ assert(max_va < ADDR_SPACE_SIZE);
+}
+
+void core_init_mmu_regs(void)
+{
+ uint32_t ttbcr = TTBCR_EAE;
+ uint32_t mair;
+
+ mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+ mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+ write_mair0(mair);
+
+ ttbcr |= TEE_MMU_TTBCR_T0SZ << TTBCR_T0SZ_SHIFT;
+ ttbcr |= TEE_MMU_TTBCR_T1SZ << TTBCR_T1SZ_SHIFT;
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN1_SHIFT;
+ ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN1_SHIFT;
+ ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;
+ ttbcr |= TTBCR_SHX_ISH << TTBCR_SH1_SHIFT;
+
+ /* Disable the use of TTBR0 which handles low addresses */
+ ttbcr |= TTBCR_EPD0;
+
+ /* TTBCR.A1 = 0 => ASID is stored in TTBR0 */
+
+ write_ttbcr(ttbcr);
+
+ write_ttbr0_64bit(0);
+ write_ttbr1_64bit((paddr_t)l1_xlation_table);
+}
+
+static void set_region(struct core_mmu_table_info *tbl_info,
+ struct tee_mmap_region *region)
+{
+ unsigned end;
+ unsigned idx;
+ paddr_t pa;
+
+ /* va, len and pa should be block aligned */
+ assert(!core_mmu_get_block_offset(tbl_info, region->va));
+ assert(!core_mmu_get_block_offset(tbl_info, region->size));
+ assert(!core_mmu_get_block_offset(tbl_info, region->pa));
+
+ idx = core_mmu_va2idx(tbl_info, region->va);
+ end = core_mmu_va2idx(tbl_info, region->va + region->size);
+ pa = region->pa;
+
+ while (idx < end) {
+ core_mmu_set_entry(tbl_info, idx, pa, region->attr);
+ idx++;
+ pa += 1 << tbl_info->shift;
+ }
+}
+
+static paddr_t populate_user_map(struct tee_mmu_info *mmu)
+{
+ struct core_mmu_table_info tbl_info;
+ unsigned n;
+ struct tee_mmap_region region;
+
+ tbl_info.table = xlat_tables_ul1[thread_get_id()];
+ tbl_info.va_base = 0;
+ tbl_info.level = 2;
+ tbl_info.shift = L2_XLAT_ADDRESS_SHIFT;
+ tbl_info.num_entries = XLAT_TABLE_ENTRIES;
+
+ region.pa = 0;
+ region.va = 0;
+ region.attr = 0;
+
+ for (n = 0; n < mmu->size; n++) {
+ if (!mmu->table[n].size)
+ continue;
+
+ /* Empty mapping for gaps */
+ region.size = mmu->table[n].va - region.va;
+ set_region(&tbl_info, &region);
+
+ set_region(&tbl_info, mmu->table + n);
+ region.va = mmu->table[n].va + mmu->table[n].size;
+ assert(region.va <= CORE_MMU_USER_MAX_ADDR);
+ }
+ region.size = CORE_MMU_USER_MAX_ADDR - region.va;
+ set_region(&tbl_info, &region);
+
+ return (paddr_t)tbl_info.table;
+}
+
+void core_mmu_create_user_map(struct tee_mmu_info *mmu, uint32_t asid,
+ struct core_mmu_user_map *map)
+{
+ if (mmu) {
+ map->ttbr0 = populate_user_map(mmu);
+ map->ttbr0 |= (uint64_t)(asid & TTBR_ASID_MASK) <<
+ TTBR_ASID_SHIFT;
+ map->enabled = true;
+ } else {
+ map->ttbr0 = 0;
+ map->enabled = false;
+ }
+}
+
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info)
+{
+ uint64_t *tbl = l1_xlation_table;
+ uintptr_t ntbl;
+ unsigned level = 1;
+ vaddr_t va_base = 0;
+ unsigned num_entries = NUM_L1_ENTRIES;
+
+ while (true) {
+ unsigned level_size_shift =
+ L1_XLAT_ADDRESS_SHIFT - (level - 1) *
+ XLAT_TABLE_ENTRIES_SHIFT;
+ unsigned n = (va - va_base) >> level_size_shift;
+
+ if (n >= num_entries)
+ return false;
+
+ if (level == max_level || level == 3 ||
+ (tbl[n] & TABLE_DESC) != TABLE_DESC) {
+ /*
+ * We've either reached max_level, level 3, a block
+ * mapping entry or an "invalid" mapping entry.
+ */
+ tbl_info->table = tbl;
+ tbl_info->va_base = va_base;
+ tbl_info->level = level;
+ tbl_info->shift = level_size_shift;
+ tbl_info->num_entries = num_entries;
+ return true;
+ }
+
+ /* Copy bits 39:12 from tbl[n] to ntbl */
+ ntbl = (tbl[n] & ((1ULL << 40) - 1)) & ~((1 << 12) - 1);
+
+ tbl = (uint64_t *)ntbl;
+
+ va_base += n << level_size_shift;
+ level++;
+ num_entries = XLAT_TABLE_ENTRIES;
+ }
+}
+
+void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t pa, uint32_t attr)
+{
+ uint64_t *table = tbl_info->table;
+ uint64_t desc = mattr_to_desc(tbl_info->level, attr);
+
+ assert(idx < tbl_info->num_entries);
+
+ table[idx] = desc | pa;
+}
+
+void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t *pa, uint32_t *attr)
+{
+ uint64_t *table = tbl_info->table;
+
+ assert(idx < tbl_info->num_entries);
+
+ if (pa)
+ *pa = (table[idx] & ((1ull << 40) - 1)) & ~((1 << 12) - 1);
+
+ if (attr)
+ *attr = desc_to_mattr(table[idx]);
+}
+
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ map->ttbr0 = read_ttbr0_64bit();
+ if (read_ttbcr() & TTBCR_EPD0)
+ map->enabled = false;
+ else
+ map->enabled = true;
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint32_t ttbcr;
+ uint64_t ttbr;
+ uint32_t cpsr = read_cpsr();
+
+ write_cpsr(cpsr | CPSR_FIA);
+
+ ttbcr = read_ttbcr();
+
+ if (map && map->enabled) {
+ ttbr = map->ttbr0;
+ ttbcr &= ~TTBCR_EPD0;
+ } else {
+ ttbr = 0;
+ ttbcr |= TTBCR_EPD0;
+ }
+ write_ttbr0_64bit(ttbr);
+ write_ttbcr(ttbcr);
+ isb();
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ write_cpsr(cpsr);
+}
+
+bool core_mmu_user_mapping_is_active(void)
+{
+ return !(read_ttbcr() & TTBCR_EPD0);
+}
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr)
+{
+ assert(fsr & FSR_LPAE);
+ switch (fsr & FSR_STATUS_MASK) {
+ case 0x21: /* b100001 Alignment fault */
+ return CORE_MMU_FAULT_ALIGNMENT;
+ case 0x11: /* b010001 Asynchronous external abort (DFSR only) */
+ return CORE_MMU_FAULT_ASYNC_EXTERNAL;
+ case 0x22: /* b100010 Debug event */
+ return CORE_MMU_FAULT_DEBUG_EVENT;
+ default:
+ break;
+ }
+
+ switch ((fsr & FSR_STATUS_MASK) >> 2) {
+ case 0x1: /* b0001LL Translation fault */
+ return CORE_MMU_FAULT_TRANSLATION;
+ case 0x2: /* b0010LL Access flag fault */
+ case 0x3: /* b0011LL Permission fault */
+ return CORE_MMU_FAULT_PERMISSION;
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
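A sketch of how an abort handler could consume core_mmu_get_fault_type() above (handle_pageable_fault() is a hypothetical pager hook, not part of this commit; the real dispatch lives in tee_pager.c):

static void handle_data_abort(uint32_t dfsr, vaddr_t fault_va)
{
	switch (core_mmu_get_fault_type(dfsr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		EMSG("Alignment fault");
		panic();
		break;
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_PERMISSION:
		/* Give the pager a chance to resolve the fault */
		handle_pageable_fault(fault_va);
		break;
	default:
		panic();
	}
}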
diff --git a/core/include/mm/tee_mmu_unpg.h b/core/arch/arm32/mm/core_mmu_private.h
index 2626010..777c8b0 100644
--- a/core/include/mm/tee_mmu_unpg.h
+++ b/core/arch/arm32/mm/core_mmu_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, STMicroelectronics International N.V.
+ * Copyright (c) 2015, Linaro Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -24,23 +24,14 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef TEE_MMU_UNPG_H
-#define TEE_MMU_UNPG_H
+#ifndef CORE_MMU_PRIVATE_H
+#define CORE_MMU_PRIVATE_H
-#include <stdint.h>
+#include <mm/core_mmu.h>
+#include <mm/tee_mmu_types.h>
-struct tee_mmu_mapping {
- uint32_t ttbr0;
- uint32_t ctxid;
-};
-void tee_mmu_get_map(struct tee_mmu_mapping *map);
+void core_init_mmu_tables(struct tee_mmap_region *mm);
-void tee_mmu_set_map(struct tee_mmu_mapping *map);
+#endif /*CORE_MMU_PRIVATE_H*/
-/*
- * Switch TTBR0 configuration and Context ID (PROCID & ASID)
- */
-void tee_mmu_switch(uint32_t ttbr0_base, uint32_t ctxid);
-
-#endif /* TEE_MMU_UNPG_H */
diff --git a/core/arch/arm32/mm/core_mmu_v7.c b/core/arch/arm32/mm/core_mmu_v7.c
new file mode 100644
index 0000000..4ca6a1c
--- /dev/null
+++ b/core/arch/arm32/mm/core_mmu_v7.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2014, STMicroelectronics International N.V.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <platform_config.h>
+
+#include <stdlib.h>
+#include <assert.h>
+#include <arm32.h>
+#include <mm/core_mmu.h>
+#include <mm/tee_mmu_defs.h>
+#include <trace.h>
+#include <kernel/panic.h>
+#include <util.h>
+#include "core_mmu_private.h"
+
+/*
+ * MMU related values
+ */
+
+/* Sharable */
+#define TEE_MMU_TTB_S (1 << 1)
+
+/* Not Outer Sharable */
+#define TEE_MMU_TTB_NOS (1 << 5)
+
+/* Normal memory, Inner Non-cacheable */
+#define TEE_MMU_TTB_IRGN_NC 0
+
+/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WBWA (1 << 6)
+
+/* Normal memory, Inner Write-Through Cacheable */
+#define TEE_MMU_TTB_IRGN_WT 1
+
+/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+#define TEE_MMU_TTB_IRGN_WB (1 | (1 << 6))
+
+/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
+#define TEE_MMU_TTB_RNG_WBWA (1 << 3)
+
+#define TEE_MMU_DEFAULT_ATTRS \
+ (TEE_MMU_TTB_S | TEE_MMU_TTB_NOS | \
+ TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
+
+
+#define INVALID_DESC 0x0
+#define HIDDEN_DESC 0x4
+#define PHYSPAGE_DESC 0x8
+
+
+#define SECTION_SHIFT 20
+#define SECTION_MASK 0x000fffff
+#define SECTION_SIZE 0x00100000
+
+/* armv7 memory mapping attributes: section mapping */
+#define SECTION_SECURE (0 << 19)
+#define SECTION_NOTSECURE (1 << 19)
+#define SECTION_SHARED (1 << 16)
+#define SECTION_NOTGLOBAL (1 << 17)
+#define SECTION_ACCESS_FLAG (1 << 10)
+#define SECTION_UNPRIV (1 << 11)
+#define SECTION_RO (1 << 15)
+#define SECTION_TEXCB(texcb) ((((texcb) >> 2) << 12) | \
+ ((((texcb) >> 1) & 0x1) << 3) | \
+ (((texcb) & 0x1) << 2))
+#define SECTION_DEVICE SECTION_TEXCB(ATTR_DEVICE_INDEX)
+#define SECTION_NORMAL SECTION_TEXCB(ATTR_DEVICE_INDEX)
+#define SECTION_NORMAL_CACHED SECTION_TEXCB(ATTR_IWBWA_OWBWA_INDEX)
+
+#define SECTION_XN (1 << 4)
+#define SECTION_PXN (1 << 0)
+#define SECTION_SECTION (2 << 0)
+
+#define SECTION_PT_NOTSECURE (1 << 3)
+#define SECTION_PT_PT (1 << 0)
+
+#define SMALL_PAGE_SMALL_PAGE (1 << 1)
+#define SMALL_PAGE_SHARED (1 << 10)
+#define SMALL_PAGE_NOTGLOBAL (1 << 11)
+#define SMALL_PAGE_TEXCB(texcb) ((((texcb) >> 2) << 6) | \
+ ((((texcb) >> 1) & 0x1) << 3) | \
+ (((texcb) & 0x1) << 2))
+#define SMALL_PAGE_DEVICE SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
+#define SMALL_PAGE_NORMAL SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
+#define SMALL_PAGE_NORMAL_CACHED SMALL_PAGE_TEXCB(ATTR_IWBWA_OWBWA_INDEX)
+#define SMALL_PAGE_ACCESS_FLAG (1 << 4)
+#define SMALL_PAGE_UNPRIV (1 << 5)
+#define SMALL_PAGE_RO (1 << 9)
+#define SMALL_PAGE_XN (1 << 0)
+
+
+/* The TEX, C and B bits concatenated */
+#define ATTR_DEVICE_INDEX 0x0
+#define ATTR_IWBWA_OWBWA_INDEX 0x1
+
+#define PRRR_IDX(idx, tr, nos) (((tr) << (2 * (idx))) | \
+ ((uint32_t)(nos) << ((idx) + 24)))
+#define NMRR_IDX(idx, ir, or) (((ir) << (2 * (idx))) | \
+ ((uint32_t)(or) << (2 * (idx) + 16)))
+#define PRRR_DS0 (1 << 16)
+#define PRRR_DS1 (1 << 17)
+#define PRRR_NS0 (1 << 18)
+#define PRRR_NS1 (1 << 19)
+
+#define ATTR_DEVICE_PRRR PRRR_IDX(ATTR_DEVICE_INDEX, 1, 0)
+#define ATTR_DEVICE_NMRR NMRR_IDX(ATTR_DEVICE_INDEX, 0, 0)
+
+#define ATTR_IWBWA_OWBWA_PRRR PRRR_IDX(ATTR_IWBWA_OWBWA_INDEX, 2, 1)
+#define ATTR_IWBWA_OWBWA_NMRR NMRR_IDX(ATTR_IWBWA_OWBWA_INDEX, 1, 1)
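With SCTLR.TRE set, the TEX[0]:C:B bits in a descriptor no longer encode memory attributes directly; they form a 3-bit index into PRRR (memory type, shareability remap) and NMRR (inner/outer cacheability). The following standalone sketch simply re-derives the macros above to show the register words that end up being programmed for the two indices used here (index 0 = Device, index 1 = inner/outer write-back write-allocate). It is an illustration only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Copies of the macros above, for illustration only */
#define PRRR_IDX(idx, tr, nos)	(((tr) << (2 * (idx))) | \
				 ((uint32_t)(nos) << ((idx) + 24)))
#define NMRR_IDX(idx, ir, or)	(((ir) << (2 * (idx))) | \
				 ((uint32_t)(or) << (2 * (idx) + 16)))
#define PRRR_DS1		(1 << 17)
#define PRRR_NS1		(1 << 19)

int main(void)
{
	/* Index 0: Device memory (TR=1), index 1: Normal memory (TR=2) */
	uint32_t prrr = PRRR_IDX(0, 1, 0) | PRRR_IDX(1, 2, 1);
	/* Index 0: non-cacheable, index 1: write-back write-allocate */
	uint32_t nmrr = NMRR_IDX(0, 0, 0) | NMRR_IDX(1, 1, 1);

	/* Same extra bits as core_init_mmu_regs() adds below */
	prrr |= PRRR_NS1 | PRRR_DS1;

	printf("PRRR=0x%08x NMRR=0x%08x\n", prrr, nmrr);
	return 0;
}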
+
+enum desc_type {
+ DESC_TYPE_PAGE_TABLE,
+ DESC_TYPE_SECTION,
+ DESC_TYPE_SUPER_SECTION,
+ DESC_TYPE_LARGE_PAGE,
+ DESC_TYPE_SMALL_PAGE,
+ DESC_TYPE_INVALID,
+};
+
+static enum desc_type get_desc_type(unsigned level, uint32_t desc)
+{
+ assert(level >= 1 && level <= 2);
+
+ if (level == 1) {
+ if ((desc & 0x3) == 0x1)
+ return DESC_TYPE_PAGE_TABLE;
+
+ if ((desc & 0x2) == 0x2) {
+ if (desc & (1 << 18))
+ return DESC_TYPE_SUPER_SECTION;
+ return DESC_TYPE_SECTION;
+ }
+ } else {
+ if ((desc & 0x3) == 0x1)
+ return DESC_TYPE_LARGE_PAGE;
+
+ if ((desc & 0x2) == 0x2)
+ return DESC_TYPE_SMALL_PAGE;
+ }
+
+ return DESC_TYPE_INVALID;
+}
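The short-descriptor format encodes the entry type in the two low bits, with bit 18 distinguishing supersections from sections at level 1. A minimal, self-contained check of that decoding, using a trimmed copy of the get_desc_type() logic above against a few hand-built descriptors (the addresses are made-up examples):

#include <assert.h>
#include <stdint.h>

enum dt { PAGE_TABLE, SECTION, SUPER_SECTION, LARGE_PAGE, SMALL_PAGE, INVALID };

/* Same decoding as get_desc_type() above, trimmed for illustration */
static enum dt decode(unsigned level, uint32_t desc)
{
	if (level == 1) {
		if ((desc & 0x3) == 0x1)
			return PAGE_TABLE;
		if (desc & 0x2)
			return (desc & (1 << 18)) ? SUPER_SECTION : SECTION;
	} else {
		if ((desc & 0x3) == 0x1)
			return LARGE_PAGE;
		if (desc & 0x2)
			return SMALL_PAGE;
	}
	return INVALID;
}

int main(void)
{
	assert(decode(1, 0x40000001) == PAGE_TABLE); /* L2 table pointer */
	assert(decode(1, 0x40000002) == SECTION);    /* 1 MiB section */
	assert(decode(2, 0x40000002) == SMALL_PAGE); /* 4 KiB page */
	assert(decode(1, 0x0) == INVALID);           /* unmapped */
	return 0;
}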
+
+static uint32_t texcb_to_mattr(uint32_t texcb)
+{
+ switch (texcb) {
+ case ATTR_IWBWA_OWBWA_INDEX:
+ return TEE_MATTR_CACHE_DEFAULT;
+ case ATTR_DEVICE_INDEX:
+ return TEE_MATTR_NONCACHE;
+ default:
+ return TEE_MATTR_CACHE_UNKNOWN;
+ }
+}
+
+static uint32_t mattr_to_texcb(uint32_t attr)
+{
+ /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
+ switch (attr & (TEE_MATTR_I_WRITE_THR | TEE_MATTR_I_WRITE_BACK |
+ TEE_MATTR_O_WRITE_THR | TEE_MATTR_O_WRITE_BACK)) {
+ case TEE_MATTR_NONCACHE:
+ return ATTR_DEVICE_INDEX;
+ case TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK:
+ return ATTR_IWBWA_OWBWA_INDEX;
+ default:
+ /*
+ * "Can't happen" the attribute is supposed to be checked
+ * with core_mmu_mattr_is_ok() before.
+ */
+ panic();
+ }
+}
+
+
+static uint32_t desc_to_mattr(unsigned level, uint32_t desc)
+{
+ uint32_t a = TEE_MATTR_VALID_BLOCK;
+
+ switch (get_desc_type(level, desc)) {
+ case DESC_TYPE_SECTION:
+ if (desc & SECTION_ACCESS_FLAG)
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & SECTION_RO))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (desc & SECTION_XN)
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ if (desc & SECTION_PXN)
+ a &= ~TEE_MATTR_PX;
+
+ a |= texcb_to_mattr(((desc >> 12) & 0x7) | ((desc >> 2) & 0x3));
+
+ if (!(desc & SECTION_NOTGLOBAL))
+ a |= TEE_MATTR_GLOBAL;
+
+ if (!(desc & SECTION_NOTSECURE))
+ a |= TEE_MATTR_SECURE;
+
+ break;
+ case DESC_TYPE_SMALL_PAGE:
+ if (desc & SMALL_PAGE_ACCESS_FLAG)
+ a |= TEE_MATTR_PRX | TEE_MATTR_URX;
+
+ if (!(desc & SMALL_PAGE_RO))
+ a |= TEE_MATTR_PW | TEE_MATTR_UW;
+
+ if (desc & SMALL_PAGE_XN)
+ a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
+
+ a |= texcb_to_mattr(((desc >> 6) & 0x7) | ((desc >> 2) & 0x3));
+
+ if (!(desc & SMALL_PAGE_NOTGLOBAL))
+ a |= TEE_MATTR_GLOBAL;
+ break;
+ case DESC_TYPE_INVALID:
+ if (desc & HIDDEN_DESC)
+ return TEE_MATTR_HIDDEN_BLOCK;
+ if (desc & PHYSPAGE_DESC)
+ return TEE_MATTR_PHYS_BLOCK;
+ return 0;
+ default:
+ return 0;
+ }
+
+ return a;
+}
+
+static uint32_t mattr_to_desc(unsigned level, uint32_t attr)
+{
+ uint32_t desc;
+ uint32_t a = attr;
+ unsigned texcb;
+
+ if (a & TEE_MATTR_HIDDEN_BLOCK)
+ return INVALID_DESC | HIDDEN_DESC;
+
+ if (a & TEE_MATTR_PHYS_BLOCK)
+ return INVALID_DESC | PHYSPAGE_DESC;
+
+ if (level == 1 && (a & TEE_MATTR_TABLE)) {
+ desc = SECTION_PT_PT;
+ if (!(a & TEE_MATTR_SECURE))
+ desc |= SECTION_PT_NOTSECURE;
+ return desc;
+ }
+
+ if (!(a & TEE_MATTR_VALID_BLOCK))
+ return 0;
+
+ if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
+ a |= TEE_MATTR_PR;
+ if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
+ a |= TEE_MATTR_UR;
+ if (a & TEE_MATTR_UR)
+ a |= TEE_MATTR_PR;
+ if (a & TEE_MATTR_UW)
+ a |= TEE_MATTR_PW;
+
+
+ texcb = mattr_to_texcb(a);
+
+ if (level == 1) { /* Section */
+ desc = SECTION_SECTION | SECTION_SHARED;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= SECTION_XN;
+
+#ifdef CFG_HWSUPP_MEM_PERM_PXN
+ if (!(a & TEE_MATTR_PX))
+ desc |= SECTION_PXN;
+#endif
+
+ if (a & TEE_MATTR_UR)
+ desc |= SECTION_UNPRIV;
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= SECTION_RO;
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= SECTION_ACCESS_FLAG;
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= SECTION_NOTGLOBAL;
+
+ if (!(a & TEE_MATTR_SECURE))
+ desc |= SECTION_NOTSECURE;
+
+ desc |= SECTION_TEXCB(texcb);
+ } else {
+ desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
+
+ if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
+ desc |= SMALL_PAGE_XN;
+
+ if (a & TEE_MATTR_UR)
+ desc |= SMALL_PAGE_UNPRIV;
+
+ if (!(a & TEE_MATTR_PW))
+ desc |= SMALL_PAGE_RO;
+
+ if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
+ desc |= SMALL_PAGE_ACCESS_FLAG;
+
+ if (!(a & TEE_MATTR_GLOBAL))
+ desc |= SMALL_PAGE_NOTGLOBAL;
+
+ desc |= SMALL_PAGE_TEXCB(texcb);
+ }
+
+ return desc;
+}
+
+static void set_region(struct core_mmu_table_info *tbl_info,
+ struct tee_mmap_region *region)
+{
+ unsigned end;
+ unsigned idx;
+ paddr_t pa;
+
+ /* va, len and pa should be block aligned */
+ assert(!core_mmu_get_block_offset(tbl_info, region->va));
+ assert(!core_mmu_get_block_offset(tbl_info, region->size));
+ assert(!core_mmu_get_block_offset(tbl_info, region->pa));
+
+ idx = core_mmu_va2idx(tbl_info, region->va);
+ end = core_mmu_va2idx(tbl_info, region->va + region->size);
+ pa = region->pa;
+
+ while (idx < end) {
+ core_mmu_set_entry(tbl_info, idx, pa, region->attr);
+ idx++;
+ pa += 1 << tbl_info->shift;
+ }
+}
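set_region() relies on the generic table-info abstraction: the entry index for a VA is (va - va_base) >> shift, and each entry maps 1 << shift bytes of physical memory. A standalone sketch of that walk follows; the helper mirrors core_mmu_va2idx() but is a local stand-in, and the addresses are invented example values.

#include <stdint.h>
#include <stdio.h>

struct tbl {
	uint32_t va_base;
	unsigned shift;		/* 20 for L1 sections, 12 for L2 pages */
};

static unsigned va2idx(const struct tbl *t, uint32_t va)
{
	return (va - t->va_base) >> t->shift;
}

int main(void)
{
	struct tbl l1 = { .va_base = 0, .shift = 20 };
	uint32_t va = 0x00200000;	/* example: 2 MiB region at VA 2 MiB */
	uint32_t pa = 0x80200000;
	uint32_t size = 0x00200000;
	unsigned idx = va2idx(&l1, va);
	unsigned end = va2idx(&l1, va + size);

	while (idx < end) {
		printf("entry %u -> pa 0x%08x\n", idx, pa);
		idx++;
		pa += 1 << l1.shift;	/* one block of PA per entry */
	}
	return 0;
}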
+
+static paddr_t populate_user_map(struct tee_mmu_info *mmu)
+{
+ struct core_mmu_table_info tbl_info;
+ unsigned n;
+ struct tee_mmap_region region;
+
+ tbl_info.table = (void *)core_mmu_get_ul1_ttb_va();
+ tbl_info.va_base = 0;
+ tbl_info.level = 1;
+ tbl_info.shift = SECTION_SHIFT;
+ tbl_info.num_entries = TEE_MMU_UL1_NUM_ENTRIES;
+
+ region.pa = 0;
+ region.va = 0;
+ region.attr = 0;
+
+ for (n = 0; n < mmu->size; n++) {
+ if (!mmu->table[n].size)
+ continue;
+
+ /* Empty mapping for gaps */
+ region.size = mmu->table[n].va - region.va;
+ set_region(&tbl_info, &region);
+
+ set_region(&tbl_info, mmu->table + n);
+ region.va = mmu->table[n].va + mmu->table[n].size;
+ assert(region.va <= CORE_MMU_USER_MAX_ADDR);
+ }
+ region.size = CORE_MMU_USER_MAX_ADDR - region.va;
+ set_region(&tbl_info, &region);
+
+ return core_mmu_get_ul1_ttb_pa() | TEE_MMU_DEFAULT_ATTRS;
+}
+
+void core_mmu_create_user_map(struct tee_mmu_info *mmu, uint32_t asid,
+ struct core_mmu_user_map *map)
+{
+ if (mmu) {
+ map->ttbr0 = populate_user_map(mmu);
+ map->ctxid = asid & 0xff;
+ } else {
+ map->ttbr0 = read_ttbr1();
+ map->ctxid = 0;
+ }
+}
+
+static void set_info_table(struct core_mmu_table_info *tbl_info,
+ unsigned level, vaddr_t va_base, void *table)
+{
+ tbl_info->level = level;
+ tbl_info->table = table;
+ tbl_info->va_base = va_base;
+ if (level == 1) {
+ tbl_info->shift = SECTION_SHIFT;
+ tbl_info->num_entries = TEE_MMU_L1_NUM_ENTRIES;
+ } else {
+ tbl_info->shift = SMALL_PAGE_SHIFT;
+ tbl_info->num_entries = TEE_MMU_L2_NUM_ENTRIES;
+ }
+}
+
+bool core_mmu_find_table(vaddr_t va, unsigned max_level,
+ struct core_mmu_table_info *tbl_info)
+{
+ uint32_t *tbl = (uint32_t *)core_mmu_get_main_ttb_va();
+ unsigned n = va >> SECTION_SHIFT;
+
+ if (max_level == 1 || (tbl[n] & 0x3) != 0x1) {
+ set_info_table(tbl_info, 1, 0, tbl);
+ } else {
+ uintptr_t ntbl = tbl[n] & ~((1 << 10) - 1);
+
+ set_info_table(tbl_info, 2, n << SECTION_SHIFT, (void *)ntbl);
+ }
+ return true;
+}
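Together with core_mmu_get_entry()/core_mmu_set_entry() this gives callers a uniform way to edit whatever table covers a VA without knowing whether it is the L1 section table or an L2 page table. A hedged usage sketch in kernel context, assuming only the declarations this patch adds (the includes are approximate and remap_one() is a hypothetical helper, not part of the patch):

#include <mm/core_mmu.h>
#include <kernel/panic.h>

/*
 * Illustration only: remap one existing entry covering 'va' to a new
 * physical address, keeping its attributes.
 */
static void remap_one(vaddr_t va, paddr_t new_pa)
{
	struct core_mmu_table_info tbl_info;
	unsigned idx;
	paddr_t old_pa;
	uint32_t attr;

	if (!core_mmu_find_table(va, UINT_MAX, &tbl_info))
		panic();

	idx = core_mmu_va2idx(&tbl_info, va);
	core_mmu_get_entry(&tbl_info, idx, &old_pa, &attr);
	core_mmu_set_entry(&tbl_info, idx, new_pa, attr);
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}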
+
+void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t pa, uint32_t attr)
+{
+ uint32_t *table = tbl_info->table;
+ uint32_t desc = mattr_to_desc(tbl_info->level, attr);
+
+ assert(idx < tbl_info->num_entries);
+
+ table[idx] = desc | pa;
+}
+
+static paddr_t desc_to_pa(unsigned level, uint32_t desc)
+{
+ unsigned shift_mask;
+
+ switch (get_desc_type(level, desc)) {
+ case DESC_TYPE_PAGE_TABLE:
+ shift_mask = 10;
+ break;
+ case DESC_TYPE_SECTION:
+ shift_mask = 20;
+ break;
+ case DESC_TYPE_SUPER_SECTION:
+ shift_mask = 24; /* We're ignoring bits 32 and above. */
+ break;
+ case DESC_TYPE_LARGE_PAGE:
+ shift_mask = 16;
+ break;
+ case DESC_TYPE_SMALL_PAGE:
+ shift_mask = 12;
+ break;
+ default:
+ /* Invalid section, HIDDEN_DESC, PHYSPAGE_DESC */
+ shift_mask = 4;
+ }
+
+ return desc & ~((1 << shift_mask) - 1);
+}
+
+void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
+ paddr_t *pa, uint32_t *attr)
+{
+ uint32_t *table = tbl_info->table;
+
+ assert(idx < tbl_info->num_entries);
+
+ if (pa)
+ *pa = desc_to_pa(tbl_info->level, table[idx]);
+
+ if (attr)
+ *attr = desc_to_mattr(tbl_info->level, table[idx]);
+}
+
+void core_mmu_get_user_map(struct core_mmu_user_map *map)
+{
+ map->ttbr0 = read_ttbr0();
+ map->ctxid = read_contextidr();
+}
+
+void core_mmu_set_user_map(struct core_mmu_user_map *map)
+{
+ uint32_t cpsr = read_cpsr();
+
+ write_cpsr(cpsr | CPSR_FIA);
+
+ /*
+ * Update the reserved Context ID and TTBR0
+ */
+
+ dsb(); /* ARM erratum 754322 */
+ write_contextidr(0);
+ isb();
+
+ if (map) {
+ write_ttbr0(map->ttbr0);
+ isb();
+ write_contextidr(map->ctxid);
+ } else {
+ write_ttbr0(read_ttbr1());
+ }
+ isb();
+ core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
+ write_cpsr(cpsr);
+}
+
+bool core_mmu_user_mapping_is_active(void)
+{
+ return read_ttbr0() != read_ttbr1();
+}
+
+static paddr_t map_page_memarea(struct tee_mmap_region *mm)
+{
+ uint32_t *l2 = core_mmu_alloc_l2(mm);
+ size_t pg_idx;
+ uint32_t attr;
+
+ TEE_ASSERT(l2);
+
+ attr = mattr_to_desc(2, mm->attr);
+
+ /* Zero fill initial entries */
+ pg_idx = 0;
+ while ((pg_idx * SMALL_PAGE_SIZE) < (mm->pa & SECTION_MASK)) {
+ l2[pg_idx] = 0;
+ pg_idx++;
+ }
+
+ /* Fill in the entries */
+ while ((pg_idx * SMALL_PAGE_SIZE) < mm->size) {
+ l2[pg_idx] = ((mm->pa & ~SMALL_PAGE_MASK) +
+ pg_idx * SMALL_PAGE_SIZE) | attr;
+ pg_idx++;
+ }
+
+ /* Zero fill the rest */
+ while (pg_idx < ROUNDUP(mm->size, SECTION_SIZE) / SMALL_PAGE_SIZE) {
+ l2[pg_idx] = 0;
+ pg_idx++;
+ }
+
+ return (paddr_t)l2;
+}
+
+/*
+ * map_memarea - load a mapping into the target L1 table.
+ * Section mapping is used when va/pa/size are section aligned; otherwise
+ * the area is mapped with small pages through an L2 table.
+ */
+static void map_memarea(struct tee_mmap_region *mm, uint32_t *ttb)
+{
+ size_t m, n;
+ uint32_t attr;
+ paddr_t pa;
+ uint32_t region_size;
+
+ TEE_ASSERT(mm && ttb);
+
+ if ((mm->va | mm->pa | mm->size) & SECTION_MASK) {
+ region_size = SMALL_PAGE_SIZE;
+
+ /*
+ * Need a finer-grained mapping; if small pages aren't
+ * good enough, panic.
+ */
+ if ((mm->va | mm->pa | mm->size) & SMALL_PAGE_MASK) {
+ EMSG("va 0x%x pa 0x%x size 0x%x can't be mapped",
+ mm->va, mm->pa, mm->size);
+ panic();
+ }
+
+ attr = mattr_to_desc(1, mm->attr | TEE_MATTR_TABLE);
+ pa = map_page_memarea(mm);
+ } else {
+ region_size = SECTION_SIZE;
+
+ attr = mattr_to_desc(1, mm->attr);
+ pa = mm->pa;
+ }
+
+ m = (mm->pa >> SECTION_SHIFT);
+ n = ROUNDUP(mm->size, SECTION_SIZE) >> SECTION_SHIFT;
+ while (n--) {
+ ttb[m] = pa | attr;
+ m++;
+ if (region_size == SECTION_SIZE)
+ pa += SECTION_SIZE;
+ else
+ pa += TEE_MMU_L2_SIZE;
+ }
+}
+
+void core_init_mmu_tables(struct tee_mmap_region *mm)
+{
+ void *ttb1 = (void *)core_mmu_get_main_ttb_va();
+ size_t n;
+
+ /* reset L1 table */
+ memset(ttb1, 0, TEE_MMU_L1_SIZE);
+
+ for (n = 0; mm[n].size; n++)
+ map_memarea(mm + n, ttb1);
+}
+
+void core_init_mmu_regs(void)
+{
+ uint32_t prrr;
+ uint32_t nmrr;
+ paddr_t ttb_pa = core_mmu_get_main_ttb_pa();
+
+ /* Enable Access flag (simplified access permissions) and TEX remap */
+ write_sctlr(read_sctlr() | SCTLR_AFE | SCTLR_TRE);
+
+ prrr = ATTR_DEVICE_PRRR | ATTR_IWBWA_OWBWA_PRRR;
+ nmrr = ATTR_DEVICE_NMRR | ATTR_IWBWA_OWBWA_NMRR;
+
+ prrr |= PRRR_NS1 | PRRR_DS1;
+
+ write_prrr(prrr);
+ write_nmrr(nmrr);
+
+
+ /*
+ * Program Domain access control register with two domains:
+ * domain 0: teecore
+ * domain 1: TA
+ */
+ write_dacr(DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT) |
+ DACR_DOMAIN(1, DACR_DOMAIN_PERM_CLIENT));
+
+ /*
+ * Enable lookups using TTBR0 and TTBR1 with the split of addresses
+ * defined by TEE_MMU_TTBCR_N_VALUE.
+ */
+ write_ttbcr(TEE_MMU_TTBCR_N_VALUE);
+
+ write_ttbr0(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
+ write_ttbr1(ttb_pa | TEE_MMU_DEFAULT_ATTRS);
+}
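With TTBCR.N non-zero, translation table walks use TTBR0 for VAs whose top N bits are zero (i.e. addresses below 2^(32-N)) and TTBR1 for everything above, which is what lets the small per-TA user mapping live in TTBR0 while the static kernel mapping stays in TTBR1. A small standalone illustration of that boundary; N here is an example value, not necessarily TEE_MMU_TTBCR_N_VALUE, whose definition is not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned n = 7;	/* example TTBCR.N value only */
	uint32_t ttbr0_limit = 1u << (32 - n);

	printf("TTBR0 covers VA [0x0, 0x%08x)\n", ttbr0_limit);
	printf("TTBR0 L1 table: %u section entries (%u bytes)\n",
	       ttbr0_limit >> 20, (ttbr0_limit >> 20) * 4);
	return 0;
}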
+
+__weak void *core_mmu_alloc_l2(struct tee_mmap_region *mm __unused)
+{
+ /*
+ * This function should be redefined in platform specific part if
+ * needed.
+ */
+ return NULL;
+}
+
+enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr)
+{
+ assert(!(fsr & FSR_LPAE));
+ switch (fsr & FSR_FS_MASK) {
+ case 0x1: /* DFSR[10,3:0] 0b00001 Alignment fault (DFSR only) */
+ return CORE_MMU_FAULT_ALIGNMENT;
+ case 0x2: /* DFSR[10,3:0] 0b00010 Debug event */
+ return CORE_MMU_FAULT_DEBUG_EVENT;
+ case 0x5: /* DFSR[10,3:0] b00101 Translation fault first level */
+ case 0x7: /* DFSR[10,3:0] b00111 Translation fault second level */
+ return CORE_MMU_FAULT_TRANSLATION;
+ case 0xd: /* DFSR[10,3:0] b01101 Permission fault first level */
+ case 0xf: /* DFSR[10,3:0] b01111 Permission fault second level */
+ return CORE_MMU_FAULT_PERMISSION;
+
+ case (1 << 10) | 0x6:
+ /* DFSR[10,3:0] 0b10110 Async external abort (DFSR only) */
+ return CORE_MMU_FAULT_ASYNC_EXTERNAL;
+
+ default:
+ return CORE_MMU_FAULT_OTHER;
+ }
+}
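core_mmu_get_fault_type() keys on the short-descriptor FSR encoding, where the fault status field is split across bit 10 and bits [3:0]. A self-contained sketch decoding two sample values with the same switch (trimmed to the cases shown above):

#include <stdint.h>
#include <stdio.h>

#define FS_MASK	0x040f	/* FS field: bit 10 and bits 3:0 */

static const char *fault_name(uint32_t fsr)
{
	switch (fsr & FS_MASK) {
	case 0x1:
		return "alignment";
	case 0x5:
	case 0x7:
		return "translation";
	case 0xd:
	case 0xf:
		return "permission";
	case (1 << 10) | 0x6:
		return "async external";
	default:
		return "other";
	}
}

int main(void)
{
	printf("fsr 0x007 -> %s fault\n", fault_name(0x007)); /* 2nd level translation */
	printf("fsr 0x80f -> %s fault\n", fault_name(0x80f)); /* permission, WnR set */
	return 0;
}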
diff --git a/core/arch/arm32/mm/sub.mk b/core/arch/arm32/mm/sub.mk
index f8e8909..5723124 100644
--- a/core/arch/arm32/mm/sub.mk
+++ b/core/arch/arm32/mm/sub.mk
@@ -1,6 +1,10 @@
srcs-y += core_mmu.c
srcs-y += tee_pager.c
srcs-y += tee_mmu.c
+ifeq ($(CFG_WITH_LPAE),y)
+srcs-y += core_mmu_lpae.c
+else
+srcs-y += core_mmu_v7.c
+endif
srcs-y += tee_mm.c
srcs-y += tee_mm_unpg.c
-srcs-y += tee_mmu_unpg.c
diff --git a/core/arch/arm32/mm/tee_mm.c b/core/arch/arm32/mm/tee_mm.c
index b742481..323e425 100644
--- a/core/arch/arm32/mm/tee_mm.c
+++ b/core/arch/arm32/mm/tee_mm.c
@@ -26,6 +26,7 @@
*/
#include <kernel/tee_common.h>
+#include <util.h>
#include <trace.h>
#include <mm/tee_mm.h>
@@ -38,8 +39,8 @@ bool tee_mm_init(tee_mm_pool_t *pool, uint32_t lo, uint32_t hi, uint8_t shift,
if (pool == NULL)
return false;
- pool->lo = lo;
- pool->hi = hi;
+ pool->lo = ROUNDUP(lo, 1 << shift);
+ pool->hi = ROUNDDOWN(hi, 1 << shift);
pool->shift = shift;
pool->flags = flags;
pool->entry = calloc(1, sizeof(tee_mm_entry_t));
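The bugfix above matters when a pool is created over a range that is not a multiple of the pool granule: lo is now rounded up and hi rounded down, so no allocation can straddle the ends of the range. A small standalone example of the arithmetic, using copies of the usual power-of-two ROUNDUP/ROUNDDOWN macros (assumed to match util.h) and made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(v, size)	(((v) + (size) - 1) & ~((size) - 1))
#define ROUNDDOWN(v, size)	((v) & ~((size) - 1))

int main(void)
{
	uint32_t lo = 0x80100123, hi = 0x80200fff;
	uint8_t shift = 12;	/* 4 KiB granule, e.g. SMALL_PAGE_SHIFT */

	printf("lo: 0x%08x -> 0x%08x\n", lo, (uint32_t)ROUNDUP(lo, 1 << shift));
	printf("hi: 0x%08x -> 0x%08x\n", hi, (uint32_t)ROUNDDOWN(hi, 1 << shift));
	return 0;
}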
diff --git a/core/arch/arm32/mm/tee_mmu.c b/core/arch/arm32/mm/tee_mmu.c
index 0d2d986..75a9945 100644
--- a/core/arch/arm32/mm/tee_mmu.c
+++ b/core/arch/arm32/mm/tee_mmu.c
@@ -26,12 +26,12 @@
*/
#include <assert.h>
#include <stdlib.h>
+#include <types_ext.h>
#include <arm32.h>
#include <util.h>
#include <kernel/tee_common.h>
#include <mm/tee_mmu.h>
-#include <mm/tee_mmu_unpg.h>
#include <mm/tee_mmu_types.h>
#include <mm/tee_mmu_defs.h>
#include <user_ta_header.h>
@@ -48,153 +48,174 @@
#include <kernel/tz_ssvce.h>
#include <kernel/panic.h>
-#define TEE_MMU_PAGE_TEX_SHIFT 6
-
-/* MMU table page flags */
-#define TEE_MMU_PAGE_NG (1 << 11)
-#define TEE_MMU_PAGE_S (1 << 10)
-#define TEE_MMU_PAGE_AP2 (1 << 9)
-#define TEE_MMU_PAGE_TEX(x) (x << TEE_MMU_PAGE_TEX_SHIFT)
-#define TEE_MMU_PAGE_AP1 (1 << 5)
-#define TEE_MMU_PAGE_AP0 (1 << 4)
-#define TEE_MMU_PAGE_C (1 << 3)
-#define TEE_MMU_PAGE_B (1 << 2)
-#define TEE_MMU_PAGE (1 << 1)
-#define TEE_MMU_PAGE_XN (1 << 0)
-
-#define TEE_MMU_PAGE_CACHE_MASK \
- (TEE_MMU_PAGE_TEX(7) | TEE_MMU_PAGE_C | TEE_MMU_PAGE_B)
-
-#define TEE_MMU_PAGE_MASK ((1 << 12) - 1)
-
-/* For legacy */
-#define TEE_MMU_PAGE_LEGACY 0
-
-/* MMU table section flags */
-#define TEE_MMU_SECTION_NS (1 << 19)
-#define TEE_MMU_SECTION_NG (1 << 17)
-#define TEE_MMU_SECTION_S (1 << 16)
-#define TEE_MMU_SECTION_AP2 (1 << 15)
-#define TEE_MMU_SECTION_TEX(x) (x << 12)
-#define TEE_MMU_SECTION_AP1 (1 << 11)
-#define TEE_MMU_SECTION_AP0 (1 << 10)
-#define TEE_MMU_SECTION_DOMAIN(x) (x << 5)
-#define TEE_MMU_SECTION_XN (1 << 4)
-#define TEE_MMU_SECTION_C (1 << 3)
-#define TEE_MMU_SECTION_B (1 << 2)
-#define TEE_MMU_SECTION (1 << 1)
-
-/* User data, no cache attributes */
-#define TEE_MMU_SECTION_UDATA \
- (TEE_MMU_SECTION_NG | TEE_MMU_SECTION_S | \
- TEE_MMU_SECTION_AP1 | TEE_MMU_SECTION_AP0 | TEE_MMU_SECTION_XN |\
- TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
-
-/* User code, no cache attributes */
-#define TEE_MMU_SECTION_UCODE \
- (TEE_MMU_SECTION_NG | TEE_MMU_SECTION_S | \
- TEE_MMU_SECTION_AP1 | TEE_MMU_SECTION_AP0 | \
- TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
-
-/* Kernel data, global, privonly access, no exec, no cache attributes */
-#define TEE_MMU_SECTION_KDATA \
- (TEE_MMU_SECTION_S | \
- TEE_MMU_SECTION_AP0 | TEE_MMU_SECTION_XN | \
- TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
-
-/* Kernel data, global, privonly access, no exec, no cache attributes */
-#define TEE_MMU_SECTION_KCODE \
- (TEE_MMU_SECTION_S | \
- TEE_MMU_SECTION_AP0 | \
- TEE_MMU_SECTION_DOMAIN(1) | TEE_MMU_SECTION)
-
-/* Outer & Inner Write-Back, Write-Allocate. Default cache settings */
-#define TEE_MMU_SECTION_CACHEMASK \
- (TEE_MMU_SECTION_TEX(7) | TEE_MMU_SECTION_C | TEE_MMU_SECTION_B)
-#define TEE_MMU_SECTION_OIWBWA \
- (TEE_MMU_SECTION_TEX(1) | TEE_MMU_SECTION_C | TEE_MMU_SECTION_B)
-#define TEE_MMU_SECTION_NOCACHE \
- TEE_MMU_SECTION_TEX(1)
-
-#define TEE_MMU_UL1_ENTRY(page_num) \
- (*(uint32_t *)(TEE_MMU_UL1_BASE + ((uint32_t)(page_num)) * 4))
-
-/* Extract AP[2] and AP[1:0] */
-#define TEE_MMU_L1_AP(e) (((e >> 13) & 1) | ((e >> 10) & 3))
-
-#define TEE_MMU_AP_USER_RO 0x02
-#define TEE_MMU_AP_USER_RW 0x03
+#define TEE_MMU_UMAP_HEAP_STACK_IDX 0
+#define TEE_MMU_UMAP_CODE_IDX 1
+#define TEE_MMU_UMAP_PARAM_IDX 2
+#define TEE_MMU_UMAP_MAX_ENTRIES 6
+
+#define TEE_MMU_UDATA_ATTR (TEE_MATTR_VALID_BLOCK | \
+ TEE_MATTR_PRW | TEE_MATTR_URW | \
+ TEE_MATTR_SECURE)
+#define TEE_MMU_UCODE_ATTR (TEE_MATTR_VALID_BLOCK | \
+ TEE_MATTR_PRW | TEE_MATTR_URWX | \
+ TEE_MATTR_SECURE)
+
+#define TEE_MMU_UCACHE_DEFAULT_ATTR (TEE_MATTR_I_WRITE_BACK | \
+ TEE_MATTR_O_WRITE_BACK)
/* Support for 31 concurrent sessions */
static uint32_t g_asid = 0xffffffff;
static tee_mm_pool_t tee_mmu_virt_kmap;
-static uint32_t tee_mmu_get_io_size(const struct tee_ta_param *param)
+
+static void tee_mmu_umap_clear(struct tee_mmu_info *mmu)
{
- uint32_t i;
- uint32_t res = 0;
-
- for (i = 0; i < 4; i++) {
- uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, i);
-
- if ((param_type == TEE_PARAM_TYPE_MEMREF_INPUT ||
- param_type == TEE_PARAM_TYPE_MEMREF_OUTPUT ||
- param_type == TEE_PARAM_TYPE_MEMREF_INOUT) &&
- param->params[i].memref.size != 0) {
- res +=
- ((((uint32_t) param->params[i].memref.
- buffer & SECTION_MASK) +
- param->params[i].memref.size) >> SECTION_SHIFT) +
- 1;
- }
+ if (mmu->table && mmu->size != TEE_MMU_UMAP_MAX_ENTRIES) {
+ free(mmu->table);
+ mmu->table = NULL;
}
- return res;
+ if (!mmu->table)
+ return;
+
+ memset(mmu->table, 0, sizeof(struct tee_mmap_region) *
+ TEE_MMU_UMAP_MAX_ENTRIES);
}
-/*
- * tee_mmu_is_mapped - Check if range defined by input params is mapped.
- */
-static bool tee_mmu_is_mapped(const struct tee_ta_ctx *ctx, const paddr_t addr,
- const uint32_t length, const uint32_t type)
+
+
+static TEE_Result tee_mmu_umap_init(struct tee_mmu_info *mmu)
{
- uint32_t n;
- uint32_t section_start;
- uint32_t section_end;
- uint32_t *t;
- void *va;
+ tee_mmu_umap_clear(mmu);
+
+ if (!mmu->table) {
+ mmu->table = calloc(TEE_MMU_UMAP_MAX_ENTRIES,
+ sizeof(struct tee_mmap_region));
+ if (!mmu->table)
+ return TEE_ERROR_OUT_OF_MEMORY;
+ mmu->size = TEE_MMU_UMAP_MAX_ENTRIES;
+ }
- if (!ctx || !ctx->mmu || !ctx->mmu->table)
- return false; /* No user mapping initialized */
+ return TEE_SUCCESS;
+}
- if (((addr + length) >> SECTION_SHIFT) > ctx->mmu->size)
- return false; /* Range too large to be mapped */
+static void tee_mmu_umap_set_pa(struct tee_mmap_region *tbl,
+ size_t granule, paddr_t pa, size_t size, uint32_t attr)
+{
+ paddr_t upa = ROUNDDOWN(pa, granule);
+ size_t usz = ROUNDUP(pa - upa + size, granule);
- /* Try to look up start of range */
- if (tee_mmu_user_pa2va(ctx, (void *)addr, &va))
- return false;
+ tbl->pa = upa;
+ tbl->size = usz;
+ tbl->attr = attr;
+}
- /* Assign the base section */
- t = ctx->mmu->table + ((vaddr_t)va >> SECTION_SHIFT);
+static TEE_Result tee_mmu_umap_add_param(struct tee_mmu_info *mmu, paddr_t pa,
+ size_t size, uint32_t attr)
+{
+ struct tee_mmap_region *last_entry = NULL;
+ size_t n;
+ paddr_t npa;
+ size_t nsz;
- /*
- * Check all sections maps contiguous memory and have the correct type.
- */
- section_start = addr >> SECTION_SHIFT;
- section_end = (addr + length - 1) >> SECTION_SHIFT;
- for (n = 0; n <= section_end - section_start; n++) {
- if ((t[n] & SECTION_MASK) != type)
- return false; /* Incorrect type */
-
- if (t[n] >> SECTION_SHIFT !=
- ((n + section_start) >> SECTION_SHIFT))
- return false; /* PA doesn't match */
+ /* Check that we can map memory using this attribute */
+ if (!core_mmu_mattr_is_ok(attr))
+ return TEE_ERROR_BAD_PARAMETERS;
+
+ /* Find empty entry */
+ for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++)
+ if (!mmu->table[n].size)
+ break;
+
+ if (n == TEE_MMU_UMAP_MAX_ENTRIES) {
+ /* No entries left "can't happen" */
+ return TEE_ERROR_EXCESS_DATA;
}
- return true;
+ tee_mmu_umap_set_pa(mmu->table + n, CORE_MMU_USER_PARAM_SIZE,
+ pa, size, attr);
+
+ /* Try to coalesce some entries */
+ while (true) {
+ /* Find last param */
+ n = TEE_MMU_UMAP_MAX_ENTRIES - 1;
+
+ while (!mmu->table[n].size) {
+ n--;
+ if (n < TEE_MMU_UMAP_PARAM_IDX) {
+ /* No param entries found, "can't happen" */
+ return TEE_ERROR_BAD_STATE;
+ }
+ }
+
+ if (last_entry == mmu->table + n)
+ return TEE_SUCCESS; /* Can't coalesce more */
+ last_entry = mmu->table + n;
+
+ n--;
+ while (n >= TEE_MMU_UMAP_PARAM_IDX) {
+ struct tee_mmap_region *entry = mmu->table + n;
+
+ n--;
+ if (last_entry->attr != entry->attr) {
+ if (core_is_buffer_intersect(last_entry->pa,
+ last_entry->size,
+ entry->pa,
+ entry->size))
+ return TEE_ERROR_ACCESS_CONFLICT;
+ continue;
+ }
+
+ if ((last_entry->pa + last_entry->size) == entry->pa ||
+ (entry->pa + entry->size) == last_entry->pa ||
+ core_is_buffer_intersect(last_entry->pa,
+ last_entry->size,
+ entry->pa, entry->size)) {
+ npa = MIN(last_entry->pa, entry->pa);
+ nsz = MAX(last_entry->pa + last_entry->size,
+ entry->pa + entry->size) - npa;
+ entry->pa = npa;
+ entry->size = nsz;
+ last_entry->pa = 0;
+ last_entry->size = 0;
+ last_entry->attr = 0;
+ break;
+ }
+ }
+ }
}
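The coalescing loop above merges parameter entries whose granule-rounded physical ranges touch or overlap and that carry identical attributes, so two memrefs pointing into the same buffer consume one mapping entry instead of two. A standalone sketch of just the merge arithmetic, with MIN/MAX copied locally and invented example regions:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

struct region { uint32_t pa, size; };

int main(void)
{
	/* Two granule-aligned param regions that happen to be adjacent */
	struct region a = { .pa = 0x81000000, .size = 0x3000 };
	struct region b = { .pa = 0x81003000, .size = 0x1000 };

	if (a.pa + a.size == b.pa || b.pa + b.size == a.pa) {
		uint32_t npa = MIN(a.pa, b.pa);
		uint32_t nsz = MAX(a.pa + a.size, b.pa + b.size) - npa;

		printf("merged: pa 0x%08x size 0x%x\n", npa, nsz);
	}
	return 0;
}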
+static TEE_Result tee_mmu_umap_set_vas(struct tee_mmu_info *mmu)
+{
+ size_t n;
+ vaddr_t va;
+
+ assert(mmu->table && mmu->size == TEE_MMU_UMAP_MAX_ENTRIES);
+
+ va = CORE_MMU_USER_CODE_SIZE;
+ for (n = 0; n < TEE_MMU_UMAP_PARAM_IDX; n++) {
+ assert(mmu->table[n].size); /* PA must be assigned by now */
+ mmu->table[n].va = va;
+ va += CORE_MMU_USER_CODE_SIZE;
+ }
+
+ va = ROUNDUP(va, CORE_MMU_USER_PARAM_SIZE);
+ for (; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
+ if (!mmu->table[n].size)
+ continue;
+ mmu->table[n].va = va;
+ va += mmu->table[n].size;
+ /* Put some empty space between each area */
+ va += CORE_MMU_USER_PARAM_SIZE;
+ if (va >= CORE_MMU_USER_MAX_ADDR)
+ return TEE_ERROR_EXCESS_DATA;
+ }
+
+ return TEE_SUCCESS;
+}
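tee_mmu_umap_set_vas() lays out the user address space deterministically: the heap/stack and code entries each get a CORE_MMU_USER_CODE_SIZE slot starting at CORE_MMU_USER_CODE_SIZE (so VA 0 stays unmapped), and parameter regions follow above, rounded up to CORE_MMU_USER_PARAM_SIZE with a guard gap after each one. A sketch of the resulting layout; the granule sizes below are example values only, the real ones come from the platform configuration and are not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(v, size)	(((v) + (size) - 1) & ~((size) - 1))

int main(void)
{
	const uint32_t code_size = 0x100000;	/* stand-in for CORE_MMU_USER_CODE_SIZE */
	const uint32_t param_size = 0x100000;	/* stand-in for CORE_MMU_USER_PARAM_SIZE */
	uint32_t param_len[] = { 0x200000, 0x100000 };	/* two mapped params */
	uint32_t va = code_size;
	unsigned n;

	printf("heap/stack at 0x%08x\n", va); va += code_size;
	printf("code       at 0x%08x\n", va); va += code_size;

	va = ROUNDUP(va, param_size);
	for (n = 0; n < 2; n++) {
		printf("param %u    at 0x%08x\n", n, va);
		va += param_len[n] + param_size;	/* guard gap after each */
	}
	return 0;
}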
+
+
TEE_Result tee_mmu_init(struct tee_ta_ctx *ctx)
{
uint32_t asid = 1;
@@ -215,115 +236,13 @@ TEE_Result tee_mmu_init(struct tee_ta_ctx *ctx)
g_asid &= ~asid;
}
- ctx->mmu = malloc(sizeof(tee_mmu_info_t));
- if (ctx->mmu) {
- tee_mmu_info_t *p = ctx->mmu;
- p->table = 0;
- p->size = 0;
- } else {
+ ctx->mmu = calloc(1, sizeof(struct tee_mmu_info));
+ if (!ctx->mmu)
return TEE_ERROR_OUT_OF_MEMORY;
- }
return TEE_SUCCESS;
}
-static TEE_Result tee_mmu_map_io(struct tee_ta_ctx *ctx, uint32_t **buffer,
- const uint32_t vio, struct tee_ta_param *param)
-{
- uint32_t i;
- uint32_t vi_offset = vio;
- TEE_Result res = TEE_SUCCESS;
- uint32_t sect_prot;
- uint32_t sec;
- uint32_t section_start;
- uint32_t section_end;
-
- /* Map IO buffers in public memory */
- for (i = 0; i < 4; i++) {
- uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, i);
- TEE_Param *p = &param->params[i];
-
- if ((!((param_type == TEE_PARAM_TYPE_MEMREF_INPUT) ||
- (param_type == TEE_PARAM_TYPE_MEMREF_OUTPUT) ||
- (param_type == TEE_PARAM_TYPE_MEMREF_INOUT))) ||
- (p->memref.size == 0))
- continue;
-
- if ((ctx->flags & TA_FLAG_USER_MODE) ==
- TA_FLAG_USER_MODE) {
- sect_prot = TEE_MMU_SECTION_UDATA;
- } else {
- sect_prot = TEE_MMU_SECTION_KDATA;
- }
- /* Set NS bit if buffer is not secure */
- if (tee_pbuf_is_non_sec
- (p->memref.buffer, p->memref.size) == true) {
- sect_prot |= TEE_MMU_SECTION_NS;
- } else {
- /*
- * TODO
- * Security checks shouldn't be done here,
- * tee_ta_verify_param() should take care of that.
- */
-#if 0
- /*
- * If secure, check here if security level is
- * reached. This operation is likely to be
- * platform dependent.
- */
-
- /* case STTEE on Orly2: it has to be TEE external DDR */
- if (core_pbuf_is(CORE_MEM_EXTRAM,
- (tee_paddr_t) p->memref.buffer,
- p->memref.size) == false)
- return TEE_ERROR_SECURITY;
-#endif
- }
-
- /*
- * Configure inner and outer cache settings.
- */
- sect_prot &= ~TEE_MMU_SECTION_CACHEMASK;
- sect_prot |= TEE_MMU_SECTION_TEX(4);
- if (param->param_attr[i] & TEESMC_ATTR_CACHE_O_WRITE_THR)
- sect_prot |= TEE_MMU_SECTION_TEX(2);
- if (param->param_attr[i] & TEESMC_ATTR_CACHE_I_WRITE_BACK)
- sect_prot |= TEE_MMU_SECTION_TEX(1);
- if (param->param_attr[i] & TEESMC_ATTR_CACHE_O_WRITE_THR)
- sect_prot |= TEE_MMU_SECTION_C;
- if (param->param_attr[i] & TEESMC_ATTR_CACHE_O_WRITE_BACK)
- sect_prot |= TEE_MMU_SECTION_B;
-
- if (((sect_prot & TEE_MMU_SECTION_NS) == TEE_MMU_SECTION_NS) &&
- ((sect_prot & TEE_MMU_SECTION_XN) == 0)) {
- EMSG("invalid map config: nsec mem map as executable!");
- sect_prot |= TEE_MMU_SECTION_XN;
- }
-
- if (tee_mmu_is_mapped(ctx, (uint32_t) p->memref.buffer,
- p->memref.size, sect_prot)) {
- res = tee_mmu_user_pa2va(ctx, p->memref.buffer,
- &p->memref.buffer);
- if (res != TEE_SUCCESS)
- return res;
- } else {
- section_start = (uint32_t)p->memref.buffer >>
- SECTION_SHIFT;
- section_end = ((uint32_t)p->memref.buffer +
- p->memref.size - 1) >> SECTION_SHIFT;
- p->memref.buffer = (void *)((vi_offset << SECTION_SHIFT)
- + ((uint32_t)p->memref.buffer & SECTION_MASK));
- for (sec = section_start; sec <= section_end; sec++) {
- **buffer = (sec << SECTION_SHIFT) | sect_prot;
- (*buffer)++;
- }
- vi_offset += (section_end - section_start + 1);
- }
- }
-
- return res;
-}
-
/*
* tee_mmu_map - alloc and fill mmu mapping table for a user TA (uTA).
*
@@ -338,87 +257,108 @@ static TEE_Result tee_mmu_map_io(struct tee_ta_ctx *ctx, uint32_t **buffer,
TEE_Result tee_mmu_map(struct tee_ta_ctx *ctx, struct tee_ta_param *param)
{
TEE_Result res = TEE_SUCCESS;
- uint32_t py_offset;
- paddr_t p;
+ paddr_t pa;
uintptr_t smem;
- uint32_t *buffer;
- uint32_t section = 0, section_cnt = 0;
+ size_t n;
TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);
- ctx->mmu->size = tee_mm_get_size(ctx->mm_heap_stack) +
- tee_mm_get_size(ctx->mm) + tee_mmu_get_io_size(param) +
- TEE_DDR_VLOFFSET;
-
- if (ctx->mmu->size > TEE_MMU_UL1_NUM_ENTRIES) {
- res = TEE_ERROR_EXCESS_DATA;
- goto exit;
- }
-
- if (ctx->mmu->table)
- free(ctx->mmu->table);
-
- ctx->mmu->table = malloc(ctx->mmu->size * 4);
- if (ctx->mmu->table == NULL) {
- res = TEE_ERROR_OUT_OF_MEMORY;
+ res = tee_mmu_umap_init(ctx->mmu);
+ if (res != TEE_SUCCESS)
goto exit;
- }
- memset(ctx->mmu->table, 0, ctx->mmu->size * 4);
/*
* Map heap and stack
*/
smem = tee_mm_get_smem(ctx->mm_heap_stack);
- if (core_va2pa((void *)smem, &p)) {
+ if (core_va2pa((void *)smem, &pa)) {
res = TEE_ERROR_SECURITY;
goto exit;
}
-
- py_offset = (uint32_t)p >> SECTION_SHIFT;
-
- buffer = (uint32_t *)ctx->mmu->table + TEE_DDR_VLOFFSET;
- while (section < tee_mm_get_size(ctx->mm_heap_stack)) {
- *buffer++ = ((section++ + py_offset) << SECTION_SHIFT) |
- TEE_MMU_SECTION_UDATA | TEE_MMU_SECTION_OIWBWA;
- section_cnt++;
- }
+ tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_HEAP_STACK_IDX,
+ CORE_MMU_USER_CODE_SIZE,
+ pa, tee_mm_get_bytes(ctx->mm_heap_stack),
+ TEE_MMU_UDATA_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);
/*
* Map code
*/
smem = tee_mm_get_smem(ctx->mm);
- if (core_va2pa((void *)smem, &p)) {
+ if (core_va2pa((void *)smem, &pa)) {
res = TEE_ERROR_SECURITY;
goto exit;
}
+ tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_CODE_IDX,
+ CORE_MMU_USER_CODE_SIZE,
+ pa, tee_mm_get_bytes(ctx->mm),
+ TEE_MMU_UCODE_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);
+
+
+ for (n = 0; n < 4; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ TEE_Param *p = &param->params[n];
+ uint32_t attr = TEE_MMU_UDATA_ATTR;
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (p->memref.size == 0)
+ continue;
+
+ if (tee_pbuf_is_non_sec(p->memref.buffer, p->memref.size))
+ attr &= ~TEE_MATTR_SECURE;
+
+ if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_THR)
+ attr |= TEE_MATTR_I_WRITE_THR;
+ if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_BACK)
+ attr |= TEE_MATTR_I_WRITE_BACK;
+ if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_THR)
+ attr |= TEE_MATTR_O_WRITE_THR;
+ if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_BACK)
+ attr |= TEE_MATTR_O_WRITE_BACK;
+
+
+ res = tee_mmu_umap_add_param(ctx->mmu,
+ (paddr_t)p->memref.buffer, p->memref.size,
+ attr);
+ if (res != TEE_SUCCESS)
+ goto exit;
+ }
- py_offset = (uint32_t) p >> SECTION_SHIFT;
+ res = tee_mmu_umap_set_vas(ctx->mmu);
+ if (res != TEE_SUCCESS)
+ goto exit;
- section = 0;
- while (section < tee_mm_get_size(ctx->mm)) {
- *buffer++ = ((section++ + py_offset) << SECTION_SHIFT) |
- (TEE_MMU_SECTION_UCODE | TEE_MMU_SECTION_OIWBWA);
- section_cnt++;
+ for (n = 0; n < 4; n++) {
+ uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
+ TEE_Param *p = &param->params[n];
+
+ if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
+ param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
+ continue;
+ if (p->memref.size == 0)
+ continue;
+
+ res = tee_mmu_user_pa2va(ctx, p->memref.buffer,
+ &p->memref.buffer);
+ if (res != TEE_SUCCESS)
+ goto exit;
}
- ctx->mmu->ta_private_vmem_start = TEE_DDR_VLOFFSET << SECTION_SHIFT;
- ctx->mmu->ta_private_vmem_end = (TEE_DDR_VLOFFSET + section_cnt) <<
- SECTION_SHIFT;
+ ctx->mmu->ta_private_vmem_start = ctx->mmu->table[0].va;
- /*
- * Map io parameters
- */
- res =
- tee_mmu_map_io(ctx, &buffer,
- ((uint32_t) buffer - (uint32_t) ctx->mmu->table) / 4,
- param);
+ n = TEE_MMU_UMAP_MAX_ENTRIES;
+ do {
+ n--;
+ } while (n && !ctx->mmu->table[n].size);
+ ctx->mmu->ta_private_vmem_end = ctx->mmu->table[n].va +
+ ctx->mmu->table[n].size;
exit:
- if (res != TEE_SUCCESS) {
- free(ctx->mmu->table);
- ctx->mmu->table = NULL;
- ctx->mmu->size = 0;
- }
+ if (res != TEE_SUCCESS)
+ tee_mmu_umap_clear(ctx->mmu);
return res;
}
@@ -438,8 +378,7 @@ void tee_mmu_final(struct tee_ta_ctx *ctx)
ctx->context = 0;
if (ctx->mmu != NULL) {
- tee_mmu_info_t *p = ctx->mmu;
- free(p->table);
+ free(ctx->mmu->table);
free(ctx->mmu);
}
ctx->mmu = NULL;
@@ -466,53 +405,62 @@ bool tee_mmu_is_vbuf_intersect_ta_private(const struct tee_ta_ctx *ctx,
TEE_Result tee_mmu_kernel_to_user(const struct tee_ta_ctx *ctx,
const uint32_t kaddr, uint32_t *uaddr)
{
- uint32_t i = 0;
- uint32_t pa;
+ TEE_Result res;
+ void *ua;
+ paddr_t pa;
if (core_va2pa((void *)kaddr, &pa))
- return TEE_ERROR_SECURITY;
+ return TEE_ERROR_ACCESS_DENIED;
+
+ res = tee_mmu_user_pa2va(ctx, (void *)pa, &ua);
+ if (res == TEE_SUCCESS)
+ *uaddr = (uint32_t)ua;
+ return res;
+}
+
+static TEE_Result tee_mmu_user_va2pa_attr(const struct tee_ta_ctx *ctx,
+ void *ua, paddr_t *pa, uint32_t *attr)
+{
+ size_t n;
+
+ if (!ctx->mmu->table)
+ return TEE_ERROR_ACCESS_DENIED;
- while (i < ctx->mmu->size) {
- if ((pa & (~SECTION_MASK)) ==
- (ctx->mmu->table[i] & (~SECTION_MASK))) {
- *uaddr = (i << SECTION_SHIFT) + (kaddr & SECTION_MASK);
+ for (n = 0; n < ctx->mmu->size; n++) {
+ if (core_is_buffer_inside(ua, 1, ctx->mmu->table[n].va,
+ ctx->mmu->table[n].size)) {
+ *pa = (paddr_t)ua - ctx->mmu->table[n].va +
+ ctx->mmu->table[n].pa;
+ if (attr)
+ *attr = ctx->mmu->table[n].attr;
return TEE_SUCCESS;
}
- i++;
}
-
- return TEE_ERROR_ITEM_NOT_FOUND;
+ return TEE_ERROR_ACCESS_DENIED;
}
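With the table of tee_mmap_region entries, user VA-to-PA translation is a linear search plus an offset within the matching region, instead of the old direct L1 descriptor arithmetic. A standalone sketch of the same lookup over an invented two-entry table:

#include <stdint.h>
#include <stdio.h>

struct region { uint32_t va, pa, size; };

static int va2pa(const struct region *tbl, unsigned count,
		 uint32_t ua, uint32_t *pa)
{
	unsigned n;

	for (n = 0; n < count; n++) {
		if (ua >= tbl[n].va && ua < tbl[n].va + tbl[n].size) {
			*pa = ua - tbl[n].va + tbl[n].pa;
			return 0;
		}
	}
	return -1;	/* access denied in the real code */
}

int main(void)
{
	struct region tbl[] = {
		{ .va = 0x100000, .pa = 0x80100000, .size = 0x100000 },
		{ .va = 0x200000, .pa = 0x80200000, .size = 0x100000 },
	};
	uint32_t pa;

	if (!va2pa(tbl, 2, 0x200123, &pa))
		printf("ua 0x200123 -> pa 0x%08x\n", pa);
	return 0;
}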
TEE_Result tee_mmu_user_va2pa_helper(const struct tee_ta_ctx *ctx, void *ua,
paddr_t *pa)
{
- uint32_t n = (uint32_t) ua >> SECTION_SHIFT;
-
- if (n >= ctx->mmu->size)
- return TEE_ERROR_ACCESS_DENIED;
-
- *pa = (ctx->mmu->table[n] & ~SECTION_MASK) |
- ((uint32_t) ua & SECTION_MASK);
- return TEE_SUCCESS;
+ return tee_mmu_user_va2pa_attr(ctx, ua, pa, NULL);
}
/* */
TEE_Result tee_mmu_user_pa2va_helper(const struct tee_ta_ctx *ctx, void *pa,
void **va)
{
- uint32_t i = 0;
-
- while (i < ctx->mmu->size) {
- if (ctx->mmu->table[i] != 0 &&
- (uint32_t) pa >= (ctx->mmu->table[i] & ~SECTION_MASK) &&
- (uint32_t) pa < ((ctx->mmu->table[i] & ~SECTION_MASK)
- + (1 << SECTION_SHIFT))) {
- *va = (void *)((i << SECTION_SHIFT) +
- ((uint32_t) pa & SECTION_MASK));
+ size_t n;
+
+ if (!ctx->mmu->table)
+ return TEE_ERROR_ACCESS_DENIED;
+
+ for (n = 0; n < ctx->mmu->size; n++) {
+ if (core_is_buffer_inside(pa, 1, ctx->mmu->table[n].pa,
+ ctx->mmu->table[n].size)) {
+ *va = (void *)((paddr_t)pa - ctx->mmu->table[n].pa +
+ ctx->mmu->table[n].va);
return TEE_SUCCESS;
}
- i++;
}
return TEE_ERROR_ACCESS_DENIED;
}
@@ -522,30 +470,27 @@ TEE_Result tee_mmu_check_access_rights(struct tee_ta_ctx *ctx,
size_t len)
{
tee_uaddr_t a;
- uint32_t param_section;
+ size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
+ CORE_MMU_USER_PARAM_SIZE);
/* Address wrap */
- if (uaddr + len < uaddr)
+ if ((uaddr + len) < uaddr)
return TEE_ERROR_ACCESS_DENIED;
- param_section = TEE_DDR_VLOFFSET +
- tee_mm_get_size(ctx->mm_heap_stack) + tee_mm_get_size(ctx->mm);
+ for (a = uaddr; a < (uaddr + len); a += addr_incr) {
+ paddr_t pa;
+ uint32_t attr;
+ TEE_Result res;
- for (a = uaddr; a < (uaddr + len); a += SECTION_SIZE) {
- uint32_t n = a >> SECTION_SHIFT;
+ res = tee_mmu_user_va2pa_attr(ctx, (void *)a, &pa, &attr);
+ if (res != TEE_SUCCESS)
+ return res;
- if (n >= ctx->mmu->size)
- return TEE_ERROR_ACCESS_DENIED;
-
- if ((flags & TEE_MEMORY_ACCESS_ANY_OWNER) !=
- TEE_MEMORY_ACCESS_ANY_OWNER && n >= param_section) {
- paddr_t pa;
- TEE_Result res =
- tee_mmu_user_va2pa(ctx, (void *)a, &pa);
-
- if (res != TEE_SUCCESS)
- return res;
+ if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER)) {
/*
+ * Strict check that no one else (with equal or
+ * less trust) can access this memory.
+ *
* Parameters are shared with normal world if they
* aren't in secure DDR.
*
@@ -553,22 +498,20 @@ TEE_Result tee_mmu_check_access_rights(struct tee_ta_ctx *ctx,
* TA is invoking another TA and in that case there's
* new memory allocated privately for the paramters to
* this TA.
+ *
+ * If we do this check for an address on TA
+ * internal memory it's harmless as it will always
+ * be in secure DDR.
*/
if (!tee_mm_addr_is_within_range(&tee_mm_sec_ddr, pa))
return TEE_ERROR_ACCESS_DENIED;
+
}
- /* Check Access Protection from L1 entry */
- switch (TEE_MMU_L1_AP(ctx->mmu->table[n])) {
- case TEE_MMU_AP_USER_RO:
- if ((flags & TEE_MEMORY_ACCESS_WRITE) != 0)
- return TEE_ERROR_ACCESS_DENIED;
- break;
- case TEE_MMU_AP_USER_RW:
- break;
- default:
+ if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
+ return TEE_ERROR_ACCESS_DENIED;
+ if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
return TEE_ERROR_ACCESS_DENIED;
- }
}
return TEE_SUCCESS;
@@ -576,29 +519,23 @@ TEE_Result tee_mmu_check_access_rights(struct tee_ta_ctx *ctx,
void tee_mmu_set_ctx(struct tee_ta_ctx *ctx)
{
- if (ctx == NULL) {
- tee_mmu_switch(read_ttbr1(), 0);
+ if (!ctx) {
+ core_mmu_set_user_map(NULL);
} else {
- paddr_t base = core_mmu_get_ul1_ttb_pa();
- uint32_t *ul1 = (void *)core_mmu_get_ul1_ttb_va();
+ struct core_mmu_user_map map;
- /* copy uTA mapping at begning of mmu table */
- memcpy(ul1, ctx->mmu->table, ctx->mmu->size * 4);
- memset(ul1 + ctx->mmu->size, 0,
- (TEE_MMU_UL1_NUM_ENTRIES - ctx->mmu->size) * 4);
-
- /* Change ASID to new value */
- tee_mmu_switch(base | TEE_MMU_DEFAULT_ATTRS, ctx->context);
+ core_mmu_create_user_map(ctx->mmu, ctx->context, &map);
+ core_mmu_set_user_map(&map);
}
- core_tlb_maintenance(TLBINV_CURRENT_ASID, 0);
}
uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
{
TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);
+ TEE_ASSERT(ctx->mmu && ctx->mmu->table &&
+ ctx->mmu->size >= TEE_MMU_UMAP_CODE_IDX);
- return (TEE_DDR_VLOFFSET + tee_mm_get_size(ctx->mm_heap_stack)) <<
- SECTION_SHIFT;
+ return ctx->mmu->table[TEE_MMU_UMAP_CODE_IDX].va;
}
/*
@@ -611,63 +548,60 @@ uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
*/
void tee_mmu_kmap_init(void)
{
- tee_vaddr_t s = TEE_MMU_KMAP_START_VA;
- tee_vaddr_t e = TEE_MMU_KMAP_END_VA;
+ vaddr_t s = TEE_MMU_KMAP_START_VA;
+ vaddr_t e = TEE_MMU_KMAP_END_VA;
+ struct core_mmu_table_info tbl_info;
+
+ if (!core_mmu_find_table(s, UINT_MAX, &tbl_info))
+ panic();
- if (!tee_mm_init(&tee_mmu_virt_kmap, s, e, SECTION_SHIFT,
+ if (!tee_mm_init(&tee_mmu_virt_kmap, s, e, tbl_info.shift,
TEE_MM_POOL_NO_FLAGS)) {
DMSG("Failed to init kmap. Trap CPU!");
- TEE_ASSERT(0);
+ panic();
}
}
-static uint32_t *get_kmap_l1_base(void)
-{
- uint32_t *l1 = (uint32_t *)core_mmu_get_main_ttb_va();
-
- /* Return address where kmap entries start */
- return l1 + TEE_MMU_KMAP_OFFS;
-}
-
TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
{
tee_mm_entry_t *mm;
+ uint32_t attr;
+ struct core_mmu_table_info tbl_info;
+ uint32_t pa_s;
+ uint32_t pa_e;
size_t n;
- uint32_t *l1 = get_kmap_l1_base();
- uint32_t py_offset = (uint32_t) pa >> SECTION_SHIFT;
- uint32_t pa_s = ROUNDDOWN(pa, SECTION_SIZE);
- uint32_t pa_e = ROUNDUP(pa + len, SECTION_SIZE);
- uint32_t flags;
+ size_t offs;
+
+ if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
+ panic();
+
+ pa_s = ROUNDDOWN(pa, 1 << tbl_info.shift);
+ pa_e = ROUNDUP(pa + len, 1 << tbl_info.shift);
mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
- if (mm == NULL)
+ if (!mm)
return TEE_ERROR_OUT_OF_MEMORY;
- /*
- * check memory attributes (must either secure or unsecured)
- *
- * Warning: platform depedancy: was is cached and uncached.
- */
- flags = TEE_MMU_SECTION_KDATA;
- if (tee_pbuf_is_sec(pa, len) == true) {
- flags |= TEE_MMU_SECTION_OIWBWA;
- } else if (tee_pbuf_is_non_sec(pa, len) == true) {
- flags |= TEE_MMU_SECTION_NS;
+ attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL;
+ if (tee_pbuf_is_sec(pa, len)) {
+ attr |= TEE_MATTR_SECURE;
+ attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
+ } else if (tee_pbuf_is_non_sec(pa, len)) {
if (core_mmu_is_shm_cached())
- flags |= TEE_MMU_SECTION_OIWBWA;
- else
- flags |= TEE_MMU_SECTION_NOCACHE;
- } else {
+ attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
+ } else
return TEE_ERROR_GENERIC;
- }
+
+ offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
for (n = 0; n < tee_mm_get_size(mm); n++)
- l1[n + tee_mm_get_offset(mm)] =
- ((n + py_offset) << SECTION_SHIFT) | flags;
+ core_mmu_set_entry(&tbl_info, n + offs,
+ pa_s + (n << tbl_info.shift), attr);
core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
- *va = (void *)(tee_mm_get_smem(mm) + (pa & SECTION_MASK));
+ *va = (void *)(tee_mm_get_smem(mm) +
+ core_mmu_get_block_offset(&tbl_info, pa));
return TEE_SUCCESS;
}
@@ -675,15 +609,20 @@ void tee_mmu_kunmap(void *va, size_t len)
{
size_t n;
tee_mm_entry_t *mm;
- uint32_t *l1 = get_kmap_l1_base();
+ struct core_mmu_table_info tbl_info;
+ size_t offs;
+
+ if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
+ panic();
mm = tee_mm_find(&tee_mmu_virt_kmap, (uint32_t)va);
if (mm == NULL || len > tee_mm_get_bytes(mm))
return; /* Invalid range, not much to do */
/* Clear the mmu entries */
+ offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
for (n = 0; n < tee_mm_get_size(mm); n++)
- l1[n + tee_mm_get_offset(mm)] = 0;
+ core_mmu_set_entry(&tbl_info, n + offs, 0, 0);
core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
tee_mm_free(mm);
@@ -692,35 +631,69 @@ void tee_mmu_kunmap(void *va, size_t len)
TEE_Result tee_mmu_kmap_pa2va_helper(void *pa, void **va)
{
size_t n;
- uint32_t *l1 = (uint32_t *)core_mmu_get_main_ttb_va();
-
- for (n = TEE_MMU_KMAP_OFFS;
- n < (TEE_MMU_KMAP_OFFS + TEE_MMU_KMAP_NUM_ENTRIES); n++) {
- if (l1[n] != 0 &&
- (uint32_t)pa >= (l1[n] & ~SECTION_MASK) &&
- (uint32_t)pa < ((l1[n] & ~SECTION_MASK)
- + (1 << SECTION_SHIFT))) {
- *va = (void *)((n << SECTION_SHIFT) +
- ((uint32_t)pa & SECTION_MASK));
+ struct core_mmu_table_info tbl_info;
+ size_t shift;
+ paddr_t match_pa;
+
+ if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
+ panic();
+
+ shift = tbl_info.shift;
+ match_pa = ROUNDDOWN((paddr_t)pa, 1 << shift);
+
+ for (n = core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_START_VA);
+ n < core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_END_VA); n++) {
+ uint32_t attr;
+ paddr_t npa;
+
+ core_mmu_get_entry(&tbl_info, n, &npa, &attr);
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ continue;
+ assert(!(attr & TEE_MATTR_TABLE));
+
+ if (npa == match_pa) {
+ *va = (void *)(core_mmu_idx2va(&tbl_info, n) +
+ ((paddr_t)pa - match_pa));
return TEE_SUCCESS;
}
}
+
return TEE_ERROR_ACCESS_DENIED;
}
-TEE_Result tee_mmu_kmap_va2pa_helper(void *va, void **pa)
+static TEE_Result tee_mmu_kmap_va2pa_attr(void *va, void **pa, uint32_t *attr)
{
- uint32_t n = (uint32_t)va >> SECTION_SHIFT;
- uint32_t *l1 = (uint32_t *)core_mmu_get_main_ttb_va();
+ struct core_mmu_table_info tbl_info;
+ size_t block_offset;
+ size_t n;
+ paddr_t npa;
+ uint32_t nattr;
- if (n < TEE_MMU_KMAP_OFFS &&
- n >= (TEE_MMU_KMAP_OFFS + TEE_MMU_KMAP_NUM_ENTRIES))
+ if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
+ panic();
+
+ if (!tee_mm_addr_is_within_range(&tee_mmu_virt_kmap, (vaddr_t)va))
return TEE_ERROR_ACCESS_DENIED;
- *pa = (void *)((l1[n] & ~SECTION_MASK) | ((uint32_t)va & SECTION_MASK));
+
+ n = core_mmu_va2idx(&tbl_info, (vaddr_t)va);
+ core_mmu_get_entry(&tbl_info, n, &npa, &nattr);
+ if (!(nattr & TEE_MATTR_VALID_BLOCK))
+ return TEE_ERROR_ACCESS_DENIED;
+
+ block_offset = core_mmu_get_block_offset(&tbl_info, (vaddr_t)va);
+ *pa = (void *)(npa + block_offset);
+
+ if (attr)
+ *attr = nattr;
return TEE_SUCCESS;
}
+TEE_Result tee_mmu_kmap_va2pa_helper(void *va, void **pa)
+{
+ return tee_mmu_kmap_va2pa_attr(va, pa, NULL);
+}
+
bool tee_mmu_kmap_is_mapped(void *va, size_t len)
{
tee_vaddr_t a = (tee_vaddr_t)va;
@@ -735,12 +708,6 @@ bool tee_mmu_kmap_is_mapped(void *va, size_t len)
return true;
}
-bool tee_mmu_is_kernel_mapping(void)
-{
- /* TODO use ASID instead */
- return read_ttbr0() == read_ttbr1();
-}
-
void teecore_init_ta_ram(void)
{
unsigned int s, e;
@@ -749,8 +716,8 @@ void teecore_init_ta_ram(void)
* shared mem allcated from teecore */
core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
- TEE_ASSERT((s & (SECTION_SIZE - 1)) == 0);
- TEE_ASSERT((e & (SECTION_SIZE - 1)) == 0);
+ TEE_ASSERT((s & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);
+ TEE_ASSERT((e & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);
/* extra check: we could rely on core_mmu_get_mem_by_type() */
TEE_ASSERT(tee_vbuf_is_sec(s, e - s) == true);
@@ -758,7 +725,8 @@ void teecore_init_ta_ram(void)
/* remove previous config and init TA ddr memory pool */
tee_mm_final(&tee_mm_sec_ddr);
- tee_mm_init(&tee_mm_sec_ddr, s, e, SECTION_SHIFT, TEE_MM_POOL_NO_FLAGS);
+ tee_mm_init(&tee_mm_sec_ddr, s, e, CORE_MMU_USER_CODE_SHIFT,
+ TEE_MM_POOL_NO_FLAGS);
}
void teecore_init_pub_ram(void)
@@ -770,8 +738,8 @@ void teecore_init_pub_ram(void)
core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);
TEE_ASSERT(s < e);
- TEE_ASSERT((s & (SECTION_SIZE - 1)) == 0);
- TEE_ASSERT((e & (SECTION_SIZE - 1)) == 0);
+ TEE_ASSERT((s & SMALL_PAGE_MASK) == 0);
+ TEE_ASSERT((e & SMALL_PAGE_MASK) == 0);
/* extra check: we could rely on core_mmu_get_mem_by_type() */
TEE_ASSERT(tee_vbuf_is_non_sec(s, e - s) == true);
@@ -803,65 +771,43 @@ void tee_mmu_iounmap(void *va __unused)
/* iounmap(va); */
}
-static uint32_t section_to_teesmc_cache_attr(uint32_t sect)
+static uint32_t mattr_to_teesmc_cache_attr(uint32_t mattr)
{
-
- if (sect & TEE_MMU_SECTION_TEX(4)) {
- uint32_t attr = 0;
-
- if (sect & TEE_MMU_SECTION_TEX(2))
- attr |= TEESMC_ATTR_CACHE_O_WRITE_THR;
- if (sect & TEE_MMU_SECTION_TEX(1))
- attr |= TEESMC_ATTR_CACHE_I_WRITE_BACK;
- if (sect & TEE_MMU_SECTION_C)
- attr |= TEESMC_ATTR_CACHE_O_WRITE_THR;
- if (sect & TEE_MMU_SECTION_B)
- attr |= TEESMC_ATTR_CACHE_O_WRITE_BACK;
- assert(attr == TEESMC_ATTR_CACHE_DEFAULT);
- return attr;
- }
-
- switch (sect & TEE_MMU_SECTION_CACHEMASK) {
- /* outer and inner write-back */
- /* no write-allocate */
- case TEE_MMU_SECTION_TEX(0) | TEE_MMU_SECTION_B:
- /* write-allocate */
- case TEE_MMU_SECTION_TEX(1) | TEE_MMU_SECTION_B | TEE_MMU_SECTION_C:
- return TEESMC_ATTR_CACHE_I_WRITE_BACK |
- TEESMC_ATTR_CACHE_O_WRITE_BACK;
-
- /* outer and inner write-through */
- case TEE_MMU_SECTION_TEX(0) | TEE_MMU_SECTION_C:
- panic();
- return TEESMC_ATTR_CACHE_I_WRITE_THR |
- TEESMC_ATTR_CACHE_O_WRITE_THR;
-
- /* outer and inner no-cache */
- case TEE_MMU_SECTION_TEX(1):
- panic();
- return TEESMC_ATTR_CACHE_I_NONCACHE |
- TEESMC_ATTR_CACHE_O_NONCACHE;
- default:
- panic();
- }
+ uint32_t attr = 0;
+
+ if (mattr & TEE_MATTR_I_WRITE_THR)
+ attr |= TEESMC_ATTR_CACHE_I_WRITE_THR;
+ if (mattr & TEE_MATTR_I_WRITE_BACK)
+ attr |= TEESMC_ATTR_CACHE_I_WRITE_BACK;
+ if (mattr & TEE_MATTR_O_WRITE_THR)
+ attr |= TEESMC_ATTR_CACHE_O_WRITE_THR;
+ if (mattr & TEE_MATTR_O_WRITE_BACK)
+ attr |= TEESMC_ATTR_CACHE_O_WRITE_BACK;
+
+ return attr;
}
uint32_t tee_mmu_kmap_get_cache_attr(void *va)
{
- uint32_t n = (vaddr_t)va >> SECTION_SHIFT;
- uint32_t *l1 = (uint32_t *)core_mmu_get_main_ttb_va();
+ TEE_Result res;
+ void *pa;
+ uint32_t attr;
- assert(n >= TEE_MMU_KMAP_OFFS &&
- n < (TEE_MMU_KMAP_OFFS + TEE_MMU_KMAP_NUM_ENTRIES));
+ res = tee_mmu_kmap_va2pa_attr(va, &pa, &attr);
+ assert(res == TEE_SUCCESS);
- return section_to_teesmc_cache_attr(l1[n]);
+ return mattr_to_teesmc_cache_attr(attr);
}
+
uint32_t tee_mmu_user_get_cache_attr(struct tee_ta_ctx *ctx, void *va)
{
- uint32_t n = (vaddr_t)va >> SECTION_SHIFT;
+ TEE_Result res;
+ paddr_t pa;
+ uint32_t attr;
- assert(n < ctx->mmu->size);
+ res = tee_mmu_user_va2pa_attr(ctx, va, &pa, &attr);
+ assert(res == TEE_SUCCESS);
- return section_to_teesmc_cache_attr(ctx->mmu->table[n]);
+ return mattr_to_teesmc_cache_attr(attr);
}
diff --git a/core/arch/arm32/mm/tee_mmu_unpg.c b/core/arch/arm32/mm/tee_mmu_unpg.c
deleted file mode 100644
index db75302..0000000
--- a/core/arch/arm32/mm/tee_mmu_unpg.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2014, STMicroelectronics International N.V.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arm32.h>
-#include <mm/tee_mmu_unpg.h>
-#include <mm/tee_mmu_defs.h>
-#include <mm/core_mmu.h>
-
-void tee_mmu_get_map(struct tee_mmu_mapping *map)
-{
- if (map == NULL)
- return;
-
- map->ttbr0 = read_ttbr0();
- map->ctxid = read_contextidr();
-}
-
-void tee_mmu_set_map(struct tee_mmu_mapping *map)
-{
- if (map == NULL)
- tee_mmu_switch(read_ttbr1(), 0);
- else
- tee_mmu_switch(map->ttbr0, map->ctxid);
-
- core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
-}
-
-void tee_mmu_switch(uint32_t ttbr0_base, uint32_t ctxid)
-{
- uint32_t cpsr = read_cpsr();
-
- /* Disable interrupts */
- write_cpsr(cpsr | CPSR_FIA);
-
- /*
- * Update the reserved Context ID and TTBR0
- */
-
- dsb(); /* ARM erratum 754322 */
- write_contextidr(0);
- isb();
-
- write_ttbr0(ttbr0_base);
- isb();
-
- write_contextidr(ctxid & 0xff);
- isb();
-
- /* Restore interrupts */
- write_cpsr(cpsr);
-}
diff --git a/core/arch/arm32/mm/tee_pager.c b/core/arch/arm32/mm/tee_pager.c
index 663c668..1bea692 100644
--- a/core/arch/arm32/mm/tee_pager.c
+++ b/core/arch/arm32/mm/tee_pager.c
@@ -36,9 +36,9 @@
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
+#include <kernel/tee_misc.h>
#include <mm/tee_pager.h>
-#include <mm/tee_mm_unpg.h>
-#include <mm/tee_mmu_unpg.h>
+#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm32.h>
@@ -47,19 +47,6 @@
#include <utee_defines.h>
#include <trace.h>
-/* Interesting aborts for TEE pager */
-#define TEE_PAGER_FSR_FS_MASK 0x040F
- /* DFSR[10,3:0] 0b00001 */
-#define TEE_PAGER_FSR_FS_ALIGNMENT_FAULT 0x0001
- /* DFSR[10,3:0] 0b00010 */
-#define TEE_PAGER_FSR_FS_DEBUG_EVENT 0x0002
- /* DFSR[10,3:0] 0b10110 */
-#define TEE_PAGER_FSR_FS_ASYNC_EXTERNAL_ABORT 0x0406
- /* DFSR[10,3:0] 0b01101 */
-#define TEE_PAGER_FSR_FS_PERMISSION_FAULT_SECTION 0x000D
- /* DFSR[10,3:0] 0b01111 */
-#define TEE_PAGER_FSR_FS_PERMISSION_FAULT_PAGE 0x000F
-
struct tee_pager_abort_info {
uint32_t abort_type;
uint32_t fsr;
@@ -87,17 +74,16 @@ static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
/*
- * Represents a physical page used for paging.
+ * struct tee_pager_pmem - Represents a physical page used for paging.
*
- * mmu_entry points to currently used MMU entry. This actual physical
- * address is stored here so even if the page isn't mapped, there's allways
- * an MMU entry holding the physical address.
+ * @pgidx index of the entry in tbl_info. The physical address is
+ * kept in that entry so even if the page isn't mapped, there's
+ * always an MMU entry holding the physical address.
*
- * session_handle is a pointer returned by tee_ta_load_page() and later
- * used when saving rw-data.
+ * @area a pointer to the pager area
*/
struct tee_pager_pmem {
- uint32_t *mmu_entry;
+ unsigned pgidx;
struct tee_pager_area *area;
TAILQ_ENTRY(tee_pager_pmem) link;
};
@@ -118,15 +104,16 @@ static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
static size_t tee_pager_npages;
/*
- * Pointer to L2 translation table used to map the virtual memory range
+ * Reference to translation table used to map the virtual memory range
* covered by the pager.
*/
-static uint32_t *l2_table;
+static struct core_mmu_table_info tbl_info;
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
const void *hashes)
{
struct tee_pager_area *area;
+ size_t tbl_va_size;
DMSG("0x%x - 0x%x : flags 0x%x, store %p, hashes %p",
tee_mm_get_smem(mm),
@@ -140,9 +127,33 @@ bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
else
panic();
+ if (!tbl_info.num_entries) {
+ if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
+ &tbl_info))
+ return false;
+ if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
+ DMSG("Unsupported page size in translation table %u",
+ 1 << tbl_info.shift);
+ return false;
+ }
+ }
+
+ tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
+ if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
+ tbl_info.va_base, tbl_va_size)) {
+ DMSG("area 0x%x len 0x%x doesn't fit it translation table 0x%x len 0x%x",
+ tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
+ tbl_info.va_base, tbl_va_size);
+ return false;
+ }
+
+
+
area = malloc(sizeof(struct tee_pager_area));
if (!area)
return false;
+
+
area->mm = mm;
area->flags = flags;
area->store = store;
@@ -165,27 +176,17 @@ static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
return NULL;
}
-void tee_pager_init(void *xlat_table)
+static uint32_t get_area_mattr(struct tee_pager_area *area __unused)
{
- l2_table = xlat_table;
-}
+ uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
+ TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE;
+ attr |= TEE_MATTR_PRWX;
-/* Get L2 translation entry address from virtual address */
-static uint32_t *tee_pager_va_to_xe(vaddr_t va)
-{
- vaddr_t page_va = va & ~SMALL_PAGE_MASK;
- size_t mmu_entry_offset = (page_va - tee_mm_vcore.lo) >>
- SMALL_PAGE_SHIFT;
-
- return l2_table + mmu_entry_offset;
+ return attr;
}
-/* Get virtual address of page from translation entry */
-static vaddr_t tee_pager_xe_to_va(uint32_t *xe)
-{
- return (vaddr_t)(xe - l2_table) * SMALL_PAGE_SIZE + tee_mm_vcore.lo;
-}
+
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
{
@@ -224,11 +225,18 @@ static bool tee_pager_unhide_page(vaddr_t page_va)
struct tee_pager_pmem *pmem;
TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
- if (((*pmem->mmu_entry & SMALL_PAGE_MASK) ==
- TEE_PAGER_PAGE_UNLOADED) &&
- page_va == tee_pager_xe_to_va(pmem->mmu_entry)) {
+ paddr_t pa;
+ uint32_t attr;
+
+ core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
+
+ if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
+ continue;
+
+ if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
/* page is hidden, show and move to back */
- *pmem->mmu_entry |= TEE_MMU_L2SP_PRIV_ACC;
+ core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
+ get_area_mattr(pmem->area));
TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
@@ -248,10 +256,19 @@ static void tee_pager_hide_pages(void)
size_t n = 0;
TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
+ paddr_t pa;
+ uint32_t attr;
+
if (n >= TEE_PAGER_NHIDE)
break;
n++;
- *pmem->mmu_entry = TEE_MMU_L2SP_CLEAR_ACC(*pmem->mmu_entry);
+ core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
+ continue;
+
+ core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
+ TEE_MATTR_HIDDEN_BLOCK);
+
}
/* TODO only invalidate entries touched above */
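To "hide" a resident page the pager now writes TEE_MATTR_HIDDEN_BLOCK, which the v7 backend turns into an invalid descriptor that still carries the physical address plus the HIDDEN_DESC marker bit; the next access faults, and tee_pager_unhide_page() can restore the full attributes without reloading the page contents. A simplified, self-contained illustration of that encoding, reusing the INVALID_DESC/HIDDEN_DESC values defined in core_mmu_v7.c above:

#include <assert.h>
#include <stdint.h>

/* From core_mmu_v7.c above: an invalid descriptor reused as a marker */
#define INVALID_DESC	0x0
#define HIDDEN_DESC	0x4
#define SMALL_PAGE_MASK	0xfff

int main(void)
{
	uint32_t pa = 0x80345000;	/* example page-aligned PA */
	/* Hide: keep the PA but make the descriptor invalid and marked */
	uint32_t desc = (pa & ~SMALL_PAGE_MASK) | INVALID_DESC | HIDDEN_DESC;

	assert((desc & 0x3) == 0);			/* MMU sees an invalid entry */
	assert(desc & HIDDEN_DESC);			/* pager knows it is hidden */
	assert((desc & ~SMALL_PAGE_MASK) == pa);	/* PA still recoverable */
	return 0;
}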
@@ -321,7 +338,6 @@ static void tee_pager_print_error_abort(
}
-
static enum tee_pager_fault_type tee_pager_get_fault_type(
struct tee_pager_abort_info *ai)
{
@@ -346,28 +362,33 @@ static enum tee_pager_fault_type tee_pager_get_fault_type(
panic();
}
- switch (ai->fsr & TEE_PAGER_FSR_FS_MASK) {
- /* Only possible for data abort */
- case TEE_PAGER_FSR_FS_ALIGNMENT_FAULT:
+ switch (core_mmu_get_fault_type(ai->fsr)) {
+ case CORE_MMU_FAULT_ALIGNMENT:
tee_pager_print_error_abort(ai);
EMSG("[TEE_PAGER] alignement fault! (trap CPU)");
panic();
+ break;
- case TEE_PAGER_FSR_FS_DEBUG_EVENT:
+ case CORE_MMU_FAULT_DEBUG_EVENT:
tee_pager_print_abort(ai);
DMSG("[TEE_PAGER] Ignoring debug event!");
return TEE_PAGER_FAULT_TYPE_IGNORE;
- /* Only possible for data abort */
- case TEE_PAGER_FSR_FS_ASYNC_EXTERNAL_ABORT:
+ case CORE_MMU_FAULT_TRANSLATION:
+ case CORE_MMU_FAULT_PERMISSION:
+ return TEE_PAGER_FAULT_TYPE_PAGABLE;
+
+ case CORE_MMU_FAULT_ASYNC_EXTERNAL:
tee_pager_print_abort(ai);
DMSG("[TEE_PAGER] Ignoring async external abort!");
return TEE_PAGER_FAULT_TYPE_IGNORE;
+ case CORE_MMU_FAULT_OTHER:
default:
- break;
+ tee_pager_print_abort(ai);
+ DMSG("[TEE_PAGER] Unhandled fault!");
+ return TEE_PAGER_FAULT_TYPE_IGNORE;
}
- return TEE_PAGER_FAULT_TYPE_PAGABLE;
}
@@ -378,25 +399,27 @@ static struct tee_pager_pmem *tee_pager_get_page(
struct tee_pager_abort_info *ai,
struct tee_pager_area *area)
{
- vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
-
- uint32_t pa;
- uint32_t *mmu_entry = tee_pager_va_to_xe(page_va);
+ unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
struct tee_pager_pmem *pmem;
+ paddr_t pa;
+ uint32_t attr;
+
+ core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
+
+ assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));
- if (*mmu_entry != 0) {
+ if (attr & TEE_MATTR_PHYS_BLOCK) {
/*
* There's a pmem entry using this mmu entry, let's use
* that entry in the new mapping.
*/
TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
- if (pmem->mmu_entry == mmu_entry)
+ if (pmem->pgidx == pgidx)
break;
}
if (!pmem) {
tee_pager_print_abort(ai);
- DMSG("Couldn't find pmem for mmu_entry %p",
- (void *)mmu_entry);
+ DMSG("Couldn't find pmem for pgidx %u", pgidx);
panic();
}
} else {
@@ -406,14 +429,13 @@ static struct tee_pager_pmem *tee_pager_get_page(
DMSG("No pmem entries");
panic();
}
+ core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
+ core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
}
- /* add page to mmu table, small pages [31:12]PA */
- pa = *pmem->mmu_entry & ~SMALL_PAGE_MASK;
-
- *pmem->mmu_entry = 0;
- pmem->mmu_entry = mmu_entry;
- *pmem->mmu_entry = pa | TEE_PAGER_PAGE_LOADED;
+ pmem->pgidx = pgidx;
+ pmem->area = area;
+ core_mmu_set_entry(&tbl_info, pgidx, pa, get_area_mattr(area));
TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
if (area->store) {
@@ -430,7 +452,7 @@ static struct tee_pager_pmem *tee_pager_get_page(
core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
#ifdef TEE_PAGER_DEBUG_PRINT
- DMSG("Mapped 0x%x -> 0x%x", page_va, pa);
+ DMSG("Mapped 0x%x -> 0x%x", core_mmu_idx2va(&tbl_info, pgidx), pa);
#endif
return pmem;
@@ -455,12 +477,9 @@ static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
if (!tee_pager_unhide_page(page_va)) {
/* the page wasn't hidden */
- struct tee_pager_pmem *pmem;
-
- pmem = tee_pager_get_page(ai, area);
+ tee_pager_get_page(ai, area);
/* load page code & data */
- pmem->area = area;
tee_pager_load_page(area, page_va);
/* TODO remap readonly if TEE_PAGER_AREA_RO */
tee_pager_verify_page(area, page_va);
@@ -558,10 +577,14 @@ void tee_pager_add_pages(tee_vaddr_t vaddr, size_t npages, bool unmap)
for (n = 0; n < npages; n++) {
struct tee_pager_pmem *pmem;
tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
- uint32_t *mmu_entry = tee_pager_va_to_xe(va);
+ unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
+ paddr_t pa;
+ uint32_t attr;
- /* Ignore unmapped entries */
- if (*mmu_entry == 0)
+ core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
+
+ /* Ignore unmapped pages/blocks */
+ if (!(attr & TEE_MATTR_VALID_BLOCK))
continue;
pmem = malloc(sizeof(struct tee_pager_pmem));
@@ -570,20 +593,18 @@ void tee_pager_add_pages(tee_vaddr_t vaddr, size_t npages, bool unmap)
panic();
}
- pmem->mmu_entry = (uint32_t *)mmu_entry;
+ pmem->pgidx = pgidx;
pmem->area = NULL;
if (unmap) {
/*
- * Set to TEE_PAGER_NO_ACCESS_ATTRIBUTES and not
- * TEE_PAGER_PAGE_UNLOADED since pager would
- * misstake it for a hidden page in case the
- * virtual address was reused before the physical
- * page was used for another virtual page.
+ * Note that we're making the page inaccessible
+ * with the TEE_MATTR_PHYS_BLOCK attribute to
+ * indicate that the descriptor still holds a valid
+ * physical address of a page.
*/
- *mmu_entry = (*mmu_entry & ~SMALL_PAGE_MASK) |
- TEE_PAGER_NO_ACCESS_ATTRIBUTES;
-
+ core_mmu_set_entry(&tbl_info, pgidx, pa,
+ TEE_MATTR_PHYS_BLOCK);
}
tee_pager_npages++;
TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
diff --git a/core/arch/arm32/plat-stm/core_bootcfg.c b/core/arch/arm32/plat-stm/core_bootcfg.c
index aaac237..2904a01 100644
--- a/core/arch/arm32/plat-stm/core_bootcfg.c
+++ b/core/arch/arm32/plat-stm/core_bootcfg.c
@@ -137,19 +137,21 @@ static struct map_area bootcfg_memory_map[] = {
{ /* CPU mem map HW registers */
.type = MEM_AREA_IO_NSEC,
- .pa = CPU_IOMEM_BASE & ~SECTION_MASK, .size = SECTION_SIZE,
+ .pa = CPU_IOMEM_BASE & ~CORE_MMU_DEVICE_MASK,
+ .size = CORE_MMU_DEVICE_SIZE,
.device = true, .secure = true, .rw = true,
},
{ /* ASC IP for UART HW tracing */
.type = MEM_AREA_IO_NSEC,
- .pa = UART_CONSOLE_BASE & ~SECTION_MASK, .size = SECTION_SIZE,
+ .pa = UART_CONSOLE_BASE & ~CORE_MMU_DEVICE_MASK,
+ .size = CORE_MMU_DEVICE_SIZE,
.device = true, .secure = false, .rw = true,
},
{ /* RNG IP for some random support */
.type = MEM_AREA_IO_SEC,
- .pa = RNG_BASE & ~SECTION_MASK, .size = SECTION_SIZE,
+ .pa = RNG_BASE & ~CORE_MMU_DEVICE_MASK, .size = CORE_MMU_DEVICE_SIZE,
.device = true, .secure = true, .rw = true,
},
diff --git a/core/arch/arm32/plat-stm/main.c b/core/arch/arm32/plat-stm/main.c
index e2d1bca..bcd4d62 100644
--- a/core/arch/arm32/plat-stm/main.c
+++ b/core/arch/arm32/plat-stm/main.c
@@ -369,20 +369,19 @@ void console_flush_tx_fifo(void)
extern uint8_t *SEC_MMU_L2_TTB_FLD;
extern uint8_t *SEC_MMU_L2_TTB_END;
-void *core_mmu_alloc_l2(struct map_area *map)
+void *core_mmu_alloc_l2(struct tee_mmap_region *mm)
{
/* Can't have this in .bss since it's not initialized yet */
static size_t l2_offs __attribute__((section(".data")));
const size_t l2_size = SEC_MMU_L2_TTB_END - SEC_MMU_L2_TTB_FLD;
+ const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE;
size_t l2_va_space = ((l2_size - l2_offs) / TEE_MMU_L2_SIZE) *
- SECTION_SIZE;
+ l2_va_size;
if (l2_offs)
return NULL;
- if (map->type != MEM_AREA_TEE_RAM)
+ if (mm->size > l2_va_space)
return NULL;
- if (map->size > l2_va_space)
- return NULL;
- l2_offs += ROUNDUP(map->size, SECTION_SIZE) / SECTION_SIZE;
+ l2_offs += ROUNDUP(mm->size, l2_va_size) / l2_va_size;
return SEC_MMU_L2_TTB_FLD;
}
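core_mmu_alloc_l2() now measures the remaining L2 pool by the virtual span one L2 table actually covers (TEE_MMU_L2_NUM_ENTRIES small pages) instead of assuming SECTION_SIZE. A sketch of the arithmetic, where the 256-entry / 1 MiB figures are assumptions for illustration rather than values stated by this patch (the real constants come from the tee_mmu_defs.h header):

	/* Illustrative constants and macro, not taken from the patch. */
	#define SMALL_PAGE_SIZE		0x1000			/* 4 KiB */
	#define TEE_MMU_L2_NUM_ENTRIES	256			/* assumed */
	#define ROUNDUP(v, size)	(((v) + (size) - 1) & ~((size) - 1))

	static size_t l2_tables_for(size_t region_size)
	{
		const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE; /* 1 MiB */

		return ROUNDUP(region_size, l2_va_size) / l2_va_size;
	}

With the assumed geometry a 3 MiB region consumes three L2 tables, which is the quantity l2_offs is advanced by above.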
diff --git a/core/arch/arm32/plat-stm/platform_config.h b/core/arch/arm32/plat-stm/platform_config.h
index 5e862ff..31e724c 100644
--- a/core/arch/arm32/plat-stm/platform_config.h
+++ b/core/arch/arm32/plat-stm/platform_config.h
@@ -48,6 +48,9 @@
#ifdef CFG_WITH_PAGER
#error "Pager not supported for platform STM"
#endif
+#ifdef CFG_WITH_LPAE
+#error "LPAE not supported for platform STM"
+#endif
/*
* TEE/TZ RAM layout:
@@ -82,7 +85,7 @@
#define TZDRAM_SIZE (CFG_TEE_RAM_SIZE + CFG_TA_RAM_SIZE)
#define CFG_SHMEM_START (TZDRAM_BASE + TZDRAM_SIZE)
-#define CFG_SHMEM_SIZE (SECTION_SIZE)
+#define CFG_SHMEM_SIZE CFG_PUB_RAM_SIZE
/* define the memory areas (TEE_RAM must start at reserved DDR start addr */
#define CFG_TEE_RAM_START (TZDRAM_BASE)
diff --git a/core/arch/arm32/plat-stm/tz_sinit.S b/core/arch/arm32/plat-stm/tz_sinit.S
index 4df7356..6d7e8ee 100644
--- a/core/arch/arm32/plat-stm/tz_sinit.S
+++ b/core/arch/arm32/plat-stm/tz_sinit.S
@@ -201,7 +201,7 @@ _BootCPU0:
bl arm_cl2_invbyway
bl arm_cl2_enable
- bl core_init_mmu_tables
+ bl core_init_mmu_map
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
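The boot paths now call core_init_mmu_map() in place of core_init_mmu_tables(), so a single entry point builds the memory map for whichever table format (v7 or LPAE) is compiled in. From the call sites above (plain bl, no arguments, no return value used) the C-side prototypes are assumed to be:

	/* Prototypes inferred from the assembly call sites; illustrative only. */
	void core_init_mmu_map(void);	/* build the translation tables from the boot map */
	void core_init_mmu_regs(void);	/* program the MMU registers before cpu_mmu_enable() */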
diff --git a/core/arch/arm32/plat-sunxi/core_bootcfg.c b/core/arch/arm32/plat-sunxi/core_bootcfg.c
index b79bb6e..594be35 100644
--- a/core/arch/arm32/plat-sunxi/core_bootcfg.c
+++ b/core/arch/arm32/plat-sunxi/core_bootcfg.c
@@ -173,29 +173,33 @@ static struct map_area bootcfg_memory[] = {
{ /* teecore public RAM - NonSecure, non-exec. */
.type = MEM_AREA_NSEC_SHM,
- .pa = CFG_PUB_RAM_START, .size = SECTION_SIZE,
+ .pa = CFG_PUB_RAM_START, .size = CFG_PUB_RAM_SIZE,
.cached = true, .secure = false, .rw = true, .exec = false,
},
-
+
{ /* AHB0 devices */
.type = MEM_AREA_IO_NSEC,
- .pa = 0x01400000 & ~SECTION_MASK, .size = 0x00900000,
+ .pa = 0x01400000 & ~CORE_MMU_DEVICE_MASK,
+ .size = ROUNDUP(0x00900000, CORE_MMU_DEVICE_SIZE),
.device = true, .secure = true, .rw = true,
},
{ /* AHB1 devices */
.type = MEM_AREA_IO_NSEC,
- .pa = (0x00800000) & ~SECTION_MASK, .size = 0x00300000,
+ .pa = (0x00800000) & ~CORE_MMU_DEVICE_MASK,
+ .size = ROUNDUP(0x00300000, CORE_MMU_DEVICE_SIZE),
.device = true, .secure = true, .rw = true,
},
{ /* AHB2 devices */
.type = MEM_AREA_IO_NSEC,
- .pa = (0x03000000) & ~SECTION_MASK, .size = 0x01000000,
+ .pa = (0x03000000) & ~CORE_MMU_DEVICE_MASK,
+ .size = ROUNDUP(0x01000000, CORE_MMU_DEVICE_SIZE),
.device = true, .secure = true, .rw = true,
},
{ /* AHBS devices */
.type = MEM_AREA_IO_NSEC,
- .pa = (0x06000000) & ~SECTION_MASK, .size = 0x02200000,
+ .pa = (0x06000000) & ~CORE_MMU_DEVICE_MASK,
+ .size = ROUNDUP(0x02200000, CORE_MMU_DEVICE_SIZE),
.device = true, .secure = true, .rw = true,
},
diff --git a/core/arch/arm32/plat-sunxi/entry.S b/core/arch/arm32/plat-sunxi/entry.S
index 882837c..3d3d67d 100644
--- a/core/arch/arm32/plat-sunxi/entry.S
+++ b/core/arch/arm32/plat-sunxi/entry.S
@@ -73,7 +73,7 @@ LOCAL_FUNC reset , :
orr r0, r0, #ACTLR_SMP
write_actlr r0
- bl core_init_mmu_tables
+ bl core_init_mmu_map
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
diff --git a/core/arch/arm32/plat-sunxi/main.c b/core/arch/arm32/plat-sunxi/main.c
index 5a4f783..612050a 100644
--- a/core/arch/arm32/plat-sunxi/main.c
+++ b/core/arch/arm32/plat-sunxi/main.c
@@ -395,19 +395,18 @@ vaddr_t core_mmu_get_ul1_ttb_va(void)
return (vaddr_t)main_mmu_ul1_ttb[thread_get_id()];
}
-void *core_mmu_alloc_l2(struct map_area *map)
+void *core_mmu_alloc_l2(struct tee_mmap_region *mm)
{
/* Can't have this in .bss since it's not initialized yet */
static size_t l2_offs __attribute__((section(".data")));
+ const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE;
size_t l2_va_space = ((sizeof(main_mmu_l2_ttb) - l2_offs) /
- TEE_MMU_L2_SIZE) * SECTION_SIZE;
+ TEE_MMU_L2_SIZE) * l2_va_size;
if (l2_offs)
return NULL;
- if (map->type != MEM_AREA_TEE_RAM)
+ if (mm->size > l2_va_space)
return NULL;
- if (map->size > l2_va_space)
- return NULL;
- l2_offs += ROUNDUP(map->size, SECTION_SIZE) / SECTION_SIZE;
+ l2_offs += ROUNDUP(mm->size, l2_va_size) / l2_va_size;
return main_mmu_l2_ttb;
}
diff --git a/core/arch/arm32/plat-sunxi/platform_config.h b/core/arch/arm32/plat-sunxi/platform_config.h
index ae98a42..ffb286f 100644
--- a/core/arch/arm32/plat-sunxi/platform_config.h
+++ b/core/arch/arm32/plat-sunxi/platform_config.h
@@ -33,6 +33,13 @@
#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
#define PLATFORM_LINKER_ARCH arm
+#ifdef CFG_WITH_PAGER
+#error "Pager not supported for platform sunxi"
+#endif
+#ifdef CFG_WITH_LPAE
+#error "LPAE not supported for platform sunxi"
+#endif
+
#define GIC_BASE 0x01c40000
#define GICC_OFFSET 0x2000
#define GICD_OFFSET 0x1000
diff --git a/core/arch/arm32/plat-vexpress/conf.mk b/core/arch/arm32/plat-vexpress/conf.mk
index 890d373..94e1703 100644
--- a/core/arch/arm32/plat-vexpress/conf.mk
+++ b/core/arch/arm32/plat-vexpress/conf.mk
@@ -24,6 +24,7 @@ libtomcrypt_with_optimize_size := y
WITH_SECURE_TIME_SOURCE_CNTPCT := y
WITH_UART_DRV := y
WITH_GIC_DRV := y
+CFG_HWSUPP_MEM_PERM_PXN := y
ifeq ($(PLATFORM_FLAVOR),juno)
CFG_CRYPTO_SHA256_ARM32_CE ?= y
diff --git a/core/arch/arm32/plat-vexpress/core_bootcfg.c b/core/arch/arm32/plat-vexpress/core_bootcfg.c
index 8723d6a..e0907e5 100644
--- a/core/arch/arm32/plat-vexpress/core_bootcfg.c
+++ b/core/arch/arm32/plat-vexpress/core_bootcfg.c
@@ -120,14 +120,6 @@ static bool pbuf_is(enum buf_is_attr attr, paddr_t paddr, size_t size)
/* platform specific memory layout provided to teecore */
static struct map_area bootcfg_memory_map[] = {
-#ifdef ROM_BASE
- {
- .type = MEM_AREA_IO_SEC,
- .pa = ROM_BASE, .size = ROM_SIZE,
- .cached = true, .secure = true, .rw = false, .exec = false,
- },
-#endif
-
{ /* teecore execution RAM */
.type = MEM_AREA_TEE_RAM,
.pa = CFG_TEE_RAM_START, .size = CFG_TEE_RAM_PH_SIZE,
@@ -149,37 +141,30 @@ static struct map_area bootcfg_memory_map[] = {
.cached = true, .secure = false, .rw = true, .exec = false,
},
- { /* UART */
+ {
.type = MEM_AREA_IO_NSEC,
- .pa = CONSOLE_UART_BASE & ~SECTION_MASK, .size = SECTION_SIZE,
+ .pa = DEVICE0_BASE, .size = DEVICE0_SIZE,
.device = true, .secure = true, .rw = true,
},
-
- { /* GIC */
+ {
.type = MEM_AREA_IO_SEC,
- .pa = GIC_BASE & ~SECTION_MASK, .size = SECTION_SIZE,
+ .pa = DEVICE1_BASE, .size = DEVICE1_SIZE,
.device = true, .secure = true, .rw = true,
},
-
-#if PLATFORM_FLAVOR_IS(fvp)
- { /*
- * FVP's GIC Distributor is beyond SECTION_SIZE,
- * and need to be mapped seperately.
- */
+#ifdef DEVICE2_BASE
+ {
.type = MEM_AREA_IO_SEC,
- .pa = (GIC_BASE + GICD_OFFSET) & ~SECTION_MASK, .size = SECTION_SIZE,
+ .pa = DEVICE2_BASE, .size = DEVICE2_SIZE,
.device = true, .secure = true, .rw = true,
},
#endif
-
-#ifdef CFG_PCSC_PASSTHRU_READER_DRV
- { /* PCSC passthru reader */
+#ifdef DEVICE3_BASE
+ {
.type = MEM_AREA_IO_SEC,
- .pa = PCSC_BASE & ~SECTION_MASK, .size = SECTION_SIZE,
+ .pa = DEVICE3_BASE, .size = DEVICE3_SIZE,
.device = true, .secure = true, .rw = true,
},
#endif
-
{.type = MEM_AREA_NOTYPE}
};
diff --git a/core/arch/arm32/plat-vexpress/entry.S b/core/arch/arm32/plat-vexpress/entry.S
index cfe60c2..0998148 100644
--- a/core/arch/arm32/plat-vexpress/entry.S
+++ b/core/arch/arm32/plat-vexpress/entry.S
@@ -128,7 +128,7 @@ copy_init:
ldr r2, =CONSOLE_BAUDRATE
bl uart_init
- bl core_init_mmu_tables
+ bl core_init_mmu_map
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
diff --git a/core/arch/arm32/plat-vexpress/main.c b/core/arch/arm32/plat-vexpress/main.c
index c783ecd..f1c693c 100644
--- a/core/arch/arm32/plat-vexpress/main.c
+++ b/core/arch/arm32/plat-vexpress/main.c
@@ -120,6 +120,7 @@ const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
#endif
};
+#ifndef CFG_WITH_LPAE
/* Main MMU L1 table for teecore */
static uint32_t main_mmu_l1_ttb[TEE_MMU_L1_NUM_ENTRIES]
__attribute__((section(".nozi.mmu.l1"),
@@ -132,6 +133,7 @@ static uint32_t main_mmu_l2_ttb[TEE_MMU_L2_NUM_ENTRIES]
static uint32_t main_mmu_ul1_ttb[NUM_THREADS][TEE_MMU_UL1_NUM_ENTRIES]
__attribute__((section(".nozi.mmu.ul1"),
aligned(TEE_MMU_UL1_ALIGNMENT)));
+#endif
extern uint8_t __text_init_start[];
extern uint8_t __data_start[];
@@ -320,6 +322,20 @@ static void main_init_gic(void)
#endif
#ifdef CFG_WITH_PAGER
+
+static size_t get_block_size(void)
+{
+ struct core_mmu_table_info tbl_info;
+ unsigned l;
+
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
+ panic();
+ l = tbl_info.level - 1;
+ if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
+ panic();
+ return 1 << tbl_info.shift;
+}
+
static void main_init_runtime(uint32_t pagable_part)
{
size_t n;
@@ -331,6 +347,7 @@ static void main_init_runtime(uint32_t pagable_part)
uint8_t *paged_store;
uint8_t *hashes;
uint8_t *tmp_hashes = __init_start + init_size;
+ size_t block_size;
TEE_ASSERT(pagable_size % SMALL_PAGE_SIZE == 0);
@@ -409,15 +426,14 @@ static void main_init_runtime(uint32_t pagable_part)
* Initialize the virtual memory pool used for main_mmu_l2_ttb which
* is supplied to tee_pager_init() below.
*/
+ block_size = get_block_size();
if (!tee_mm_init(&tee_mm_vcore,
- ROUNDDOWN(CFG_TEE_RAM_START, SECTION_SIZE),
- ROUNDDOWN(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
- SECTION_SIZE),
+ ROUNDDOWN(CFG_TEE_RAM_START, block_size),
+ ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
+ block_size),
SMALL_PAGE_SHIFT, 0))
panic();
- tee_pager_init(main_mmu_l2_ttb);
-
/*
* Claim virtual memory which isn't paged, note that there might be
* a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
@@ -689,6 +705,7 @@ static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1)
}
#endif
+#ifndef CFG_WITH_LPAE
paddr_t core_mmu_get_main_ttb_pa(void)
{
/* Note that this depends on flat mapping of TEE Core */
@@ -716,6 +733,7 @@ vaddr_t core_mmu_get_ul1_ttb_va(void)
{
return (vaddr_t)main_mmu_ul1_ttb[thread_get_id()];
}
+#endif
void console_putc(int ch)
{
@@ -729,20 +747,20 @@ void console_flush_tx_fifo(void)
uart_flush_tx_fifo(CONSOLE_UART_BASE);
}
-void *core_mmu_alloc_l2(struct map_area *map)
+#ifndef CFG_WITH_LPAE
+void *core_mmu_alloc_l2(struct tee_mmap_region *mm)
{
/* Can't have this in .bss since it's not initialized yet */
static size_t l2_offs __attribute__((section(".data")));
+ const size_t l2_va_size = TEE_MMU_L2_NUM_ENTRIES * SMALL_PAGE_SIZE;
size_t l2_va_space = ((sizeof(main_mmu_l2_ttb) - l2_offs) /
- TEE_MMU_L2_SIZE) * SECTION_SIZE;
+ TEE_MMU_L2_SIZE) * l2_va_size;
if (l2_offs)
return NULL;
- if (map->type != MEM_AREA_TEE_RAM)
+ if (mm->size > l2_va_space)
return NULL;
- if (map->size > l2_va_space)
- return NULL;
- l2_offs += ROUNDUP(map->size, SECTION_SIZE) / SECTION_SIZE;
+ l2_offs += ROUNDUP(mm->size, l2_va_size) / l2_va_size;
return main_mmu_l2_ttb;
}
-
+#endif
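With the pager enabled, the vcore pool is sized from get_block_size() above, i.e. the span covered by one entry at the level just above the page-table level, instead of a hard-coded SECTION_SIZE. A sketch of that rounding, reusing names from the hunks above:

	/* Sketch only: mirrors the tee_mm_init() call shown above. */
	static void init_vcore_sketch(void)
	{
		size_t block_size = get_block_size();

		if (!tee_mm_init(&tee_mm_vcore,
				 ROUNDDOWN(CFG_TEE_RAM_START, block_size),
				 ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
					 block_size),
				 SMALL_PAGE_SHIFT, 0))
			panic();
	}

For a v7 table the block would be a 1 MiB section and for LPAE a 2 MiB block, though those sizes are assumptions about the table geometry rather than something this hunk states.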
diff --git a/core/arch/arm32/plat-vexpress/platform_config.h b/core/arch/arm32/plat-vexpress/platform_config.h
index 92e55a0..9142c6a 100644
--- a/core/arch/arm32/plat-vexpress/platform_config.h
+++ b/core/arch/arm32/plat-vexpress/platform_config.h
@@ -211,9 +211,6 @@
* QEMU virt specifics.
*/
-#define ROM_BASE 0x00000000
-#define ROM_SIZE (32 * 1024 * 1024)
-
#define DRAM0_BASE 0x40000000
#define DRAM0_SIZE (0x40000000 - DRAM0_TEERES_SIZE)
@@ -269,8 +266,8 @@
*/
#define CFG_TEE_RAM_PH_SIZE TZSRAM_SIZE
#define CFG_TEE_RAM_START TZSRAM_BASE
-#define CFG_TA_RAM_START TZDRAM_BASE
-#define CFG_TA_RAM_SIZE TZDRAM_SIZE
+#define CFG_TA_RAM_START ROUNDUP(TZDRAM_BASE, CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN(TZDRAM_SIZE, CORE_MMU_DEVICE_SIZE)
#else
/*
* Assumes that either TZSRAM isn't large enough or TZSRAM doesn't exist,
@@ -283,8 +280,31 @@
*/
#define CFG_TEE_RAM_PH_SIZE CFG_TEE_RAM_VA_SIZE
#define CFG_TEE_RAM_START TZDRAM_BASE
-#define CFG_TA_RAM_START (TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE)
-#define CFG_TA_RAM_SIZE (TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE)
+#define CFG_TA_RAM_START ROUNDUP((TZDRAM_BASE + CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#define CFG_TA_RAM_SIZE ROUNDDOWN((TZDRAM_SIZE - CFG_TEE_RAM_VA_SIZE), \
+ CORE_MMU_DEVICE_SIZE)
+#endif
+
+#define DEVICE0_BASE ROUNDDOWN(CONSOLE_UART_BASE, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE0_SIZE CORE_MMU_DEVICE_SIZE
+
+#define DEVICE1_BASE ROUNDDOWN(GIC_BASE, CORE_MMU_DEVICE_SIZE)
+#define DEVICE1_SIZE CORE_MMU_DEVICE_SIZE
+
+#define DEVICE2_BASE ROUNDDOWN(GIC_BASE + GICD_OFFSET, \
+ CORE_MMU_DEVICE_SIZE)
+#define DEVICE2_SIZE CORE_MMU_DEVICE_SIZE
+
+#ifdef CFG_PCSC_PASSTHRU_READER_DRV
+#define DEVICE3_BASE ROUNDDOWN(PCSC_BASE, CORE_MMU_DEVICE_SIZE)
+#define DEVICE3_SIZE CORE_MMU_DEVICE_SIZE
+#endif
+
+#ifdef CFG_WITH_LPAE
+#define MAX_XLAT_TABLES 5
#endif
#ifndef UART_BAUDRATE
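The DEVICEn_BASE/SIZE macros express each peripheral window in CORE_MMU_DEVICE_SIZE units so the same boot map suits both the v7 section map and the LPAE block map. A worked example of the rounding, where the granule value and the UART address are assumptions for illustration (CORE_MMU_DEVICE_SIZE itself comes from the core_mmu headers):

	/* Illustrative values only. */
	#define CORE_MMU_DEVICE_SIZE	0x100000	/* assume a 1 MiB granule */
	#define ROUNDDOWN(v, size)	((v) & ~((size) - 1))

	#define CONSOLE_UART_BASE	0x1c090000	/* assumed example address */
	#define DEVICE0_BASE		ROUNDDOWN(CONSOLE_UART_BASE, CORE_MMU_DEVICE_SIZE)
						/* -> 0x1c000000 */
	#define DEVICE0_SIZE		CORE_MMU_DEVICE_SIZE

TA RAM gets the complementary treatment: its start is rounded up and its size rounded down to the same granule so the region never straddles a partially mapped block.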
diff --git a/core/include/kernel/tee_ta_manager_unpg.h b/core/include/kernel/tee_ta_manager_unpg.h
index 7e4554b..c77eff6 100644
--- a/core/include/kernel/tee_ta_manager_unpg.h
+++ b/core/include/kernel/tee_ta_manager_unpg.h
@@ -74,7 +74,7 @@ struct tee_ta_ctx {
size_t stack_size; /* size of stack */
uint32_t load_addr; /* elf load addr (from TAs address space) */
uint32_t context; /* Context ID of the process */
- tee_mmu_info_t *mmu; /* Saved MMU information (ddr only) */
+ struct tee_mmu_info *mmu; /* Saved MMU information (ddr only) */
uint32_t num_res_funcs; /* number of reserved ta_func_head_t (2 or 0) */
uint32_t flags; /* TA_FLAGS from sub header */
uint32_t panicked; /* True if TA has panicked, written from asm */
diff --git a/core/include/mm/tee_mmu.h b/core/include/mm/tee_mmu.h
index 1c33dd5..e196201 100644
--- a/core/include/mm/tee_mmu.h
+++ b/core/include/mm/tee_mmu.h
@@ -173,8 +173,6 @@ TEE_Result tee_mmu_kmap_va2pa_helper(void *va, void **pa);
bool tee_mmu_kmap_is_mapped(void *va, size_t len);
-bool tee_mmu_is_kernel_mapping(void);
-
uint32_t tee_mmu_kmap_get_cache_attr(void *va);
uint32_t tee_mmu_user_get_cache_attr(struct tee_ta_ctx *ctx, void *va);
diff --git a/core/include/mm/tee_mmu_types.h b/core/include/mm/tee_mmu_types.h
index 24562d5..2eaaba2 100644
--- a/core/include/mm/tee_mmu_types.h
+++ b/core/include/mm/tee_mmu_types.h
@@ -29,12 +29,53 @@
#include <stdint.h>
-struct _tee_mmu_info_t {
- uint32_t *table;
- uint32_t size;
- uint32_t ta_private_vmem_start;
- uint32_t ta_private_vmem_end;
+#define TEE_MATTR_VALID_BLOCK (1 << 0)
+#define TEE_MATTR_HIDDEN_BLOCK (1 << 1)
+#define TEE_MATTR_PHYS_BLOCK (1 << 2)
+#define TEE_MATTR_TABLE (1 << 3)
+#define TEE_MATTR_PR (1 << 4)
+#define TEE_MATTR_PW (1 << 5)
+#define TEE_MATTR_PX (1 << 6)
+#define TEE_MATTR_PRW (TEE_MATTR_PR | TEE_MATTR_PW)
+#define TEE_MATTR_PRX (TEE_MATTR_PR | TEE_MATTR_PX)
+#define TEE_MATTR_PRWX (TEE_MATTR_PRW | TEE_MATTR_PX)
+#define TEE_MATTR_UR (1 << 7)
+#define TEE_MATTR_UW (1 << 8)
+#define TEE_MATTR_UX (1 << 9)
+#define TEE_MATTR_URW (TEE_MATTR_UR | TEE_MATTR_UW)
+#define TEE_MATTR_URX (TEE_MATTR_UR | TEE_MATTR_UX)
+#define TEE_MATTR_URWX (TEE_MATTR_URW | TEE_MATTR_UX)
+
+#define TEE_MATTR_GLOBAL (1 << 10)
+#define TEE_MATTR_I_NONCACHE 0
+#define TEE_MATTR_I_WRITE_THR (1 << 11)
+#define TEE_MATTR_I_WRITE_BACK (1 << 12)
+#define TEE_MATTR_O_NONCACHE 0
+#define TEE_MATTR_O_WRITE_THR (1 << 13)
+#define TEE_MATTR_O_WRITE_BACK (1 << 14)
+
+#define TEE_MATTR_NONCACHE 0
+#define TEE_MATTR_CACHE_DEFAULT (TEE_MATTR_I_WRITE_BACK | \
+ TEE_MATTR_O_WRITE_BACK)
+
+#define TEE_MATTR_CACHE_UNKNOWN (1 << 15)
+
+#define TEE_MATTR_SECURE (1 << 16)
+
+struct tee_mmap_region {
+ paddr_t pa;
+ vaddr_t va;
+ size_t size;
+ uint32_t attr; /* TEE_MATTR_* above */
+};
+
+struct tee_mmu_info {
+ struct tee_mmap_region *table;
+ size_t size;
+ vaddr_t ta_private_vmem_start;
+ vaddr_t ta_private_vmem_end;
};
-typedef struct _tee_mmu_info_t tee_mmu_info_t;
+/* Note use of tee_mmu_info_t is deprecated */
+typedef struct tee_mmu_info tee_mmu_info_t;
#endif
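The new TEE_MATTR_* bits describe a mapping independently of the descriptor format; the v7 and LPAE back ends translate them into the actual hardware descriptors. A minimal sketch of composing an attribute word the way the pager's get_area_mattr() above does, assuming the header is reachable as <mm/tee_mmu_types.h>:

	#include <mm/tee_mmu_types.h>

	/* A global, secure, write-back cached, kernel read/write/exec mapping. */
	static uint32_t example_mattr(void)
	{
		return TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
		       TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE |
		       TEE_MATTR_PRWX;
	}

The same word is then handed to core_mmu_set_entry(), which encodes it for whichever table format is active.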
diff --git a/scripts/setup_qemu_optee.sh b/scripts/setup_qemu_optee.sh
index 9e7dbe3..cdef29a 100755
--- a/scripts/setup_qemu_optee.sh
+++ b/scripts/setup_qemu_optee.sh
@@ -49,7 +49,7 @@ STABLE_QEMU_COMMIT=c00ed157431a4a6e0c4c481ba1c809623cbf908f
SRC_BIOS_QEMU=https://github.com/jenswi-linaro/bios_qemu_tz_arm.git
DST_BIOS_QEMU=$DEV_PATH/bios_qemu
-STABLE_BIOS_QEMU_COMMIT=f510738399008226874504256f4e5f59e63cfa6a
+STABLE_BIOS_QEMU_COMMIT=baf44144616e8b9b4f92e12d8a4e80379a5506b6
SRC_SOC_TERM=https://github.com/jenswi-linaro/soc_term.git
DST_SOC_TERM=$DEV_PATH/soc_term