/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and
 * the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 */

#include "secure_world.h"

sec_context secure_context[MAX_CORES] __attribute__ ((aligned(CACHE_LINE_SZ)));
unsigned ns_entry_ptr[MAX_CORES];
unsigned small_pagetable[1024] __attribute__ ((aligned(4096)));
unsigned host_cluster = HOST_CLUSTER;
unsigned switcher = SWITCHER;

/* Bakery lock to serialize access to the tube. */
static bakery_t lock_tube1 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };

void enable_caches(void)
{
        unsigned cpu_id = read_cpuid();
        unsigned cluster_id = read_clusterid();
        unsigned first_cpu = find_first_cpu();

        write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable Start",
                    read_cntpct(), 0x0, 0x0);

        /* Turn on coherency */
        enable_coherency();

        /* Enable caches */
        write_sctlr(read_sctlr() | CR_I | CR_Z | CR_C);
        dsb();
        isb();

        /*
         * Only one cpu should enable the CCI while the other
         * cpus wait.
         */
        if (first_cpu == cpu_id) {
                if (cluster_id)
                        write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
                else
                        write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x3);
                dsb();
        }

        /* Wait for the dust to settle down */
        while (read32(CCI_BASE + STATUS_REG) & 0x1) ;

        write_trace(&lock_tube1, SEC_TUBE1, "Secure Coherency Enable End",
                    read_cntpct(), 0x0, 0x0);
        return;
}

void secure_context_restore(void)
{
        unsigned cpu_id = read_cpuid();
        sec_context *sec_ctx = &secure_context[cpu_id];

        write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore Start",
                    read_cntpct(), 0x0, 0x0);

        /* Restore state of CCI SAR */
        write32(CCI_BASE + SECURE_ACCESS_REG, sec_ctx->cci_sar);

        /* Restore the security state of PPIs. */
        write32(GIC_ID_PHY_BASE + GICD_SEC, sec_ctx->vgic_icdisr0);

        /* Restore the Priority mask register */
        write32(GIC_IC_PHY_BASE + GICC_PRIMASK, sec_ctx->vgic_iccpmr);

        /* Restore the coprocessor context */
        write_cntfrq(sec_ctx->cntfrq);
        write_mvbar(sec_ctx->mvbar);
        write_vbar(sec_ctx->vbar);
        write_nsacr(sec_ctx->nsacr);
        write_cpacr(sec_ctx->cpacr);
        write_actlr(sec_ctx->actlr);
        write_scr(sec_ctx->scr);
        write_sctlr(read_sctlr() | sec_ctx->sctlr);
        dsb();
        isb();

        write_trace(&lock_tube1, SEC_TUBE1, "Secure Context Restore End",
                    read_cntpct(), 0x0, 0x0);
        return;
}
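
/*
 * Save the secure world context of this cpu and record the non-secure entry
 * point before a switch or hotplug operation. Depending on 'op_type', the
 * warm reset handler is installed so that the inbound core (and, for a
 * hotplug operation, this core on its way back) resumes through warm_reset.
 */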
void secure_context_save(unsigned ns_entry_point, unsigned op_type)
{
        unsigned cpu_id = read_cpuid(), ib_cluster = get_inbound();
        sec_context *sec_ctx = &secure_context[cpu_id];
        void (*warm_reset_handler) (void) = (void (*)(void))&warm_reset;

        ns_entry_ptr[cpu_id] = ns_entry_point;
        sec_ctx->cci_sar = read32(CCI_BASE + SECURE_ACCESS_REG);
        sec_ctx->vgic_icdisr0 = read32(GIC_ID_PHY_BASE + GICD_SEC);
        sec_ctx->vgic_iccpmr = read32(GIC_IC_PHY_BASE + GICC_PRIMASK);
        sec_ctx->mvbar = read_mvbar();
        sec_ctx->vbar = read_vbar();
        sec_ctx->nsacr = read_nsacr();
        sec_ctx->cpacr = read_cpacr();
        sec_ctx->actlr = read_actlr();
        sec_ctx->scr = read_scr();
        sec_ctx->sctlr = read_sctlr();
        sec_ctx->cntfrq = read_cntfrq();

        switch (op_type) {
        case OP_TYPE_SWITCH:
                /*
                 * Now that the context has been saved, it's safe to bring
                 * our counterpart on the inbound cluster out of reset.
                 */
                set_reset_handler(ib_cluster, cpu_id, warm_reset_handler);
                powerup_ib_core(ib_cluster, cpu_id);
                break;
        case OP_TYPE_HP:
                /*
                 * Ensure that the correct warm reset handler is set for
                 * our way back.
                 */
                set_reset_handler(read_clusterid(), cpu_id, warm_reset_handler);
                /* Set it for the inbound as well in case we get switched */
                set_reset_handler(ib_cluster, cpu_id, warm_reset_handler);
                break;
        default:
                break;
        }

        return;
}

/* Create the small page level 1 descriptor */
static void create_l1_sp_desc(unsigned virt_addr, unsigned l1_ttb_va,
                              unsigned l2_ttb_pa)
{
        unsigned ttb1_index = 0;
        unsigned ttb1_desc = 0;

        ttb1_index = (virt_addr & MB_MASK) >> MB_SHIFT;

        /*
         * Create a mapping if one is not already present.
         * Assuming that page tables are initialized to 0.
         */
        if (!(read32(l1_ttb_va + 4 * ttb1_index) & SMALL_PAGE)) {
                l2_ttb_pa = l2_ttb_pa & SP_L1_BASE_MASK;
                ttb1_desc = l2_ttb_pa | SMALL_PAGE;
                write32(l1_ttb_va + 4 * ttb1_index, ttb1_desc);
                /* Clean the same byte address that was written above. */
                cln_dcache_mva_pou((unsigned *)(l1_ttb_va + 4 * ttb1_index));
        }

        return;
}

/* Create the small page level 2 descriptor */
static void create_l2_sp_desc(unsigned virt_addr, unsigned phys_addr,
                              unsigned l2_ttb_va, unsigned attrs)
{
        unsigned int ttb2_index = 0;
        unsigned int ttb2_desc = 0;
        unsigned int mem_attrs = SP_SBO | SP_CACHEABLE | SP_BUFFERABLE |
            SP_TEX0 | SP_SHARED | SP_AP0;

        /* Use default attributes if the user has not passed any */
        if (attrs) {
                mem_attrs = attrs;
        }

        /* Left shift by 12 followed by a right shift by 24 gives the 2nd level index */
        ttb2_index = (virt_addr << PAGE_SHIFT) >> (PAGE_SHIFT * 2);

        /*
         * Create a mapping if one is not already present.
         * Assuming that page tables are initialized to 0.
         */
        if (!(read32(l2_ttb_va + 4 * ttb2_index))) {
                ttb2_desc = (phys_addr & PAGE_MASK) | mem_attrs;
                write32(l2_ttb_va + 4 * ttb2_index, ttb2_desc);
                /* Clean the same byte address that was written above. */
                cln_dcache_mva_pou((unsigned *)(l2_ttb_va + 4 * ttb2_index));
        }

        return;
}
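
/*
 * Illustrative sketch, not part of the original code: how the indices
 * computed in create_l1_sp_desc() and create_l2_sp_desc() map onto the ARMv7
 * short-descriptor translation table format, assuming MB_SHIFT == 20 and
 * PAGE_SHIFT == 12. The helpers below are hypothetical and unused; they only
 * spell out the shift arithmetic.
 */
static unsigned __attribute__ ((unused)) example_l1_index(unsigned virt_addr)
{
        /* Bits [31:20]: one level 1 entry per 1MB section. */
        return virt_addr >> 20;
}

static unsigned __attribute__ ((unused)) example_l2_index(unsigned virt_addr)
{
        /* Bits [19:12]: one level 2 entry per 4KB page, 256 per table. */
        return (virt_addr << 12) >> 24;
}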

void add_dv_page(unsigned pt_base)
{
        unsigned start_addr = (unsigned)&BL_SEC_DV_PAGE$$Base;
        unsigned dv_mem_attrs = SP_AP0 | SP_SBO | SP_XN | SP_BUFFERABLE;
        unsigned addr = 0x0;

        /*
         * Create the L1 small page descriptor using the base address supplied.
         * The region specified must all fit within a single 1MB section.
         */
        create_l1_sp_desc(start_addr, (unsigned)pt_base, (unsigned)small_pagetable);

        /*
         * We want all memory to be WBWA/S except for a page
         * which is device (used for the Bakery locks etc).
         */
        for (addr = start_addr & MB_MASK;
             addr < (start_addr & MB_MASK) + 0x100000; addr += 4096) {
                create_l2_sp_desc(addr, addr, (unsigned)small_pagetable,
                                  (addr == start_addr ? dv_mem_attrs : 0));
        }

        return;
}
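
/*
 * Usage sketch, an assumption rather than code taken from the original
 * sources: a secure world MMU setup path would call add_dv_page() with the
 * virtual address of its level 1 translation table so that the page holding
 * the bakery locks (the BL_SEC_DV_PAGE section) is mapped as Device memory
 * while the rest of its 1MB section stays Normal WBWA/S. "l1_ttb_va" is a
 * hypothetical parameter used only for illustration.
 */
static void __attribute__ ((unused)) example_setup_dv_mapping(unsigned l1_ttb_va)
{
        /* Remap the section that contains BL_SEC_DV_PAGE through small pages. */
        add_dv_page(l1_ttb_va);
}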