/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and
 * the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 */

#include "bl.h"
#include "virtualisor.h"
#include "cache_geom.h"
#include "a15.h"

/* Forward declaration */
static virt_descriptor a15_virt_desc;

extern cache_geometry host_cache_geometry[];
extern cache_geometry target_cache_geometry[];

/*
 * CPU-specific trap handling for the Cortex-A15: handle, save,
 * restore and set up any A15-specific traps.
 */

/*
 * Fix up the L1 instruction cache CCSIDR value that the default
 * CP15 trap handler populated incorrectly, by sourcing it from the
 * saved cache geometry for this CPU instead.
 */
unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
{
	unsigned opc1, opc2, crn, crm, rt;
	unsigned csselr, level, ind, cpu_id;
	cache_geometry *geom;

	/* Only 32-bit CP15 accesses (EC field, HSR[31:26]) are relevant. */
	if ((hsr >> 26) != TRAP_CP15_32)
		return 0;

	/* Decode the MRC/MCR operands from the syndrome register. */
	opc2 = (hsr >> 17) & 0x7;
	opc1 = (hsr >> 14) & 0x7;
	crn = (hsr >> 10) & 0xf;
	rt = (hsr >> 5) & 0xf;
	crm = (hsr >> 1) & 0xf;

	/* Which cache level/type is currently selected via CSSELR? */
	csselr = read_csselr();
	level = get_cache_level(csselr);
	ind = get_cache_ind(csselr);

	/* Act only on a CCSIDR read while L1 instruction cache is selected. */
	if (crn != CRN_C0 || opc1 != 1 || crm != 0 || opc2 != CCSIDR ||
	    level != 0 || ind != CIND_INST)
		return 0;

	cpu_id = read_cpuid();

	/*
	 * Pick host_cache_geometry on the Virtualizer configuration
	 * [BC=x, TC=A15, HC=A7] and target_cache_geometry on the
	 * Virtualizer configuration [BC=x, TC=A7, HC=A15].
	 */
	if (IS_TGT_CLUSTER)
		geom = &host_cache_geometry[cpu_id];
	else
		geom = &target_cache_geometry[cpu_id];

	regs->r[rt] = geom->ccsidr[level][ind];

	return 0;
}

/* No A15-specific trap state needs saving. */
unsigned a15_trap_save(unsigned first_cpu, unsigned sibling_cpu)
{
	return 0;
}

/*
 * Re-enable trapping of the cache identification registers after a
 * context restore. Required on the Virtualizer configuration
 * [BC=x, TC=A15, HC=A7]; harmless (redundant) on the configuration
 * [BC=x, TC=A7, HC=A15].
 */
unsigned a15_trap_restore(unsigned first_cpu, unsigned sibling_cpu)
{
	if (sibling_cpu == A7) {
		unsigned hcr = read_hcr() | HCR_TID2;

		write_hcr(hcr);
		dsb();
		isb();
	}

	return 0;
}

/*
 * One-time A15-specific trap setup for this CPU. When the switcher is
 * active and the sibling cluster is an A7, seed the boot cluster's
 * cache geometry and enable cache-id register trapping via HCR.TID2.
 */
unsigned a15_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
{
	/* Nothing to do in "always on" (non-switcher) mode. */
	if (switcher && sibling_cpu == A7) {
		unsigned hcr;

		/*
		 * Statically initialize the L1 instruction cache CCSIDR
		 * value on the boot cluster: the Virtualizer relies on
		 * this information even before the counterpart cluster
		 * has been initialized dynamically.
		 */
		if (IS_BOOT_CLUSTER) {
			unsigned cpu_id = read_cpuid();
			cache_geometry *geom;

			/*
			 * host_cache_geometry on the Virtualizer
			 * configuration [BC=x, TC=A15, HC=A7];
			 * target_cache_geometry on the configuration
			 * [BC=x, TC=A7, HC=A15].
			 */
			if (IS_TGT_CLUSTER)
				geom = &host_cache_geometry[cpu_id];
			else
				geom = &target_cache_geometry[cpu_id];

			geom->ccsidr[0][CIND_INST] =
			    CACHE_A7_L1_INST_CCSIDR;
		}

		/*
		 * Trap cache identification registers. Necessary on the
		 * Virtualizer configuration [BC=x, TC=A15, HC=A7];
		 * redundant on [BC=x, TC=A7, HC=A15].
		 */
		hcr = read_hcr() | HCR_TID2;
		write_hcr(hcr);
		dsb();
		isb();
	}

	/*
	 * Mark CPU-specific Virtualisor setup as done so the next
	 * invocation restores context instead of redoing setup.
	 */
	a15_virt_desc.init[read_cpuid()] = 1;

	return 0;
}

static virt_descriptor a15_virt_desc
    __attribute__ ((section("virt_desc_section"))) = {
	A15,
	{0},
	a15_trap_setup,
	a15_trap_handle,
	a15_trap_save,
	a15_trap_restore,
};