/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and
 * the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 */

#include "virtualisor.h"
#include "virt_helpers.h"
#include "hyp_types.h"
#include "cache_geom.h"
#include "mem_trap.h"
#include "gic_registers.h"
#include "bl.h"
#include "misc.h"

extern cache_geometry host_cache_geometry[];
extern cache_geometry target_cache_geometry[];
extern cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS];
extern void signal_switchover(void);
extern unsigned cluster_reset_status(unsigned);
extern unsigned handle_pmu(unsigned, unsigned, unsigned);
extern void set_pmu_state(unsigned);

void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs *regs)
{
    unsigned Op1, Op2, CRn, CRm, Rt, write, cpu_id = read_cpuid();

    Op2 = (hsr >> 17) & 0x7;
    Op1 = (hsr >> 14) & 0x7;
    CRn = (hsr >> 10) & 0xf;
    Rt = (hsr >> 5) & 0xf;
    CRm = (hsr >> 1) & 0xf;
    write = !(hsr & 0x1);
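
    /*
     * The fields above are decoded from the HSR ISS of a trapped
     * CP15 MCR/MRC access: Opc2, Opc1, CRn, Rt and CRm identify the
     * coprocessor register, and ISS bit[0] gives the direction
     * (0 = MCR, i.e. a guest write; 1 = MRC, a guest read).
     */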

    switch (CRn) {
    case CRN_C0:
        switch (Op1) {
        case 0:
            switch (CRm) {
            case 0:
                switch (Op2) {
                case MIDR:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_vmidr();
                    break;
                case CTR:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_ctr();
                    break;
                case TCMTR:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_tcmtr();
                    break;
                case TLBTR:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_tlbtr();
                    break;
                case MPIDR:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_vmpidr();
                    break;
                default:
                    goto error;
                }
                break;
            case 1:
                switch (Op2) {
                case ID_PFR0:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_pfr0();
                    break;
                case ID_PFR1:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_pfr1();
                    break;
                case ID_DFR0:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_dfr0();
                    break;
                case ID_AFR0:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_afr0();
                    break;
                case ID_MMFR0:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_mmfr0();
                    break;
                case ID_MMFR1:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_mmfr1();
                    break;
                case ID_MMFR2:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_mmfr2();
                    break;
                case ID_MMFR3:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_mmfr3();
                    break;
                default:
                    goto error;
                }
                break;
            case 2:
                switch (Op2) {
                case ID_ISAR0:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_isar0();
                    break;
                case ID_ISAR1:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_isar1();
                    break;
                case ID_ISAR2:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_isar2();
                    break;
                case ID_ISAR3:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_isar3();
                    break;
                case ID_ISAR4:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_isar4();
                    break;
                case ID_ISAR5:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_id_isar5();
                    break;
                default:
                    /* RAZ */
                    regs->r[Rt] = 0x0;
                }
                break;
            case 3:
            case 4:
            case 5:
            case 6:
            case 7:
                if (write)
                    goto error;
                /* RAZ */
                regs->r[Rt] = 0x0;
                break;
            default:
                goto error;
            }
            break;
        case 1:
            switch (CRm) {
            case 0:
                switch (Op2) {
                case CCSIDR: {
                    unsigned csselr, level, ind;

                    if (write)
                        goto error;
                    /*
                     * The L1 instruction cache CCSIDR value is
                     * incorrectly set on A7 and A15 for the
                     * Virtualizer configuration [BC=x, TC=A15,
                     * HC=A7]. The error is corrected later in the
                     * A7 or A15 specific trap function.
                     */
                    csselr = target_cache_geometry[cpu_id].csselr;
                    level = get_cache_level(csselr);
                    ind = get_cache_ind(csselr);
                    regs->r[Rt] =
                        target_cache_geometry[cpu_id].ccsidr[level][ind];
                    break;
                }
                case CLIDR:
                    if (write)
                        goto error;
                    regs->r[Rt] = target_cache_geometry[cpu_id].clidr;
                    break;
                case AIDR:
                    if (write)
                        goto error;
                    regs->r[Rt] = read_aidr();
                    break;
                default:
                    goto error;
                }
                break;
            default:
                goto error;
            }
            break;
        case 2:
            switch (CRm) {
            case 0:
                switch (Op2) {
                case CSSELR:
                    if (write) {
                        target_cache_geometry[cpu_id].csselr = regs->r[Rt];
                        write_csselr(regs->r[Rt]);
                    } else
                        regs->r[Rt] = target_cache_geometry[cpu_id].csselr;
                    break;
                default:
                    goto error;
                }
                break;
            default:
                goto error;
            }
            break;
        default:
            goto error;
        }
        break;

    case CRN_C7:
        switch (Op1) {
        case 0:
            switch (CRm) {
            case 6:
                switch (Op2) {
                case DCISW:
                    if (!write)
                        goto error;
                    handle_cm_op(regs->r[Rt], dcisw,
                                 &host_cache_geometry[cpu_id],
                                 &target_cache_geometry[cpu_id],
                                 &cache_delta[cpu_id][0]);
                    break;
                default:
                    goto error;
                }
                break;
            case 10:
                switch (Op2) {
                case DCCSW:
                    if (!write)
                        goto error;
                    handle_cm_op(regs->r[Rt], dccsw,
                                 &host_cache_geometry[cpu_id],
                                 &target_cache_geometry[cpu_id],
                                 &cache_delta[cpu_id][0]);
                    break;
                default:
                    goto error;
                }
                break;
            case 14:
                switch (Op2) {
                case DCCISW:
                    if (!write)
                        goto error;
                    handle_cm_op(regs->r[Rt], dccisw,
                                 &host_cache_geometry[cpu_id],
                                 &target_cache_geometry[cpu_id],
                                 &cache_delta[cpu_id][0]);
                    break;
                default:
                    goto error;
                }
                break;
            default:
                goto error;
            }
            break;
        default:
            goto error;
        }
        break;

    case CRN_C9:
        switch (Op1) {
        case 1:
            switch (CRm) {
            case 0:
                switch (Op2) {
                case 2:
                    /*
                     * A write to the L2CTLR register is a problem:
                     * the A7 version does not have all the fields
                     * that the A15 version has, so handling it
                     * needs more thought.
                     */
                    if (write) {
                        printf("%s: Unexpected L2CTLR write\n",
                               __FUNCTION__);
                        goto error;
                    }
                    /*
                     * A read of the L2CTLR should return the total
                     * number of cpus across both clusters in the
                     * "always on" configuration. Since the L2CTLR
                     * has only 2 bits for the number of cpus, flag
                     * any system with more than 4 cpus.
                     */
                    if (!switcher) {
                        unsigned num_cpus =
                            CLUSTER_CPU_COUNT(host_cluster) +
                            CLUSTER_CPU_COUNT(!host_cluster);

                        if (num_cpus > 4) {
                            printf("%s: Unexpected L2CTLR read\n",
                                   __FUNCTION__);
                            goto error;
                        }
                        regs->r[Rt] &= ~(0x3 << 24);
                        regs->r[Rt] |= (num_cpus - 1) << 24;
                    } else {
                        regs->r[Rt] = read_l2ctlr();
                    }
                    break;
                case 3:
                    /*
                     * A write to the L2ECTLR register is a problem
                     * as the register does not exist on the A7, so
                     * handling it needs more thought.
                     */
                    if (write) {
                        printf("%s: Unexpected L2ECTLR write\n",
                               __FUNCTION__);
                        goto error;
                    } else {
                        regs->r[Rt] = read_l2ectlr();
                    }
                    break;
                default:
                    goto error;
                }
                break;
            default:
                goto error;
            }
            break;

        /*
         * Support for accesses to the PMU (PMON) register space.
         * It has not been verified whether all of these registers
         * are readable and writable, but execution never reaches
         * this point for an inaccessible register: such an access
         * raises an undefined instruction abort instead.
         */
        case 0:
            switch (CRm) {
            case 14:
                switch (Op2) {
                case 0:
                    if (write)
                        write_pmuserenr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmuserenr();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 1:
                    if (write)
                        write_pmintenset(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmintenset();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 2:
                    if (write)
                        write_pmintenclr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmintenclr();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 3:
                    if (write)
                        write_pmovsset(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmovsset();
                    set_pmu_state(PMU_STATE1);
                    break;
                default:
                    goto error;
                }
                break;
            case 13:
                switch (Op2) {
                case 0:
                    if (write)
                        write_pmccntr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmccntr();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 1:
                    if (write)
                        write_pmxevtyper(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmxevtyper();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 2:
                    if (write)
                        write_pmxevcntr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmxevcntr();
                    set_pmu_state(PMU_STATE1);
                    break;
                default:
                    goto error;
                }
                break;
            case 12:
                switch (Op2) {
                case 0:
                    if (write)
                        write_pmcr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmcr();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 1:
                    if (write)
                        write_pmcntenset(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmcntenset();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 2:
                    if (write)
                        write_pmcntenclr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmcntenclr();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 3:
                    if (write)
                        write_pmovsr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmovsr();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 4:
                    if (write)
                        write_pmswinc(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmswinc();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 5:
                    if (write)
                        write_pmselr(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmselr();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 6:
                    if (write)
                        write_pmceid0(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmceid0();
                    set_pmu_state(PMU_STATE1);
                    break;
                case 7:
                    if (write)
                        write_pmceid1(regs->r[Rt]);
                    else
                        regs->r[Rt] = read_pmceid1();
                    set_pmu_state(PMU_STATE1);
                    break;
                default:
                    goto error;
                }
                break;
            }
            break;
        default:
            goto error;
        }
        break;

    default:
        goto error;
    }

    return;

error:
    printf("%s: Unexpected cp15 instruction", __FUNCTION__);
    printf(" : %s", write ? "MCR p15" : "MRC p15");
    printf(", %d, %d, %d, %d, %d\n", Op1, Rt, CRn, CRm, Op2);
    panic();
}

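/*
 * Stage 2 data abort handling for emulated peripheral accesses.
 * HPFAR[31:4] holds bits [39:12] of the faulting IPA, so combining it
 * with the page offset from HDFAR reconstructs the 32-bit address of
 * the trapped access; for syndromes with a valid ISS, bits [19:16]
 * give the transfer register and bit [6] the access direction.
 */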
void trap_dabort_handle(unsigned hsr, gp_regs *regs)
{
    unsigned hdfar = 0x0, hpfar = 0x0, pa = 0x0, *data = 0x0;
    unsigned write = 0x0;

    hdfar = read_hdfar();
    hpfar = read_hpfar();

    pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff);
    data = &regs->r[(hsr >> 16) & 0xf];
    write = (hsr >> 6) & 0x1;

    /* Distributor access */
    if ((pa & ~0xfff) == GIC_ID_PHY_BASE) {
        handle_vgic_distif_abort(pa, data, write);
    }
    /* KFSCB access */
    else if ((pa & ~0xfff) == KFSCB_BASE) {
        handle_kfscb_abort(pa, data, write);
    }

    return;
}

void trap_hvc_handle(unsigned hsr, gp_regs *regs)
{
    unsigned opcode = regs->r[0];

    switch (opcode) {
        /*
         * HVC call to switch to the other cluster. This is done by
         * sending a switchover IPI to all the cores in the cluster.
         */
    case HVC_SWITCHER_CLUSTER_SWITCH:
        /* Do not switch until the previous switch has completed */
        while (FALSE == cluster_reset_status(!read_clusterid()));
        signal_switchover();
        break;

        /*
         * HVC call to return the physical MPIDR.
         */
    case HVC_VIRT_MPIDR_READ:
        regs->r[0] = read_mpidr();
        break;

    case HVC_PMU_PMCR_READ:
    case HVC_PMU_PMCR_WRITE:
    case HVC_PMU_PMSELR_READ:
    case HVC_PMU_PMSELR_WRITE:
    case HVC_PMU_PMXEVTYPER_READ:
    case HVC_PMU_PMXEVTYPER_WRITE:
    case HVC_PMU_PMCNTENSET_READ:
    case HVC_PMU_PMCNTENSET_WRITE:
    case HVC_PMU_PMCNTENCLR_READ:
    case HVC_PMU_PMCNTENCLR_WRITE:
    case HVC_PMU_PMCCNTR_READ:
    case HVC_PMU_PMCCNTR_WRITE:
    case HVC_PMU_PMOVSR_READ:
    case HVC_PMU_PMOVSR_WRITE:
    case HVC_PMU_PMXEVCNTR_READ:
    case HVC_PMU_PMXEVCNTR_WRITE:
    case HVC_PMU_PMINTENSET_READ:
    case HVC_PMU_PMINTENSET_WRITE:
    case HVC_PMU_PMINTENCLR_READ:
    case HVC_PMU_PMINTENCLR_WRITE:
    case HVC_PMU_SWITCH:
    case HVC_PMU_GET_COUNTERS_SIZE:
    case HVC_PMU_SYNC_PMU_COUNTERS:
        regs->r[0] = handle_pmu(opcode, regs->r[1], regs->r[2]);
        break;

    default:
        break;
    }

    return;
}

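/*
 * Top-level trap handler for the Virtualisor. It performs the generic
 * trap handling above, lets any cpu-specific virt_descriptor registered
 * in the virt_desc section post-process the trap, and then advances
 * ELR_hyp past the trapped instruction. The HVC path returns early, as
 * the return address already points at the instruction after the HVC.
 */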
void HandleVirtualisor(gp_regs *regs)
{
    unsigned cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr()), rc = 0;
    unsigned hsr = read_hsr(), elr = 0, vd_len = 0, index = 0;
    virt_descriptor *vd_array = &virt_desc_section$$Base;
    unsigned (*handler)(gp_regs *, unsigned, unsigned) = 0x0, sibling;

    /* Find the sibling cpu type on the other cluster */
    sibling = find_sibling_cpu();

    /*
     * Perform the generic trap handling.
     */
    switch (hsr >> 26) {
    case TRAP_DABORT:
        trap_dabort_handle(hsr, regs);
        break;
    case TRAP_CP15_32:
        trap_cp15_mrc_mcr_handle(hsr, regs);
        break;
    case TRAP_HVC:
        trap_hvc_handle(hsr, regs);
        return;
    default:
        printf("%s: Unexpected trap", __FUNCTION__);
        printf(": HSR=0x%x Regs=0x%x\n", hsr, (unsigned)regs);
        panic();
    }

    /*
     * Do any cpu-specific trap handling.
     */
    vd_len = (unsigned)&virt_desc_section$$Length;
    for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
        if (cpu_no == vd_array[index].cpu_no) {
            handler = vd_array[index].trap_handle;
            if (handler) {
                rc = handler(regs, hsr, sibling);
                if (rc) {
                    printf("%s: failed on cpu%d\n",
                           __FUNCTION__, cpu_no);
                    goto out;
                }
            }
        }
    }

    /*
     * This is a trap of the kind where we simply move on to the next
     * instruction in the guest: advance by 2 bytes if the trapped
     * instruction was a 16-bit Thumb instruction, else by 4 bytes.
     */
    elr = ((vm_context *) regs)->elr_hyp;
    if (hsr & (1 << 25))
        elr += 4;
    else
        elr += 2;
    ((vm_context *) regs)->elr_hyp = elr;

out:
    if (rc) {
        printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n",
               __FUNCTION__, cpu_id, cpu_no, sibling);
        panic();
    }

    return;
}