author    Robin Randhawa <robin.randhawa@arm.com> 2012-02-16 16:54:06 +0000
committer Robin Randhawa <robin.randhawa@arm.com> 2012-02-16 16:55:09 +0000
commit    813834c76ac48f29a4e1f67bb341ff0d0911b414 (patch)
tree      f9aa160817422c3c77ea913ec501221f85996948 /big-little/virtualisor
parent    48aa177c0c3e38dc84728df705e50ba924b6f424 (diff)
GNU indent pass over C and header files.
Basically:
$ for f in $(find . -name "*.[ch]"); do indent -linux $f; done
Signed-off-by: Robin Randhawa <robin.randhawa@arm.com>
Diffstat (limited to 'big-little/virtualisor')
-rw-r--r--  big-little/virtualisor/cache_geom.c                757
-rw-r--r--  big-little/virtualisor/cpus/a15/a15.c               46
-rw-r--r--  big-little/virtualisor/cpus/a15/include/a15.h        4
-rw-r--r--  big-little/virtualisor/cpus/a7/a7.c                 42
-rw-r--r--  big-little/virtualisor/cpus/a7/include/a7.h          4
-rw-r--r--  big-little/virtualisor/include/cache_geom.h         81
-rw-r--r--  big-little/virtualisor/include/mem_trap.h           30
-rw-r--r--  big-little/virtualisor/include/virtualisor.h        42
-rw-r--r--  big-little/virtualisor/mem_trap.c                  203
-rw-r--r--  big-little/virtualisor/vgic_trap_handler.c          91
-rw-r--r--  big-little/virtualisor/virt_context.c              355
-rw-r--r--  big-little/virtualisor/virt_handle.c              1073
-rw-r--r--  big-little/virtualisor/virt_setup.c                371
13 files changed, 1552 insertions, 1547 deletions
diff --git a/big-little/virtualisor/cache_geom.c b/big-little/virtualisor/cache_geom.c
index 1031ba4..17c3ee6 100644
--- a/big-little/virtualisor/cache_geom.c
+++ b/big-little/virtualisor/cache_geom.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "virt_helpers.h"
@@ -40,198 +40,192 @@ static unsigned cm_extline_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 };
* levels and save the geometry at each level.
*
*/
-void find_cache_geometry(cache_geometry *cg_ptr)
+void find_cache_geometry(cache_geometry * cg_ptr)
{
- unsigned ctr, clidr, ccsidr, csselr, old_csselr;
-
- /* Save Cache size selection register */
- old_csselr = read_csselr();
- clidr = read_clidr();
- cg_ptr->clidr = clidr;
-
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
- unsigned cache_type = get_cache_type(clidr, ctr);
-
-		/* Only separate and unified caches */
- if (cache_type >= 0x3) {
- /*
- * Choose the cache level & Data or Unified cache
- * as there are no set/way operations on the ICache
- */
- csselr = ctr << 1;
- write_csselr(csselr);
-
- isb();
-
- /*
- * Read the CCSIDR to record information about this
- * cache level.
- */
- ccsidr = read_ccsidr();
- cg_ptr->ccsidr[ctr] = ccsidr;
-
- } else {
- /*
- * Stop scanning at the first invalid/unsupported
- * cache level
- */
- break;
- }
- }
-
- /* Restore Cache size selection register */
- write_csselr(old_csselr);
- return;
+ unsigned ctr, clidr, ccsidr, csselr, old_csselr;
+
+ /* Save Cache size selection register */
+ old_csselr = read_csselr();
+ clidr = read_clidr();
+ cg_ptr->clidr = clidr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ unsigned cache_type = get_cache_type(clidr, ctr);
+
+		/* Only separate and unified caches */
+ if (cache_type >= 0x3) {
+ /*
+ * Choose the cache level & Data or Unified cache
+ * as there are no set/way operations on the ICache
+ */
+ csselr = ctr << 1;
+ write_csselr(csselr);
+
+ isb();
+
+ /*
+ * Read the CCSIDR to record information about this
+ * cache level.
+ */
+ ccsidr = read_ccsidr();
+ cg_ptr->ccsidr[ctr] = ccsidr;
+
+ } else {
+ /*
+ * Stop scanning at the first invalid/unsupported
+ * cache level
+ */
+ break;
+ }
+ }
+
+ /* Restore Cache size selection register */
+ write_csselr(old_csselr);
+ return;
}
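
For reference, the CLIDR/CCSIDR accessors used throughout this file
(get_cache_type, get_cache_linesz, get_cache_assoc, get_cache_numsets)
presumably live in cache_geom.h. A minimal sketch of what they would expand
to, assuming the architecturally defined ARMv7 layouts (3-bit Ctype fields
in CLIDR; LineSize, Associativity and NumSets in CCSIDR bits [2:0], [12:3]
and [27:13]):

	/* Sketch only -- the repo's real definitions may differ in form. */
	#define get_cache_type(clidr, lvl)   (((clidr) >> ((lvl) * 3)) & 0x7)
	#define get_cache_linesz(cg, lvl)    ((cg)->ccsidr[lvl] & 0x7)            /* log2(words) - 2 */
	#define get_cache_assoc(cg, lvl)     (((cg)->ccsidr[lvl] >> 3) & 0x3ff)   /* ways - 1 */
	#define get_cache_numsets(cg, lvl)   (((cg)->ccsidr[lvl] >> 13) & 0x7fff) /* sets - 1 */
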
/*
* Given two cache geometries, find out how they differ
*/
-void find_cache_diff(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+void find_cache_diff(cache_geometry * hcg_ptr, cache_geometry * tcg_ptr,
+ cache_diff * cd_ptr)
{
- unsigned tc_size = 0, hc_size = 0, tc_linelen = 0, hc_linelen = 0;
- unsigned tc_assoc = 0, hc_assoc = 0, tc_numsets = 0, hc_numsets = 0;
- unsigned ctr;
-
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
-
- /* Break at the first unimplemented cache level */
- if (get_cache_type(hcg_ptr->clidr, ctr) == 0)
- break;
-
- /* Cache associativity */
- tc_assoc = get_cache_assoc(tcg_ptr, ctr) + 1;
- hc_assoc = get_cache_assoc(hcg_ptr, ctr) + 1;
-
- /* Number of the sets in the cache */
- tc_numsets = get_cache_numsets(tcg_ptr, ctr) + 1;
- hc_numsets = get_cache_numsets(hcg_ptr, ctr) + 1;
-
- /* Cache line length in words */
- tc_linelen = 1 << (get_cache_linesz(tcg_ptr, ctr) + 2);
- hc_linelen = 1 << (get_cache_linesz(hcg_ptr, ctr) + 2);
-
- /* Cache size in words */
- tc_size = tc_assoc * tc_numsets * tc_linelen;
- hc_size = hc_assoc * hc_numsets * hc_linelen;
-
- /*
-		 * Find the factor by which the cache line sizes differ.
-		 * The target cache line number will then have to be
-		 * multiplied or divided by this factor to get the absolute
-		 * cache line number. Then, find the number of absolute
-		 * cache lines in each cache.
- */
- if (tc_linelen >= hc_linelen) {
- cd_ptr[ctr].tcline_factor =
- tc_linelen / hc_linelen;
- cd_ptr[ctr].tnumabs_clines =
- tc_assoc * tc_numsets *
- cd_ptr[ctr].tcline_factor;
- cd_ptr[ctr].hnumabs_clines =
- hc_assoc * hc_numsets;
- } else {
- cd_ptr[ctr].hcline_factor =
- hc_linelen / tc_linelen;
- cd_ptr[ctr].hnumabs_clines =
- hc_assoc * hc_numsets *
- cd_ptr[ctr].hcline_factor;
- cd_ptr[ctr].tnumabs_clines =
- tc_assoc * tc_numsets;
- }
-
- /*
- * Find if the cache sizes differ. If so, then set a flag
- * to indicate whether some set/way operations need to be
- * extended on the host cpu or ignored on the target cpu
- */
- if (tc_size > hc_size) {
- cd_ptr[ctr].csize_diff = TCSZ_BIG;
- }
-
- if (tc_size == hc_size) {
- cd_ptr[ctr].csize_diff =
- TCSZ_EQUAL;
- }
-
- if (tc_size < hc_size) {
- cd_ptr[ctr].csize_diff =
- TCSZ_SMALL;
- }
- }
-
- return;
+ unsigned tc_size = 0, hc_size = 0, tc_linelen = 0, hc_linelen = 0;
+ unsigned tc_assoc = 0, hc_assoc = 0, tc_numsets = 0, hc_numsets = 0;
+ unsigned ctr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+
+ /* Break at the first unimplemented cache level */
+ if (get_cache_type(hcg_ptr->clidr, ctr) == 0)
+ break;
+
+ /* Cache associativity */
+ tc_assoc = get_cache_assoc(tcg_ptr, ctr) + 1;
+ hc_assoc = get_cache_assoc(hcg_ptr, ctr) + 1;
+
+ /* Number of the sets in the cache */
+ tc_numsets = get_cache_numsets(tcg_ptr, ctr) + 1;
+ hc_numsets = get_cache_numsets(hcg_ptr, ctr) + 1;
+
+ /* Cache line length in words */
+ tc_linelen = 1 << (get_cache_linesz(tcg_ptr, ctr) + 2);
+ hc_linelen = 1 << (get_cache_linesz(hcg_ptr, ctr) + 2);
+
+ /* Cache size in words */
+ tc_size = tc_assoc * tc_numsets * tc_linelen;
+ hc_size = hc_assoc * hc_numsets * hc_linelen;
+
+ /*
+		 * Find the factor by which the cache line sizes differ.
+		 * The target cache line number will then have to be
+		 * multiplied or divided by this factor to get the absolute
+		 * cache line number. Then, find the number of absolute
+		 * cache lines in each cache.
+ */
+ if (tc_linelen >= hc_linelen) {
+ cd_ptr[ctr].tcline_factor = tc_linelen / hc_linelen;
+ cd_ptr[ctr].tnumabs_clines =
+ tc_assoc * tc_numsets * cd_ptr[ctr].tcline_factor;
+ cd_ptr[ctr].hnumabs_clines = hc_assoc * hc_numsets;
+ } else {
+ cd_ptr[ctr].hcline_factor = hc_linelen / tc_linelen;
+ cd_ptr[ctr].hnumabs_clines =
+ hc_assoc * hc_numsets * cd_ptr[ctr].hcline_factor;
+ cd_ptr[ctr].tnumabs_clines = tc_assoc * tc_numsets;
+ }
+
+ /*
+ * Find if the cache sizes differ. If so, then set a flag
+ * to indicate whether some set/way operations need to be
+ * extended on the host cpu or ignored on the target cpu
+ */
+ if (tc_size > hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_BIG;
+ }
+
+ if (tc_size == hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_EQUAL;
+ }
+
+ if (tc_size < hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_SMALL;
+ }
+ }
+
+ return;
}
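
A hypothetical worked example of the factors computed above (numbers
invented for illustration, not the real A15/A7 geometries): let the target
cache have 16-word lines, 4 ways and 128 sets, and the host 8-word lines,
4 ways and 256 sets. Then:

	tcline_factor  = 16 / 8;       /* = 2: one target line = 2 absolute lines */
	tnumabs_clines = 4 * 128 * 2;  /* = 1024 absolute lines on the target     */
	hnumabs_clines = 4 * 256;      /* = 1024 absolute lines on the host       */
	/* tc_size = 4 * 128 * 16 = 8192 words = hc_size, so csize_diff = TCSZ_EQUAL */
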
-unsigned map_cache_geometries(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+unsigned map_cache_geometries(cache_geometry * hcg_ptr,
+ cache_geometry * tcg_ptr, cache_diff * cd_ptr)
{
- unsigned rc = 0, cpu_id = read_cpuid();
- unsigned hcr = 0, cluster_id = read_clusterid(), sibling_cpuid = 0;
- unsigned abs_cpuid = 0;
-
- if (!switcher) {
- sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
- abs_cpuid = abs_cpuid(cpu_id, cluster_id);
- }
-
- if (cluster_id == host_cluster) {
-
- /* Find host cache topology */
- find_cache_geometry(hcg_ptr);
-
- /*
- * Wait for the target cpu to send an event indicating that
-		 * it has discovered its cache topology.
- */
- if (!switcher) {
- wait_for_event(CACHE_GEOM_DONE, abs_cpuid);
- reset_event(CACHE_GEOM_DONE, abs_cpuid);
- }
-
- /*
-		 * Assuming that only the no. of sets, ways and cache line
-		 * size will be different across the target and host cpu
-		 * caches, the CLIDRs should look the same. Also, this check
-		 * ensures that the target cpu is always run before the host,
-		 * else the cache geometry will have to be hardcoded.
-		 * Support for absence of cache levels and memory mapped
-		 * caches will be added later.
- */
- if (hcg_ptr->clidr != tcg_ptr->clidr) {
- printf("%s: Host CLIDR=0x%x : Target CLIDR=0x%x \n",
- __FUNCTION__, hcg_ptr->clidr, tcg_ptr->clidr);
- rc = 1;
- goto out;
- }
-
- find_cache_diff(hcg_ptr, tcg_ptr, cd_ptr);
-
- /*
- * Enable bit for trapping set/way operations &
- * Cache identification regs
- */
- hcr = read_hcr();
- hcr |= HCR_TSW | HCR_TID2;
- write_hcr(hcr);
- dsb();
- isb();
-
- } else {
-
- /* Find the cache geometry on the target cpu */
- find_cache_geometry(tcg_ptr);
-
- /*
- * Send an event to the host cpu indicating that we have
- * discovered our cache topology
- */
- if(!switcher) {
- set_event(CACHE_GEOM_DONE, sibling_cpuid);
- }
- }
+ unsigned rc = 0, cpu_id = read_cpuid();
+ unsigned hcr = 0, cluster_id = read_clusterid(), sibling_cpuid = 0;
+ unsigned abs_cpuid = 0;
+
+ if (!switcher) {
+ sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+ }
+
+ if (cluster_id == host_cluster) {
+
+ /* Find host cache topology */
+ find_cache_geometry(hcg_ptr);
+
+ /*
+ * Wait for the target cpu to send an event indicating that
+		 * it has discovered its cache topology.
+ */
+ if (!switcher) {
+ wait_for_event(CACHE_GEOM_DONE, abs_cpuid);
+ reset_event(CACHE_GEOM_DONE, abs_cpuid);
+ }
+
+ /*
+		 * Assuming that only the no. of sets, ways and cache line
+		 * size will be different across the target and host cpu
+		 * caches, the CLIDRs should look the same. Also, this check
+		 * ensures that the target cpu is always run before the host,
+		 * else the cache geometry will have to be hardcoded.
+		 * Support for absence of cache levels and memory mapped
+		 * caches will be added later.
+ */
+ if (hcg_ptr->clidr != tcg_ptr->clidr) {
+ printf("%s: Host CLIDR=0x%x : Target CLIDR=0x%x \n",
+ __FUNCTION__, hcg_ptr->clidr, tcg_ptr->clidr);
+ rc = 1;
+ goto out;
+ }
+
+ find_cache_diff(hcg_ptr, tcg_ptr, cd_ptr);
+
+ /*
+ * Enable bit for trapping set/way operations &
+ * Cache identification regs
+ */
+ hcr = read_hcr();
+ hcr |= HCR_TSW | HCR_TID2;
+ write_hcr(hcr);
+ dsb();
+ isb();
+
+ } else {
+
+ /* Find the cache geometry on the target cpu */
+ find_cache_geometry(tcg_ptr);
+
+ /*
+ * Send an event to the host cpu indicating that we have
+ * discovered our cache topology
+ */
+ if (!switcher) {
+ set_event(CACHE_GEOM_DONE, sibling_cpuid);
+ }
+ }
out:
- return rc;
+ return rc;
}
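
The two HCR bits set above are the architecturally defined ARMv7
Virtualization Extensions traps (bit positions per the ARM ARM):

	#define HCR_VM   (1 << 0)    /* stage-2 translation enable (tested in mem_trap.c) */
	#define HCR_TSW  (1 << 22)   /* trap DC ISW/CSW/CISW set/way maintenance ops      */
	#define HCR_TID2 (1 << 17)   /* trap CTR, CCSIDR, CLIDR and CSSELR accesses       */

With TSW and TID2 set, every guest set/way cache op and cache identification
register access on the host cluster traps to HYP mode, where handle_cm_op()
and trap_cp15_mrc_mcr_handle() below reshape them for the host geometry.
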
/*
@@ -239,205 +233,204 @@ unsigned map_cache_geometries(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr,
* handle a cache maintenance operation by set/way
*/
void handle_cm_op(unsigned reg,
- void (*op_handler) (unsigned),
- cache_geometry *hcg_ptr,
- cache_geometry *tcg_ptr,
- cache_diff *cd_ptr)
+ void (*op_handler) (unsigned),
+ cache_geometry * hcg_ptr,
+ cache_geometry * tcg_ptr, cache_diff * cd_ptr)
{
- unsigned clvl = 0, cpu_id = read_cpuid();
- unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
- unsigned hc_assoc = 0, hc_numsets = 0, hc_linesz = 0;
- unsigned lineno = 0, setno = 0, wayno = 0, abs_lineno = 0;
-
- /*
-	 * If the target cache line size is greater than the host's, then
-	 * each maintenance op has to be performed on two or more lines on
-	 * the host. It does not matter if the line sizes are equal.
- */
- unsigned ctr = cd_ptr[clvl].tcline_factor;
-
- /*
-	 * Find out the cache level for which the set/way operation was invoked.
- * Use this to find the cache geometry in target cache to ascertain the
- * set & way number from the argument. Use this info to calculate the
- * target cache line number.
- */
- clvl = get_cache_level(reg);
- tc_linesz = get_cache_linesz(tcg_ptr, clvl);
- tc_assoc = get_cache_assoc(tcg_ptr, clvl);
- tc_numsets = get_cache_numsets(tcg_ptr, clvl);
-
- wayno = (reg >> __clz(tc_assoc)) & tc_assoc;
- setno = (reg >> (tc_linesz + 4)) & tc_numsets;
- lineno = (setno * (tc_assoc + 1)) + wayno;
-
- if(cmop_debug) {
- /*
- * tc_prev_line is initialised to -1 (unsigned). We can never have so many
-		 * cache lines. This helps determine when to record the start of a cm op.
- * If count != lineno then either we are not counting or have been counting
- * and now are out of sync. In either case, a new cm op is started
- */
- if (tc_prev_line[cpu_id][clvl] != lineno) {
- tc_prev_line[cpu_id][clvl] = lineno;
- /* All ops start out as partial ops */
- cm_op_stats[cpu_id][clvl].part_cmop_cnt++;
-
- /* Reset all our counters */
- cm_ignline_cnt[cpu_id][clvl] = 0;
- cm_extline_cnt[cpu_id][clvl] = 0;
- hc_line_cnt[cpu_id][clvl] = 0;
- cm_line_cnt[cpu_id][clvl] = 0;
- }
-
- tc_prev_line[cpu_id][clvl]--;
- cm_line_cnt[cpu_id][clvl]++;
- }
-
- /* Convert target cache line no. to absolute cache line no. */
- if (cd_ptr[clvl].tcline_factor)
- abs_lineno = lineno * cd_ptr[clvl].tcline_factor;
-
- /* Convert absolute cache line no. to host cache line no. */
- if (cd_ptr[clvl].hcline_factor)
- lineno = abs_lineno / cd_ptr[clvl].hcline_factor;
-
- /*
- * Find out the set & way no. on the host cache corresponding to the
- * cache line no. calculated on the target cache.
- */
- hc_linesz = get_cache_linesz(hcg_ptr, clvl);
- hc_assoc = get_cache_assoc(hcg_ptr, clvl);
- hc_numsets = get_cache_numsets(hcg_ptr, clvl);
-
- switch (cd_ptr[clvl].csize_diff) {
- case TCSZ_BIG:
- {
- if (abs_lineno <
- cd_ptr[clvl].hnumabs_clines) {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc,
- setno, hc_linesz,
-				    clvl);
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
- } else {
- /* Ignore */
- if(cmop_debug)
- cm_ignline_cnt[cpu_id][clvl]++;
-
- }
- }
- break;
- case TCSZ_EQUAL:
- {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc, setno,
-				   hc_linesz, clvl);
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
- }
- break;
-
- case TCSZ_SMALL:
- {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc, setno,
-				   hc_linesz, clvl);
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
-
- /*
- * If the target cache is smaller than the host cache then we
- * need to extend the maintenance operation to rest of the host
- * cache.
- */
- if ((abs_lineno +
- (1 * cd_ptr[clvl].tcline_factor)) ==
- cd_ptr[clvl].tnumabs_clines) {
-
- /*
- * TODO: Temp hack. Due to the cache line factor we end up incrementing
- * the lineno and miss one line.
- */
- lineno--;
- for (lineno++;
- lineno < (hc_numsets + 1) * (hc_assoc + 1);
- lineno++) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
-
- /* Create new register value for operation on host cache */
- reg =
- get_setway_reg(wayno, hc_assoc,
- setno, hc_linesz,
-						clvl);
- /* Perform the operation */
- op_handler(reg);
-
- if(cmop_debug)
- cm_extline_cnt[cpu_id][clvl]++;
-
- }
- } else {
- /* Ignore */
- }
- break;
- }
- }
-
-
- if(cmop_debug) {
- /*
- * If the op cnt has reached the maximum cache line number then
- * print the statistics collected so far
- *
-	 * NOTE: We don't reset the counter. It will be done at the start
- * of the next cm op automatically. Its value now is one more
- * than the maximum valid target cache line number.
- */
- if (cm_line_cnt[cpu_id][clvl] == (tc_assoc + 1) * (tc_numsets + 1)) {
-
- printf("%s", __FUNCTION__);
- printf(" : TC Lines=0x%x ", cm_line_cnt[cpu_id][clvl]);
- printf(" : HC Lines=0x%x ", hc_line_cnt[cpu_id][clvl]);
- printf(" : Ign Lines=0x%x ", cm_ignline_cnt[cpu_id][clvl]);
- printf(" : Extra Lines=0x%x ", cm_extline_cnt[cpu_id][clvl]);
- printf("\n");
-
- /* Register this as a complete set/way operation */
- cm_op_stats[cpu_id][clvl].part_cmop_cnt--;
- cm_op_stats[cpu_id][clvl].cmpl_cmop_cnt++;
- }
- }
-
- return;
+ unsigned clvl = 0, cpu_id = read_cpuid();
+ unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
+ unsigned hc_assoc = 0, hc_numsets = 0, hc_linesz = 0;
+ unsigned lineno = 0, setno = 0, wayno = 0, abs_lineno = 0;
+
+ /*
+	 * If the target cache line size is greater than the host's, then
+	 * each maintenance op has to be performed on two or more lines on
+	 * the host. It does not matter if the line sizes are equal.
+ */
+ unsigned ctr = cd_ptr[clvl].tcline_factor;
+
+ /*
+	 * Find out the cache level for which the set/way operation was invoked.
+ * Use this to find the cache geometry in target cache to ascertain the
+ * set & way number from the argument. Use this info to calculate the
+ * target cache line number.
+ */
+ clvl = get_cache_level(reg);
+ tc_linesz = get_cache_linesz(tcg_ptr, clvl);
+ tc_assoc = get_cache_assoc(tcg_ptr, clvl);
+ tc_numsets = get_cache_numsets(tcg_ptr, clvl);
+
+ wayno = (reg >> __clz(tc_assoc)) & tc_assoc;
+ setno = (reg >> (tc_linesz + 4)) & tc_numsets;
+ lineno = (setno * (tc_assoc + 1)) + wayno;
+
+ if (cmop_debug) {
+ /*
+ * tc_prev_line is initialised to -1 (unsigned). We can never have so many
+		 * cache lines. This helps determine when to record the start of a cm op.
+ * If count != lineno then either we are not counting or have been counting
+ * and now are out of sync. In either case, a new cm op is started
+ */
+ if (tc_prev_line[cpu_id][clvl] != lineno) {
+ tc_prev_line[cpu_id][clvl] = lineno;
+ /* All ops start out as partial ops */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt++;
+
+ /* Reset all our counters */
+ cm_ignline_cnt[cpu_id][clvl] = 0;
+ cm_extline_cnt[cpu_id][clvl] = 0;
+ hc_line_cnt[cpu_id][clvl] = 0;
+ cm_line_cnt[cpu_id][clvl] = 0;
+ }
+
+ tc_prev_line[cpu_id][clvl]--;
+ cm_line_cnt[cpu_id][clvl]++;
+ }
+
+ /* Convert target cache line no. to absolute cache line no. */
+ if (cd_ptr[clvl].tcline_factor)
+ abs_lineno = lineno * cd_ptr[clvl].tcline_factor;
+
+ /* Convert absolute cache line no. to host cache line no. */
+ if (cd_ptr[clvl].hcline_factor)
+ lineno = abs_lineno / cd_ptr[clvl].hcline_factor;
+
+ /*
+ * Find out the set & way no. on the host cache corresponding to the
+ * cache line no. calculated on the target cache.
+ */
+ hc_linesz = get_cache_linesz(hcg_ptr, clvl);
+ hc_assoc = get_cache_assoc(hcg_ptr, clvl);
+ hc_numsets = get_cache_numsets(hcg_ptr, clvl);
+
+ switch (cd_ptr[clvl].csize_diff) {
+ case TCSZ_BIG:
+ {
+ if (abs_lineno < cd_ptr[clvl].hnumabs_clines) {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc,
+ setno, hc_linesz,
+							    clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ if (cmop_debug)
+ cm_ignline_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+ case TCSZ_EQUAL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc, setno,
+					       hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+
+ case TCSZ_SMALL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg =
+ get_setway_reg(wayno, hc_assoc, setno,
+					       hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+
+ /*
+ * If the target cache is smaller than the host cache then we
+ * need to extend the maintenance operation to rest of the host
+ * cache.
+ */
+ if ((abs_lineno +
+ (1 * cd_ptr[clvl].tcline_factor)) ==
+ cd_ptr[clvl].tnumabs_clines) {
+
+ /*
+ * TODO: Temp hack. Due to the cache line factor we end up incrementing
+ * the lineno and miss one line.
+ */
+ lineno--;
+ for (lineno++;
+ lineno < (hc_numsets + 1) * (hc_assoc + 1);
+ lineno++) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+
+ /* Create new register value for operation on host cache */
+ reg =
+ get_setway_reg(wayno, hc_assoc,
+ setno, hc_linesz,
+							    clvl);
+ /* Perform the operation */
+ op_handler(reg);
+
+ if (cmop_debug)
+ cm_extline_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ }
+ break;
+ }
+ }
+
+ if (cmop_debug) {
+ /*
+ * If the op cnt has reached the maximum cache line number then
+ * print the statistics collected so far
+ *
+	 * NOTE: We don't reset the counter. It will be done at the start
+ * of the next cm op automatically. Its value now is one more
+ * than the maximum valid target cache line number.
+ */
+ if (cm_line_cnt[cpu_id][clvl] ==
+ (tc_assoc + 1) * (tc_numsets + 1)) {
+
+ printf("%s", __FUNCTION__);
+ printf(" : TC Lines=0x%x ", cm_line_cnt[cpu_id][clvl]);
+ printf(" : HC Lines=0x%x ", hc_line_cnt[cpu_id][clvl]);
+ printf(" : Ign Lines=0x%x ",
+ cm_ignline_cnt[cpu_id][clvl]);
+ printf(" : Extra Lines=0x%x ",
+ cm_extline_cnt[cpu_id][clvl]);
+ printf("\n");
+
+ /* Register this as a complete set/way operation */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt--;
+ cm_op_stats[cpu_id][clvl].cmpl_cmop_cnt++;
+ }
+ }
+
+ return;
}
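
The encode step mirrors the decode at the top of the function: the set/way
operand format keeps the way number in the top bits of the register, the set
number just above the line-offset bits, and the cache level in bits [3:1].
get_setway_reg() presumably boils down to something like this sketch
(assuming assoc != 0, as the __clz()-based decode above already does):

	static unsigned get_setway_reg(unsigned way, unsigned assoc,
	                               unsigned set, unsigned linesz,
	                               unsigned level)
	{
		/* linesz is the raw CCSIDR LineSize field, so the byte
		 * offset within a line occupies (linesz + 4) bits. */
		return (way << __clz(assoc)) | (set << (linesz + 4)) |
		       (level << 1);
	}
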
-
diff --git a/big-little/virtualisor/cpus/a15/a15.c b/big-little/virtualisor/cpus/a15/a15.c
index 942fd8f..17c91a4 100644
--- a/big-little/virtualisor/cpus/a15/a15.c
+++ b/big-little/virtualisor/cpus/a15/a15.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "bl.h"
#include "virtualisor.h"
@@ -33,41 +33,37 @@ static virt_descriptor a15_virt_desc;
*/
unsigned a15_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a15_trap_save(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a15_trap_restore(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a15_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
{
- if (switcher) {
-
- } else {
- /* Always on */
- }
-
- /*
- * Indicate that cpu specific virtualisor setup
- * has been done. Restore context instead on next
- * invocation
- */
- a15_virt_desc.init[read_cpuid()] = 1;
- return 0;
+ if (switcher) {
+
+ } else {
+ /* Always on */
+ }
+
+ /*
+ * Indicate that cpu specific virtualisor setup
+ * has been done. Restore context instead on next
+ * invocation
+ */
+ a15_virt_desc.init[read_cpuid()] = 1;
+ return 0;
}
-static virt_descriptor a15_virt_desc __attribute__ ((section("virt_desc_section"))) = {
- A15,
- {0},
- a15_trap_setup,
- a15_trap_handle,
- a15_trap_save,
- a15_trap_restore,
-};
+static virt_descriptor a15_virt_desc
+ __attribute__ ((section("virt_desc_section"))) = {
+ A15, {
+0}, a15_trap_setup, a15_trap_handle, a15_trap_save, a15_trap_restore,};
diff --git a/big-little/virtualisor/cpus/a15/include/a15.h b/big-little/virtualisor/cpus/a15/include/a15.h
index c05bd75..3a9515d 100644
--- a/big-little/virtualisor/cpus/a15/include/a15.h
+++ b/big-little/virtualisor/cpus/a15/include/a15.h
@@ -18,9 +18,9 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __A15_H__
#define __A15_H__
-#endif /* __A15_H__ */
+#endif /* __A15_H__ */
diff --git a/big-little/virtualisor/cpus/a7/a7.c b/big-little/virtualisor/cpus/a7/a7.c
index 6ff8f6c..e9e16af 100644
--- a/big-little/virtualisor/cpus/a7/a7.c
+++ b/big-little/virtualisor/cpus/a7/a7.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "bl.h"
#include "virtualisor.h"
@@ -33,41 +33,37 @@ static virt_descriptor a7_virt_desc;
*/
unsigned a7_trap_handle(gp_regs * regs, unsigned hsr, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a7_trap_save(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a7_trap_restore(unsigned first_cpu, unsigned sibling_cpu)
{
- return 0;
+ return 0;
}
unsigned a7_trap_setup(unsigned first_cpu, unsigned sibling_cpu)
{
- if (switcher) {
+ if (switcher) {
- } else {
- /* Always on */
- }
+ } else {
+ /* Always on */
+ }
- /*
- * Indicate that cpu specific virtualisor setup
- * has been done. Restore context instead on next
- * invocation
- */
- a7_virt_desc.init[read_cpuid()] = 1;
- return 0;
+ /*
+ * Indicate that cpu specific virtualisor setup
+ * has been done. Restore context instead on next
+ * invocation
+ */
+ a7_virt_desc.init[read_cpuid()] = 1;
+ return 0;
}
-static virt_descriptor a7_virt_desc __attribute__ ((section("virt_desc_section"))) = {
- A7,
- {0},
- a7_trap_setup,
- a7_trap_handle,
- a7_trap_save,
- a7_trap_restore,
-};
+static virt_descriptor a7_virt_desc
+ __attribute__ ((section("virt_desc_section"))) = {
+ A7, {
+0}, a7_trap_setup, a7_trap_handle, a7_trap_save, a7_trap_restore,};
diff --git a/big-little/virtualisor/cpus/a7/include/a7.h b/big-little/virtualisor/cpus/a7/include/a7.h
index da3db59..ff3000e 100644
--- a/big-little/virtualisor/cpus/a7/include/a7.h
+++ b/big-little/virtualisor/cpus/a7/include/a7.h
@@ -18,9 +18,9 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __A7_H__
#define __A7_H__
-#endif /* __A7_H__ */
+#endif /* __A7_H__ */
diff --git a/big-little/virtualisor/include/cache_geom.h b/big-little/virtualisor/include/cache_geom.h
index 359c480..654a0f0 100644
--- a/big-little/virtualisor/include/cache_geom.h
+++ b/big-little/virtualisor/include/cache_geom.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __CACHE_GEOM_H__
#define __CACHE_GEOM_H__
@@ -46,38 +46,38 @@
* maximum granularity.
*/
typedef struct cache_diff {
- /* Stores whether target cache is =,<,> host cache */
- unsigned csize_diff;
- /*
- * Stores factor by which target cache line
- * has to be multiplied to get absolute line
- * no.
- */
- unsigned tcline_factor;
- /*
- * Stores factor by which absolute cache line
- * no. has to be divided to get host cache line
- * no.
- */
- unsigned hcline_factor;
- /* Max absolute target cpu cache line number */
- unsigned tnumabs_clines;
- /* Max absolute host cpu cache line number */
- unsigned hnumabs_clines;
+ /* Stores whether target cache is =,<,> host cache */
+ unsigned csize_diff;
+ /*
+ * Stores factor by which target cache line
+ * has to be multiplied to get absolute line
+ * no.
+ */
+ unsigned tcline_factor;
+ /*
+ * Stores factor by which absolute cache line
+ * no. has to be divided to get host cache line
+ * no.
+ */
+ unsigned hcline_factor;
+ /* Max absolute target cpu cache line number */
+ unsigned tnumabs_clines;
+ /* Max absolute host cpu cache line number */
+ unsigned hnumabs_clines;
} cache_diff;
/*
* Data structure that defines the cache topology of a cpu
*/
typedef struct cache_geom {
- unsigned clidr;
- /*
- * One for each cpu to store the cache level
-	 * the OS thinks it's operating on.
- */
- unsigned csselr;
- /* One for each cache level */
- unsigned ccsidr[MAX_CACHE_LEVELS];
+ unsigned clidr;
+ /*
+ * One for each cpu to store the cache level
+	 * the OS thinks it's operating on.
+ */
+ unsigned csselr;
+ /* One for each cache level */
+ unsigned ccsidr[MAX_CACHE_LEVELS];
} cache_geometry;
/*
@@ -85,23 +85,18 @@ typedef struct cache_geom {
* Reset for each switchover.
*/
typedef struct cache_stats {
- /* Number of cm ops which did not cover the whole cache */
- unsigned part_cmop_cnt;
- /* Number of cm ops which spanned the entire cache */
- unsigned cmpl_cmop_cnt;
+ /* Number of cm ops which did not cover the whole cache */
+ unsigned part_cmop_cnt;
+ /* Number of cm ops which spanned the entire cache */
+ unsigned cmpl_cmop_cnt;
} cache_stats;
-extern unsigned map_cache_geometries(cache_geometry *,
- cache_geometry *,
- cache_diff *);
+extern unsigned map_cache_geometries(cache_geometry *,
+ cache_geometry *, cache_diff *);
extern void find_cache_geometry(cache_geometry *);
-extern void find_cache_diff(cache_geometry *,
- cache_geometry *,
- cache_diff *);
+extern void find_cache_diff(cache_geometry *, cache_geometry *, cache_diff *);
extern void handle_cm_op(unsigned,
- void (*) (unsigned),
- cache_geometry *,
- cache_geometry *,
- cache_diff *);
-
-#endif /* __CACHE_GEOM_H__ */
+ void (*)(unsigned),
+ cache_geometry *, cache_geometry *, cache_diff *);
+
+#endif /* __CACHE_GEOM_H__ */
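
Taken together, a sketch of the intended calling order for this API (the
real call sites presumably live in virt_setup.c and virt_handle.c):

	static cache_geometry hcg, tcg;
	static cache_diff cd[MAX_CACHE_LEVELS];

	/* at setup time, on both clusters: */
	if (map_cache_geometries(&hcg, &tcg, cd))
		panic();

	/* later, from the HYP set/way trap handler on the host cluster: */
	handle_cm_op(reg, dcisw, &hcg, &tcg, cd);
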
diff --git a/big-little/virtualisor/include/mem_trap.h b/big-little/virtualisor/include/mem_trap.h
index ac23844..ab68259 100644
--- a/big-little/virtualisor/include/mem_trap.h
+++ b/big-little/virtualisor/include/mem_trap.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __MEM_TRAP_H__
#define __MEM_TRAP_H__
@@ -31,23 +31,23 @@
* routines.
*/
typedef struct trap_data {
- /* Does this structure contain valid data */
- unsigned valid;
- /* Which cluster to save/restore this trap on */
- unsigned cluster_id;
- /* Translation table address */
- unsigned long long table;
- /* Index corresponding to mapping */
- unsigned index;
- /* TODO: Revisit why we need two variables here */
- /* Original Descriptor */
- unsigned long long prev_desc;
- /* Current Descriptor */
- unsigned long long cur_desc;
+ /* Does this structure contain valid data */
+ unsigned valid;
+ /* Which cluster to save/restore this trap on */
+ unsigned cluster_id;
+ /* Translation table address */
+ unsigned long long table;
+ /* Index corresponding to mapping */
+ unsigned index;
+ /* TODO: Revisit why we need two variables here */
+ /* Original Descriptor */
+ unsigned long long prev_desc;
+ /* Current Descriptor */
+ unsigned long long cur_desc;
} mem_trap_data;
extern unsigned mem_trap_setup(unsigned, mem_trap_data *);
extern mem_trap_data s2_trap_section$$Base;
extern unsigned s2_trap_section$$Length;
-#endif /* __MEM_TRAP_H__ */
+#endif /* __MEM_TRAP_H__ */
diff --git a/big-little/virtualisor/include/virtualisor.h b/big-little/virtualisor/include/virtualisor.h
index abf74bb..f097d39 100644
--- a/big-little/virtualisor/include/virtualisor.h
+++ b/big-little/virtualisor/include/virtualisor.h
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#ifndef __VIRTUALISOR_H__
#define __VIRTUALISOR_H__
@@ -30,9 +30,9 @@
* Data structure that holds a copy of the virtualized regs
*/
typedef struct virt_regs {
- unsigned cluster_id;
- unsigned mpidr;
- unsigned midr;
+ unsigned cluster_id;
+ unsigned mpidr;
+ unsigned midr;
} virt_reg_data;
/*
@@ -40,14 +40,14 @@ typedef struct virt_regs {
* by the Virtualisation Extensions.
*/
typedef struct trap_regs {
- unsigned hcr;
- unsigned hdcr;
- unsigned hcptr;
- unsigned hstr;
+ unsigned hcr;
+ unsigned hdcr;
+ unsigned hcptr;
+ unsigned hstr;
} reg_trap_data;
typedef struct gp_regs {
- unsigned r[15];
+ unsigned r[15];
} gp_regs;
/*
@@ -57,17 +57,17 @@ typedef struct gp_regs {
* -igured trap.
*/
typedef struct virt_desc {
- /* cpu midr contents */
- unsigned cpu_no;
- /*
-	 * Bitmask to indicate that Virtualisor setup has been
- * done on both host & target cpus.
- */
- unsigned char init[NUM_CPUS];
- unsigned (*trap_setup) (unsigned, unsigned);
- unsigned (*trap_handle) (gp_regs * regs, unsigned, unsigned);
- unsigned (*trap_save) (unsigned, unsigned);
- unsigned (*trap_restore) (unsigned, unsigned);
+ /* cpu midr contents */
+ unsigned cpu_no;
+ /*
+	 * Bitmask to indicate that Virtualisor setup has been
+ * done on both host & target cpus.
+ */
+ unsigned char init[NUM_CPUS];
+ unsigned (*trap_setup) (unsigned, unsigned);
+ unsigned (*trap_handle) (gp_regs * regs, unsigned, unsigned);
+ unsigned (*trap_save) (unsigned, unsigned);
+ unsigned (*trap_restore) (unsigned, unsigned);
} virt_descriptor;
extern void SetupVirtualisor(unsigned);
@@ -81,4 +81,4 @@ extern unsigned virt_desc_section$$Length;
extern unsigned host_cluster;
extern unsigned switcher;
-#endif /* __VIRTUALISOR_H__ */
+#endif /* __VIRTUALISOR_H__ */
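
The virt_desc_section$$Base / $$Length pairs above are armlink-generated
section symbols: each cpu-specific file places a virt_descriptor in
virt_desc_section and the generic code walks that section as an array. A
sketch of the lookup loop (virt_context.c below does exactly this for the
save and restore hooks):

	virt_descriptor *vd_array = &virt_desc_section$$Base;
	unsigned vd_len = (unsigned)&virt_desc_section$$Length;
	unsigned i;

	for (i = 0; i < vd_len / sizeof(virt_descriptor); i++)
		if (cpu_no == vd_array[i].cpu_no)
			vd_array[i].trap_setup(first_cpu, sibling);
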
diff --git a/big-little/virtualisor/mem_trap.c b/big-little/virtualisor/mem_trap.c
index 04c0bb8..c40433a 100644
--- a/big-little/virtualisor/mem_trap.c
+++ b/big-little/virtualisor/mem_trap.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "misc.h"
@@ -30,103 +30,112 @@
* HYP mode by invalidating its descriptor in the 2nd stage
* translation tables
*/
-unsigned mem_trap_setup(unsigned periph_addr, mem_trap_data *periph_trap_data)
+unsigned mem_trap_setup(unsigned periph_addr, mem_trap_data * periph_trap_data)
{
- unsigned rc = 0x0, four_kb_index = 0;
- unsigned one_gb_index = 0, two_mb_index = 0;
- unsigned long long vtcr = 0x0, hcr = 0x0, level = 0;
- unsigned long long pagetable_base = 0x0, l2_desc = 0;
- unsigned long long l3_desc = 0, l3_table = 0;
-
- /* Check if 2nd stage translations are enabled */
- hcr = read_hcr();
- if (!(hcr & HCR_VM)) {
- printf("%s: 2nd Stage translations not enabled \n", __FUNCTION__);
- rc = 0x1;
- goto out;
- }
-
- /* Check what level of tables we need to start at */
- vtcr = read_vtcr();
- level = (vtcr >> 6) & 0x3;
-
- /* Read the page table base address. */
- pagetable_base = read_vttbr();
-
- /* Calculate the table indices */
- one_gb_index = periph_addr >> 30;
-
- /* Each GB contains (1 << 9) or 512 2MBs */
- two_mb_index = (periph_addr >> 21) - ((1 << 9) * one_gb_index);
-
-	/* Each 2MB contains (1 << 9) or 512 4KBs */
- four_kb_index = (periph_addr >> 12) - ((1 << 9) * (periph_addr >> 21));
-
- /* For either starting level find out the level 2 desc */
- switch (level) {
-
- case 0x1:
- {
- /* Start from first level */
- unsigned long long l1_desc = 0;
- unsigned long long l2_table = 0;
-
- l1_desc = ((unsigned long long *)((unsigned)(&pagetable_base)[0]))[one_gb_index];
- if ((l1_desc & 0x3) != TABLE_MAPPING) {
-			printf("%s: Invalid 1st level desc : 0x%llx\n", __FUNCTION__, l1_desc);
- rc = 0x1;
- goto out;
- }
-
- l2_table = l1_desc & 0xfffffff000UL;
- l2_desc = ((unsigned long long *)((unsigned)(&l2_table)[0]))[two_mb_index];
- break;
- }
-
- case 0x0:
- {
- /* Start from second level */
- l2_desc = ((unsigned long long *)((unsigned)(&pagetable_base)[0]))[two_mb_index];
- break;
- }
-
- default:
- printf("%s: Invalid Pagetable level \n", __FUNCTION__);
- rc = 0x1;
- }
-
- /* Validate the 2nd level descriptor */
- if ((l2_desc & 0x3) != TABLE_MAPPING) {
-		printf("%s: Invalid 2nd level desc : 0x%llx\n",
- __FUNCTION__, l2_desc);
- rc = 0x1;
- goto out;
- }
-
- l3_table = l2_desc & 0xfffffff000UL;
- l3_desc = ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index];
-
- /*
-	 * Validate the 3rd level descriptor. If it is not a valid mapping
-	 * then it is already invalid and we have not touched it.
- */
- if ((l3_desc & 0x3) != VALID_MAPPING) {
-		printf("%s: Invalid 3rd level desc : 0x%llx\n",
- __FUNCTION__, l3_desc);
- rc = 0x1;
- goto out;
- }
-
- /* Save the info gathered so far */
- periph_trap_data->table = l3_table;
- periph_trap_data->index = four_kb_index;
- periph_trap_data->prev_desc = l3_desc;
- periph_trap_data->cluster_id = read_clusterid();
- periph_trap_data->valid = 1;
-
- /* Invalidate the peripheral page table entry */
- ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index] = 0x0;
+ unsigned rc = 0x0, four_kb_index = 0;
+ unsigned one_gb_index = 0, two_mb_index = 0;
+ unsigned long long vtcr = 0x0, hcr = 0x0, level = 0;
+ unsigned long long pagetable_base = 0x0, l2_desc = 0;
+ unsigned long long l3_desc = 0, l3_table = 0;
+
+ /* Check if 2nd stage translations are enabled */
+ hcr = read_hcr();
+ if (!(hcr & HCR_VM)) {
+ printf("%s: 2nd Stage translations not enabled \n",
+ __FUNCTION__);
+ rc = 0x1;
+ goto out;
+ }
+
+ /* Check what level of tables we need to start at */
+ vtcr = read_vtcr();
+ level = (vtcr >> 6) & 0x3;
+
+ /* Read the page table base address. */
+ pagetable_base = read_vttbr();
+
+ /* Calculate the table indices */
+ one_gb_index = periph_addr >> 30;
+
+ /* Each GB contains (1 << 9) or 512 2MBs */
+ two_mb_index = (periph_addr >> 21) - ((1 << 9) * one_gb_index);
+
+	/* Each 2MB contains (1 << 9) or 512 4KBs */
+ four_kb_index = (periph_addr >> 12) - ((1 << 9) * (periph_addr >> 21));
+
+ /* For either starting level find out the level 2 desc */
+ switch (level) {
+
+ case 0x1:
+ {
+ /* Start from first level */
+ unsigned long long l1_desc = 0;
+ unsigned long long l2_table = 0;
+
+ l1_desc =
+ ((unsigned long long
+ *)((unsigned)(&pagetable_base)[0]))[one_gb_index];
+ if ((l1_desc & 0x3) != TABLE_MAPPING) {
+				printf("%s: Invalid 1st level desc : 0x%llx\n",
+ __FUNCTION__, l1_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ l2_table = l1_desc & 0xfffffff000UL;
+ l2_desc =
+ ((unsigned long long
+ *)((unsigned)(&l2_table)[0]))[two_mb_index];
+ break;
+ }
+
+ case 0x0:
+ {
+ /* Start from second level */
+ l2_desc =
+ ((unsigned long long
+ *)((unsigned)(&pagetable_base)[0]))[two_mb_index];
+ break;
+ }
+
+ default:
+ printf("%s: Invalid Pagetable level \n", __FUNCTION__);
+ rc = 0x1;
+ }
+
+ /* Validate the 2nd level descriptor */
+ if ((l2_desc & 0x3) != TABLE_MAPPING) {
+		printf("%s: Invalid 2nd level desc : 0x%llx\n",
+ __FUNCTION__, l2_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ l3_table = l2_desc & 0xfffffff000UL;
+ l3_desc =
+ ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index];
+
+ /*
+	 * Validate the 3rd level descriptor. If it is not a valid mapping
+	 * then it is already invalid and we have not touched it.
+ */
+ if ((l3_desc & 0x3) != VALID_MAPPING) {
+		printf("%s: Invalid 3rd level desc : 0x%llx\n",
+ __FUNCTION__, l3_desc);
+ rc = 0x1;
+ goto out;
+ }
+
+ /* Save the info gathered so far */
+ periph_trap_data->table = l3_table;
+ periph_trap_data->index = four_kb_index;
+ periph_trap_data->prev_desc = l3_desc;
+ periph_trap_data->cluster_id = read_clusterid();
+ periph_trap_data->valid = 1;
+
+ /* Invalidate the peripheral page table entry */
+ ((unsigned long long *)((unsigned)(&l3_table)[0]))[four_kb_index] = 0x0;
out:
- return rc;
+ return rc;
}
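
To make the index arithmetic concrete, a hypothetical walk-through for
periph_addr = 0x2c002000 (an invented address, not one of the platform's
real peripherals):

	one_gb_index  = 0x2c002000 >> 30;                    /* = 0           */
	two_mb_index  = (0x2c002000 >> 21) - (512 * 0);      /* = 0x160 = 352 */
	four_kb_index = (0x2c002000 >> 12) - (512 * 0x160);  /* = 2           */

So the peripheral page is entry 2 of the level 3 table reached through
entry 352 of the level 2 table covering the first GB.
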
diff --git a/big-little/virtualisor/vgic_trap_handler.c b/big-little/virtualisor/vgic_trap_handler.c
index 44469fb..4e626d0 100644
--- a/big-little/virtualisor/vgic_trap_handler.c
+++ b/big-little/virtualisor/vgic_trap_handler.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "gic_registers.h"
@@ -31,52 +31,51 @@
*/
void handle_vgic_distif_abort(unsigned pa, unsigned *data, unsigned write)
{
- unsigned value = 0, reg_offset = pa & 0xfff;
+ unsigned value = 0, reg_offset = pa & 0xfff;
- switch (reg_offset >> 7) {
-
- /* Access to Processor Target registers */
- case (GICD_CPUS >> 7):
- if (write) {
- /*
- * OS is trying to reprogram the processor targets register.
- * Find out the cpu interface mask for this cluster and use
- * that instead to program the register.
- */
- value = get_cpuif_mask(*data);
- write32(pa, value);
- } else {
- value = read32(pa);
- *data = get_cpu_mask(value);
- }
+ switch (reg_offset >> 7) {
- break;
-
- /* Access to Software generated interrupt register */
- case (GICD_SW >> 7):
- if (write) {
- /* Get the updated cpu interface mask */
- value = get_cpuif_mask((*data >> 16) & 0xff) << 16;
- value |= *data & ~(0xff << 16);
- /*
- * Clear the old cpu interface mask & update
- * value with new cpu interface mask
- */
- write32(pa, value);
- } else {
- /* Cannot possibly have a read from SGI generation register */
- }
-
- break;
-
- default:
- if (write) {
- write32(pa, *data);
- } else {
- *data = read32(pa);
- }
- }
+ /* Access to Processor Target registers */
+ case (GICD_CPUS >> 7):
+ if (write) {
+ /*
+ * OS is trying to reprogram the processor targets register.
+ * Find out the cpu interface mask for this cluster and use
+ * that instead to program the register.
+ */
+ value = get_cpuif_mask(*data);
+ write32(pa, value);
+ } else {
+ value = read32(pa);
+ *data = get_cpu_mask(value);
+ }
- return;
-}
+ break;
+
+ /* Access to Software generated interrupt register */
+ case (GICD_SW >> 7):
+ if (write) {
+ /* Get the updated cpu interface mask */
+ value = get_cpuif_mask((*data >> 16) & 0xff) << 16;
+ value |= *data & ~(0xff << 16);
+ /*
+ * Clear the old cpu interface mask & update
+ * value with new cpu interface mask
+ */
+ write32(pa, value);
+ } else {
+ /* Cannot possibly have a read from SGI generation register */
+ }
+ break;
+
+ default:
+ if (write) {
+ write32(pa, *data);
+ } else {
+ *data = read32(pa);
+ }
+ }
+
+ return;
+}
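
As a hypothetical illustration of the SGI path above: if the OS writes the
software generated interrupt register with a target list of 0x03 (cpu
interfaces 0 and 1) and get_cpuif_mask() maps those to interfaces 2 and 3 on
the other cluster, the handler rewrites only the target-list byte:

	unsigned data  = (0x03 << 16) | 0x1;           /* value the OS wrote       */
	unsigned value = (get_cpuif_mask(0x03) << 16)  /* 0x0c << 16 (assumed map) */
	               | (data & ~(0xff << 16));       /* SGI number preserved     */
	write32(pa, value);                            /* writes 0x000c0001        */
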
diff --git a/big-little/virtualisor/virt_context.c b/big-little/virtualisor/virt_context.c
index 5472587..def3551 100644
--- a/big-little/virtualisor/virt_context.c
+++ b/big-little/virtualisor/virt_context.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "misc.h"
@@ -38,100 +38,100 @@ extern cache_stats cm_op_stats[NUM_CPUS][MAX_CACHE_LEVELS];
*/
void SaveVirtualisor(unsigned first_cpu)
{
- unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
- unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
- mem_trap_data *s2_td = &s2_trap_section$$Base;
- unsigned long long *cd_ptr = 0x0;
- unsigned *periph_addr = 0x0;
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- if (cluster_id == host_cluster) {
- /*
-		 * Since there is only one second stage translation table, it's
- * safe to assume that only one cpu (first_cpu) should save &
- * restore the context.
- */
- len = (unsigned)&s2_trap_section$$Length;
- if (cpu_id == first_cpu) {
- /* Iterate through the array of 2nd stage translation traps */
- for (ctr = 0; ctr < (len / sizeof(mem_trap_data)); ctr++) {
- if (s2_td[ctr].valid
- && s2_td[ctr].cluster_id == cluster_id) {
-
- /*
- * Save the current descriptor and restore the
- * previous. Need not worry about synchronisation
- * issues, as the existing entry was causing
- * translation faults. The TLB never caches fault
- * generating translations.
- */
- cd_ptr =
- &((unsigned long long
- *)((unsigned)(&s2_td[ctr].
- table)[0]))[s2_td[ctr].
- index];
- s2_td[ctr].cur_desc = *cd_ptr;
- *cd_ptr = s2_td[ctr].prev_desc;
- periph_addr = (unsigned *) cd_ptr;
- dsb();
- inv_tlb_mva((unsigned *) periph_addr[0]);
- inv_bpred_all();
- }
- }
- }
-
- /* Save the HYP trap registers for this cpu */
- host_trap_regs[cpu_id].hcr = read_hcr();
- host_trap_regs[cpu_id].hdcr = read_hdcr();
- host_trap_regs[cpu_id].hcptr = read_hcptr();
- host_trap_regs[cpu_id].hstr = read_hstr();
-
- if(cmop_debug) {
- /* Print Cache maintenance statistics */
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
- printf("Cache Level %d", ctr);
- printf(" : Partial ops=0x%x",
- cm_op_stats[cpu_id][ctr].part_cmop_cnt);
- printf(" : Complete ops=0x%x",
- cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt);
- printf("\n");
- }
- }
-
- }
-
- /*
- * Call any cpu specific save routines (if any)
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- handler = vd_array[index].trap_save;
- if(handler) {
- rc = handler(first_cpu, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no =
+ PART_NO(read_midr());
+ unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
+ mem_trap_data *s2_td = &s2_trap_section$$Base;
+ unsigned long long *cd_ptr = 0x0;
+ unsigned *periph_addr = 0x0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ if (cluster_id == host_cluster) {
+ /*
+		 * Since there is only one second stage translation table, it's
+ * safe to assume that only one cpu (first_cpu) should save &
+ * restore the context.
+ */
+ len = (unsigned)&s2_trap_section$$Length;
+ if (cpu_id == first_cpu) {
+ /* Iterate through the array of 2nd stage translation traps */
+ for (ctr = 0; ctr < (len / sizeof(mem_trap_data));
+ ctr++) {
+ if (s2_td[ctr].valid
+ && s2_td[ctr].cluster_id == cluster_id) {
+
+ /*
+ * Save the current descriptor and restore the
+ * previous. Need not worry about synchronisation
+ * issues, as the existing entry was causing
+ * translation faults. The TLB never caches fault
+ * generating translations.
+ */
+ cd_ptr =
+ &((unsigned long long
+ *)((unsigned)(&s2_td[ctr].table)
+ [0]))[s2_td[ctr].index];
+ s2_td[ctr].cur_desc = *cd_ptr;
+ *cd_ptr = s2_td[ctr].prev_desc;
+ periph_addr = (unsigned *)cd_ptr;
+ dsb();
+ inv_tlb_mva((unsigned *)periph_addr[0]);
+ inv_bpred_all();
+ }
+ }
+ }
+
+ /* Save the HYP trap registers for this cpu */
+ host_trap_regs[cpu_id].hcr = read_hcr();
+ host_trap_regs[cpu_id].hdcr = read_hdcr();
+ host_trap_regs[cpu_id].hcptr = read_hcptr();
+ host_trap_regs[cpu_id].hstr = read_hstr();
+
+ if (cmop_debug) {
+ /* Print Cache maintenance statistics */
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ printf("Cache Level %d", ctr);
+ printf(" : Partial ops=0x%x",
+ cm_op_stats[cpu_id][ctr].part_cmop_cnt);
+ printf(" : Complete ops=0x%x",
+ cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt);
+ printf("\n");
+ }
+ }
+
+ }
+
+ /*
+ * Call any cpu specific save routines (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_save;
+ if (handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
/*
@@ -141,92 +141,93 @@ void SaveVirtualisor(unsigned first_cpu)
*/
void RestoreVirtualisor(unsigned first_cpu)
{
- unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
- unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
- mem_trap_data *s2_td = &s2_trap_section$$Base;
- unsigned long long *cd_ptr = 0x0;
- unsigned *periph_addr = 0x0;
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- if (cluster_id == host_cluster) {
- /*
-		 * Since there is only one second stage translation table, it's
- * safe to assume that only one cpu (first_cpu) should save &
- * restore the context.
- */
- len = (unsigned)&s2_trap_section$$Length;
- if (cpu_id == first_cpu) {
- /* Iterate through the array of 2nd stage translation traps */
- for (ctr = 0; ctr < (len / sizeof(mem_trap_data)); ctr++) {
- if (s2_td[ctr].valid
- && s2_td[ctr].cluster_id == cluster_id) {
- /*
- * Restore the current descriptor and save the previous
- */
- cd_ptr =
- &((unsigned long long
- *)((unsigned)((&s2_td[ctr].
- table)[0])))[s2_td[ctr].
- index];
- s2_td[ctr].prev_desc = *cd_ptr;
- *cd_ptr = s2_td[ctr].cur_desc;
- periph_addr = (unsigned *) cd_ptr;
- dsb();
- inv_tlb_mva((unsigned *) periph_addr[0]);
- inv_bpred_all();
- }
- }
- }
-
- /* Now restore the virtualised ID registers for this cpu */
- write_vmidr(host_virt_regs[cpu_id].midr);
- write_vmpidr(host_virt_regs[cpu_id].mpidr);
-
- /* Restore the HYP trap registers for this cpu */
- write_hcr(host_trap_regs[cpu_id].hcr);
- write_hdcr(host_trap_regs[cpu_id].hdcr);
- write_hcptr(host_trap_regs[cpu_id].hcptr);
- write_hstr(host_trap_regs[cpu_id].hstr);
-
- if(cmop_debug) {
- /* Resetting Cache maintenance statistics */
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
- cm_op_stats[cpu_id][ctr].part_cmop_cnt = 0;
- cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt = 0;
- }
- }
- }
-
- /*
- * Call any cpu specific restore routines (if any)
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- handler = vd_array[index].trap_restore;
- if(handler) {
- rc = handler(first_cpu, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ unsigned len = 0, ctr = 0, cpu_id = read_cpuid(), cpu_no =
+ PART_NO(read_midr());
+ unsigned cluster_id = read_clusterid(), index = 0, vd_len = 0, rc = 0;
+ mem_trap_data *s2_td = &s2_trap_section$$Base;
+ unsigned long long *cd_ptr = 0x0;
+ unsigned *periph_addr = 0x0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ if (cluster_id == host_cluster) {
+ /*
+		 * Since there is only one second stage translation table, it's
+ * safe to assume that only one cpu (first_cpu) should save &
+ * restore the context.
+ */
+ len = (unsigned)&s2_trap_section$$Length;
+ if (cpu_id == first_cpu) {
+ /* Iterate through the array of 2nd stage translation traps */
+ for (ctr = 0; ctr < (len / sizeof(mem_trap_data));
+ ctr++) {
+ if (s2_td[ctr].valid
+ && s2_td[ctr].cluster_id == cluster_id) {
+ /*
+ * Restore the current descriptor and save the previous
+ */
+ cd_ptr =
+ &((unsigned long long
+ *)((unsigned)((&s2_td[ctr].table)
+ [0])))[s2_td
+ [ctr].index];
+ s2_td[ctr].prev_desc = *cd_ptr;
+ *cd_ptr = s2_td[ctr].cur_desc;
+ periph_addr = (unsigned *)cd_ptr;
+ dsb();
+ inv_tlb_mva((unsigned *)periph_addr[0]);
+ inv_bpred_all();
+ }
+ }
+ }
+
+ /* Now restore the virtualised ID registers for this cpu */
+ write_vmidr(host_virt_regs[cpu_id].midr);
+ write_vmpidr(host_virt_regs[cpu_id].mpidr);
+
+ /* Restore the HYP trap registers for this cpu */
+ write_hcr(host_trap_regs[cpu_id].hcr);
+ write_hdcr(host_trap_regs[cpu_id].hdcr);
+ write_hcptr(host_trap_regs[cpu_id].hcptr);
+ write_hstr(host_trap_regs[cpu_id].hstr);
+
+ if (cmop_debug) {
+ /* Resetting Cache maintenance statistics */
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ cm_op_stats[cpu_id][ctr].part_cmop_cnt = 0;
+ cm_op_stats[cpu_id][ctr].cmpl_cmop_cnt = 0;
+ }
+ }
+ }
+
+ /*
+ * Call any cpu specific restore routines (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_restore;
+ if (handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
diff --git a/big-little/virtualisor/virt_handle.c b/big-little/virtualisor/virt_handle.c
index 3e3f3d7..a247534 100644
--- a/big-little/virtualisor/virt_handle.c
+++ b/big-little/virtualisor/virt_handle.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "virt_helpers.h"
@@ -34,556 +34,573 @@ extern cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS];
void trap_cp15_mrc_mcr_handle(unsigned hsr, gp_regs * regs)
{
- unsigned Op1, Op2, CRn, CRm, Rt, write, cpu_id = read_cpuid();
+ unsigned Op1, Op2, CRn, CRm, Rt, write, cpu_id = read_cpuid();
- Op2 = (hsr >> 17) & 0x7;
- Op1 = (hsr >> 14) & 0x7;
- CRn = (hsr >> 10) & 0xf;
- Rt = (hsr >> 5) & 0xf;
- CRm = (hsr >> 1) & 0xf;
- write = !(hsr & 0x1);
+ Op2 = (hsr >> 17) & 0x7;
+ Op1 = (hsr >> 14) & 0x7;
+ CRn = (hsr >> 10) & 0xf;
+ Rt = (hsr >> 5) & 0xf;
+ CRm = (hsr >> 1) & 0xf;
+ write = !(hsr & 0x1);
- switch (CRn) {
- case CRN_C0:
- switch (Op1) {
- case 0:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case MIDR:
- if (write)
- goto error;
- regs->r[Rt] = read_vmidr();
- break;
- case CTR:
- if (write)
- goto error;
- regs->r[Rt] = read_ctr();
- break;
- case TCMTR:
- if (write)
- goto error;
- regs->r[Rt] = read_tcmtr();
- break;
- case TLBTR:
- if (write)
- goto error;
- regs->r[Rt] = read_tlbtr();
- break;
- case MPIDR:
- if (write)
- goto error;
- regs->r[Rt] = read_vmpidr();
- break;
- default:
- goto error;
- }
- break;
- case 1:
- switch (Op2) {
- case ID_PFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_pfr0();
- break;
- case ID_PFR1:
- if (write)
- goto error;
- regs->r[Rt] = read_id_pfr1();
- break;
- case ID_DFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_dfr0();
- break;
- case ID_AFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_afr0();
- break;
- case ID_MMFR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr0();
- break;
- case ID_MMFR1:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr1();
- break;
- case ID_MMFR2:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr2();
- break;
- case ID_MMFR3:
- if (write)
- goto error;
- regs->r[Rt] = read_id_mmfr3();
- break;
- default:
- goto error;
- }
- break;
- case 2:
- switch (Op2) {
- case ID_ISAR0:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar0();
- break;
- case ID_ISAR1:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar1();
- break;
- case ID_ISAR2:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar2();
- break;
- case ID_ISAR3:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar3();
- break;
- case ID_ISAR4:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar4();
- break;
- case ID_ISAR5:
- if (write)
- goto error;
- regs->r[Rt] = read_id_isar5();
- break;
- default:
- /* RAZ */
- regs->r[Rt] = 0x0;
- }
- break;
- case 3:
- case 4:
- case 5:
- case 6:
- case 7:
- if (write)
- goto error;
- /* RAZ */
- regs->r[Rt] = 0x0;
- break;
- default:
- goto error;
- }
- break;
- case 1:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case CCSIDR:
- if (write)
- goto error;
- regs->r[Rt] =
- target_cache_geometry[cpu_id].
- ccsidr[get_cache_level
- (target_cache_geometry[cpu_id].
- csselr)];
- break;
- case CLIDR:
- if (write)
- goto error;
- regs->r[Rt] =
- target_cache_geometry[cpu_id].clidr;
- break;
- case AIDR:
- if (write)
- goto error;
- regs->r[Rt] = read_aidr();
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- case 2:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case CSSELR:
- if (write)
- target_cache_geometry[cpu_id].
- csselr = regs->r[Rt];
- else
- regs->r[Rt] =
- target_cache_geometry[cpu_id].
- csselr;
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- case CRN_C7:
- switch (Op1) {
- case 0:
- switch (CRm) {
- case 6:
- switch (Op2) {
- case DCISW:
- {
- if (!write)
- goto error;
- handle_cm_op(regs->r[Rt],
- dcisw,
- &host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- break;
- }
- default:
- goto error;
- }
- break;
- case 10:
- switch (Op2) {
- case DCCSW:
- {
- if (!write)
- goto error;
- handle_cm_op(regs->r[Rt],
- dccsw,
- &host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- break;
- }
- default:
- goto error;
- }
- break;
- case 14:
- switch (Op2) {
- case DCCISW:
- {
- if (!write)
- goto error;
- handle_cm_op(regs->r[Rt],
- dccisw,
- &host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- break;
- }
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
- case CRN_C9:
- switch (Op1) {
- case 1:
- switch (CRm) {
- case 0:
- switch (Op2) {
- case 2:
- /*
- * A write to the L2CTLR register means trouble
- * as the A7 version does not have all the fields
- * that the A15 has. Handling needs more thought
- */
- if (write) {
- printf("%s: Unexpected L2CTLR write \n",
- __FUNCTION__);
- goto error;
- }
-
- /*
- * A read of the L2CTLR should return the total number
- * of cpus across both the clusters in the "always on"
- * configuration. Since there are only 2 bits for the
- * number of cpus in the L2CTLR we need to flag any
- * system with > 4 cpus.
- */
- if (!switcher) {
- unsigned num_cpus = CLUSTER_CPU_COUNT(host_cluster)
- + CLUSTER_CPU_COUNT(!host_cluster);
-
- if (num_cpus > 4) {
- printf("%s: Unexpected L2CTLR read \n",
- __FUNCTION__);
- goto error;
- }
+ switch (CRn) {
+ case CRN_C0:
+ switch (Op1) {
+ case 0:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case MIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_vmidr();
+ break;
+ case CTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_ctr();
+ break;
+ case TCMTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_tcmtr();
+ break;
+ case TLBTR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_tlbtr();
+ break;
+ case MPIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_vmpidr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 1:
+ switch (Op2) {
+ case ID_PFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_pfr0();
+ break;
+ case ID_PFR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_pfr1();
+ break;
+ case ID_DFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_dfr0();
+ break;
+ case ID_AFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_afr0();
+ break;
+ case ID_MMFR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr0();
+ break;
+ case ID_MMFR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr1();
+ break;
+ case ID_MMFR2:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr2();
+ break;
+ case ID_MMFR3:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_mmfr3();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 2:
+ switch (Op2) {
+ case ID_ISAR0:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar0();
+ break;
+ case ID_ISAR1:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar1();
+ break;
+ case ID_ISAR2:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar2();
+ break;
+ case ID_ISAR3:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar3();
+ break;
+ case ID_ISAR4:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar4();
+ break;
+ case ID_ISAR5:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_id_isar5();
+ break;
+ default:
+ /* RAZ */
+ regs->r[Rt] = 0x0;
+ }
+ break;
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ if (write)
+ goto error;
+ /* RAZ */
+ regs->r[Rt] = 0x0;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 1:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case CCSIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] =
+ target_cache_geometry[cpu_id].ccsidr
+ [get_cache_level
+ (target_cache_geometry
+ [cpu_id].csselr)];
+ break;
+ case CLIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] =
+ target_cache_geometry[cpu_id].clidr;
+ break;
+ case AIDR:
+ if (write)
+ goto error;
+ regs->r[Rt] = read_aidr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case 2:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case CSSELR:
+ if (write)
+ target_cache_geometry
+ [cpu_id].csselr =
+ regs->r[Rt];
+ else
+ regs->r[Rt] =
+ target_cache_geometry
+ [cpu_id].csselr;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case CRN_C7:
+ switch (Op1) {
+ case 0:
+ switch (CRm) {
+ case 6:
+ switch (Op2) {
+ case DCISW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dcisw,
+ &host_cache_geometry
+ [cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta
+ [cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ case 10:
+ switch (Op2) {
+ case DCCSW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dccsw,
+ &host_cache_geometry
+ [cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta
+ [cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ case 14:
+ switch (Op2) {
+ case DCCISW:
+ {
+ if (!write)
+ goto error;
+ handle_cm_op(regs->r[Rt],
+ dccisw,
+ &host_cache_geometry
+ [cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta
+ [cpu_id][0]);
+ break;
+ }
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case CRN_C9:
+ switch (Op1) {
+ case 1:
+ switch (CRm) {
+ case 0:
+ switch (Op2) {
+ case 2:
+ /*
+ * A write to the L2CTLR register means trouble
+ * as the A7 version does not have all the fields
+ * that the A15 has. Handling needs more thought
+ */
+ if (write) {
+ printf
+ ("%s: Unexpected L2CTLR write \n",
+ __FUNCTION__);
+ goto error;
+ }
- regs->r[Rt] &= ~(0x3 << 24);
- regs->r[Rt] |= (num_cpus - 1) << 24;
- } else {
- regs->r[Rt] = read_l2ctlr();
- }
- break;
- case 3:
- /*
- * A write to the L2ECTLR register means trouble
- * as it does not exist on A7. Handling needs more
- * thought
- */
- if (write) {
- printf("%s: Unexpected L2ECTLR write \n",
- __FUNCTION__);
- goto error;
- } else {
- regs->r[Rt] = read_l2ectlr();
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
- break;
+ /*
+ * A read of the L2CTLR should return the total number
+ * of cpus across both the clusters in the "always on"
+ * configuration. Since there are only 2 bits for the
+ * number of cpus in the L2CTLR we need to flag any
+ * system with > 4 cpus.
+ */
+ if (!switcher) {
+ unsigned num_cpus =
+ CLUSTER_CPU_COUNT
+ (host_cluster)
+ +
+ CLUSTER_CPU_COUNT
+ (!host_cluster);
-	/*
-	 * Support for accesses to the PMON space. It has not been
-	 * verified whether all the registers are readable &
-	 * writable. But then, execution will never reach here
-	 * if a reg is inaccessible; it will be an undef abort
-	 * instead.
-	 */
- case 0:
- switch (CRm) {
- case 14:
- switch (Op2) {
- case 0:
- if(write)
- write_pmuserenr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmuserenr();
- break;
- case 1:
- if(write)
- write_pmintenset(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmintenset();
- break;
- case 2:
- if(write)
- write_pmintenclr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmintenclr();
- break;
- case 3:
- if(write)
- write_pmovsset(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmovsset();
- break;
- default:
- goto error;
- }
- break;
-
- case 13:
- switch (Op2) {
- case 0:
- if(write)
- write_pmccntr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmccntr();
- break;
- case 1:
- if(write)
- write_pmxevtyper(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmxevtyper();
- break;
- case 2:
- if(write)
- write_pmxevcntr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmxevcntr();
- break;
- default:
- goto error;
- }
- break;
-
- case 12:
- switch (Op2) {
- case 0:
- if(write)
- write_pmcr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmcr();
- break;
- case 1:
- if(write)
- write_pmcntenset(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmcntenset();
- break;
- case 2:
- if(write)
- write_pmcntenclr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmcntenclr();
- break;
- case 3:
- if(write)
- write_pmovsr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmovsr();
- break;
- case 4:
- if(write)
- write_pmswinc(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmswinc();
- break;
- case 5:
- if(write)
- write_pmselr(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmselr();
- break;
- case 6:
- if(write)
- write_pmceid0(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmceid0();
- break;
- case 7:
- if(write)
- write_pmceid1(regs->r[Rt]);
- else
- regs->r[Rt] = read_pmceid1();
- break;
- default:
- goto error;
- }
- break;
- }
- break;
- default:
- goto error;
- }
- break;
- default:
- goto error;
- }
+ if (num_cpus > 4) {
+ printf
+ ("%s: Unexpected L2CTLR read \n",
+ __FUNCTION__);
+ goto error;
+ }
- return;
+ regs->r[Rt] &= ~(0x3 << 24);
+ regs->r[Rt] |=
+ (num_cpus - 1) << 24;
+ } else {
+ regs->r[Rt] = read_l2ctlr();
+ }
+ break;
+ case 3:
+ /*
+ * A write to the L2ECTLR register means trouble
+ * as it does not exist on A7. Handling needs more
+ * thought
+ */
+ if (write) {
+ printf
+ ("%s: Unexpected L2ECTLR write \n",
+ __FUNCTION__);
+ goto error;
+ } else {
+ regs->r[Rt] = read_l2ectlr();
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+		/*
+		 * Support for accesses to the PMON space. It has not been
+		 * verified whether all the registers are readable &
+		 * writable. But then, execution will never reach here
+		 * if a reg is inaccessible; it will be an undef abort
+		 * instead.
+		 */
+ case 0:
+ switch (CRm) {
+ case 14:
+ switch (Op2) {
+ case 0:
+ if (write)
+ write_pmuserenr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmuserenr();
+ break;
+ case 1:
+ if (write)
+ write_pmintenset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmintenset();
+ break;
+ case 2:
+ if (write)
+ write_pmintenclr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmintenclr();
+ break;
+ case 3:
+ if (write)
+ write_pmovsset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmovsset();
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ case 13:
+ switch (Op2) {
+ case 0:
+ if (write)
+ write_pmccntr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmccntr();
+ break;
+ case 1:
+ if (write)
+ write_pmxevtyper(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmxevtyper();
+ break;
+ case 2:
+ if (write)
+ write_pmxevcntr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmxevcntr();
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ case 12:
+ switch (Op2) {
+ case 0:
+ if (write)
+ write_pmcr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcr();
+ break;
+ case 1:
+ if (write)
+ write_pmcntenset(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcntenset();
+ break;
+ case 2:
+ if (write)
+ write_pmcntenclr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmcntenclr();
+ break;
+ case 3:
+ if (write)
+ write_pmovsr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmovsr();
+ break;
+ case 4:
+ if (write)
+ write_pmswinc(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmswinc();
+ break;
+ case 5:
+ if (write)
+ write_pmselr(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmselr();
+ break;
+ case 6:
+ if (write)
+ write_pmceid0(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmceid0();
+ break;
+ case 7:
+ if (write)
+ write_pmceid1(regs->r[Rt]);
+ else
+ regs->r[Rt] = read_pmceid1();
+ break;
+ default:
+ goto error;
+ }
+ break;
+ }
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ return;
error:
- printf("%s: Unexpected cp15 instruction", __FUNCTION__);
- printf(" : %s", write ? "MCR p15" : "MRC p15");
- printf(", %d, %d, %d, %d, %d \n", Op1, Rt, CRn, CRm, Op2);
- panic();
+ printf("%s: Unexpected cp15 instruction", __FUNCTION__);
+ printf(" : %s", write ? "MCR p15" : "MRC p15");
+ printf(", %d, %d, %d, %d, %d \n", Op1, Rt, CRn, CRm, Op2);
+ panic();
}
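
For reference, the ISS decode at the top of trap_cp15_mrc_mcr_handle()
follows the ARMv7-A HSR layout for 32-bit CP15 accesses: bit 0 is the
direction (set for a read) and Op1/Op2/CRn/CRm/Rt identify the system
register and the GP register involved. A minimal standalone sketch of
the same decode:

	#include <stdio.h>

	/* Decode the ISS fields of an HSR reporting a 32-bit CP15 access. */
	void decode_cp15_hsr(unsigned hsr)
	{
		unsigned Op2 = (hsr >> 17) & 0x7;
		unsigned Op1 = (hsr >> 14) & 0x7;
		unsigned CRn = (hsr >> 10) & 0xf;
		unsigned Rt = (hsr >> 5) & 0xf;
		unsigned CRm = (hsr >> 1) & 0xf;
		unsigned write = !(hsr & 0x1);	/* bit 0 set => MRC (read) */

		printf("%s p15, %u, r%u, c%u, c%u, %u\n",
		       write ? "MCR" : "MRC", Op1, Rt, CRn, CRm, Op2);
	}
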
void trap_dabort_handle(unsigned hsr, gp_regs * regs)
{
- unsigned hdfar = 0x0, hpfar = 0x0, pa = 0x0, *data = 0x0;
- unsigned write = 0x0;
+ unsigned hdfar = 0x0, hpfar = 0x0, pa = 0x0, *data = 0x0;
+ unsigned write = 0x0;
- hdfar = read_hdfar();
- hpfar = read_hpfar();
+ hdfar = read_hdfar();
+ hpfar = read_hpfar();
- pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff);
- data = &regs->r[(hsr >> 16) & 0xf];
- write = (hsr >> 6) & 0x1;
+ pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff);
+ data = &regs->r[(hsr >> 16) & 0xf];
+ write = (hsr >> 6) & 0x1;
- /* Only distributor accesses are virtualised at the moment */
- if ((pa & ~0xfff) == GIC_ID_PHY_BASE) {
- handle_vgic_distif_abort(pa, data, write);
- }
+ /* Only distributor accesses are virtualised at the moment */
+ if ((pa & ~0xfff) == GIC_ID_PHY_BASE) {
+ handle_vgic_distif_abort(pa, data, write);
+ }
- return;
+ return;
}
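
The address reconstruction in trap_dabort_handle() relies on HPFAR
holding bits [39:12] of the faulting IPA in its bits [31:4], with HDFAR
supplying the in-page offset. A worked example for a hypothetical fault
at 0x2c001004 (an illustrative distributor register address, not the
GIC_ID_PHY_BASE value from this tree):

	#include <assert.h>

	int main(void)
	{
		unsigned hpfar = (0x2c001004u >> 12) << 4;	/* 0x002c0010 */
		unsigned hdfar = 0x2c001004u;	/* faulting VA; only bits [11:0] are used */
		unsigned pa = ((hpfar >> 4) << 12) + (hdfar & 0xfff);

		assert(pa == 0x2c001004u);
		return 0;
	}
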
void HandleVirtualisor(gp_regs * regs)
{
- unsigned cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr()), rc = 0;
- unsigned hsr = read_hsr(), elr = 0, vd_len = 0, index = 0;
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (gp_regs *, unsigned, unsigned) = 0x0, sibling;
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- /*
- * Perform the generic trap handling
- */
- switch (hsr >> 26) {
- case TRAP_DABORT:
- trap_dabort_handle(hsr, regs);
- break;
- case TRAP_CP15_32:
- trap_cp15_mrc_mcr_handle(hsr, regs);
- break;
- default:
- printf("%s: Unexpected trap", __FUNCTION__);
- printf(": HSR=0x%x Regs=0x%x \n", hsr, (unsigned) regs);
- panic();
- }
+ unsigned cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr()), rc = 0;
+ unsigned hsr = read_hsr(), elr = 0, vd_len = 0, index = 0;
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (gp_regs *, unsigned, unsigned) = 0x0, sibling;
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ /*
+ * Perform the generic trap handling
+ */
+ switch (hsr >> 26) {
+ case TRAP_DABORT:
+ trap_dabort_handle(hsr, regs);
+ break;
+ case TRAP_CP15_32:
+ trap_cp15_mrc_mcr_handle(hsr, regs);
+ break;
+ default:
+ printf("%s: Unexpected trap", __FUNCTION__);
+ printf(": HSR=0x%x Regs=0x%x \n", hsr, (unsigned)regs);
+ panic();
+ }
+
+ /*
+ * Do any cpu specific trap handling.
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
- /*
- * Do any cpu specific trap handling.
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- handler = vd_array[index].trap_handle;
- if(handler) {
- rc = handler(regs, hsr, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ if (cpu_no == vd_array[index].cpu_no) {
+ handler = vd_array[index].trap_handle;
+ if (handler) {
+ rc = handler(regs, hsr, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
-	/*
-	 * This is a trap of the kind where we simply move
-	 * on to the next instruction in the actual program.
-	 * HSR.IL (bit 25) gives the size of the trapped
-	 * instruction: move by 4 bytes for a 32-bit encoding
-	 * and by 2 bytes for a 16-bit (Thumb) encoding.
-	 */
- elr = ((vm_context *) regs)->elr_hyp;
- if (hsr & (1 << 25))
- elr += 4;
- else
- elr += 2;
- ((vm_context *) regs)->elr_hyp = elr;
+	/*
+	 * This is a trap of the kind where we simply move
+	 * on to the next instruction in the actual program.
+	 * HSR.IL (bit 25) gives the size of the trapped
+	 * instruction: move by 4 bytes for a 32-bit encoding
+	 * and by 2 bytes for a 16-bit (Thumb) encoding.
+	 */
+ elr = ((vm_context *) regs)->elr_hyp;
+ if (hsr & (1 << 25))
+ elr += 4;
+ else
+ elr += 2;
+ ((vm_context *) regs)->elr_hyp = elr;
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
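
The return-address fixup at the end of HandleVirtualisor() keys off
HSR.IL (bit 25), which reports the size of the trapped instruction. The
same step as a tiny standalone helper, a sketch only:

	/* Advance past the trapped instruction: HSR.IL = 1 means a 32-bit
	 * encoding, 0 means a 16-bit Thumb encoding. */
	unsigned next_elr_hyp(unsigned elr, unsigned hsr)
	{
		return elr + ((hsr & (1 << 25)) ? 4 : 2);
	}
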
diff --git a/big-little/virtualisor/virt_setup.c b/big-little/virtualisor/virt_setup.c
index 8496765..d35adc6 100644
--- a/big-little/virtualisor/virt_setup.c
+++ b/big-little/virtualisor/virt_setup.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virt_helpers.h"
#include "virtualisor.h"
@@ -35,11 +35,9 @@ cache_geometry target_cache_geometry[NUM_CPUS];
/* Cache geometry differences for each cpu at each level */
cache_diff cache_delta[NUM_CPUS][MAX_CACHE_LEVELS];
-static mem_trap_data svgic_distif_trap
-__attribute__ ((section("s2_trap_section"))) = {
- 0, 0x0, 0x0, 0x0, 0x0, 0x0,
-};
-
+static mem_trap_data svgic_distif_trap
+ __attribute__ ((section("s2_trap_section"))) = {
+0, 0x0, 0x0, 0x0, 0x0, 0x0,};
/*
* Flags which indicate whether the cpu independent
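
Placing svgic_distif_trap in s2_trap_section lets the stage-2 trap code
discover every descriptor through linker-generated section symbols. A
sketch of the pattern with a hypothetical second descriptor; the six
zero fields mirror the mem_trap_data initialiser above:

	/* Hypothetical additional descriptor. The ARM linker publishes
	 * s2_trap_section$$Base and s2_trap_section$$Length, bounding an
	 * array that the trap code can walk. */
	static mem_trap_data another_mem_trap
	    __attribute__ ((section("s2_trap_section"))) = {
		0, 0x0, 0x0, 0x0, 0x0, 0x0,
	};
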
@@ -55,191 +53,192 @@ static unsigned virt_init[NUM_CPUS];
*/
unsigned find_sibling_cpu()
{
- unsigned cpu_no = PART_NO(read_midr());
-
- switch (DC_SYSTYPE) {
- case A15_A15:
- if(cpu_no == A15)
- return cpu_no;
- break;
- case A7_A15:
- case A15_A7:
- if(cpu_no == A15)
- return A7;
- else if(cpu_no == A7)
- return A15;
- else
- break;
- }
-
- printf("Unsupported Dual cluster system : 0x%x\n", DC_SYSTYPE);
- panic();
-
- return 0;
+ unsigned cpu_no = PART_NO(read_midr());
+
+ switch (DC_SYSTYPE) {
+ case A15_A15:
+ if (cpu_no == A15)
+ return cpu_no;
+ break;
+ case A7_A15:
+ case A15_A7:
+ if (cpu_no == A15)
+ return A7;
+ else if (cpu_no == A7)
+ return A15;
+ else
+ break;
+ }
+
+ printf("Unsupported Dual cluster system : 0x%x\n", DC_SYSTYPE);
+ panic();
+
+ return 0;
}
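
The mapping implemented by the switch above, summarised; the final line
shows the intended usage:

	/* DC_SYSTYPE   caller   sibling
	 * A15_A15      A15      A15
	 * A7_A15       A15      A7      (and A7 -> A15)
	 * A15_A7       A15      A7      (and A7 -> A15)
	 * anything else -> panic()
	 */
	unsigned sibling = find_sibling_cpu();
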
void SetupVirtualisor(unsigned first_cpu)
{
- unsigned rc = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
- unsigned vd_len = 0, index = 0, cluster_id = read_clusterid();
- virt_descriptor *vd_array = &virt_desc_section$$Base;
- unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
- unsigned sibling_cpuid = 0, abs_cpuid = 0;
-
- if (!switcher) {
- sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
- abs_cpuid = abs_cpuid(cpu_id, cluster_id);
- }
-
- /* Find our brother from another mother */
- sibling = find_sibling_cpu();
-
- /*
- * Do the generic trap setup
- */
- if (virt_init[cpu_id] == FALSE) {
-
-		/*
-		 * In the "always-on" configuration, both clusters have
-		 * to ensure that the L2CTLR register includes the cpu
-		 * count of both clusters while reporting the number of
-		 * secondary cpus. So set up the necessary trap.
-		 */
- if (!switcher) {
- /*
- * Enable traps to CRn = 9 cp15 space
- */
- write_hstr(read_hstr() | (1 << 9));
- }
-
-		/*
-		 * Cache geometry of each cpu on the host cluster needs
-		 * to be virtualised if the cpu type is different from
-		 * that on the target cluster. This can be done
-		 * generically.
-		 */
- if (cpu_no != sibling) {
- rc = map_cache_geometries(&host_cache_geometry[cpu_id],
- &target_cache_geometry[cpu_id],
- &cache_delta[cpu_id][0]);
- if (rc) {
- printf("%s: Failed to map cache geometries \n", __FUNCTION__);
- rc = 1;
- goto out;
- }
-
- }
-
-
- /*
- * Irrespective of what cpu types are present in the
- * dual cluster system, the host cluster has to trap
- * accesses to the vgic distributor when switching.
- */
- if (switcher && cluster_id == host_cluster) {
- if (cpu_id == first_cpu) {
- rc = mem_trap_setup(GIC_ID_PHY_BASE, &svgic_distif_trap);
- if (rc) {
- printf("%s: svgic distif trap setup failed \n",
- __FUNCTION__);
- goto out;
- }
- }
- }
-
-
- /*
- * If the two clusters have different cpu types, then the
- * target saves its midr and the host uses the value to
- * virtualise its midr.
- * mpidr is virtualised on the host cluster whether we are
- * running "always on" or "switching". The latter cares
- * about the cluster id while the former cares about the
- * cpu ids as well.
- */
- if (cluster_id != host_cluster) {
- host_virt_regs[cpu_id].mpidr = read_mpidr();
- if (cpu_no != sibling)
- host_virt_regs[cpu_id].midr = read_midr();
- if (!switcher) {
-				/*
-				 * Send a signal to the host to indicate
-				 * that the regs are ready to be read. The
-				 * cpu id is the absolute cpu number across
-				 * clusters.
-				 */
- set_event(VID_REGS_DONE, sibling_cpuid);
- }
- } else {
- if (!switcher) {
- /*
- * Wait for the target to read its regs
- * before using them.
- */
- wait_for_event(VID_REGS_DONE, abs_cpuid);
- reset_event(VID_REGS_DONE, abs_cpuid);
-
- /*
- * Add number of cpus in the target cluster to
- * the cpuid of this cpu.
- */
- host_virt_regs[cpu_id].mpidr += CLUSTER_CPU_COUNT(!host_cluster);
- }
- write_vmpidr(host_virt_regs[cpu_id].mpidr);
- if (cpu_no != sibling)
- write_vmidr(host_virt_regs[cpu_id].midr);
- }
-
- if (cluster_id == host_cluster) {
- /*
- * Assuming that with the switcher, the host always
- * runs after the target. So, if we are here then
- * the target must have completed its initialisation
- *
- * In the other case, if we are here after exchanging
- * the events above, then the target has finished
- * initialising.
- */
- virt_init[cpu_id] = 1;
- }
-
- } else {
- if (switcher)
- RestoreVirtualisor(first_cpu);
- }
-
-
- /*
- * Do the cpu specific initialisation (if any)
- */
- vd_len = (unsigned)&virt_desc_section$$Length;
- for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
-
- if (cpu_no == vd_array[index].cpu_no) {
- /* If not initialised then setup else restore*/
- if (vd_array[index].init[cpu_id] == 0)
- handler = vd_array[index].trap_setup;
- else
- handler = vd_array[index].trap_restore;
-
- if(handler) {
- rc = handler(first_cpu, sibling);
- if (rc) {
- printf("%s: failed on cpu%d \n",
- __FUNCTION__,
- cpu_no);
- goto out;
- }
- }
- }
- }
+ unsigned rc = 0, cpu_id = read_cpuid(), cpu_no = PART_NO(read_midr());
+ unsigned vd_len = 0, index = 0, cluster_id = read_clusterid();
+ virt_descriptor *vd_array = &virt_desc_section$$Base;
+ unsigned (*handler) (unsigned, unsigned) = 0x0, sibling;
+ unsigned sibling_cpuid = 0, abs_cpuid = 0;
+
+ if (!switcher) {
+ sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+ }
+
+ /* Find our brother from another mother */
+ sibling = find_sibling_cpu();
+
+ /*
+ * Do the generic trap setup
+ */
+ if (virt_init[cpu_id] == FALSE) {
+
+		/*
+		 * In the "always-on" configuration, both clusters have
+		 * to ensure that the L2CTLR register includes the cpu
+		 * count of both clusters while reporting the number of
+		 * secondary cpus. So set up the necessary trap.
+		 */
+ if (!switcher) {
+ /*
+ * Enable traps to CRn = 9 cp15 space
+ */
+ write_hstr(read_hstr() | (1 << 9));
+ }
+
+		/*
+		 * Cache geometry of each cpu on the host cluster needs
+		 * to be virtualised if the cpu type is different from
+		 * that on the target cluster. This can be done
+		 * generically.
+		 */
+ if (cpu_no != sibling) {
+ rc = map_cache_geometries(&host_cache_geometry[cpu_id],
+ &target_cache_geometry
+ [cpu_id],
+ &cache_delta[cpu_id][0]);
+ if (rc) {
+ printf("%s: Failed to map cache geometries \n",
+ __FUNCTION__);
+ rc = 1;
+ goto out;
+ }
+
+ }
+
+ /*
+ * Irrespective of what cpu types are present in the
+ * dual cluster system, the host cluster has to trap
+ * accesses to the vgic distributor when switching.
+ */
+ if (switcher && cluster_id == host_cluster) {
+ if (cpu_id == first_cpu) {
+ rc = mem_trap_setup(GIC_ID_PHY_BASE,
+ &svgic_distif_trap);
+ if (rc) {
+ printf
+ ("%s: svgic distif trap setup failed \n",
+ __FUNCTION__);
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * If the two clusters have different cpu types, then the
+ * target saves its midr and the host uses the value to
+ * virtualise its midr.
+ * mpidr is virtualised on the host cluster whether we are
+ * running "always on" or "switching". The latter cares
+ * about the cluster id while the former cares about the
+ * cpu ids as well.
+ */
+ if (cluster_id != host_cluster) {
+ host_virt_regs[cpu_id].mpidr = read_mpidr();
+ if (cpu_no != sibling)
+ host_virt_regs[cpu_id].midr = read_midr();
+ if (!switcher) {
+				/*
+				 * Send a signal to the host to indicate
+				 * that the regs are ready to be read. The
+				 * cpu id is the absolute cpu number across
+				 * clusters.
+				 */
+ set_event(VID_REGS_DONE, sibling_cpuid);
+ }
+ } else {
+ if (!switcher) {
+ /*
+ * Wait for the target to read its regs
+ * before using them.
+ */
+ wait_for_event(VID_REGS_DONE, abs_cpuid);
+ reset_event(VID_REGS_DONE, abs_cpuid);
+
+ /*
+ * Add number of cpus in the target cluster to
+ * the cpuid of this cpu.
+ */
+ host_virt_regs[cpu_id].mpidr +=
+ CLUSTER_CPU_COUNT(!host_cluster);
+ }
+ write_vmpidr(host_virt_regs[cpu_id].mpidr);
+ if (cpu_no != sibling)
+ write_vmidr(host_virt_regs[cpu_id].midr);
+ }
+
+ if (cluster_id == host_cluster) {
+ /*
+ * Assuming that with the switcher, the host always
+ * runs after the target. So, if we are here then
+ * the target must have completed its initialisation
+ *
+ * In the other case, if we are here after exchanging
+ * the events above, then the target has finished
+ * initialising.
+ */
+ virt_init[cpu_id] = 1;
+ }
+
+ } else {
+ if (switcher)
+ RestoreVirtualisor(first_cpu);
+ }
+
+ /*
+ * Do the cpu specific initialisation (if any)
+ */
+ vd_len = (unsigned)&virt_desc_section$$Length;
+ for (index = 0; index < (vd_len / sizeof(virt_descriptor)); index++) {
+
+ if (cpu_no == vd_array[index].cpu_no) {
+ /* If not initialised then setup else restore */
+ if (vd_array[index].init[cpu_id] == 0)
+ handler = vd_array[index].trap_setup;
+ else
+ handler = vd_array[index].trap_restore;
+
+ if (handler) {
+ rc = handler(first_cpu, sibling);
+ if (rc) {
+ printf("%s: failed on cpu%d \n",
+ __FUNCTION__, cpu_no);
+ goto out;
+ }
+ }
+ }
+ }
out:
- if (rc) {
- printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
- __FUNCTION__, cpu_id, cpu_no, sibling);
- panic();
- }
+ if (rc) {
+ printf("%s: Failed : Cpu%d : Host=0x%x : Target=0x%x\n ",
+ __FUNCTION__, cpu_id, cpu_no, sibling);
+ panic();
+ }
- return;
+ return;
}
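
Both SetupVirtualisor() and HandleVirtualisor() use the same discovery
idiom: the ARM linker gathers every virt_descriptor into
virt_desc_section and publishes $$Base/$$Length symbols bounding the
array. A condensed sketch of the setup-side dispatch (error handling
omitted; names as in the code above):

	extern virt_descriptor virt_desc_section$$Base;
	extern unsigned virt_desc_section$$Length;

	void run_cpu_specific_setup(unsigned cpu_no, unsigned first_cpu,
				    unsigned sibling)
	{
		virt_descriptor *vd_array = &virt_desc_section$$Base;
		unsigned vd_len = (unsigned)&virt_desc_section$$Length;
		unsigned index;

		for (index = 0; index < vd_len / sizeof(virt_descriptor); index++)
			if (cpu_no == vd_array[index].cpu_no &&
			    vd_array[index].trap_setup)
				(void)vd_array[index].trap_setup(first_cpu,
								 sibling);
	}
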