path: root/big-little/virtualisor/cache_geom.c
Diffstat (limited to 'big-little/virtualisor/cache_geom.c')
-rw-r--r--  big-little/virtualisor/cache_geom.c  757
1 file changed, 375 insertions(+), 382 deletions(-)
diff --git a/big-little/virtualisor/cache_geom.c b/big-little/virtualisor/cache_geom.c
index 1031ba4..17c3ee6 100644
--- a/big-little/virtualisor/cache_geom.c
+++ b/big-little/virtualisor/cache_geom.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virtualisor.h"
#include "virt_helpers.h"
@@ -40,198 +40,192 @@ static unsigned cm_extline_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 };
* levels and save the geometry at each level.
*
*/
-void find_cache_geometry(cache_geometry *cg_ptr)
+void find_cache_geometry(cache_geometry * cg_ptr)
{
- unsigned ctr, clidr, ccsidr, csselr, old_csselr;
-
- /* Save Cache size selection register */
- old_csselr = read_csselr();
- clidr = read_clidr();
- cg_ptr->clidr = clidr;
-
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
- unsigned cache_type = get_cache_type(clidr, ctr);
-
- /* Only seperate and Unifiied caches */
- if (cache_type >= 0x3) {
- /*
- * Choose the cache level & Data or Unified cache
- * as there are no set/way operations on the ICache
- */
- csselr = ctr << 1;
- write_csselr(csselr);
-
- isb();
-
- /*
- * Read the CCSIDR to record information about this
- * cache level.
- */
- ccsidr = read_ccsidr();
- cg_ptr->ccsidr[ctr] = ccsidr;
-
- } else {
- /*
- * Stop scanning at the first invalid/unsupported
- * cache level
- */
- break;
- }
- }
-
- /* Restore Cache size selection register */
- write_csselr(old_csselr);
- return;
+ unsigned ctr, clidr, ccsidr, csselr, old_csselr;
+
+ /* Save Cache size selection register */
+ old_csselr = read_csselr();
+ clidr = read_clidr();
+ cg_ptr->clidr = clidr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+ unsigned cache_type = get_cache_type(clidr, ctr);
+
+ /* Only separate and unified caches */
+ if (cache_type >= 0x3) {
+ /*
+ * Choose the cache level & Data or Unified cache
+ * as there are no set/way operations on the ICache
+ */
+ csselr = ctr << 1;
+ write_csselr(csselr);
+
+ isb();
+
+ /*
+ * Read the CCSIDR to record information about this
+ * cache level.
+ */
+ ccsidr = read_ccsidr();
+ cg_ptr->ccsidr[ctr] = ccsidr;
+
+ } else {
+ /*
+ * Stop scanning at the first invalid/unsupported
+ * cache level
+ */
+ break;
+ }
+ }
+
+ /* Restore Cache size selection register */
+ write_csselr(old_csselr);
+ return;
}
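
For reference, the CCSIDR words saved above are what the get_cache_*()
helpers decode later on. A minimal sketch of that decoding, assuming the
ARMv7 CCSIDR bit layout (these macro names are illustrative, not the ones
this codebase defines):

/* ARMv7 CCSIDR: LineSize [2:0], Associativity [12:3], NumSets [27:13].
 * Each field is encoded as (real value - 1), which is why the callers in
 * find_cache_diff() below add 1 back.
 */
#define CCSIDR_LINESZ(x)   ((x) & 0x7)             /* log2(words/line) - 2 */
#define CCSIDR_ASSOC(x)    (((x) >> 3) & 0x3ff)    /* ways - 1 */
#define CCSIDR_NUMSETS(x)  (((x) >> 13) & 0x7fff)  /* sets - 1 */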
/*
* Given two cache geometries, find out how they differ
*/
-void find_cache_diff(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+void find_cache_diff(cache_geometry * hcg_ptr, cache_geometry * tcg_ptr,
+ cache_diff * cd_ptr)
{
- unsigned tc_size = 0, hc_size = 0, tc_linelen = 0, hc_linelen = 0;
- unsigned tc_assoc = 0, hc_assoc = 0, tc_numsets = 0, hc_numsets = 0;
- unsigned ctr;
-
- for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
-
- /* Break at the first unimplemented cache level */
- if (get_cache_type(hcg_ptr->clidr, ctr) == 0)
- break;
-
- /* Cache associativity */
- tc_assoc = get_cache_assoc(tcg_ptr, ctr) + 1;
- hc_assoc = get_cache_assoc(hcg_ptr, ctr) + 1;
-
- /* Number of the sets in the cache */
- tc_numsets = get_cache_numsets(tcg_ptr, ctr) + 1;
- hc_numsets = get_cache_numsets(hcg_ptr, ctr) + 1;
-
- /* Cache line length in words */
- tc_linelen = 1 << (get_cache_linesz(tcg_ptr, ctr) + 2);
- hc_linelen = 1 << (get_cache_linesz(hcg_ptr, ctr) + 2);
-
- /* Cache size in words */
- tc_size = tc_assoc * tc_numsets * tc_linelen;
- hc_size = hc_assoc * hc_numsets * hc_linelen;
-
- /*
- * Find the factor by which the cache line sizes differ.
- * If so, then the target cacheline will have to be
- * multiplied or divided by the factor to get the absolute
- * cache line number. Then, find the number of absolute
- * cache lines in each cache
- */
- if (tc_linelen >= hc_linelen) {
- cd_ptr[ctr].tcline_factor =
- tc_linelen / hc_linelen;
- cd_ptr[ctr].tnumabs_clines =
- tc_assoc * tc_numsets *
- cd_ptr[ctr].tcline_factor;
- cd_ptr[ctr].hnumabs_clines =
- hc_assoc * hc_numsets;
- } else {
- cd_ptr[ctr].hcline_factor =
- hc_linelen / tc_linelen;
- cd_ptr[ctr].hnumabs_clines =
- hc_assoc * hc_numsets *
- cd_ptr[ctr].hcline_factor;
- cd_ptr[ctr].tnumabs_clines =
- tc_assoc * tc_numsets;
- }
-
- /*
- * Find if the cache sizes differ. If so, then set a flag
- * to indicate whether some set/way operations need to be
- * extended on the host cpu or ignored on the target cpu
- */
- if (tc_size > hc_size) {
- cd_ptr[ctr].csize_diff = TCSZ_BIG;
- }
-
- if (tc_size == hc_size) {
- cd_ptr[ctr].csize_diff =
- TCSZ_EQUAL;
- }
-
- if (tc_size < hc_size) {
- cd_ptr[ctr].csize_diff =
- TCSZ_SMALL;
- }
- }
-
- return;
+ unsigned tc_size = 0, hc_size = 0, tc_linelen = 0, hc_linelen = 0;
+ unsigned tc_assoc = 0, hc_assoc = 0, tc_numsets = 0, hc_numsets = 0;
+ unsigned ctr;
+
+ for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) {
+
+ /* Break at the first unimplemented cache level */
+ if (get_cache_type(hcg_ptr->clidr, ctr) == 0)
+ break;
+
+ /* Cache associativity */
+ tc_assoc = get_cache_assoc(tcg_ptr, ctr) + 1;
+ hc_assoc = get_cache_assoc(hcg_ptr, ctr) + 1;
+
+ /* Number of the sets in the cache */
+ tc_numsets = get_cache_numsets(tcg_ptr, ctr) + 1;
+ hc_numsets = get_cache_numsets(hcg_ptr, ctr) + 1;
+
+ /* Cache line length in words */
+ tc_linelen = 1 << (get_cache_linesz(tcg_ptr, ctr) + 2);
+ hc_linelen = 1 << (get_cache_linesz(hcg_ptr, ctr) + 2);
+
+ /* Cache size in words */
+ tc_size = tc_assoc * tc_numsets * tc_linelen;
+ hc_size = hc_assoc * hc_numsets * hc_linelen;
+
+ /*
+ * Find the factor by which the cache line sizes differ.
+ * If so, then the target cacheline will have to be
+ * multiplied or divided by the factor to get the absolute
+ * cache line number. Then, find the number of absolute
+ * cache lines in each cache
+ */
+ if (tc_linelen >= hc_linelen) {
+ cd_ptr[ctr].tcline_factor = tc_linelen / hc_linelen;
+ cd_ptr[ctr].tnumabs_clines =
+ tc_assoc * tc_numsets * cd_ptr[ctr].tcline_factor;
+ cd_ptr[ctr].hnumabs_clines = hc_assoc * hc_numsets;
+ } else {
+ cd_ptr[ctr].hcline_factor = hc_linelen / tc_linelen;
+ cd_ptr[ctr].hnumabs_clines =
+ hc_assoc * hc_numsets * cd_ptr[ctr].hcline_factor;
+ cd_ptr[ctr].tnumabs_clines = tc_assoc * tc_numsets;
+ }
+
+ /*
+ * Find if the cache sizes differ. If so, then set a flag
+ * to indicate whether some set/way operations need to be
+ * extended on the host cpu or ignored on the target cpu
+ */
+ if (tc_size > hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_BIG;
+ }
+
+ if (tc_size == hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_EQUAL;
+ }
+
+ if (tc_size < hc_size) {
+ cd_ptr[ctr].csize_diff = TCSZ_SMALL;
+ }
+ }
+
+ return;
}
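
As a worked example of the factors computed above (all numbers invented for
illustration, not taken from any particular CPU):

/* Target lines of 8 words (32B), host lines of 16 words (64B), both
 * caches 4-way with 256 sets. tc_linelen < hc_linelen, so the else
 * branch applies:
 */
unsigned tc_assoc = 4, tc_numsets = 256, tc_linelen = 8;
unsigned hc_assoc = 4, hc_numsets = 256, hc_linelen = 16;
unsigned hcline_factor  = hc_linelen / tc_linelen;               /* == 2 */
unsigned hnumabs_clines = hc_assoc * hc_numsets * hcline_factor; /* 2048 */
unsigned tnumabs_clines = tc_assoc * tc_numsets;                 /* 1024 */
/* tc_size (8192 words) < hc_size (16384 words), so csize_diff becomes
 * TCSZ_SMALL and set/way ops must later be extended to the host lines
 * that have no target counterpart.
 */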
-unsigned map_cache_geometries(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr, cache_diff *cd_ptr)
+unsigned map_cache_geometries(cache_geometry * hcg_ptr,
+ cache_geometry * tcg_ptr, cache_diff * cd_ptr)
{
- unsigned rc = 0, cpu_id = read_cpuid();
- unsigned hcr = 0, cluster_id = read_clusterid(), sibling_cpuid = 0;
- unsigned abs_cpuid = 0;
-
- if (!switcher) {
- sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
- abs_cpuid = abs_cpuid(cpu_id, cluster_id);
- }
-
- if (cluster_id == host_cluster) {
-
- /* Find host cache topology */
- find_cache_geometry(hcg_ptr);
-
- /*
- * Wait for the target cpu to send an event indicating that
- * its discovered its cache topology.
- */
- if (!switcher) {
- wait_for_event(CACHE_GEOM_DONE, abs_cpuid);
- reset_event(CACHE_GEOM_DONE, abs_cpuid);
- }
-
- /*
- * Assuming that only no. of sets, ways and cache line
- * size will be different across the target and host
- * cpu caches. Hence the CLIDRs should look the same
- * Support for absence of cache levels and memory
- * Also this check ensures that the target cpu is
- * always run before the host else the cache geometry
- * will have to be hardcoded.
- * mapped caches will be added later.
- */
- if (hcg_ptr->clidr != tcg_ptr->clidr) {
- printf("%s: Host CLIDR=0x%x : Target CLIDR=0x%x \n",
- __FUNCTION__, hcg_ptr->clidr, tcg_ptr->clidr);
- rc = 1;
- goto out;
- }
-
- find_cache_diff(hcg_ptr, tcg_ptr, cd_ptr);
-
- /*
- * Enable bit for trapping set/way operations &
- * Cache identification regs
- */
- hcr = read_hcr();
- hcr |= HCR_TSW | HCR_TID2;
- write_hcr(hcr);
- dsb();
- isb();
-
- } else {
-
- /* Find the cache geometry on the target cpu */
- find_cache_geometry(tcg_ptr);
-
- /*
- * Send an event to the host cpu indicating that we have
- * discovered our cache topology
- */
- if(!switcher) {
- set_event(CACHE_GEOM_DONE, sibling_cpuid);
- }
- }
+ unsigned rc = 0, cpu_id = read_cpuid();
+ unsigned hcr = 0, cluster_id = read_clusterid(), sibling_cpuid = 0;
+ unsigned abs_cpuid = 0;
+
+ if (!switcher) {
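+ /* NB: abs_cpuid() is a function-like macro; the local variable of
+  * the same name shadows it only as a plain identifier.
+  */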
+ sibling_cpuid = abs_cpuid(cpu_id, !cluster_id);
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+ }
+
+ if (cluster_id == host_cluster) {
+
+ /* Find host cache topology */
+ find_cache_geometry(hcg_ptr);
+
+ /*
+ * Wait for the target cpu to send an event indicating that
+ * it has discovered its cache topology.
+ */
+ if (!switcher) {
+ wait_for_event(CACHE_GEOM_DONE, abs_cpuid);
+ reset_event(CACHE_GEOM_DONE, abs_cpuid);
+ }
+
+ /*
+  * Assume that only the no. of sets, ways and cache line
+  * size will differ between the target and host cpu
+  * caches; hence the CLIDRs should look the same.
+  * Support for absence of cache levels and memory
+  * mapped caches will be added later. Also, this check
+  * ensures that the target cpu is always run before the
+  * host, else the cache geometry would have to be
+  * hardcoded.
+  */
+ if (hcg_ptr->clidr != tcg_ptr->clidr) {
+ printf("%s: Host CLIDR=0x%x : Target CLIDR=0x%x \n",
+ __FUNCTION__, hcg_ptr->clidr, tcg_ptr->clidr);
+ rc = 1;
+ goto out;
+ }
+
+ find_cache_diff(hcg_ptr, tcg_ptr, cd_ptr);
+
+ /*
+ * Enable bit for trapping set/way operations &
+ * Cache identification regs
+ */
+ hcr = read_hcr();
+ hcr |= HCR_TSW | HCR_TID2;
+ write_hcr(hcr);
+ dsb();
+ isb();
+
+ } else {
+
+ /* Find the cache geometry on the target cpu */
+ find_cache_geometry(tcg_ptr);
+
+ /*
+ * Send an event to the host cpu indicating that we have
+ * discovered our cache topology
+ */
+ if (!switcher) {
+ set_event(CACHE_GEOM_DONE, sibling_cpuid);
+ }
+ }
out:
- return rc;
+ return rc;
}
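
The two HCR bits set in the host path above are what make the trap handling
in the next hunk reachable: HCR.TSW traps guest data/unified cache
maintenance operations by set/way, and HCR.TID2 traps accesses to the cache
ID registers (CSSELR, CCSIDR, CLIDR), letting the virtualiser present the
target geometry to the payload software. A hedged sketch of how a trapped
set/way op would then be forwarded into handle_cm_op() (the handler and
variable names here are assumed for illustration, not taken from this file):

extern cache_geometry host_cg, target_cg;           /* assumed globals */
extern cache_diff cache_delta[MAX_CACHE_LEVELS];    /* assumed global */
extern void write_dccisw(unsigned reg);             /* assumed helper */

static void on_trapped_dccisw(unsigned reg)
{
    /* Re-map the guest's set/way value onto the host cache geometry */
    handle_cm_op(reg, write_dccisw, &host_cg, &target_cg, cache_delta);
}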
/*
@@ -239,205 +233,204 @@ unsigned map_cache_geometries(cache_geometry *hcg_ptr, cache_geometry *tcg_ptr,
* handle a cache maintenance operation by set/way
*/
void handle_cm_op(unsigned reg,
- void (*op_handler) (unsigned),
- cache_geometry *hcg_ptr,
- cache_geometry *tcg_ptr,
- cache_diff *cd_ptr)
+ void (*op_handler) (unsigned),
+ cache_geometry * hcg_ptr,
+ cache_geometry * tcg_ptr, cache_diff * cd_ptr)
{
- unsigned clvl = 0, cpu_id = read_cpuid();
- unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
- unsigned hc_assoc = 0, hc_numsets = 0, hc_linesz = 0;
- unsigned lineno = 0, setno = 0, wayno = 0, abs_lineno = 0;
-
- /*
- * If target cache line size is greater than the host then
- * each maintenance op has to be performed on two lines on
- * host. Does not matter is the line size if equal
- */
- unsigned ctr = cd_ptr[clvl].tcline_factor;
-
- /*
- * Find out the cache level for which the set/way operation has invoked.
- * Use this to find the cache geometry in target cache to ascertain the
- * set & way number from the argument. Use this info to calculate the
- * target cache line number.
- */
- clvl = get_cache_level(reg);
- tc_linesz = get_cache_linesz(tcg_ptr, clvl);
- tc_assoc = get_cache_assoc(tcg_ptr, clvl);
- tc_numsets = get_cache_numsets(tcg_ptr, clvl);
-
- wayno = (reg >> __clz(tc_assoc)) & tc_assoc;
- setno = (reg >> (tc_linesz + 4)) & tc_numsets;
- lineno = (setno * (tc_assoc + 1)) + wayno;
-
- if(cmop_debug) {
- /*
- * tc_prev_line is initialised to -1 (unsigned). We can never have so many
- * cache lines. Helps determining when to record the start of a cm op.
- * If count != lineno then either we are not counting or have been counting
- * and now are out of sync. In either case, a new cm op is started
- */
- if (tc_prev_line[cpu_id][clvl] != lineno) {
- tc_prev_line[cpu_id][clvl] = lineno;
- /* All ops start out as partial ops */
- cm_op_stats[cpu_id][clvl].part_cmop_cnt++;
-
- /* Reset all our counters */
- cm_ignline_cnt[cpu_id][clvl] = 0;
- cm_extline_cnt[cpu_id][clvl] = 0;
- hc_line_cnt[cpu_id][clvl] = 0;
- cm_line_cnt[cpu_id][clvl] = 0;
- }
-
- tc_prev_line[cpu_id][clvl]--;
- cm_line_cnt[cpu_id][clvl]++;
- }
-
- /* Convert target cache line no. to absolute cache line no. */
- if (cd_ptr[clvl].tcline_factor)
- abs_lineno = lineno * cd_ptr[clvl].tcline_factor;
-
- /* Convert absolute cache line no. to host cache line no. */
- if (cd_ptr[clvl].hcline_factor)
- lineno = abs_lineno / cd_ptr[clvl].hcline_factor;
-
- /*
- * Find out the set & way no. on the host cache corresponding to the
- * cache line no. calculated on the target cache.
- */
- hc_linesz = get_cache_linesz(hcg_ptr, clvl);
- hc_assoc = get_cache_assoc(hcg_ptr, clvl);
- hc_numsets = get_cache_numsets(hcg_ptr, clvl);
-
- switch (cd_ptr[clvl].csize_diff) {
- case TCSZ_BIG:
- {
- if (abs_lineno <
- cd_ptr[clvl].hnumabs_clines) {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc,
- setno, hc_linesz,
- clvl);;
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
- } else {
- /* Ignore */
- if(cmop_debug)
- cm_ignline_cnt[cpu_id][clvl]++;
-
- }
- }
- break;
- case TCSZ_EQUAL:
- {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc, setno,
- hc_linesz, clvl);;
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
- }
- break;
-
- case TCSZ_SMALL:
- {
- while (ctr) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
- reg =
- get_setway_reg(wayno, hc_assoc, setno,
- hc_linesz, clvl);;
- op_handler(reg);
- lineno++;
- ctr--;
-
- if(cmop_debug)
- hc_line_cnt[cpu_id][clvl]++;
-
- }
-
- /*
- * If the target cache is smaller than the host cache then we
- * need to extend the maintenance operation to rest of the host
- * cache.
- */
- if ((abs_lineno +
- (1 * cd_ptr[clvl].tcline_factor)) ==
- cd_ptr[clvl].tnumabs_clines) {
-
- /*
- * TODO: Temp hack. Due to the cache line factor we end up incrementing
- * the lineno and miss one line.
- */
- lineno--;
- for (lineno++;
- lineno < (hc_numsets + 1) * (hc_assoc + 1);
- lineno++) {
- setno = lineno / (hc_assoc + 1);
- wayno = lineno % (hc_assoc + 1);
-
- /* Create new register value for operation on host cache */
- reg =
- get_setway_reg(wayno, hc_assoc,
- setno, hc_linesz,
- clvl);;
- /* Perform the operation */
- op_handler(reg);
-
- if(cmop_debug)
- cm_extline_cnt[cpu_id][clvl]++;
-
- }
- } else {
- /* Ignore */
- }
- break;
- }
- }
-
-
- if(cmop_debug) {
- /*
- * If the op cnt has reached the maximum cache line number then
- * print the statistics collected so far
- *
- * NOTE: We don't reset the counter. It will done at the start
- * of the next cm op automatically. Its value now is one more
- * than the maximum valid target cache line number.
- */
- if (cm_line_cnt[cpu_id][clvl] == (tc_assoc + 1) * (tc_numsets + 1)) {
-
- printf("%s", __FUNCTION__);
- printf(" : TC Lines=0x%x ", cm_line_cnt[cpu_id][clvl]);
- printf(" : HC Lines=0x%x ", hc_line_cnt[cpu_id][clvl]);
- printf(" : Ign Lines=0x%x ", cm_ignline_cnt[cpu_id][clvl]);
- printf(" : Extra Lines=0x%x ", cm_extline_cnt[cpu_id][clvl]);
- printf("\n");
-
- /* Register this as a complete set/way operation */
- cm_op_stats[cpu_id][clvl].part_cmop_cnt--;
- cm_op_stats[cpu_id][clvl].cmpl_cmop_cnt++;
- }
- }
-
- return;
+ unsigned clvl = 0, cpu_id = read_cpuid();
+ unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0;
+ unsigned hc_assoc = 0, hc_numsets = 0, hc_linesz = 0;
+ unsigned lineno = 0, setno = 0, wayno = 0, abs_lineno = 0;
+
+ /*
+  * If the target cache line size is greater than the host's, then
+  * each maintenance op has to be performed on more than one line
+  * on the host. It does not matter if the line sizes are equal.
+  */
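+ /* NB: clvl is still 0 at this point, so ctr picks up level 0's
+  * line factor; the actual level is only decoded just below.
+  */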
+ unsigned ctr = cd_ptr[clvl].tcline_factor;
+
+ /*
+ * Find out the cache level for which the set/way operation was invoked.
+ * Use this to find the geometry of the target cache to ascertain the
+ * set & way number from the argument. Use this info to calculate the
+ * target cache line number.
+ */
+ clvl = get_cache_level(reg);
+ tc_linesz = get_cache_linesz(tcg_ptr, clvl);
+ tc_assoc = get_cache_assoc(tcg_ptr, clvl);
+ tc_numsets = get_cache_numsets(tcg_ptr, clvl);
+
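+ /*
+  * reg follows the set/way register format: the way index lives in
+  * the top bits (hence the __clz(tc_assoc) shift), the set index
+  * starts at bit log2(line bytes) == tc_linesz + 4, and since
+  * tc_assoc and tc_numsets are stored as (value - 1) they double up
+  * as the field masks.
+  */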
+ wayno = (reg >> __clz(tc_assoc)) & tc_assoc;
+ setno = (reg >> (tc_linesz + 4)) & tc_numsets;
+ lineno = (setno * (tc_assoc + 1)) + wayno;
+
+ if (cmop_debug) {
+ /*
+ * tc_prev_line is initialised to -1 (unsigned); we can never have that
+ * many cache lines, so it helps determine when to record the start of
+ * a cm op. If the saved line != lineno then either we are not counting
+ * or we have been counting and are now out of sync. In either case, a
+ * new cm op is started.
+ */
+ if (tc_prev_line[cpu_id][clvl] != lineno) {
+ tc_prev_line[cpu_id][clvl] = lineno;
+ /* All ops start out as partial ops */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt++;
+
+ /* Reset all our counters */
+ cm_ignline_cnt[cpu_id][clvl] = 0;
+ cm_extline_cnt[cpu_id][clvl] = 0;
+ hc_line_cnt[cpu_id][clvl] = 0;
+ cm_line_cnt[cpu_id][clvl] = 0;
+ }
+
+ tc_prev_line[cpu_id][clvl]--;
+ cm_line_cnt[cpu_id][clvl]++;
+ }
+
+ /* Convert target cache line no. to absolute cache line no. */
+ if (cd_ptr[clvl].tcline_factor)
+ abs_lineno = lineno * cd_ptr[clvl].tcline_factor;
+
+ /* Convert absolute cache line no. to host cache line no. */
+ if (cd_ptr[clvl].hcline_factor)
+ lineno = abs_lineno / cd_ptr[clvl].hcline_factor;
+
+ /*
+ * Find out the set & way no. on the host cache corresponding to the
+ * cache line no. calculated on the target cache.
+ */
+ hc_linesz = get_cache_linesz(hcg_ptr, clvl);
+ hc_assoc = get_cache_assoc(hcg_ptr, clvl);
+ hc_numsets = get_cache_numsets(hcg_ptr, clvl);
+
+ switch (cd_ptr[clvl].csize_diff) {
+ case TCSZ_BIG:
+ {
+ if (abs_lineno < cd_ptr[clvl].hnumabs_clines) {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg = get_setway_reg(wayno, hc_assoc, setno, hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ if (cmop_debug)
+ cm_ignline_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+ case TCSZ_EQUAL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg = get_setway_reg(wayno, hc_assoc, setno, hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+ }
+ break;
+
+ case TCSZ_SMALL:
+ {
+ while (ctr) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+ reg = get_setway_reg(wayno, hc_assoc, setno, hc_linesz, clvl);
+ op_handler(reg);
+ lineno++;
+ ctr--;
+
+ if (cmop_debug)
+ hc_line_cnt[cpu_id][clvl]++;
+
+ }
+
+ /*
+ * If the target cache is smaller than the host cache then we
+ * need to extend the maintenance operation to the rest of the host
+ * cache.
+ */
+ if ((abs_lineno +
+ (1 * cd_ptr[clvl].tcline_factor)) ==
+ cd_ptr[clvl].tnumabs_clines) {
+
+ /*
+ * TODO: Temp hack. Due to the cache line factor we end up incrementing
+ * the lineno and missing one line.
+ */
+ lineno--;
+ for (lineno++;
+ lineno < (hc_numsets + 1) * (hc_assoc + 1);
+ lineno++) {
+ setno = lineno / (hc_assoc + 1);
+ wayno = lineno % (hc_assoc + 1);
+
+ /* Create new register value for operation on host cache */
+ reg = get_setway_reg(wayno, hc_assoc, setno, hc_linesz, clvl);
+ /* Perform the operation */
+ op_handler(reg);
+
+ if (cmop_debug)
+ cm_extline_cnt[cpu_id][clvl]++;
+
+ }
+ } else {
+ /* Ignore */
+ }
+ break;
+ }
+ }
+
+ if (cmop_debug) {
+ /*
+ * If the op cnt has reached the maximum cache line number then
+ * print the statistics collected so far
+ *
+ * NOTE: We don't reset the counter. It will be done at the start
+ * of the next cm op automatically. Its value now is one more
+ * than the maximum valid target cache line number.
+ */
+ if (cm_line_cnt[cpu_id][clvl] ==
+ (tc_assoc + 1) * (tc_numsets + 1)) {
+
+ printf("%s", __FUNCTION__);
+ printf(" : TC Lines=0x%x ", cm_line_cnt[cpu_id][clvl]);
+ printf(" : HC Lines=0x%x ", hc_line_cnt[cpu_id][clvl]);
+ printf(" : Ign Lines=0x%x ",
+ cm_ignline_cnt[cpu_id][clvl]);
+ printf(" : Extra Lines=0x%x ",
+ cm_extline_cnt[cpu_id][clvl]);
+ printf("\n");
+
+ /* Register this as a complete set/way operation */
+ cm_op_stats[cpu_id][clvl].part_cmop_cnt--;
+ cm_op_stats[cpu_id][clvl].cmpl_cmop_cnt++;
+ }
+ }
+
+ return;
}
-
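
To make the set/way arithmetic in handle_cm_op() concrete, a worked example
with invented numbers: a 4-way target level (tc_assoc = 3), 256 sets
(tc_numsets = 255), 64-byte lines (tc_linesz = 2, i.e. 16 words), and equal
host/target line sizes so that tcline_factor == 1:

/* Trapped op on way 2, set 5, level 1 (level bits [3:1] == 0): */
unsigned reg    = (2u << 30) | (5u << 6);   /* __clz(3) == 30, tc_linesz + 4 == 6 */
unsigned wayno  = (reg >> 30) & 3;          /* == 2 */
unsigned setno  = (reg >> 6)  & 255;        /* == 5 */
unsigned lineno = setno * (3 + 1) + wayno;  /* (5 * 4) + 2 == 22 */
/* The TCSZ_* cases then re-encode lineno with the host's geometry via
 * get_setway_reg() and issue one op (ctr == tcline_factor == 1). */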