diff options
author | Dietmar Eggemann <dietmar.eggemann@arm.com> | 2012-05-03 12:55:22 +0100 |
---|---|---|
committer | Dietmar Eggemann <dietmar.eggemann@arm.com> | 2012-05-23 12:44:34 +0100 |
commit | f085e47d3dc30aefb0e25129537ab35de56b3265 (patch) | |
tree | 81e35bcefdafde4ee0f21b9b56b28f0d435aee23 /big-little/virtualisor/cache_geom.c | |
parent | 4f48994f7037b257b7f48bd9574db09d119e7cc4 (diff) |
Hide differences in I-cache Topology.
The virtualization of the L1 instruction cache is introduced by this patch.
The A7 CPU has a VIPT L1 instruction cache with a cache line length of 32 bytes
whereas the A15 CPU has a PIPT L1 instruction cache with a cache line length of
64 bytes.
Virtualization of the L1 instruction cache does not follow the approach of the
already existing virtualization of the data/unified caches, where cache
operations are virtualized on the host cluster to use the values of the target
cluster.
Instead, this patch guarantees that for L1 instruction cache operations on the
A15 CPU, the CCSIDR value of the A7 is used.
The ccsidr member of the structure cache_geometry is now a two-dimensional
array being able to hold the appropriate values for the instruction and the
data/unified caches.
In order to be able to trap cache identification registers on the A15 CPU,
regardless of whether it is on the host or target cluster, the TID2 bit in the HCR
register is set in the A15 CPU specific trap setup function a15_trap_handle()
as well as restored in the A15 CPU specific trap restore function
a15_trap_restore(). This is of course only done if the sibling CPU is an A7.
The default CP15 trap function trap_cp15_mrc_mcr_handle(), which runs before
the CPU specific trap functions, sets the L1 instruction cache CCSIDR value
incorrectly on A7 and A15 for Virtualizer configuration [BC=x, TC=A15, HC=A7].
This error is corrected in the A7 or A15 CPU specific trap handle function.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Diffstat (limited to 'big-little/virtualisor/cache_geom.c')
-rw-r--r-- | big-little/virtualisor/cache_geom.c | 65 |
1 files changed, 32 insertions, 33 deletions
diff --git a/big-little/virtualisor/cache_geom.c b/big-little/virtualisor/cache_geom.c index 17c3ee6..7bcb42b 100644 --- a/big-little/virtualisor/cache_geom.c +++ b/big-little/virtualisor/cache_geom.c @@ -36,51 +36,50 @@ static unsigned cm_ignline_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 }; static unsigned cm_extline_cnt[NUM_CPUS][MAX_CACHE_LEVELS] = { 0 }; /* - * Iterate through all the implemented cache - * levels and save the geometry at each level. + * Iterate through all the implemented cache levels and save the geometry at + * each level. * */ void find_cache_geometry(cache_geometry * cg_ptr) { - unsigned ctr, clidr, ccsidr, csselr, old_csselr; + unsigned ctr, csselr; - /* Save Cache size selection register */ - old_csselr = read_csselr(); - clidr = read_clidr(); - cg_ptr->clidr = clidr; + /* Save cache size selection register */ + csselr = read_csselr(); - for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) { - unsigned cache_type = get_cache_type(clidr, ctr); + cg_ptr->clidr = read_clidr(); - /* Only seperate and Unifiied caches */ - if (cache_type >= 0x3) { - /* - * Choose the cache level & Data or Unified cache - * as there are no set/way operations on the ICache - */ - csselr = ctr << 1; - write_csselr(csselr); + for (ctr = 0; ctr < MAX_CACHE_LEVELS; ctr++) { + unsigned cache_type = get_cache_type(cg_ptr->clidr, ctr); + if (cache_type == 0x03) { + /* instruction cache */ + write_csselr((ctr << 1) | CIND_INST); isb(); - + cg_ptr->ccsidr[ctr][CIND_INST] = read_ccsidr(); + /* data cache */ + write_csselr(ctr << 1); + isb(); + cg_ptr->ccsidr[ctr][CIND_DATA] = read_ccsidr(); + } + else if (cache_type == 0x04) { + /* unified cache */ + write_csselr(ctr << 1); + isb(); + cg_ptr->ccsidr[ctr][CIND_UNIF] = read_ccsidr(); + } + else { /* - * Read the CCSIDR to record information about this - * cache level. 
- */ - ccsidr = read_ccsidr(); - cg_ptr->ccsidr[ctr] = ccsidr; - - } else { - /* - * Stop scanning at the first invalid/unsupported - * cache level + * Stop scanning at the first invalid/unsupported cache + * level */ break; } + } - /* Restore Cache size selection register */ - write_csselr(old_csselr); + /* Restore cache size selection register */ + write_csselr(csselr); return; } @@ -203,7 +202,7 @@ unsigned map_cache_geometries(cache_geometry * hcg_ptr, /* * Enable bit for trapping set/way operations & - * Cache identification regs + * cache identification registers */ hcr = read_hcr(); hcr |= HCR_TSW | HCR_TID2; @@ -212,7 +211,6 @@ unsigned map_cache_geometries(cache_geometry * hcg_ptr, isb(); } else { - /* Find the cache geometry on the target cpu */ find_cache_geometry(tcg_ptr); @@ -235,7 +233,8 @@ unsigned map_cache_geometries(cache_geometry * hcg_ptr, void handle_cm_op(unsigned reg, void (*op_handler) (unsigned), cache_geometry * hcg_ptr, - cache_geometry * tcg_ptr, cache_diff * cd_ptr) + cache_geometry * tcg_ptr, + cache_diff * cd_ptr) { unsigned clvl = 0, cpu_id = read_cpuid(); unsigned tc_assoc = 0, tc_numsets = 0, tc_linesz = 0; |