author    Jon Medhurst <tixy@linaro.org>  2013-12-02 11:47:16 +0000
committer Jon Medhurst <tixy@linaro.org>  2013-12-02 12:54:16 +0000
commit    493c65aab4cfbdec1b065d409fd8b2c9b907d8ec
tree      1f4dd1703e51c4beed66eac5fbceac00b37f9580 /arch
parent    4bb2d496b52029fc12322af09f1a5dda95affdba
ARM: vexpress/TC2: Match mainline cache disabling sequence in tc2_pm_down
When the TC2 pm code was finally upstreamed [1], the cache disabling sequence had been modified to avoid some potential race conditions, so let's backport these changes.

[1] Commit 11b277eabe70 ARM: vexpress/TC2: basic PM support

Signed-off-by: Jon Medhurst <tixy@linaro.org>
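For context, the hazard the new sequence closes can be shown with a minimal sketch of the old shape of the code (illustration only; the function name is hypothetical, and the helpers are the get_cr()/set_cr()/flush_cache_all() ones from <asm/cp15.h> and <asm/cacheflush.h> that the old code called):

static void tc2_pm_down_old_shape(void)
{
	/* SCTLR.C cleared: stores now bypass the D-cache */
	set_cr(get_cr() & ~CR_C);
	/*
	 * Race window: compiler spills and flush_cache_all()'s own
	 * stack frame are written uncached from here on, while older
	 * stack data may still sit dirty in the cache; the flush can
	 * then write those stale lines back over the fresh data.
	 */
	flush_cache_all();
}

The backported sequence in the diff below instead performs the disable, flush and coherency exit inside a single asm block that makes no memory access at all, including to the stack.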
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-vexpress/tc2_pm.c | 64
1 file changed, 49 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 2b519eee84d..b4b090ccc61 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -135,20 +135,40 @@ static void tc2_pm_down(u64 residency)
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&tc2_pm_lock);
- set_cr(get_cr() & ~CR_C);
- flush_cache_all();
- asm volatile ("clrex");
- set_auxcr(get_auxcr() & ~(1 << 6));
-
- cci_disable_port_by_cpu(mpidr);
+ if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+ /*
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
+ */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+ "isb \n\t"
+ "dsb "
+ : : "r" (0x400) );
+ }
/*
- * Ensure that both C & I bits are disabled in the SCTLR
- * before disabling ACE snoops. This ensures that no
- * coherency traffic will originate from this cpu after
- * ACE snoops are turned off.
+ * We need to disable and flush the whole (L1 and L2) cache.
+ * Let's do it in the safest possible way i.e. with
+ * no memory access within the following sequence
+ * including the stack.
*/
- cpu_proc_fin();
+ asm volatile(
+ "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
+ "bic r0, r0, #"__stringify(CR_C)" \n\t"
+ "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
+ "isb \n\t"
+ "bl v7_flush_dcache_all \n\t"
+ "clrex \n\t"
+ "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
+ "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
+ "isb \n\t"
+ "dsb "
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7",
+ "r9","r10","r11","lr","memory");
+
+ cci_disable_port_by_cpu(mpidr);
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
} else {
@@ -162,10 +182,24 @@ static void tc2_pm_down(u64 residency)
arch_spin_unlock(&tc2_pm_lock);
- set_cr(get_cr() & ~CR_C);
- flush_cache_louis();
- asm volatile ("clrex");
- set_auxcr(get_auxcr() & ~(1 << 6));
+ /*
+ * We need to disable and flush only the L1 cache.
+ * Let's do it in the safest possible way as above.
+ */
+ asm volatile(
+ "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
+ "bic r0, r0, #"__stringify(CR_C)" \n\t"
+ "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
+ "isb \n\t"
+ "bl v7_flush_dcache_louis \n\t"
+ "clrex \n\t"
+ "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
+ "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
+ "isb \n\t"
+ "dsb "
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7",
+ "r9","r10","r11","lr","memory");
}
__mcpm_cpu_down(cpu, cluster);
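A note on the design of the asm blocks above (an observation, not part of the patch): the clobber lists name r0-r7, r9-r11 and lr, i.e. exactly the registers that arch/arm/mm/cache-v7.S documents as corrupted by v7_flush_dcache_all and v7_flush_dcache_louis, plus lr for the bl itself. Declaring them makes the compiler move any live values into r8 or spill them before the block begins, so nothing has to touch the stack while the D-cache is off, and the "memory" clobber keeps earlier memory accesses from being reordered into the sequence.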