author    Mark Brown <broonie@linaro.org>  2013-12-05 10:15:12 +0000
committer Mark Brown <broonie@linaro.org>  2013-12-05 10:15:12 +0000
commit    3b21b35761de8348dd2f7cff9752449281d7a17b (patch)
tree      4082db98fd37b572eb17838363a3ba4e96a7d7cf /arch
parent    c04e0d85bf2565725091fd652c0b444b993fd993 (diff)
parent    60572d7b1197b01f0b7c56e5ce31547c919df64b (diff)
Merge remote-tracking branch 'lsk/linux-linaro-lsk' into linux-linaro-lsk
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/cacheflush.h  |  46
-rw-r--r--  arch/arm/mach-vexpress/dcscb.c     |  32
-rw-r--r--  arch/arm/mach-vexpress/tc2_pm.c    |  30
3 files changed, 64 insertions(+), 44 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672f..beb87022178 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -436,4 +436,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and all of this without any memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - ldrex/strex (and similar) instructions must not be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations;
+ * however, some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ * fp is preserved to the stack explicitly prior to disabling the cache
+ * since adding it to the clobber list is incompatible with having
+ * CONFIG_FRAME_POINTER=y. ip is saved as well in case r12-clobbering
+ * trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "stmfd sp!, {fp, ip} \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "clrex \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb \n\t" \
+ "ldmfd sp!, {fp, ip}" \
+ : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r9","r10","lr","memory" )
+
#endif
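
The comment block above spells out the macro's contract, but no caller
appears until the vexpress diffs below. Here is a minimal sketch of the
intended use on a CPU about to be powered off; the function name
my_cpu_die and the bare WFI loop are hypothetical, not part of the patch.

#include <asm/cacheflush.h>

static void my_cpu_die(void)
{
	/* Stop allocations, flush to the LoUIS, leave SMP coherency. */
	v7_exit_coherency_flush(louis);

	/*
	 * From here on this CPU is no longer coherent with the others:
	 * no ldrex/strex, no shared data. Just wait for power to be cut.
	 */
	while (1)
		asm volatile("wfi" : : : "memory");
}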
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 948aec0890f..b35700f8e01 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -134,17 +134,8 @@ static void dcscb_power_down(void)
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush all cache levels for this cluster.
- *
- * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
- * a preliminary flush here for those CPUs. At least, that's
- * the theory -- without the extra flush, Linux explodes on
- * RTSM (to be investigated).
- */
- flush_cache_all();
- set_cr(get_cr() & ~CR_C);
- flush_cache_all();
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
/*
* This is a harmless no-op. On platforms with a real
@@ -153,9 +144,6 @@ static void dcscb_power_down(void)
*/
outer_flush_all();
- /* Disable local coherency by clearing the ACTLR "SMP" bit: */
- set_auxcr(get_auxcr() & ~(1 << 6));
-
/*
* Disable cluster-level coherency by masking
* incoming snoops and DVM messages:
@@ -166,20 +154,8 @@ static void dcscb_power_down(void)
} else {
arch_spin_unlock(&dcscb_lock);
- /*
- * Flush the local CPU cache.
- *
- * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
- * a preliminary flush here for those CPUs. At least, that's
- * the theory -- without the extra flush, Linux explodes on
- * RTSM (to be investigated).
- */
- flush_cache_louis();
- set_cr(get_cr() & ~CR_C);
- flush_cache_louis();
-
- /* Disable local coherency by clearing the ACTLR "SMP" bit: */
- set_auxcr(get_auxcr() & ~(1 << 6));
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
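
For contrast with the hunk above, a reconstruction of the open-coded
sequence being deleted, with the reasoning from the removed comments
folded in (illustrative only; the helper name is hypothetical, while the
accessors and flush calls are those used by the old dcscb.c code):

#include <asm/cacheflush.h>
#include <asm/cp15.h>

static void old_style_exit_coherency(void)
{
	flush_cache_louis();	/* preliminary flush: in theory redundant
				 * on A15/A7, which can still hit in the
				 * cache with SCTLR.C=0, yet needed in
				 * practice (Linux exploded on RTSM
				 * without it) */
	set_cr(get_cr() & ~CR_C);	/* stop further cache allocations */
	flush_cache_louis();	/* main flush, after allocations stop */
	set_auxcr(get_auxcr() & ~(1 << 6));	/* clear ACTLR "SMP" bit */
}

v7_exit_coherency_flush() collapses these steps into a single asm block
with no intervening memory access, not even to the stack, so one flush
suffices and the window between the C calls disappears.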
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 2b519eee84d..9fc264a3bad 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -135,20 +135,21 @@ static void tc2_pm_down(u64 residency)
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&tc2_pm_lock);
- set_cr(get_cr() & ~CR_C);
- flush_cache_all();
- asm volatile ("clrex");
- set_auxcr(get_auxcr() & ~(1 << 6));
+ if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+ /*
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
+ */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+ "isb \n\t"
+ "dsb "
+ : : "r" (0x400) );
+ }
- cci_disable_port_by_cpu(mpidr);
+ v7_exit_coherency_flush(all);
- /*
- * Ensure that both C & I bits are disabled in the SCTLR
- * before disabling ACE snoops. This ensures that no
- * coherency traffic will originate from this cpu after
- * ACE snoops are turned off.
- */
- cpu_proc_fin();
+ cci_disable_port_by_cpu(mpidr);
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
} else {
@@ -162,10 +163,7 @@ static void tc2_pm_down(u64 residency)
arch_spin_unlock(&tc2_pm_lock);
- set_cr(get_cr() & ~CR_C);
- flush_cache_louis();
- asm volatile ("clrex");
- set_auxcr(get_auxcr() & ~(1 << 6));
+ v7_exit_coherency_flush(louis);
}
__mcpm_cpu_down(cpu, cluster);
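
The Cortex-A15 branch added above is the one piece of tc2_pm_down() that
stays open-coded. A hedged sketch of how it could be factored into a
helper (the function name is hypothetical; the CP15 register p15, 1,
c15, c0, 3 and the 0x400 value are taken verbatim from the hunk):

#include <asm/cputype.h>

static inline void tc2_disable_l2_prefetch(void)
{
	if (read_cpuid_part_number() != ARM_CPU_PART_CORTEX_A15)
		return;
	/* Disable L2 prefetching before the cache flush, then synchronize. */
	asm volatile(
	"mcr	p15, 1, %0, c15, c0, 3\n\t"
	"isb\n\t"
	"dsb"
	: : "r" (0x400));
}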