aboutsummaryrefslogtreecommitdiff
path: root/arch/arm/common/bL_switcher.c
diff options
context:
space:
mode:
authorNicolas Pitre <nicolas.pitre@linaro.org>2012-10-23 01:39:08 -0400
committerNicolas Pitre <nicolas.pitre@linaro.org>2013-06-19 16:54:27 -0400
commit8e9c2d29bf6fee7a99db920c8a08aa691c2973d3 (patch)
tree40b928ffcb36919e4d84522e8f74141329c38cba /arch/arm/common/bL_switcher.c
parentfb57f198a209840d4e4653a5883d0f184a1a77ee (diff)
ARM: bL_switcher: synchronize the outbound with the inbound
Let's wait for the inbound to come up and snoop some of the outbound cache. That should be a bit more efficient than going down right away. Monitoring the CCI event counters could be a better approach eventually. Signed-off-by: Nicolas Pitre <nico@linaro.org>
Diffstat (limited to 'arch/arm/common/bL_switcher.c')
-rw-r--r--arch/arm/common/bL_switcher.c24
1 file changed, 22 insertions, 2 deletions
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index e379c918f08..0de28f101b7 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -52,9 +52,10 @@ static int read_mpidr(void)
* bL switcher core code.
*/
-static void bL_do_switch(void *_unused)
+static void bL_do_switch(void *_arg)
{
unsigned ib_mpidr, ib_cpu, ib_cluster;
+ long volatile handshake, **handshake_ptr = _arg;
pr_debug("%s\n", __func__);
@@ -62,6 +63,13 @@ static void bL_do_switch(void *_unused)
ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+ /* Advertise our handshake location */
+ if (handshake_ptr) {
+ handshake = 0;
+ *handshake_ptr = &handshake;
+ } else
+ handshake = -1;
+
/*
* Our state has been saved at this point. Let's release our
* inbound CPU.
@@ -80,6 +88,14 @@ static void bL_do_switch(void *_unused)
* we have none.
*/
+ /*
+ * Let's wait until our inbound is alive.
+ */
+ while (!handshake) {
+ wfe();
+ smp_mb();
+ }
+
/* Let's put ourself down. */
mcpm_cpu_power_down();
@@ -127,6 +143,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
struct tick_device *tdev;
enum clock_event_mode tdev_mode;
+ long volatile *handshake_ptr;
int ret;
this_cpu = smp_processor_id();
@@ -200,7 +217,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
sync_cache_w(&cpu_logical_map(this_cpu));
/* Let's do the actual CPU switch. */
- ret = cpu_suspend(0, bL_switchpoint);
+ ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
if (ret > 0)
panic("%s: cpu_suspend() returned %d\n", __func__, ret);
@@ -222,6 +239,9 @@ static int bL_switch_to(unsigned int new_cluster_id)
local_fiq_enable();
local_irq_enable();
+ *handshake_ptr = 1;
+ dsb_sev();
+
if (ret)
pr_err("%s exiting with error %d\n", __func__, ret);
return ret;