author    Dietmar Eggemann <dietmar.eggemann@arm.com>    2012-03-15 13:00:25 +0000
committer Dietmar Eggemann <dietmar.eggemann@arm.com>    2012-05-22 10:43:46 +0100
commit    75c47c425e99faa6180b738aa67828bd41df7815 (patch)
tree      931135fb2f5013b051ebcc3db5829f7fe07bab30
parent    1e79620987334875f4386f5761d1a75edd0c8ad3 (diff)
Hotplug: Add cpu hotplug support to the virtualizer.
This patch introduces the mechanics to hotplug individual cpus, as well as to bring them out of hotplug, in coexistence with the cluster-switching functionality of the virtualizer.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
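Before the diffstat, a brief orientation: the patch threads an operation type (cluster switch vs. cpu hotplug) through the context save/restore path and the secure-world SMC interface. A hedged sketch of the new call shapes, assembled from the hunks below purely for orientation:

    save_context(first_cpu, OP_TYPE_SWITCH);            /* outbound side of a switch */
    smc(SMC_SEC_SHUTDOWN, cpu_mask, OP_TYPE_SWITCH);    /* smc() now takes three arguments */
    restore_context(first_cpu, find_restore_op_type()); /* warm-reset/inbound side */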
-rw-r--r--big-little/common/hyp_setup.c4
-rwxr-xr-xbig-little/common/hyp_vectors.s9
-rwxr-xr-xbig-little/common/pagetable_setup.c13
-rw-r--r--big-little/include/context.h14
-rw-r--r--big-little/include/events.h7
-rw-r--r--big-little/include/misc.h6
-rw-r--r--big-little/include/virt_helpers.h2
-rwxr-xr-xbig-little/secure_world/monmode_vectors.s32
-rw-r--r--big-little/secure_world/secure_context.c33
-rw-r--r--big-little/secure_world/secure_resets.c114
-rw-r--r--big-little/secure_world/secure_world.h12
-rw-r--r--big-little/switcher/context/ns_context.c353
-rw-r--r--big-little/switcher/trigger/async_switchover.c48
-rwxr-xr-xbig-little/switcher/trigger/handle_switchover.s36
-rw-r--r--big-little/switcher/trigger/sync_switchover.c3
15 files changed, 428 insertions, 258 deletions
diff --git a/big-little/common/hyp_setup.c b/big-little/common/hyp_setup.c
index b0cda90..d111e22 100644
--- a/big-little/common/hyp_setup.c
+++ b/big-little/common/hyp_setup.c
@@ -28,14 +28,12 @@
#include "bl.h"
extern unsigned vectors;
-extern system_context switcher_context;
extern void SetupVirtExtPageTables(unsigned, unsigned);
extern void Enable2ndStagePageTables(void);
extern void monmode_setup(void);
extern void config_uart(void);
extern void SetupVGIC(unsigned);
extern void enable_trigger(unsigned);
-extern void restore_context(unsigned);
extern unsigned async_switchover;
unsigned host_cluster = HOST_CLUSTER;
@@ -72,7 +70,7 @@ void bl_rest_init(void)
* Ask the secure world to initialise its context.
* Not required when "always on"
*/
- smc(SMC_SEC_INIT, 0);
+ smc(SMC_SEC_INIT, 0, 0);
/*
* Since we are using the shared vgic, we need to map
diff --git a/big-little/common/hyp_vectors.s b/big-little/common/hyp_vectors.s
index b8476d8..1249b5f 100755
--- a/big-little/common/hyp_vectors.s
+++ b/big-little/common/hyp_vectors.s
@@ -40,7 +40,8 @@
IMPORT write_httbr
IMPORT write_htcr
IMPORT bl_rest_init
- IMPORT hyp_l1_pagetable
+ IMPORT hyp_l1_pagetable
+ IMPORT find_restore_op_type
IF ASYNC_SWITCH = {FALSE}
IMPORT is_hvc
@@ -389,8 +390,10 @@ hyp_warm_reset_handler FUNCTION
; Restore the context now. CPU0 is the first cpu
; ----------------------------------------------------
hyp_entry sp
- mov r0, #0
- bl restore_context
+ bl find_restore_op_type
+ mov r1, r0
+ mov r0, #0
+ bl restore_context
hyp_exit sp
ENDFUNC
diff --git a/big-little/common/pagetable_setup.c b/big-little/common/pagetable_setup.c
index 1686ef8..4a25a8f 100755
--- a/big-little/common/pagetable_setup.c
+++ b/big-little/common/pagetable_setup.c
@@ -434,6 +434,8 @@ void SetupVirtExtPageTables(void)
unsigned first_cpu = find_first_cpu();
unsigned cluster_id = read_clusterid();
unsigned abs_cpuid = 0;
+ unsigned cpu_mask = 0;
+ unsigned num_cpus = 0;
if (!switcher)
abs_cpuid = abs_cpuid(cpu_id, cluster_id);
@@ -454,7 +456,10 @@ void SetupVirtExtPageTables(void)
if (cpu_id == first_cpu) {
CreateHypModePageTables();
Create2ndStagePageTables();
- set_events(VIRT_PGT_DONE);
+
+ /* Send the event to all the cpus on this cluster */
+ cpu_mask = (((1 << num_secondaries()) - 1) << 1) | 0x1;
+ set_events(VIRT_PGT_DONE, cpu_mask);
}
wait_for_event(VIRT_PGT_DONE, cpu_id);
@@ -469,7 +474,11 @@ void SetupVirtExtPageTables(void)
if (cpu_id == first_cpu && cluster_id == host_cluster) {
CreateHypModePageTables();
Create2ndStagePageTables();
- set_events(VIRT_PGT_DONE);
+
+ /* Send the event to all the cpus on both clusters */
+ num_cpus = CLUSTER_CPU_COUNT(cluster_id) + CLUSTER_CPU_COUNT(!cluster_id);
+ cpu_mask = (1 << num_cpus) - 1;
+ set_events(VIRT_PGT_DONE, cpu_mask);
}
wait_for_event(VIRT_PGT_DONE, abs_cpuid);
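The two event masks built above have different shapes: in the switcher case the event goes to this cluster's cpus only (secondaries shifted up past the primary in bit 0), while in the always-on case it covers every cpu across both clusters. A standalone sketch of the arithmetic, with hypothetical values standing in for num_secondaries() and CLUSTER_CPU_COUNT():

    #include <stdio.h>

    static unsigned num_secondaries(void) { return 3; } /* hypothetical */
    #define CLUSTER_CPU_COUNT(c) 4                      /* hypothetical */

    int main(void)
    {
        /* Switcher: primary cpu in bit 0, secondaries shifted past it. */
        unsigned switcher_mask = (((1u << num_secondaries()) - 1) << 1) | 0x1;

        /* Always-on: one bit per cpu on both clusters. */
        unsigned num_cpus = CLUSTER_CPU_COUNT(0) + CLUSTER_CPU_COUNT(1);
        unsigned always_on_mask = (1u << num_cpus) - 1;

        printf("switcher mask  = 0x%x\n", switcher_mask);  /* 0xf */
        printf("always-on mask = 0x%x\n", always_on_mask); /* 0xff */
        return 0;
    }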
diff --git a/big-little/include/context.h b/big-little/include/context.h
index 11a737c..5beaa42 100644
--- a/big-little/include/context.h
+++ b/big-little/include/context.h
@@ -121,12 +121,18 @@ typedef struct system_context {
unsigned warm_reset;
} system_context;
-extern void context_save(unsigned, unsigned);
-extern void context_restore(unsigned, unsigned);
+/*
+ * Constants to indicate whether context manipulation is done
+ * as per a hotplug or cluster switch request.
+ */
+#define OP_TYPE_SWITCH 0x1
+#define OP_TYPE_HP 0x2
+
+extern unsigned find_restore_op_type(void);
+extern void save_context(unsigned, unsigned);
+extern void restore_context(unsigned, unsigned);
extern void save_generic_timers(generic_timer_context *);
extern void restore_eagle_timers(generic_timer_context *);
-extern void save_hyp_context(unsigned, unsigned);
-extern void restore_hyp_context(unsigned, unsigned);
extern void save_vfp(unsigned *);
extern void restore_vfp(unsigned *);
extern void enable_trigger(unsigned);
diff --git a/big-little/include/events.h b/big-little/include/events.h
index abebaa9..dc0025d 100644
--- a/big-little/include/events.h
+++ b/big-little/include/events.h
@@ -62,11 +62,10 @@
#define VID_REGS_DONE 11
/* Defines for Secure events */
-#define MAX_SEC_EVENTS 4
+#define MAX_SEC_EVENTS 3
#define SEC_L1_DONE 0
-#define OB_SHUTDOWN 1
-#define FLUSH_L2 2
-#define SETUP_RST 3
+#define FLUSH_L2 1
+#define SETUP_RST 2
extern void set_event(unsigned, unsigned);
diff --git a/big-little/include/misc.h b/big-little/include/misc.h
index cc7cb3b..bd15787 100644
--- a/big-little/include/misc.h
+++ b/big-little/include/misc.h
@@ -284,6 +284,11 @@
#define EAGLE 0x0
#define KFC 0x1
+/* Platform defines */
+#define VE_SYS_BASE 0x1C010000
+#define FLAGS_SET 0x30
+#define FLAGS_CLR 0x34
+
/* Control register bits */
#define CR_M (1<<0) /* MMU enabled */
#define CR_A (1<<1) /* Align fault enable */
@@ -396,6 +401,7 @@ extern unsigned BL_DV_PAGE$$Base;
extern unsigned BL_SEC_DV_PAGE$$Base;
extern unsigned host_cluster;
extern unsigned switcher;
+extern unsigned switchable_cpus_mask;
#define bitindex(x) (31-__builtin_clz(x))
#define find_first_cpu() 0
diff --git a/big-little/include/virt_helpers.h b/big-little/include/virt_helpers.h
index 7ec73c9..1388e35 100644
--- a/big-little/include/virt_helpers.h
+++ b/big-little/include/virt_helpers.h
@@ -41,7 +41,7 @@ extern unsigned num_secondaries(void);
extern unsigned *get_sp(unsigned, unsigned);
extern void virt_dead(void);
-extern void smc(unsigned, unsigned);
+extern void smc(unsigned, unsigned, unsigned);
extern void dcisw(unsigned);
extern void dccsw(unsigned);
extern void dccisw(unsigned);
diff --git a/big-little/secure_world/monmode_vectors.s b/big-little/secure_world/monmode_vectors.s
index b070e81..641de50 100755
--- a/big-little/secure_world/monmode_vectors.s
+++ b/big-little/secure_world/monmode_vectors.s
@@ -26,7 +26,7 @@
SMC_SEC_INIT EQU 0x0
SMC_SEC_SAVE EQU 0x1
-SMC_SEC_SHUTDOWN EQU 0x2
+SMC_SEC_SHUTDOWN EQU 0x2
L1 EQU 0x0
L2 EQU 0x1
INV EQU 0x0
@@ -91,7 +91,7 @@ STACK_SIZE EQU (96 << 2)
IMPORT write_nmrr
IMPORT get_sp
IMPORT secure_context_restore
- IMPORT powerdown_cluster
+ IMPORT do_power_op
IMPORT get_powerdown_stack
IMPORT wfi
IMPORT read_cpuid
@@ -181,9 +181,9 @@ monmode_fiq_vec
; Also assumes the availability of r4-r7
do_smc FUNCTION
; Switch to non-secure banked registers
- MRC p15, 0, r2, c1, c1, 0
- BIC r2, #SCR_NS
- MCR p15, 0, r2, c1, c1, 0
+ MRC p15, 0, r3, c1, c1, 0
+ BIC r3, #SCR_NS
+ MCR p15, 0, r3, c1, c1, 0
ISB
; Check if we are being called to setup the world
@@ -194,8 +194,8 @@ do_smc FUNCTION
BEQ save_secure
CMP r0, #SMC_SEC_SHUTDOWN
- BEQ shutdown_cluster
-
+ BEQ shutdown
+
smc_done
; Return to non-secure banked registers
MRC p15, 0, r0, c1, c1, 0
@@ -205,11 +205,22 @@ smc_done
ERET
ENDFUNC
-shutdown_cluster
+shutdown
+ MOV r4, r1
+ MOV r5, r2
+ ; ----------------------------------------------------
+ ; We need to switch stacks from being resident in normal
+ ; WBWA/S memory to SO memory to prevent potential stack
+ ; corruption after turning off the C bit in the SCTLR.
+ ; Subsequent accesses will be SO while there will be
+ ; valid cache lines of the stack from prior accesses
+ ; ----------------------------------------------------
BL read_cpuid
BL get_powerdown_stack
- MOV sp, r0
- BL powerdown_cluster
+ MOV sp, r0
+ MOV r0, r4
+ MOV r1, r5
+ BL do_power_op
enter_wfi
BL wfi
B enter_wfi
@@ -217,6 +228,7 @@ enter_wfi
save_secure
PUSH {lr}
MOV r0, r1
+ MOV r1, r2
BL secure_context_save
POP {lr}
B smc_done
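Restated as pseudo-C (a sketch of the assembly above; set_sp() is a hypothetical stand-in for the MOV sp, r0, and the r4/r5 shuffling simply preserves the SMC arguments across the helper calls):

    /* Sketch of the shutdown path in do_smc. */
    void shutdown(unsigned cpu_mask, unsigned op_type)
    {
        /* Switch to a stack in Strongly-Ordered memory before the C bit
         * in the SCTLR is cleared, so stale cache lines of the old stack
         * cannot corrupt it. */
        set_sp(get_powerdown_stack(read_cpuid()));
        do_power_op(cpu_mask, op_type);
        for (;;)
            wfi(); /* enter_wfi loop: never returns */
    }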
diff --git a/big-little/secure_world/secure_context.c b/big-little/secure_world/secure_context.c
index 16c50ac..321f4ab 100644
--- a/big-little/secure_world/secure_context.c
+++ b/big-little/secure_world/secure_context.c
@@ -22,8 +22,6 @@
#include "secure_world.h"
-extern powerup_ib_core(unsigned, unsigned);
-
sec_context secure_context[MAX_CORES] __attribute__ ((aligned(CACHE_LINE_SZ)));
unsigned ns_entry_ptr[MAX_CORES];
unsigned small_pagetable[1024] __attribute__ ((aligned(4096)));
@@ -107,10 +105,11 @@ void secure_context_restore(void)
return;
}
-void secure_context_save(unsigned ns_entry_point)
+void secure_context_save(unsigned ns_entry_point, unsigned op_type)
{
- unsigned cpu_id = read_cpuid();
+ unsigned cpu_id = read_cpuid(), ib_cluster = get_inbound();
sec_context *sec_ctx = &secure_context[cpu_id];
+ void (*warm_reset_handler) (void) = (void (*)(void))&warm_reset;
ns_entry_ptr[cpu_id] = ns_entry_point;
sec_ctx->cci_sar = read32(CCI_BASE + SECURE_ACCESS_REG);
@@ -125,11 +124,27 @@ void secure_context_save(unsigned ns_entry_point)
sec_ctx->sctlr = read_sctlr();
sec_ctx->cntfrq = read_cntfrq();
- /*
- * Now that the context has been saved, its safe to bring
- * our counterpart on the inbound cluster out of reset.
- */
- powerup_ib_core(get_inbound(), cpu_id);
+ switch (op_type) {
+ case OP_TYPE_SWITCH:
+ /*
+ * Now that the context has been saved, it's safe to bring
+ * our counterpart on the inbound cluster out of reset.
+ */
+ set_reset_handler(ib_cluster, cpu_id, warm_reset_handler);
+ powerup_ib_core(ib_cluster, cpu_id);
+ break;
+ case OP_TYPE_HP:
+ /*
+ * Ensure that the correct warm reset handler is set for
+ * our way back.
+ */
+ set_reset_handler(read_clusterid(), cpu_id, warm_reset_handler);
+ /* Set it for the inbound as well in case we get switched */
+ set_reset_handler(ib_cluster, cpu_id, warm_reset_handler);
+ break;
+ default:
+ break;
+ }
return;
}
diff --git a/big-little/secure_world/secure_resets.c b/big-little/secure_world/secure_resets.c
index 449264e..8c59d4a 100644
--- a/big-little/secure_world/secure_resets.c
+++ b/big-little/secure_world/secure_resets.c
@@ -24,8 +24,6 @@
#include "events.h"
#include "bakery.h"
-extern unsigned warm_reset;
-
/* Bakery lock to serialize access to the tube. */
bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
0};
@@ -38,6 +36,16 @@ bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
*/
static unsigned flush_ob_l2 = FLUSH_OB_L2;
+#if FLUSH_L2_FIX
+/*
+ * TODO:
+ * Dirty hack for backward compatibility. This
+ * variable helps determine whether this is the
+ * first switch.
+ */
+static unsigned switch_count = 0;
+#endif
+
#if FM_BETA
/*
* Variable in secure world to indicate the
@@ -64,8 +72,7 @@ static unsigned lock_ib_kfscb;
* KFSCB. It will always be used when the MMU is off.
* Each cluster will anyways use it sequentially
*/
-static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
-0};
+static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = {0};
/*
* Small stacks for after we have turned our caches off.
@@ -73,26 +80,14 @@ static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = {
static unsigned long long powerdown_stacks[NUM_CPUS][32]
__attribute__ ((section("BL_SEC_DV_PAGE")));
-/*
- * The way a warm reset is detected has changed in the post beta FastModels.
- * The following workarounds make the earlier approach coexist with the
- * new one. Instead of dealing with a function pointer, they manipulate a
- * variable.
- */
-static void set_reset_handler(unsigned cluster_id, unsigned cpu_id,
- void (*handler) (void))
+
+unsigned long long *get_powerdown_stack(unsigned cpu_id)
{
-#if FM_BETA
- ve_reset_type[cpu_id]++;
- cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
-#else
- write32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3),
- (unsigned)handler);
- dsb();
-#endif
+ return &powerdown_stacks[cpu_id + 1][0];
}
-static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void) {
+static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void)
+{
#if FM_BETA
return (void (*)(void))ve_reset_type[cpu_id];
#else
@@ -101,9 +96,28 @@ static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void) {
#endif
}
-unsigned long long *get_powerdown_stack(unsigned cpu_id)
+/*
+ * The way a warm reset is detected has changed in the post beta FastModels.
+ * The following workarounds make the earlier approach coexist with the
+ * new one. Instead of dealing with a function pointer, they manipulate a
+ * variable.
+ */
+void set_reset_handler(unsigned cluster_id, unsigned cpu_id, void (*handler)(void))
{
- return &powerdown_stacks[cpu_id + 1][0];
+ void (*prev_reset_handler)(void) = get_reset_handler(cluster_id, cpu_id);
+
+ if (prev_reset_handler != handler) {
+#if FM_BETA
+ ve_reset_type[cpu_id]++;
+ cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
+#else
+ write32(KFSCB_BASE + RST_HANDLER0 + ((cpu_id + (cluster_id << 2)) << 3),
+ (unsigned) handler);
+ dsb();
+#endif
+ }
+
+ return;
}
unsigned get_inbound()
@@ -118,27 +132,18 @@ void powerup_ib_core(unsigned cluster_id, unsigned cpu_id)
{
unsigned rst_stat_reg = 0x0;
unsigned cpu_mask = 0x0;
- void (*cold_reset_handler) (void) = 0x0;
- void (*warm_reset_handler) (void) = (void (*)(void))&warm_reset;
- if (cold_reset_handler == get_reset_handler(cluster_id, cpu_id)) {
- set_reset_handler(cluster_id, cpu_id, warm_reset_handler);
- } else {
- if (flush_ob_l2) {
#if FLUSH_L2_FIX
- set_event(FLUSH_L2, cpu_id);
-#endif
- }
-
+ if (0 == switch_count) {
+ set_event(FLUSH_L2, cpu_id);
/*
- * The outbound cluster's last cpu send an event
- * indicating that its finished the last switchover.
- * Wait for it before bringing it's cores out of
- * reset.
+ * We really do not care about a race to update
+ * this variable as long as it has a non-zero value
+ * after a switch.
*/
- wait_for_event(OB_SHUTDOWN, cpu_id);
- reset_event(OB_SHUTDOWN, cpu_id);
+ switch_count++;
}
+#endif
write_trace(&lock_tube0, SEC_TUBE0, "Powerup Inbound", read_cntpct(),
0x0, 0x0);
@@ -217,7 +222,7 @@ unsigned reset_status(unsigned cluster_id, unsigned rst_level,
}
}
-void powerdown_cluster(void)
+void do_power_op(unsigned cpu_mask, unsigned op_type)
{
unsigned cpu_id = read_cpuid();
unsigned cluster_id = read_clusterid();
@@ -239,11 +244,13 @@ void powerdown_cluster(void)
disable_coherency();
write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush End", read_cntpct(), 0x0,
0x0);
- set_event(SEC_L1_DONE, cpu_id);
+ if (OP_TYPE_HP != op_type)
+ set_event(SEC_L1_DONE, cpu_id);
+ /* This code will never be executed for hotplug */
if (cpu_id == first_cpu) {
- wait_for_events(SEC_L1_DONE);
+ wait_for_events(SEC_L1_DONE, cpu_mask);
if (flush_ob_l2) {
#if FLUSH_L2_FIX
@@ -274,13 +281,7 @@ void powerdown_cluster(void)
* them to do so.
***************************************************************************/
- /*
- * Read the L2 control to get the number of secondary
- * cores present on this cluster. Shift mask by one to
- * get correct mask which includes the primary
- */
- secondary_mask = (1 << num_secondaries()) - 1;
- secondary_mask <<= 1;
+ secondary_mask = cpu_mask & ~(1 << cpu_id);
/* Wait for other cpus to enter reset */
while (secondary_mask !=
@@ -291,10 +292,19 @@ void powerdown_cluster(void)
else
powerdown_ob_core(cluster_id, cpu_id);
- set_events(OB_SHUTDOWN);
-
} else {
- powerdown_ob_core(cluster_id, cpu_id);
+ switch (op_type) {
+ case (OP_TYPE_HP):
+ get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+ write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), cpu_mask);
+ release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
+ break;
+ case (OP_TYPE_SWITCH):
+ powerdown_ob_core(cluster_id, cpu_id);
+ break;
+ default:
+ panic();
+ }
}
write_trace(&lock_tube0, SEC_TUBE0, "Reset Initiated", read_cntpct(),
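Note the simplification in do_power_op(): the set of cpus to wait on is no longer recomputed from the number of secondaries but derived from the caller-supplied cpu_mask minus the calling cpu. A standalone sketch with hypothetical values:

    #include <stdio.h>

    static unsigned read_cpuid(void) { return 0; } /* hypothetical: we are cpu0 */

    int main(void)
    {
        unsigned cpu_mask = 0xf; /* cpus 0-3 participate, as passed to do_power_op() */
        unsigned secondary_mask = cpu_mask & ~(1u << read_cpuid());

        printf("secondary mask = 0x%x\n", secondary_mask); /* 0xe: cpus 1-3 */
        return 0;
    }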
diff --git a/big-little/secure_world/secure_world.h b/big-little/secure_world/secure_world.h
index d466452..658016b 100644
--- a/big-little/secure_world/secure_world.h
+++ b/big-little/secure_world/secure_world.h
@@ -62,6 +62,13 @@
#define SP_SHARED (1 << 10)
#define SP_GLOBAL (1 << 11)
+/*
+ * Constants to indicate whether context manipulation is done
+ * as per a hotplug or cluster switch request.
+ */
+#define OP_TYPE_SWITCH 0x1
+#define OP_TYPE_HP 0x2
+
typedef struct sec_stack {
unsigned stack[STACK_SIZE];
} sec_stack;
@@ -82,6 +89,9 @@ typedef struct sec_context {
extern void enable_caches(void);
extern void secure_context_restore(void);
-extern void secure_context_save(unsigned);
+extern void secure_context_save(unsigned, unsigned);
+extern void powerup_ib_core(unsigned, unsigned);
+extern void set_reset_handler(unsigned, unsigned, void (*)(void));
+extern unsigned warm_reset;
#endif /* __SECURE_WORLD_H__ */
diff --git a/big-little/switcher/context/ns_context.c b/big-little/switcher/context/ns_context.c
index 2541319..b1fa3d2 100644
--- a/big-little/switcher/context/ns_context.c
+++ b/big-little/switcher/context/ns_context.c
@@ -49,6 +49,26 @@ static bakery_t lock_tube1 __attribute__ ((section("BL_DV_PAGE"))) = {
*/
system_context switcher_context = { 0 };
+/*
+ * Find out whether context has to be restored in response
+ * to a hotplug or cluster switch operation.
+ * If the same cpuid is out of reset on both clusters then
+ * it is a switch, otherwise a hotplug.
+ */
+unsigned find_restore_op_type(void)
+{
+ unsigned rst_status = 0x0, cpu_id = read_cpuid();
+ unsigned mask = 0, cluster_id = read_clusterid();
+ unsigned clus_rst_status = 0x0;
+
+ mask = 1 << cpu_id;
+ rst_status = read32(KFSCB_BASE + RST_HOLD0 + (!cluster_id << 2));
+ clus_rst_status = rst_status & (1 << 8);
+ rst_status &= (mask | (mask << 4));
+
+ return (rst_status | clus_rst_status) ? OP_TYPE_HP : OP_TYPE_SWITCH;
+}
+
void stop_generic_timer(generic_timer_context * ctr_ctx)
{
/*
@@ -74,10 +94,9 @@ void stop_generic_timer(generic_timer_context * ctr_ctx)
return;
}
-void save_context(unsigned first_cpu)
+void save_context(unsigned first_cpu, unsigned op_type)
{
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
+ unsigned cpu_id = read_cpuid(), cluster_id = read_clusterid();
cpu_context *ns_cpu_ctx =
&switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
@@ -88,6 +107,20 @@ void save_context(unsigned first_cpu)
generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
+ switch (op_type) {
+ case OP_TYPE_SWITCH:
+ write_trace(&lock_tube0, NS_TUBE0, "Switch Start", read_cntpct(),
+ 0x0, 0x0);
+ break;
+ case OP_TYPE_HP:
+ write_trace(&lock_tube0, NS_TUBE0, "Hotplug Start", read_cntpct(),
+ 0x0, 0x0);
+ break;
+ default:
+ printf("%s: Unsupported operation : 0x%x \n", __FUNCTION__, op_type);
+ panic();
+ }
+
write_trace(&lock_tube0, NS_TUBE0, "Context Save Start", read_cntpct(),
0x0, 0x0);
@@ -97,34 +130,51 @@ void save_context(unsigned first_cpu)
*/
write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save Start",
read_cntpct(), 0x0, 0x0);
- smc(SMC_SEC_SAVE, (unsigned)hyp_warm_reset_handler);
+ smc(SMC_SEC_SAVE, (unsigned)hyp_warm_reset_handler, op_type);
write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save End",
read_cntpct(), 0x0, 0x0);
- /*
- * Save the 32-bit Generic timer context & stop them
- */
- save_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
- stop_generic_timer(cp15_timer_ctx);
-
- /*
- * Save v7 generic performance monitors
- * Save cpu general purpose banked registers
- * Save cp15 context
- */
- save_performance_monitors(pmon_context);
- save_banked_registers(gp_context);
- save_cp15(cp15_context->cp15_misc_regs);
- save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
- save_mmu(cp15_context->cp15_mmu_regs);
- save_fault_status((unsigned *)fault_ctx);
+ if (op_type == OP_TYPE_SWITCH) {
+ /*
+ * Save the 32-bit Generic timer context & stop them
+ */
+ save_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
+ stop_generic_timer(cp15_timer_ctx);
+
+ /*
+ * Save v7 generic performance monitors
+ * Save cpu general purpose banked registers
+ * Save cp15 context
+ */
+ save_performance_monitors(pmon_context);
+ save_banked_registers(gp_context);
+ save_cp15(cp15_context->cp15_misc_regs);
+ save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ save_mmu(cp15_context->cp15_mmu_regs);
+ save_fault_status((unsigned *)fault_ctx);
+
+ /*
+ * Check if non-secure world has access to the vfp/neon registers
+ * and save them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ save_vfp(vfp_context);
+
+ /* Save vGIC virtual cpu interface (cpu view) context */
+ save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
+
+ /*
+ * TODO:
+ * Is it safe for the secondary cpu to save its context
+ * while the GIC distributor is on. Should be as its
+ * banked context and the cpu itself is the only one
+ * who can change it. Still have to consider cases e.g
+ * SGIs/Localtimers becoming pending.
+ */
+ save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
- /*
- * Check if non-secure world has access to the vfp/neon registers
- * and save them if so.
- */
- if (read_nsacr() & (0x3 << 10))
- save_vfp(vfp_context);
+ }
/*
* Disable the GIC CPU interface to prevent interrupts from waking
@@ -132,9 +182,6 @@ void save_context(unsigned first_cpu)
*/
write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
- /* Save vGIC virtual cpu interface (cpu view) context */
- save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
-
/*
* Save the HYP view registers. These registers contain a snapshot
* of all the physical interrupts acknowledged till we
@@ -142,42 +189,34 @@ void save_context(unsigned first_cpu)
*/
vgic_savestate(cpu_id);
- /*
- * TODO:
- * Is it safe for the secondary cpu to save its context
- * while the GIC distributor is on. Should be as its
- * banked context and the cpu itself is the only one
- * who can change it. Still have to consider cases e.g
- * SGIs/Localtimers becoming pending.
- */
- save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
- GIC_ID_PHY_BASE);
-
/* Safe place to save the Virtualisor context */
SaveVirtualisor(first_cpu);
- /*
- * Indicate to the inbound side that the context has been saved and is ready
- * for pickup.
- */
write_trace(&lock_tube0, NS_TUBE0, "Context Save End", read_cntpct(),
0x0, 0x0);
- set_event(OB_CONTEXT_DONE, cpu_id);
- /*
- * Now, we wait for the inbound cluster to signal that its done atleast picking
- * up the saved context.
- */
- if (cpu_id == first_cpu) {
- wait_for_events(IB_CONTEXT_DONE);
- write_trace(&lock_tube0, NS_TUBE0, "Inbound done",
- read_cntpct(), 0x0, 0x0);
+ if (op_type == OP_TYPE_SWITCH) {
+ /*
+ * Indicate to the inbound side that the context has been saved and is ready
+ * for pickup.
+ */
+ set_event(OB_CONTEXT_DONE, cpu_id);
+
+ /*
+ * Now, we wait for the inbound cluster to signal that it's done at least picking
+ * up the saved context.
+ */
+ if (cpu_id == first_cpu) {
+ wait_for_events(IB_CONTEXT_DONE, switchable_cpus_mask);
+ write_trace(&lock_tube0, NS_TUBE0, "Inbound done",
+ read_cntpct(), 0x0, 0x0);
+ }
}
return;
}
-void restore_context(unsigned first_cpu)
+void restore_context(unsigned first_cpu, unsigned op_type)
{
unsigned cpu_id = read_cpuid();
unsigned cluster_id = read_clusterid();
@@ -205,100 +244,130 @@ void restore_context(unsigned first_cpu)
map_cpuif(cluster_id, cpu_id);
SetupVGIC(warm_reset);
- /*
- * Inbound headstart i.e. the vGIC configuration, secure context
- * restore & cache invalidation has been done. Now wait for the
- * outbound to provide the context.
- */
- write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(),
- 0x0, 0x0);
- wait_for_event(OB_CONTEXT_DONE, cpu_id);
- reset_event(OB_CONTEXT_DONE, cpu_id);
+ if (OP_TYPE_SWITCH == op_type) {
+ /*
+ * Inbound headstart i.e. the vGIC configuration, secure context
+ * restore & cache invalidation has been done. Now wait for the
+ * outbound to provide the context.
+ */
+ write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(),
+ 0x0, 0x0);
+ wait_for_event(OB_CONTEXT_DONE, cpu_id);
+ reset_event(OB_CONTEXT_DONE, cpu_id);
+ }
- /*
- * First cpu restores the global context while the others take
- * care of their own.
- */
write_trace(&lock_tube1, NS_TUBE1, "Context Restore Start ",
- read_cntpct(), 0x0, 0x0);
- if (cpu_id == first_cpu)
- restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
- GIC_ID_PHY_BASE);
- restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
- GIC_ID_PHY_BASE);
- vgic_loadstate(cpu_id);
+ read_cntpct(), 0x0, 0x0);
+
+ if (OP_TYPE_SWITCH == op_type) {
+ /*
+ * First cpu restores the global context while the others take
+ * care of their own.
+ */
+ if (cpu_id == first_cpu)
+ restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
+ GIC_ID_PHY_BASE);
+ restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
+
+ /* Restore NS VGIC context */
+ restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,
+ VGIC_VM_PHY_BASE);
+
+ /*
+ * Check if non-secure world has access to the vfp/neon registers
+ * and restore them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ restore_vfp(vfp_context);
+
+ /*
+ * Restore cp15 context
+ * Restore cpu general purpose banked registers
+ * Restore v7 generic performance monitors
+ * Restore the 32-bit Generic timer context
+ */
+ restore_fault_status((unsigned *)fault_ctx);
+ restore_mmu(cp15_context->cp15_mmu_regs);
+ restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ restore_cp15(cp15_context->cp15_misc_regs);
+ restore_banked_registers(gp_context);
+ restore_performance_monitors(pmon_context);
+ restore_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
+ }
+ vgic_loadstate(cpu_id);
SetupVirtualisor(first_cpu);
- /* Restore NS VGIC context */
- restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,
- VGIC_VM_PHY_BASE);
-
- /*
- * Check if non-secure world has access to the vfp/neon registers
- * and save them if so.
- */
- if (read_nsacr() & (0x3 << 10))
- restore_vfp(vfp_context);
-
- /*
- * Restore cp15 context
- * Restore cpu general purpose banked registers
- * Restore v7 generic performance monitors
- * Restore the 32-bit Generic timer context
- */
- restore_fault_status((unsigned *)fault_ctx);
- restore_mmu(cp15_context->cp15_mmu_regs);
- restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
- restore_cp15(cp15_context->cp15_misc_regs);
- restore_banked_registers(gp_context);
- restore_performance_monitors(pmon_context);
- restore_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
-
- /*
- * Paranoid check to ensure that all HYP/Secure context & Virtualisor
- * is restored before any core enters the non-secure mode to use it.
- */
- if (cpu_id == first_cpu) {
- set_events(HYP_CONTEXT_DONE);
+ if (OP_TYPE_SWITCH == op_type) {
+ /*
+ * Paranoid check to ensure that all HYP/Secure context & Virtualisor
+ * is restored before any core enters the non-secure mode to use it.
+ */
+ if (cpu_id == first_cpu) {
+ set_events(HYP_CONTEXT_DONE, switchable_cpus_mask);
+ }
+ wait_for_event(HYP_CONTEXT_DONE, cpu_id);
+ reset_event(HYP_CONTEXT_DONE, cpu_id);
}
- wait_for_event(HYP_CONTEXT_DONE, cpu_id);
- reset_event(HYP_CONTEXT_DONE, cpu_id);
-
- /*
- * Return the saved general purpose registers saved above the HYP mode
- * stack of our counterpart cpu on the other cluster.
- */
- dest_cpuif = get_cpuif(cluster_id, cpu_id);
- src_cpuif = get_cpuif(!cluster_id, cpu_id);
- dest = &guestos_state[dest_cpuif].context;
- src = &guestos_state[src_cpuif].context;
-
- dest->gp_regs[0] = src->gp_regs[0];
- dest->gp_regs[1] = src->gp_regs[1];
- dest->gp_regs[2] = src->gp_regs[2];
- dest->gp_regs[3] = src->gp_regs[3];
- dest->gp_regs[4] = src->gp_regs[4];
- dest->gp_regs[5] = src->gp_regs[5];
- dest->gp_regs[6] = src->gp_regs[6];
- dest->gp_regs[7] = src->gp_regs[7];
- dest->gp_regs[8] = src->gp_regs[8];
- dest->gp_regs[9] = src->gp_regs[9];
- dest->gp_regs[10] = src->gp_regs[10];
- dest->gp_regs[11] = src->gp_regs[11];
- dest->gp_regs[12] = src->gp_regs[12];
- dest->gp_regs[13] = src->gp_regs[13];
- dest->gp_regs[14] = src->gp_regs[14];
- dest->elr_hyp = src->elr_hyp;
- dest->spsr = src->spsr;
- dest->usr_lr = src->usr_lr;
-
- write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(),
- 0x0, 0x0);
- set_event(IB_CONTEXT_DONE, cpu_id);
- if (async_switchover && cpu_id == first_cpu)
- enable_trigger(read_cntfrq());
+ switch (op_type) {
+ case OP_TYPE_SWITCH:
+ /*
+ * Return the general purpose registers saved above the HYP mode
+ * stack of our counterpart cpu on the other cluster.
+ */
+ dest_cpuif = get_cpuif(cluster_id, cpu_id);
+ src_cpuif = get_cpuif(!cluster_id, cpu_id);
+ dest = &guestos_state[dest_cpuif].context;
+ src = &guestos_state[src_cpuif].context;
+
+ dest->gp_regs[0] = src->gp_regs[0];
+ dest->gp_regs[1] = src->gp_regs[1];
+ dest->gp_regs[2] = src->gp_regs[2];
+ dest->gp_regs[3] = src->gp_regs[3];
+ dest->gp_regs[4] = src->gp_regs[4];
+ dest->gp_regs[5] = src->gp_regs[5];
+ dest->gp_regs[6] = src->gp_regs[6];
+ dest->gp_regs[7] = src->gp_regs[7];
+ dest->gp_regs[8] = src->gp_regs[8];
+ dest->gp_regs[9] = src->gp_regs[9];
+ dest->gp_regs[10] = src->gp_regs[10];
+ dest->gp_regs[11] = src->gp_regs[11];
+ dest->gp_regs[12] = src->gp_regs[12];
+ dest->gp_regs[13] = src->gp_regs[13];
+ dest->gp_regs[14] = src->gp_regs[14];
+ dest->usr_lr = src->usr_lr;
+ dest->elr_hyp = src->elr_hyp;
+ dest->spsr = src->spsr;
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(),
+ 0x0, 0x0);
+ set_event(IB_CONTEXT_DONE, cpu_id);
+ if (async_switchover && cpu_id == first_cpu)
+ enable_trigger(read_cntfrq());
+ write_trace(&lock_tube0, NS_TUBE0, "Switch End", read_cntpct(),
+ 0x0, 0x0);
+ break;
+ case OP_TYPE_HP:
+ /*
+ * Populate the OS entry point & mode in the HYP stack. We do not care
+ * about the gp registers as it's a hotplug and everything starts afresh
+ */
+ dest_cpuif = get_cpuif(cluster_id, cpu_id);
+ dest = &guestos_state[dest_cpuif].context;
+ dest->elr_hyp = read32(VE_SYS_BASE + FLAGS_SET);
+ dest->spsr &= ~0x1f;
+ /* Re-entry into Linux should be with interrupts disabled and in SVC mode */
+ dest->spsr |= (0x3 << 6 | 0x13);
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(),
+ 0x0, 0x0);
+ write_trace(&lock_tube0, NS_TUBE0, "Hotplug End", read_cntpct(),
+ 0x0, 0x0);
+ break;
+ default:
+ printf("%s: Unsupported operation : 0x%x \n", __FUNCTION__, op_type);
+ panic();
+ }
return;
}
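The heuristic in find_restore_op_type() can be exercised in isolation: the other cluster's RST_HOLDx value is checked for this cpu's individual reset bits (bit n and bit n+4) and for the cluster-wide reset bit (bit 8); if any is set, the counterpart is held in reset and the wakeup must be a hotplug, otherwise it is a switch. A host-side sketch that takes the register value as a plain argument:

    #include <stdio.h>

    #define OP_TYPE_SWITCH 0x1
    #define OP_TYPE_HP     0x2

    static unsigned restore_op_type(unsigned other_rst_hold, unsigned cpu_id)
    {
        unsigned mask = 1u << cpu_id;
        unsigned clus_rst = other_rst_hold & (1u << 8);
        unsigned cpu_rst = other_rst_hold & (mask | (mask << 4));

        return (cpu_rst | clus_rst) ? OP_TYPE_HP : OP_TYPE_SWITCH;
    }

    int main(void)
    {
        printf("0x%x\n", restore_op_type(0x2, 1)); /* counterpart in reset: OP_TYPE_HP */
        printf("0x%x\n", restore_op_type(0x0, 1)); /* both out of reset: OP_TYPE_SWITCH */
        return 0;
    }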
diff --git a/big-little/switcher/trigger/async_switchover.c b/big-little/switcher/trigger/async_switchover.c
index 9c7287d..c6056ed 100644
--- a/big-little/switcher/trigger/async_switchover.c
+++ b/big-little/switcher/trigger/async_switchover.c
@@ -47,6 +47,13 @@ static unsigned timer_count;
static unsigned rand_async_switches = RAND_ASYNC;
/* Use HYP timer for async switches */
unsigned hyp_timer_trigger = USE_HYP_TIMERS;
+/*
+ * Mask that is populated at the beginning of each
+ * switch to indicate which cpus will take part in
+ * switching. It's derived from the RST_HLDx register
+ * and affects the event mechanism.
+ */
+unsigned switchable_cpus_mask = 0;
/*
* Returns the id of the first IPI that is not pending on
@@ -93,20 +100,44 @@ static void ack_trigger(void)
}
/*
+ * Utility routine to indicate whether a given cluster
+ * is in reset or not.
+ */
+unsigned cluster_reset_status(unsigned cluster_id)
+{
+ return read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2)) & (1 << 8);
+}
+
+/*
* Broadcast first available IPI so that all cpus can start switching to
* the other cluster.
*/
void signal_switchover(void)
{
- unsigned ipi_no = 0x0;
+ unsigned ipi_no = 0x0, cluster_id = read_clusterid();
+ unsigned cpuif_mask = 0;
+
+ /*
+ * Read the RST_HLDx register of this cluster to get the latest
+ * cpu reset status and send the IPI to only those cpus that are
+ * active.
+ *
+ * NOTE:
+ * 1. We read RST_HLDx instead of RST_STATx so that any cpus
+ * still in the process of powering down can complete.
+ * 2. We do not need locks around this variable as only one cpu
+ * will change it during a switch and always after the previous
+ * switch has completed.
+ */
+ switchable_cpus_mask = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
+ switchable_cpus_mask = (switchable_cpus_mask >> 4) & 0xf;
+ switchable_cpus_mask ^= (1 << CLUSTER_CPU_COUNT(cluster_id)) - 1;
- /* If x is the no. of cpus then corresponding mask would be (1 << x) - 1 */
- unsigned cpu_mask = (1 << (num_secondaries() + 1)) - 1;
/*
* Map the target cpuids to their cpu interfaces as the 1:1 mapping
* no longer exists with the external vGIC.
*/
- unsigned cpuif_mask = get_cpuif_mask(cpu_mask);
+ cpuif_mask = get_cpuif_mask(switchable_cpus_mask);
/*
* Send an ipi to all the cpus in the cluster including ourselves
@@ -153,7 +184,7 @@ unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
unsigned check_trigger(unsigned int_id, unsigned int_ack)
{
- unsigned cpuid = read_cpuid();
+ unsigned cpuid = read_cpuid(), cluster_id = read_clusterid();
unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
/*
@@ -186,7 +217,10 @@ unsigned check_trigger(unsigned int_id, unsigned int_ack)
/* Indicator on emulation that switches are actually taking place */
if (platform != 0x1)
- printf("%d", read_clusterid());
+ printf("%d", cluster_id);
+
+ /* Do not switch till the previous switch has completed */
+ while (FALSE == cluster_reset_status(!cluster_id));
/*
* Send an IPI to all the cores in this cluster to start
@@ -202,7 +236,7 @@ unsigned check_trigger(unsigned int_id, unsigned int_ack)
* level. It's disabled as it's level-triggered and will reassert as
* soon as we leave this function since its not been cleared at the
* peripheral just yet. The local timer context is saved and this irq
- * cleared in "save_hyp_context". The interrupt is enabled then.
+ * cleared while saving the context. The interrupt is enabled then.
*/
gic_disable_int(int_id);
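The derivation of switchable_cpus_mask above is a two-step bit manipulation: bits [7:4] of RST_HLDx hold the per-cpu reset bits, and XOR-ing with a full per-cluster mask inverts them so that a set bit means "active". A standalone sketch with hypothetical values:

    #include <stdio.h>

    #define CLUSTER_CPU_COUNT(c) 4 /* hypothetical: 4 cpus per cluster */

    int main(void)
    {
        unsigned rst_hold = 0x80; /* hypothetical RST_HLDx: cpu3 held in reset */

        unsigned mask = (rst_hold >> 4) & 0xf;    /* reset-hold bits: 0x8 */
        mask ^= (1u << CLUSTER_CPU_COUNT(0)) - 1; /* invert: active cpus */

        printf("switchable cpus = 0x%x\n", mask); /* 0x7: cpus 0-2 */
        return 0;
    }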
diff --git a/big-little/switcher/trigger/handle_switchover.s b/big-little/switcher/trigger/handle_switchover.s
index 5d90173..4f26183 100755
--- a/big-little/switcher/trigger/handle_switchover.s
+++ b/big-little/switcher/trigger/handle_switchover.s
@@ -25,35 +25,31 @@
PRESERVE8
IMPORT save_context
- IMPORT smc
+ IMPORT smc
+ IMPORT switchable_cpus_mask
EXPORT switch_cluster
SMC_SEC_SHUTDOWN EQU 0x2
+OP_TYPE_SWITCH EQU 0x1
- ; ----------------------------------------------------
- ; This function directs the switchover to the inbound
- ; cluster. The context is first saved, stacks switched
- ; & the cluster is powered down.
- ; We need to switch stacks from being resident in normal
- ; WBWA/S memory to SO memory to prevent potential stack
- ; corruption after turning off the C bit in the HSCTLR.
- ; Subsequent accesses will be SO while there will be
- ; valid cache lines of the stack from prior accesses
- ; ----------------------------------------------------
switch_cluster FUNCTION
; ----------------------------------------------------
- ; We don't push any registers on the stack as we are
- ; not going to return from this function
- ; ----------------------------------------------------
- MOV r4, r0
+ ; Save the NS state and ask the secure world to save
+ ; its context and bring the corresponding inbound core
+ ; out of reset
+ ; ----------------------------------------------------
+ MOV r1, #OP_TYPE_SWITCH
BL save_context
+
; ----------------------------------------------------
- ; We are now through with saving the context and the
- ; inbound cluster has started picking it up. Switch to
- ; the secure world to clean the caches and power down
- ; the cluster
- ; ----------------------------------------------------
+ ; All context has been saved and restored. The inbound
+ ; core has resumed payload execution. Ask the secure
+ ; world to clean the caches and power down the cluster
+ ; ----------------------------------------------------
MOV r0, #SMC_SEC_SHUTDOWN
+ LDR r1, =switchable_cpus_mask
+ LDR r1, [r1]
+ MOV r2, #OP_TYPE_SWITCH
BL smc
ENDFUNC
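In C terms, the reworked switch_cluster boils down to the following (a sketch of the assembly above, not code from the patch):

    /* Sketch only: mirrors the assembly sequence in switch_cluster. */
    void switch_cluster(unsigned first_cpu)
    {
        /* Save NS state; the secure world saves its own context and
         * releases the corresponding inbound core from reset. */
        save_context(first_cpu, OP_TYPE_SWITCH);

        /* The inbound core now owns the payload; ask the secure world
         * to clean the caches and power this cluster down. Never returns. */
        smc(SMC_SEC_SHUTDOWN, switchable_cpus_mask, OP_TYPE_SWITCH);
    }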
diff --git a/big-little/switcher/trigger/sync_switchover.c b/big-little/switcher/trigger/sync_switchover.c
index e8d8e99..cc2441d 100644
--- a/big-little/switcher/trigger/sync_switchover.c
+++ b/big-little/switcher/trigger/sync_switchover.c
@@ -25,6 +25,7 @@
#include "bl.h"
extern void signal_switchover(void);
+extern unsigned cluster_reset_status(unsigned);
unsigned is_hvc()
{
@@ -43,6 +44,8 @@ unsigned HandleHVC(vm_context * context)
* by sending a switchover IPI to all the cores in the cluster.
*/
case SYNC_SWITCHOVER:
+ /* Do not switch till the previous switch has completed */
+ while (FALSE == cluster_reset_status(!read_clusterid()));
signal_switchover();
rc = TRUE;
break;