path: root/big-little/switcher/context/ns_context.c
author    Robin Randhawa <robin.randhawa@arm.com>  2012-02-16 16:54:06 +0000
committer Robin Randhawa <robin.randhawa@arm.com>  2012-02-16 16:55:09 +0000
commit    813834c76ac48f29a4e1f67bb341ff0d0911b414 (patch)
tree      f9aa160817422c3c77ea913ec501221f85996948 /big-little/switcher/context/ns_context.c
parent    48aa177c0c3e38dc84728df705e50ba924b6f424 (diff)
GNU indent pass over C and header files.
Basically: $ for f in $(find . -name "*.[ch]"); do indent -linux $f; done

Signed-off-by: Robin Randhawa <robin.randhawa@arm.com>
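Note that a for loop over $(find ...) word-splits the file list, so paths containing whitespace would break it. A sketch of a more robust equivalent, using only find's standard -exec action and the same -linux style profile of GNU indent:

    $ find . -name "*.[ch]" -exec indent -linux {} \;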
Diffstat (limited to 'big-little/switcher/context/ns_context.c')
-rw-r--r--  big-little/switcher/context/ns_context.c  491
1 file changed, 250 insertions, 241 deletions
diff --git a/big-little/switcher/context/ns_context.c b/big-little/switcher/context/ns_context.c
index 201d930..2541319 100644
--- a/big-little/switcher/context/ns_context.c
+++ b/big-little/switcher/context/ns_context.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
#include "virt_helpers.h"
#include "vgiclib.h"
@@ -37,259 +37,268 @@ extern unsigned async_switchover;
extern unsigned hyp_timer_trigger;
/* Bakery locks to serialize access to the tube. */
-static bakery_t lock_tube0 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
-static bakery_t lock_tube1 __attribute__ ((section("BL_DV_PAGE"))) = { 0 };
+static bakery_t lock_tube0 __attribute__ ((section("BL_DV_PAGE"))) = {
+0};
+
+static bakery_t lock_tube1 __attribute__ ((section("BL_DV_PAGE"))) = {
+0};
/*
* Top level structure which encapsulates the context of the entire
* Kingfisher system
*/
-system_context switcher_context = {0};
+system_context switcher_context = { 0 };
-void stop_generic_timer(generic_timer_context *ctr_ctx)
+void stop_generic_timer(generic_timer_context * ctr_ctx)
{
- /*
- * Disable the timer and mask the irq to prevent
- * spurious interrupts on this cpu interface. It
- * will bite us when we come back if we don't. It
- * will be replayed on the inbound cluster.
- */
- write_cntp_ctl(TIMER_MASK_IRQ);
-
-
- /*
- * If the local timer interrupt was being used as
- * the asynchronous trigger, then it was disabled
- * in handle_interrupt() to prevent this level-
- * triggered interrupt from firing. Now that it has
- * been acked at the peripheral, we can re-enable it.
- */
- if(!hyp_timer_trigger) {
- if (ctr_ctx->cntp_ctl & TIMER_IRQ_STAT)
- gic_enable_int(LCL_TIMER_IRQ);
- }
-
- return;
+ /*
+ * Disable the timer and mask the irq to prevent
+ * spurious interrupts on this cpu interface. It
+ * will bite us when we come back if we don't. It
+ * will be replayed on the inbound cluster.
+ */
+ write_cntp_ctl(TIMER_MASK_IRQ);
+
+ /*
+ * If the local timer interrupt was being used as
+ * the asynchronous trigger, then it was disabled
+ * in handle_interrupt() to prevent this level-
+ * triggered interrupt from firing. Now that it has
+ * been acked at the peripheral, we can re-enable it.
+ */
+ if (!hyp_timer_trigger) {
+ if (ctr_ctx->cntp_ctl & TIMER_IRQ_STAT)
+ gic_enable_int(LCL_TIMER_IRQ);
+ }
+
+ return;
}
void save_context(unsigned first_cpu)
{
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
- cpu_context *ns_cpu_ctx =
- &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
- unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
- unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
- unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
- banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
- gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
- generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
- cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
-
- write_trace(&lock_tube0, NS_TUBE0, "Context Save Start", read_cntpct(), 0x0, 0x0);
-
- /*
- * Good place to bring the inbound cluster out of reset, but first
- * we need to save the secure world context.
- */
- write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save Start", read_cntpct(), 0x0, 0x0);
- smc(SMC_SEC_SAVE, (unsigned) hyp_warm_reset_handler);
- write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save End", read_cntpct(), 0x0, 0x0);
-
- /*
- * Save the 32-bit Generic timer context & stop the timer
- */
- save_generic_timer((unsigned *) cp15_timer_ctx, 0x1);
- stop_generic_timer(cp15_timer_ctx);
-
- /*
- * Save v7 generic performance monitors
- * Save cpu general purpose banked registers
- * Save cp15 context
- */
- save_performance_monitors(pmon_context);
- save_banked_registers(gp_context);
- save_cp15(cp15_context->cp15_misc_regs);
- save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
- save_mmu(cp15_context->cp15_mmu_regs);
- save_fault_status((unsigned *) fault_ctx);
-
- /*
- * Check if non-secure world has access to the vfp/neon registers
- * and save them if so.
- */
- if (read_nsacr() & (0x3 << 10))
- save_vfp(vfp_context);
-
-
- /*
- * Disable the GIC CPU interface to prevent interrupts from waking
- * the core from wfi() subsequently.
- */
- write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
-
- /* Save vGIC virtual cpu interface (cpu view) context */
- save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
-
- /*
- * Save the HYP view registers. These registers contain a snapshot
- * of all the physical interrupts acknowledged till we
- * entered this HYP mode.
- */
- vgic_savestate(cpu_id);
-
- /*
- * TODO:
- * Is it safe for the secondary cpu to save its context
- * while the GIC distributor is on? It should be, since this
- * is banked context and the cpu itself is the only one
- * that can change it. Still have to consider cases, e.g.
- * SGIs/local timers becoming pending.
- */
- save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
- GIC_ID_PHY_BASE);
-
- /* Safe place to save the Virtualisor context */
- SaveVirtualisor(first_cpu);
-
- /*
- * Indicate to the inbound side that the context has been saved and is ready
- * for pickup.
- */
- write_trace(&lock_tube0, NS_TUBE0, "Context Save End", read_cntpct(), 0x0, 0x0);
- set_event(OB_CONTEXT_DONE, cpu_id);
-
- /*
- * Now, we wait for the inbound cluster to signal that it is done at least picking
- * up the saved context.
- */
- if (cpu_id == first_cpu) {
- wait_for_events(IB_CONTEXT_DONE);
- write_trace(&lock_tube0, NS_TUBE0, "Inbound done", read_cntpct(), 0x0, 0x0);
- }
-
- return;
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ cpu_context *ns_cpu_ctx =
+ &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
+ unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
+ unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
+ unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
+ banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
+ gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
+ generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
+ cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
+
+ write_trace(&lock_tube0, NS_TUBE0, "Context Save Start", read_cntpct(),
+ 0x0, 0x0);
+
+ /*
+ * Good place to bring the inbound cluster out of reset, but first
+ * we need to save the secure world context.
+ */
+ write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save Start",
+ read_cntpct(), 0x0, 0x0);
+ smc(SMC_SEC_SAVE, (unsigned)hyp_warm_reset_handler);
+ write_trace(&lock_tube0, NS_TUBE0, "Secure Context Save End",
+ read_cntpct(), 0x0, 0x0);
+
+ /*
+ * Save the 32-bit Generic timer context & stop the timer
+ */
+ save_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
+ stop_generic_timer(cp15_timer_ctx);
+
+ /*
+ * Save v7 generic performance monitors
+ * Save cpu general purpose banked registers
+ * Save cp15 context
+ */
+ save_performance_monitors(pmon_context);
+ save_banked_registers(gp_context);
+ save_cp15(cp15_context->cp15_misc_regs);
+ save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ save_mmu(cp15_context->cp15_mmu_regs);
+ save_fault_status((unsigned *)fault_ctx);
+
+ /*
+ * Check if non-secure world has access to the vfp/neon registers
+ * and save them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ save_vfp(vfp_context);
+
+ /*
+ * Disable the GIC CPU interface to prevent interrupts from waking
+ * the core from wfi() subsequently.
+ */
+ write32(GIC_IC_PHY_BASE + GICC_CTL, 0x0);
+
+ /* Save vGIC virtual cpu interface (cpu view) context */
+ save_gic_interface(gic_pvt_context->gic_cpu_if_regs, VGIC_VM_PHY_BASE);
+
+ /*
+ * Save the HYP view registers. These registers contain a snapshot
+ * of all the physical interrupts acknowledged till we
+ * entered this HYP mode.
+ */
+ vgic_savestate(cpu_id);
+
+ /*
+ * TODO:
+ * Is it safe for the secondary cpu to save its context
+ * while the GIC distributor is on? It should be, since this
+ * is banked context and the cpu itself is the only one
+ * that can change it. Still have to consider cases, e.g.
+ * SGIs/local timers becoming pending.
+ */
+ save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
+
+ /* Safe place to save the Virtualisor context */
+ SaveVirtualisor(first_cpu);
+
+ /*
+ * Indicate to the inbound side that the context has been saved and is ready
+ * for pickup.
+ */
+ write_trace(&lock_tube0, NS_TUBE0, "Context Save End", read_cntpct(),
+ 0x0, 0x0);
+ set_event(OB_CONTEXT_DONE, cpu_id);
+
+ /*
+ * Now, we wait for the inbound cluster to signal that it is done at least picking
+ * up the saved context.
+ */
+ if (cpu_id == first_cpu) {
+ wait_for_events(IB_CONTEXT_DONE);
+ write_trace(&lock_tube0, NS_TUBE0, "Inbound done",
+ read_cntpct(), 0x0, 0x0);
+ }
+
+ return;
}
void restore_context(unsigned first_cpu)
{
- unsigned cpu_id = read_cpuid();
- unsigned cluster_id = read_clusterid();
- unsigned warm_reset = 1;
- cpu_context *ns_cpu_ctx =
- &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
- global_context *gbl_context = &switcher_context.cluster.ns_cluster_ctx;
- unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
- unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
- unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
- gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
- generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
- banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
- cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
- vm_context *src = 0x0;
- vm_context *dest = 0x0;
- unsigned dest_cpuif = 0x0;
- unsigned src_cpuif = 0x0;
-
- /*
- * Map cpuids to cpu interface numbers so that cpu interface
- * specific context can be correctly restored on the external
- * vGIC.
- */
- map_cpuif(cluster_id, cpu_id);
- SetupVGIC(warm_reset);
-
- /*
- * Inbound headstart, i.e. the vGIC configuration, secure context
- * restore & cache invalidation, has been done. Now wait for the
- * outbound to provide the context.
- */
- write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(), 0x0, 0x0);
- wait_for_event(OB_CONTEXT_DONE, cpu_id);
- reset_event(OB_CONTEXT_DONE, cpu_id);
-
- /*
- * First cpu restores the global context while the others take
- * care of their own.
- */
- write_trace(&lock_tube1, NS_TUBE1, "Context Restore Start ", read_cntpct(), 0x0, 0x0);
- if (cpu_id == first_cpu)
- restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
- GIC_ID_PHY_BASE);
- restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
- GIC_ID_PHY_BASE);
- vgic_loadstate(cpu_id);
-
- SetupVirtualisor(first_cpu);
-
- /* Restore NS VGIC context */
- restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,
- VGIC_VM_PHY_BASE);
-
- /*
- * Check if non-secure world has access to the vfp/neon registers
- * and restore them if so.
- */
- if (read_nsacr() & (0x3 << 10))
- restore_vfp(vfp_context);
-
- /*
- * Restore cp15 context
- * Restore cpu general purpose banked registers
- * Restore v7 generic performance monitors
- * Restore the 32-bit Generic timer context
- */
- restore_fault_status((unsigned *) fault_ctx);
- restore_mmu(cp15_context->cp15_mmu_regs);
- restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
- restore_cp15(cp15_context->cp15_misc_regs);
- restore_banked_registers(gp_context);
- restore_performance_monitors(pmon_context);
- restore_generic_timer((unsigned *) cp15_timer_ctx, 0x1);
-
- /*
- * Paranoid check to ensure that all HYP/Secure and Virtualisor
- * context is restored before any core enters the non-secure mode to use it.
- */
- if (cpu_id == first_cpu) {
- set_events(HYP_CONTEXT_DONE);
- }
- wait_for_event(HYP_CONTEXT_DONE, cpu_id);
- reset_event(HYP_CONTEXT_DONE, cpu_id);
-
- /*
- * Return the general purpose registers saved above the HYP mode
- * stack of our counterpart cpu on the other cluster.
- */
- dest_cpuif = get_cpuif(cluster_id, cpu_id);
- src_cpuif = get_cpuif(!cluster_id, cpu_id);
- dest = &guestos_state[dest_cpuif].context;
- src = &guestos_state[src_cpuif].context;
-
- dest->gp_regs[0] = src->gp_regs[0];
- dest->gp_regs[1] = src->gp_regs[1];
- dest->gp_regs[2] = src->gp_regs[2];
- dest->gp_regs[3] = src->gp_regs[3];
- dest->gp_regs[4] = src->gp_regs[4];
- dest->gp_regs[5] = src->gp_regs[5];
- dest->gp_regs[6] = src->gp_regs[6];
- dest->gp_regs[7] = src->gp_regs[7];
- dest->gp_regs[8] = src->gp_regs[8];
- dest->gp_regs[9] = src->gp_regs[9];
- dest->gp_regs[10] = src->gp_regs[10];
- dest->gp_regs[11] = src->gp_regs[11];
- dest->gp_regs[12] = src->gp_regs[12];
- dest->gp_regs[13] = src->gp_regs[13];
- dest->gp_regs[14] = src->gp_regs[14];
- dest->elr_hyp = src->elr_hyp;
- dest->spsr = src->spsr;
- dest->usr_lr = src->usr_lr;
-
- write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(), 0x0, 0x0);
- set_event(IB_CONTEXT_DONE, cpu_id);
-
- if (async_switchover && cpu_id == first_cpu)
- enable_trigger(read_cntfrq());
-
- return;
+ unsigned cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned warm_reset = 1;
+ cpu_context *ns_cpu_ctx =
+ &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
+ global_context *gbl_context = &switcher_context.cluster.ns_cluster_ctx;
+ unsigned *pmon_context = ns_cpu_ctx->pmon_regs;
+ unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
+ unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
+ gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
+ generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
+ banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
+ cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
+ vm_context *src = 0x0;
+ vm_context *dest = 0x0;
+ unsigned dest_cpuif = 0x0;
+ unsigned src_cpuif = 0x0;
+
+ /*
+ * Map cpuids to cpu interface numbers so that cpu interface
+ * specific context can be correctly restored on the external
+ * vGIC.
+ */
+ map_cpuif(cluster_id, cpu_id);
+ SetupVGIC(warm_reset);
+
+ /*
+ * Inbound headstart, i.e. the vGIC configuration, secure context
+ * restore & cache invalidation, has been done. Now wait for the
+ * outbound to provide the context.
+ */
+ write_trace(&lock_tube1, NS_TUBE1, "Wait for context", read_cntpct(),
+ 0x0, 0x0);
+ wait_for_event(OB_CONTEXT_DONE, cpu_id);
+ reset_event(OB_CONTEXT_DONE, cpu_id);
+
+ /*
+ * First cpu restores the global context while the others take
+ * care of their own.
+ */
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore Start ",
+ read_cntpct(), 0x0, 0x0);
+ if (cpu_id == first_cpu)
+ restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,
+ GIC_ID_PHY_BASE);
+ restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,
+ GIC_ID_PHY_BASE);
+ vgic_loadstate(cpu_id);
+
+ SetupVirtualisor(first_cpu);
+
+ /* Restore NS VGIC context */
+ restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,
+ VGIC_VM_PHY_BASE);
+
+ /*
+ * Check if non-secure world has access to the vfp/neon registers
+ * and restore them if so.
+ */
+ if (read_nsacr() & (0x3 << 10))
+ restore_vfp(vfp_context);
+
+ /*
+ * Restore cp15 context
+ * Restore cpu general purpose banked registers
+ * Restore v7 generic performance monitors
+ * Restore the 32-bit Generic timer context
+ */
+ restore_fault_status((unsigned *)fault_ctx);
+ restore_mmu(cp15_context->cp15_mmu_regs);
+ restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
+ restore_cp15(cp15_context->cp15_misc_regs);
+ restore_banked_registers(gp_context);
+ restore_performance_monitors(pmon_context);
+ restore_generic_timer((unsigned *)cp15_timer_ctx, 0x1);
+
+ /*
+ * Paranoid check to ensure that all HYP/Secure and Virtualisor
+ * context is restored before any core enters the non-secure mode to use it.
+ */
+ if (cpu_id == first_cpu) {
+ set_events(HYP_CONTEXT_DONE);
+ }
+ wait_for_event(HYP_CONTEXT_DONE, cpu_id);
+ reset_event(HYP_CONTEXT_DONE, cpu_id);
+
+ /*
+ * Return the general purpose registers saved above the HYP mode
+ * stack of our counterpart cpu on the other cluster.
+ */
+ dest_cpuif = get_cpuif(cluster_id, cpu_id);
+ src_cpuif = get_cpuif(!cluster_id, cpu_id);
+ dest = &guestos_state[dest_cpuif].context;
+ src = &guestos_state[src_cpuif].context;
+
+ dest->gp_regs[0] = src->gp_regs[0];
+ dest->gp_regs[1] = src->gp_regs[1];
+ dest->gp_regs[2] = src->gp_regs[2];
+ dest->gp_regs[3] = src->gp_regs[3];
+ dest->gp_regs[4] = src->gp_regs[4];
+ dest->gp_regs[5] = src->gp_regs[5];
+ dest->gp_regs[6] = src->gp_regs[6];
+ dest->gp_regs[7] = src->gp_regs[7];
+ dest->gp_regs[8] = src->gp_regs[8];
+ dest->gp_regs[9] = src->gp_regs[9];
+ dest->gp_regs[10] = src->gp_regs[10];
+ dest->gp_regs[11] = src->gp_regs[11];
+ dest->gp_regs[12] = src->gp_regs[12];
+ dest->gp_regs[13] = src->gp_regs[13];
+ dest->gp_regs[14] = src->gp_regs[14];
+ dest->elr_hyp = src->elr_hyp;
+ dest->spsr = src->spsr;
+ dest->usr_lr = src->usr_lr;
+
+ write_trace(&lock_tube1, NS_TUBE1, "Context Restore End", read_cntpct(),
+ 0x0, 0x0);
+ set_event(IB_CONTEXT_DONE, cpu_id);
+
+ if (async_switchover && cpu_id == first_cpu)
+ enable_trigger(read_cntfrq());
+
+ return;
}
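
A note on the guard around the VFP/Neon save and restore above: read_nsacr() & (0x3 << 10) tests NSACR bits 10 and 11, which on ARMv7 control non-secure access to coprocessors 10 and 11 (together implementing VFP/Neon). A minimal sketch of the same check with the bits named; the constants and helper are illustrative, not identifiers from this tree:

    #define NSACR_CP10 (1u << 10)   /* NS may access coprocessor 10 (VFP) */
    #define NSACR_CP11 (1u << 11)   /* NS may access coprocessor 11 (Neon) */

    /* Illustrative sketch: VFP/Neon state is saved/restored only when the
     * non-secure world can reach cp10/cp11 at all; otherwise the registers
     * hold nothing the non-secure OS could have written. */
    static inline int ns_has_vfp_access(unsigned nsacr)
    {
            return (nsacr & (NSACR_CP10 | NSACR_CP11)) != 0;
    }

The 0x3 << 10 mask in the diff is exactly NSACR_CP10 | NSACR_CP11, so save_vfp()/restore_vfp() run when either access bit is set.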