path: root/big-little/switcher/trigger/async_switchover.c
Diffstat (limited to 'big-little/switcher/trigger/async_switchover.c')
-rw-r--r--  big-little/switcher/trigger/async_switchover.c  185
1 file changed, 93 insertions, 92 deletions
diff --git a/big-little/switcher/trigger/async_switchover.c b/big-little/switcher/trigger/async_switchover.c
index e5931d7..b8585e7 100644
--- a/big-little/switcher/trigger/async_switchover.c
+++ b/big-little/switcher/trigger/async_switchover.c
@@ -47,18 +47,18 @@ unsigned switchable_cpus_mask = 0;
static void ack_trigger(void)
{
- unsigned ctl = 0;
-
- ctl = read_cnthp_ctl();
- if (ctl & TIMER_IRQ_STAT) {
- /* Disable timer and mask interrupt */
- write_cnthp_ctl(TIMER_MASK_IRQ);
- } else {
- printf("Spurious HYP timer irq \n");
- panic();
- }
-
- return;
+ unsigned ctl = 0;
+
+ ctl = read_cnthp_ctl();
+ if (ctl & TIMER_IRQ_STAT) {
+ /* Disable timer and mask interrupt */
+ write_cnthp_ctl(TIMER_MASK_IRQ);
+ } else {
+ printf("Spurious HYP timer irq \n");
+ panic();
+ }
+
+ return;
}
/*
@@ -91,7 +91,8 @@ void signal_switchover(void)
* will change it during a switch and always after the previous
* switch has completed.
*/
- switchable_cpus_mask = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
+ switchable_cpus_mask =
+ read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
switchable_cpus_mask = (switchable_cpus_mask >> 4) & 0xf;
switchable_cpus_mask ^= (1 << CLUSTER_CPU_COUNT(cluster_id)) - 1;
@@ -101,24 +102,24 @@ void signal_switchover(void)
*/
cpuif_mask = get_cpuif_mask(switchable_cpus_mask);
- /*
- * Send an ipi to all the online cpus in the cluster including ourselves
- * to start a switch to the inbound cluster.
- */
- send_hyp_ipi(cpuif_mask, IPI_CLUSTER_SWITCH);
+ /*
+ * Send an ipi to all the online cpus in the cluster including ourselves
+ * to start a switch to the inbound cluster.
+ */
+ send_hyp_ipi(cpuif_mask, IPI_CLUSTER_SWITCH);
return;
}
unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
{
- unsigned type = 0;
+ unsigned type = 0;
- type = get_hyp_ipi(cpu_if, ipi_no);
- if (type == IPI_CLUSTER_SWITCH)
- return TRUE;
- else
- return FALSE;
+ type = get_hyp_ipi(cpu_if, ipi_no);
+ if (type == IPI_CLUSTER_SWITCH)
+ return TRUE;
+ else
+ return FALSE;
}
@@ -160,7 +161,7 @@ unsigned check_trigger(unsigned int_id, unsigned int_ack)
printf("%d", cluster_id);
/* Do not switch till previous one has not completed */
- while (FALSE == cluster_reset_status(!cluster_id));
+ while (FALSE == cluster_reset_status(!cluster_id)) ;
/*
* Send an IPI to all the cores in this cluster to start
@@ -190,75 +191,75 @@ unsigned check_trigger(unsigned int_id, unsigned int_ack)
void keep_trigger_alive(void)
{
- /*
- * The OS might have disabled the HYP timer interrupt
- * while setting up its view of the vGIC. So enable
- * it if disabled upon receiving any other interrupt.
- * Better than virtualising vGIC accesses on the TARGET
- * CPU.
- */
- if (hyp_timer_trigger)
- if (!
- (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
- (1 << HYP_TIMER_IRQ)))
- gic_enable_int(HYP_TIMER_IRQ);
-
- return;
+ /*
+ * The OS might have disabled the HYP timer interrupt
+ * while setting up its view of the vGIC. So enable
+ * it if disabled upon receiving any other interrupt.
+ * Better than virtualising vGIC accesses on the TARGET
+ * CPU.
+ */
+ if (hyp_timer_trigger)
+ if (!
+ (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
+ (1 << HYP_TIMER_IRQ)))
+ gic_enable_int(HYP_TIMER_IRQ);
+
+ return;
}
void enable_trigger(unsigned tval)
{
- unsigned ctl = TIMER_ENABLE;
- unsigned platform = read32((KFSCB_BASE + KFS_ID) >> 20) & 0xf;
-
- /*
- * No need to lock this as its accessed by only one cpu
- * per cluster and that too one at a time.
- */
- static unsigned int rand_no = 0xdeadbeef;
- static struct _rand_state buffer;
-
- /*
- * Nothing needs to be done if physical local timers
- * are being used for doing a switchover.
- */
- if (hyp_timer_trigger == TRUE) {
- if (rand_async_switches) {
- _srand_r(&buffer, rand_no);
- rand_no = (unsigned)_rand_r(&buffer);
- }
-
- /* Enable timer and unmask interrupt */
- write_cnthp_ctl(ctl);
-
- if (rand_async_switches) {
- unsigned interval;
-
- /*
- * TODO: Assuming that the tval is always 12000000
- * Increment or decrement the timer value randomly
- * but never by more than a factor of 10
- */
- if (rand_no % 2)
- interval = tval * (rand_no % 10);
- else
- interval = tval / (rand_no % 10);
-
- write_cnthp_tval(interval);
-
- } else {
- /*
- * Program the timer to fire every 12000000 instructions
- * on the FastModel while 1500000 cycles on the Emulator
- */
- if (platform == 0x1)
- write_cnthp_tval(tval);
- else
- write_cnthp_tval(tval >> 3);
- }
-
- gic_enable_int(HYP_TIMER_IRQ);
- }
-
- return;
+ unsigned ctl = TIMER_ENABLE;
+ unsigned platform = read32((KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * No need to lock this as its accessed by only one cpu
+ * per cluster and that too one at a time.
+ */
+ static unsigned int rand_no = 0xdeadbeef;
+ static struct _rand_state buffer;
+
+ /*
+ * Nothing needs to be done if physical local timers
+ * are being used for doing a switchover.
+ */
+ if (hyp_timer_trigger == TRUE) {
+ if (rand_async_switches) {
+ _srand_r(&buffer, rand_no);
+ rand_no = (unsigned)_rand_r(&buffer);
+ }
+
+ /* Enable timer and unmask interrupt */
+ write_cnthp_ctl(ctl);
+
+ if (rand_async_switches) {
+ unsigned interval;
+
+ /*
+ * TODO: Assuming that the tval is always 12000000
+ * Increment or decrement the timer value randomly
+ * but never by more than a factor of 10
+ */
+ if (rand_no % 2)
+ interval = tval * (rand_no % 10);
+ else
+ interval = tval / (rand_no % 10);
+
+ write_cnthp_tval(interval);
+
+ } else {
+ /*
+ * Program the timer to fire every 12000000 instructions
+ * on the FastModel while 1500000 cycles on the Emulator
+ */
+ if (platform == 0x1)
+ write_cnthp_tval(tval);
+ else
+ write_cnthp_tval(tval >> 3);
+ }
+
+ gic_enable_int(HYP_TIMER_IRQ);
+ }
+
+ return;
}
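
For reference, below is a minimal host-side sketch of the interval randomisation that enable_trigger() applies when rand_async_switches is set. It is illustrative only: POSIX rand_r() stands in for the firmware's _srand_r()/_rand_r() pair, pick_interval() is a hypothetical helper introduced here, and the guard for the rand_no % 10 == 0 case is an addition that the patched code itself does not contain.

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>

/*
 * Derive a pseudo-random switchover interval from the base timer value,
 * scaling it up or down by at most a factor of 10, as the TODO comment
 * in enable_trigger() describes.
 */
static unsigned pick_interval(unsigned tval, unsigned rand_no)
{
	unsigned factor = rand_no % 10;

	/* Added guard: the original code assumes the PRNG never yields
	 * a multiple of 10 here, which would divide (or multiply) by zero. */
	if (factor == 0)
		factor = 1;

	return (rand_no % 2) ? tval * factor : tval / factor;
}

int main(void)
{
	unsigned seed = 0xdeadbeef;	/* same seed as the static rand_no */
	unsigned tval = 12000000;	/* base interval used on the FastModel */

	for (int i = 0; i < 4; i++) {
		unsigned r = (unsigned)rand_r(&seed);
		printf("interval = %u\n", pick_interval(tval, r));
	}
	return 0;
}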