Diffstat (limited to 'big-little/switcher/trigger/async_switchover.c')
-rw-r--r--	big-little/switcher/trigger/async_switchover.c	290
1 files changed, 290 insertions, 0 deletions
diff --git a/big-little/switcher/trigger/async_switchover.c b/big-little/switcher/trigger/async_switchover.c
new file mode 100644
index 0000000..056c8a1
--- /dev/null
+++ b/big-little/switcher/trigger/async_switchover.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2011, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "virt_helpers.h"
+#include "misc.h"
+#include "stdlib.h"
+#include "gic_registers.h"
+
+extern void gic_enable_int(unsigned);
+extern void gic_disable_int(unsigned);
+extern void gic_send_ipi(unsigned, unsigned);
+extern void gic_eoi_int(unsigned);
+extern void gic_deactivate_int(unsigned);
+/* Seedable PRNG helpers; signatures inferred from their call sites below */
+extern void _srand_r(struct _rand_state *, unsigned);
+extern int _rand_r(struct _rand_state *);
+/*
+ * Set of flags used by the interrupt handling code
+ * to distinguish between IPIs sent by the big-little
+ * code and those sent by the payload software.
+ * TODO: Assumes only one cpu will send an IPI at a
+ * time rather than multiple cpus sending the same
+ * IPI to each other at the same time from within
+ * HYP mode.
+ */
+static unsigned lock_ipi_check;
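+/*
+ * Each entry of hyp_ipi_check holds the bitmask of cpu interfaces
+ * that are yet to acknowledge the corresponding switchover IPI:
+ * it is populated in signal_switchover() and cleared, one bit per
+ * acknowledging cpu, in check_switchover_ipi().
+ */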
+static unsigned hyp_ipi_check[16];
+static unsigned timer_count;
+/* Support for varying the switchover interval randomly but sanely */
+static unsigned rand_async_switches = RAND_ASYNC;
+/* Use HYP timer for async switches */
+unsigned hyp_timer_trigger = USE_HYP_TIMERS;
+
+/*
+ * Returns the id of the first IPI that is not pending on
+ * our cpu interface, or of the first IPI that is pending but
+ * was not generated by us. Returns 16 if no such IPI is
+ * found.
+ */
+static unsigned get_free_ipi(void)
+{
+	unsigned ctr, shift, cpu_if_bit;
+	unsigned cpu_id = read_cpuid(), cluster_id = read_clusterid();
+
+ cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
+
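+	/*
+	 * GICD_SPENDSGIR layout (GICv2): each of the four registers covers
+	 * four SGIs, one byte per SGI, and each byte holds one pending bit
+	 * per source cpu interface.
+	 */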
+	/* Find the register offset and the byte within it */
+	for (ctr = 0; ctr < 4; ctr++) {
+		/* Check whether IPI<(ctr << 2) + shift> has already been generated by us */
+		for (shift = 0; shift < 4; shift++) {
+			if (read32(GIC_ID_PHY_BASE + GICD_SPENDSGIR + (ctr << 2)) &
+			    (cpu_if_bit << (shift << 3)))
+				continue;
+
+			return (ctr << 2) + shift;
+		}
+	}
+
+ return 16;
+}
+
+static void ack_trigger(void)
+{
+ unsigned ctl = 0;
+
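+	/*
+	 * CNTHP_CTL layout (ARM generic timer): bit 0 = ENABLE, bit 1 = IMASK,
+	 * bit 2 = ISTATUS; TIMER_IRQ_STAT and TIMER_MASK_IRQ are assumed to
+	 * map to ISTATUS and IMASK respectively.
+	 */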
+ ctl = read_cnthp_ctl();
+ if (ctl & TIMER_IRQ_STAT) {
+ /* Disable timer and mask interrupt */
+ write_cnthp_ctl(TIMER_MASK_IRQ);
+ } else {
+ printf("Spurious HYP timer irq \n");
+ panic();
+ }
+
+ return;
+}
+
+/*
+ * Broadcast first available IPI so that all cpus can start switching to
+ * the other cluster.
+ */
+void signal_switchover(void)
+{
+ unsigned ipi_no = 0x0;
+
+	/* If x is the number of cpus then the corresponding mask is (1 << x) - 1 */
+ unsigned cpu_mask = (1 << (num_secondaries() + 1)) - 1;
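+	/* e.g. one secondary alongside cpu0: (1 << 2) - 1 = 0x3 */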
+ /*
+ * Map the target cpuids to their cpu interfaces as the 1:1 mapping
+ * no longer exists with the external vGIC.
+ */
+ unsigned cpuif_mask = get_cpuif_mask(cpu_mask);
+
+ /*
+ * Send an ipi to all the cpus in the cluster including ourselves
+ * to start a switch to the inbound cluster. First choose a non-
+ * pending IPI to avoid a clash with the OS.
+ */
+ ipi_no = get_free_ipi();
+
+	/*
+	 * For this IPI, set the mask in our global variable; the payload
+	 * software never touches it. But first wait until any earlier
+	 * broadcast of this IPI has been acked by all its targets.
+	 */
+	while (hyp_ipi_check[ipi_no]) ;
+ spin_lock(&lock_ipi_check);
+ hyp_ipi_check[ipi_no] = cpuif_mask;
+ dsb();
+ spin_unlock(&lock_ipi_check);
+
+	/* Send the IPI to the target cpu interfaces */
+ gic_send_ipi(cpuif_mask, ipi_no);
+
+ return;
+}
+
+unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
+{
+ unsigned rc = FALSE;
+
+ spin_lock(&lock_ipi_check);
+	/*
+	 * If this IPI was sent by the big-little code then our cpu_if bit must
+	 * have been set in the ipi_check flag. Reset the bit and indicate that
+	 * it is an internal IPI.
+	 */
+ if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
+ rc = TRUE;
+ hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
+ dsb();
+ }
+ spin_unlock(&lock_ipi_check);
+
+ return rc;
+}
+
+unsigned check_trigger(unsigned int_id, unsigned int_ack)
+{
+ unsigned cpuid = read_cpuid();
+ unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * If we are not using HYP mode timers for triggering a switchover
+ * then check whether this is a suitable local timer interrupt to
+ * switch
+ */
+ if (hyp_timer_trigger == FALSE) {
+		/*
+		 * We need to hijack every 128th timer interrupt on cpu0 and
+		 * use it as a stimulus for a switchover
+		 */
+ if (cpuid == 0 && int_id == LCL_TIMER_IRQ)
+ timer_count++;
+
+ if (timer_count & LCL_TIMER_FREQ)
+ return FALSE;
+ }
+	/*
+	 * Trigger a switchover upon getting a HYP timer IRQ; it is
+	 * targeted at cpu0 only.
+	 */
+	else if (int_id != HYP_TIMER_IRQ)
+		return FALSE;
+
+	/*
+	 * Now that it is confirmed that we need to move to the other
+	 * cluster, do the necessary work
+	 */
+
+ /* Indicator on emulation that switches are actually taking place */
+ if (platform != 0x1)
+ printf("%d", read_clusterid());
+
+ /*
+ * Send an IPI to all the cores in this cluster to start
+ * a switchover.
+ */
+ signal_switchover();
+
+ if (hyp_timer_trigger)
+ ack_trigger();
+ else
+		/*
+		 * Complete handling of the local timer interrupt at the
+		 * physical gic level. It is disabled as it is level-triggered
+		 * and will reassert as soon as we leave this function, since
+		 * it has not been cleared at the peripheral just yet. The
+		 * local timer context is saved and this irq cleared in
+		 * "save_hyp_context". The interrupt is enabled then.
+		 */
+ gic_disable_int(int_id);
+
+ /* Finish handling this interrupt */
+ gic_eoi_int(int_ack);
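+	/*
+	 * With GICC_CTL.EOImode (bit 9) set, the EOI above only drops the
+	 * running priority; the interrupt must be deactivated separately.
+	 */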
+ if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
+ gic_deactivate_int(int_ack);
+
+ return TRUE;
+}
+
+void keep_trigger_alive(void)
+{
+ /*
+ * The OS might have disabled the HYP timer interrupt
+ * while setting up its view of the vGIC. So enable
+ * it if disabled upon receiving any other interrupt.
+ * Better than virtualising vGIC accesses on the TARGET
+ * CPU.
+ */
+	if (hyp_timer_trigger)
+		if (!(read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
+		      (1 << HYP_TIMER_IRQ)))
+			gic_enable_int(HYP_TIMER_IRQ);
+
+ return;
+}
+
+void enable_trigger(unsigned tval)
+{
+ unsigned ctl = TIMER_ENABLE;
+	unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+	/*
+	 * No need to lock these as they are accessed by only one cpu per
+	 * cluster, and by only one cluster at a time.
+	 */
+ static unsigned int rand_no = 0xdeadbeef;
+ static struct _rand_state buffer;
+
+ /*
+ * Nothing needs to be done if physical local timers
+ * are being used for doing a switchover.
+ */
+ if (hyp_timer_trigger == TRUE) {
+ if (rand_async_switches) {
+ _srand_r(&buffer, rand_no);
+ rand_no = (unsigned) _rand_r(&buffer);
+ }
+
+ /* Enable timer and unmask interrupt */
+ write_cnthp_ctl(ctl);
+
+ if (rand_async_switches) {
+			unsigned interval;
+			/* Keep the scale factor non-zero to avoid a
+			   divide-by-zero and a zero-length interval */
+			unsigned factor = (rand_no % 9) + 1;
+
+			/*
+			 * TODO: Assumes that tval is always 12000000.
+			 * Scale the timer value up or down randomly,
+			 * but never by more than a factor of 10
+			 */
+			if (rand_no % 2)
+				interval = tval * factor;
+			else
+				interval = tval / factor;
+
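+			/*
+			 * e.g. (illustrative numbers) with tval = 12000000 and
+			 * factor = 3, the timer is reprogrammed to fire after
+			 * 36000000 or 4000000 timer ticks
+			 */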
+ write_cnthp_tval(interval);
+
+ } else {
+			/*
+			 * Program the timer to fire every 12000000 ticks on
+			 * the FastModel but every 1500000 (tval >> 3) ticks
+			 * on the Emulator
+			 */
+ if (platform == 0x1)
+ write_cnthp_tval(tval);
+ else
+ write_cnthp_tval(tval >> 3);
+ }
+
+ gic_enable_int(HYP_TIMER_IRQ);
+ }
+
+ return;
+}