summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDietmar Eggemann <dietmar.eggemann@arm.com>2012-03-15 14:24:21 +0000
committerDietmar Eggemann <dietmar.eggemann@arm.com>2012-05-22 10:43:46 +0100
commit976a6c2e10fecedc49a1da242ef87f05f87626ba (patch)
tree5099e3c7aa5e5ddb9d8cb68e0cdeca373fa2fefb
parent4c5f141f4012cf4bfb29391ed81452b37fedadc7 (diff)
Generic IPI support for Virtualizer communication
Thus far the Virtualizer only used an IPI for triggering a cluster switch. An IPI is now needed to migrate virtual interrupts as well. Hence the existing code to select a free IPI and use it for communication has been moved to the 'lib' directory. It supports other IPI types as well now. Signed-off-by: Achin Gupta <achin.gupta@arm.com> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
-rwxr-xr-xbig-little/Makefile2
-rw-r--r--big-little/include/hyp_vmmap.h11
-rw-r--r--big-little/include/ipi.h35
-rwxr-xr-xbig-little/lib/ipi.c135
-rw-r--r--big-little/switcher/trigger/async_switchover.c246
5 files changed, 270 insertions, 159 deletions
diff --git a/big-little/Makefile b/big-little/Makefile
index 716f526..7c7896d 100755
--- a/big-little/Makefile
+++ b/big-little/Makefile
@@ -61,7 +61,7 @@ vpath %.c switcher switcher/trigger switcher/context common/ lib/ secure_world
vpath %.s switcher switcher/trigger switcher/context common/ lib/ secure_world/ ../acsr
SWITCHER_OBJS = ns_context.o hyp_setup.o pagetable_setup.o virt_helpers.o sync_switchover.o \
- vgiclib.o vgic_handle.o uart.o v7.o gic.o handle_switchover.o tube.o \
+ vgiclib.o vgic_handle.o uart.o v7.o gic.o handle_switchover.o tube.o ipi.o \
virt_events.o bakery.o vgic_setup.o async_switchover.o hyp_vectors.o helpers.o
SECURE_ENTRY_POINT = monmode_vector_table
diff --git a/big-little/include/hyp_vmmap.h b/big-little/include/hyp_vmmap.h
index 4da98bb..348a6e5 100644
--- a/big-little/include/hyp_vmmap.h
+++ b/big-little/include/hyp_vmmap.h
@@ -30,13 +30,14 @@
* d e f i n e s
* --------------------------------------------------------------------------*/
-#define GIC_ID_PHY_BASE 0x2C001000 /* Physical Distributor */
-#define GIC_IC_PHY_BASE 0x2C002000 /* Physical CPU interface */
+#define GIC_ID_PHY_BASE 0x2C001000 /* Physical Distributor */
+#define GIC_IC_PHY_BASE 0x2C002000 /* Physical CPU interface */
-#define VGIC_HV_PHY_BASE 0x2C004000 /* Hypervisor's VIew */
-#define VGIC_VM_PHY_BASE 0x2C006000 /* Virtual Machine view */
+#define VGIC_HV_PHY_BASE 0x2C004000 /* Hypervisor's View */
+#define VGIC_HV_ALIAS_BASE 0x2C005000 /* Hypervisor's View of other interfaces */
+#define VGIC_VM_PHY_BASE 0x2C006000 /* Virtual Machine view */
#define UART0_PHY_BASE 0x1C090000
#define UART1_PHY_BASE 0x1C0A0000
-#endif /* __HYP_VMMAP_H__ */
+#endif /* __HYP_VMMAP_H__ */
diff --git a/big-little/include/ipi.h b/big-little/include/ipi.h
new file mode 100644
index 0000000..1f9e888
--- /dev/null
+++ b/big-little/include/ipi.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#ifndef __IPI_H__
+#define __IPI_H__
+
+#define MAX_IPI 16
+/* Opcode to trigger a cluster switch */
+#define IPI_CLUSTER_SWITCH 1
+/* Opcode to migrate vGIC virqs */
+#define IPI_MIGRATE_VIRQS 2
+
+extern unsigned send_hyp_ipi(unsigned, unsigned);
+extern unsigned get_hyp_ipi(unsigned, unsigned);
+
+#endif /* __IPI_H__ */
diff --git a/big-little/lib/ipi.c b/big-little/lib/ipi.c
new file mode 100755
index 0000000..936cda8
--- /dev/null
+++ b/big-little/lib/ipi.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2012, ARM Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the
+ * following conditions are met:
+ *
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the
+ * above copyright notice, this list of conditions and
+ * the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its
+ * contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ */
+
+#include "ipi.h"
+#include "misc.h"
+#include "virt_helpers.h"
+#include "bakery.h"
+#include "hyp_vmmap.h"
+#include "gic_registers.h"
+
+extern void gic_send_ipi(unsigned, unsigned);
+
+/*
+ * Set of flags used by the interrupt handling code
+ * to distinguish between IPIs sent by the big-little
+ * code and the payload software.
+ * TODO: Assumes only one cpu will send an IPI at a
+ * time rather than multiple cpus sending the same
+ * IPI to each other at the same time from within the
+ * HYP mode.
+ */
+static unsigned lock_ipi_check;
+static unsigned hyp_ipi_check[16];
+
+/*
+ * Returns the id of the first IPI that is not pending on
+ * our cpu interface or the first IPI that is pending but
+ * was not generated by us. Returns 16 if no such IPI is
+ * found
+ */
+static unsigned get_free_ipi(void)
+{
+ unsigned shift, cpu_if_bit, cpu_id = read_cpuid(), cluster_id =
+ read_clusterid();
+ int ctr;
+
+ cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
+
+ /* Find the register offset */
+ for (ctr = 3; ctr >= 0; ctr--)
+ /* Check whether IPI<shift> has already been generated by us */
+ for (shift = 0; shift < 4; shift++) {
+ if (read32
+ (GIC_ID_PHY_BASE + GICD_SPENDSGIR +
+ (ctr << 2)) & (cpu_if_bit << (shift << 3)))
+ continue;
+
+ return (ctr << 2) + shift;
+ }
+
+ return MAX_IPI;
+}
+
+/*
+ * Given a mask of destination cpus and what the IPI is supposed
+ * to do, select an unused IPI and send it.
+ */
+unsigned send_hyp_ipi(unsigned cpuif_mask, unsigned type)
+{
+ unsigned rc = TRUE;
+ unsigned ipi_no = 0;
+
+ /*
+ * First choose a non-pending IPI to avoid a clash with the OS.
+ */
+ ipi_no = get_free_ipi();
+ if (ipi_no == MAX_IPI) {
+ rc = FALSE;
+ return rc;
+ }
+
+ /*
+ * For this IPI set the mask in our global variable. We do it, payload software
+ * does not. But, first check whether any earlier IPIs have already been acked
+ */
+ while (TRUE) {
+ spin_lock(&lock_ipi_check);
+ if (hyp_ipi_check[ipi_no] & 0xff) {
+ spin_unlock(&lock_ipi_check);
+ } else {
+ hyp_ipi_check[ipi_no] = (type << 8) | cpuif_mask;
+ dsb();
+ spin_unlock(&lock_ipi_check);
+ break;
+ }
+ };
+
+ /* Send the IPI to the cpu_mask */
+ gic_send_ipi(cpuif_mask, ipi_no);
+
+ return rc;
+}
+
+/*
+ * Given a received IPI, check whether it is a HYP IPI and return
+ * the type if so else 0
+ */
+unsigned get_hyp_ipi(unsigned cpu_if, unsigned ipi_no)
+{
+ unsigned type = 0;
+
+ spin_lock(&lock_ipi_check);
+ /*
+ * If this IPI was sent by the big-little code then our cpu_if bit must have
+ * been set in the ipi_check flag. Reset the bit and indicate that it's an
+ * internal IPI.
+ */
+ if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
+ type = hyp_ipi_check[ipi_no] >> 8;
+ hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
+ dsb();
+ }
+ spin_unlock(&lock_ipi_check);
+
+ return type;
+}
diff --git a/big-little/switcher/trigger/async_switchover.c b/big-little/switcher/trigger/async_switchover.c
index c6056ed..e5931d7 100644
--- a/big-little/switcher/trigger/async_switchover.c
+++ b/big-little/switcher/trigger/async_switchover.c
@@ -20,6 +20,7 @@
* permission.
*/
+#include "ipi.h"
#include "virt_helpers.h"
#include "misc.h"
#include "stdlib.h"
@@ -27,21 +28,10 @@
extern void gic_enable_int(unsigned);
extern void gic_disable_int(unsigned);
-extern void gic_send_ipi(unsigned, unsigned);
extern void gic_eoi_int(unsigned);
extern void gic_deactivate_int(unsigned);
extern int __rand_r(struct _rand_state *);
-/*
- * Set of flags used by the interrupt handling code
- * to distinguish between IPIs sent by the big-little
- * code and the payload software.
- * TODO: Assumes only one cpu will send an IPI at a
- * time rather than multiple cpus sending the same
- * IPI to each other at the same time from within the
- * HYP mode.
- */
-static unsigned lock_ipi_check;
-static unsigned hyp_ipi_check[16];
+
static unsigned timer_count;
/* Support for the switchover interval randomly but sanely */
static unsigned rand_async_switches = RAND_ASYNC;
@@ -55,48 +45,20 @@ unsigned hyp_timer_trigger = USE_HYP_TIMERS;
*/
unsigned switchable_cpus_mask = 0;
-/*
- * Returns the id of the first IPI that is not pending on
- * our cpu interface or the first IPI that is pending but
- * was not generated by us. Returns 16 if no such IPI is
- * found
- */
-static unsigned get_free_ipi(void)
-{
- unsigned ctr, shift, cpu_if_bit, cpu_id = read_cpuid(), cluster_id =
- read_clusterid();
-
- cpu_if_bit = 1 << get_cpuif(cluster_id, cpu_id);
-
- /* Find the register offset */
- for (ctr = 0; ctr < 4; ctr++)
- /* Check whether IPI<shift> has already been generated by us */
- for (shift = 0; shift < 4; shift++) {
- if (read32
- (GIC_ID_PHY_BASE + GICD_SPENDSGIR +
- (ctr << 2)) & (cpu_if_bit << (shift << 3)))
- continue;
-
- return (ctr << 2) + shift;
- }
-
- return 16;
-}
-
static void ack_trigger(void)
{
- unsigned ctl = 0;
-
- ctl = read_cnthp_ctl();
- if (ctl & TIMER_IRQ_STAT) {
- /* Disable timer and mask interrupt */
- write_cnthp_ctl(TIMER_MASK_IRQ);
- } else {
- printf("Spurious HYP timer irq \n");
- panic();
- }
-
- return;
+ unsigned ctl = 0;
+
+ ctl = read_cnthp_ctl();
+ if (ctl & TIMER_IRQ_STAT) {
+ /* Disable timer and mask interrupt */
+ write_cnthp_ctl(TIMER_MASK_IRQ);
+ } else {
+ printf("Spurious HYP timer irq \n");
+ panic();
+ }
+
+ return;
}
/*
@@ -114,7 +76,7 @@ unsigned cluster_reset_status(unsigned cluster_id)
*/
void signal_switchover(void)
{
- unsigned ipi_no = 0x0, cluster_id = read_clusterid();
+ unsigned cluster_id = read_clusterid();
unsigned cpuif_mask = 0;
/*
@@ -139,47 +101,25 @@ void signal_switchover(void)
*/
cpuif_mask = get_cpuif_mask(switchable_cpus_mask);
- /*
- * Send an ipi to all the cpus in the cluster including ourselves
- * to start a switch to the inbound cluster. First choose a non-
- * pending IPI to avoid a clash with the OS.
- */
- ipi_no = get_free_ipi();
-
- /*
- * For this IPI set the mask in our global variable. We do it, payload software
- * does not. But, first check whether any earlier IPIs have already been acked
- */
- while (hyp_ipi_check[ipi_no]) ;
- spin_lock(&lock_ipi_check);
- hyp_ipi_check[ipi_no] = cpuif_mask;
- dsb();
- spin_unlock(&lock_ipi_check);
-
- /* Send the IPI to the cpu_mask */
- gic_send_ipi(cpuif_mask, ipi_no);
+ /*
+ * Send an ipi to all the online cpus in the cluster including ourselves
+ * to start a switch to the inbound cluster.
+ */
+ send_hyp_ipi(cpuif_mask, IPI_CLUSTER_SWITCH);
return;
}
unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
{
- unsigned rc = FALSE;
+ unsigned type = 0;
- spin_lock(&lock_ipi_check);
- /*
- * If this IPI was sent by the big-little code then our cpu_if bit must have
- * been set in the ipi_check flag. Reset the bit an indicate that its an
- * internal IPI.
- */
- if (hyp_ipi_check[ipi_no] & (1 << cpu_if)) {
- rc = TRUE;
- hyp_ipi_check[ipi_no] &= ~(1 << cpu_if);
- dsb();
- }
- spin_unlock(&lock_ipi_check);
+ type = get_hyp_ipi(cpu_if, ipi_no);
+ if (type == IPI_CLUSTER_SWITCH)
+ return TRUE;
+ else
+ return FALSE;
- return rc;
}
unsigned check_trigger(unsigned int_id, unsigned int_ack)
@@ -250,75 +190,75 @@ unsigned check_trigger(unsigned int_id, unsigned int_ack)
void keep_trigger_alive(void)
{
- /*
- * The OS might have disabled the HYP timer interrupt
- * while setting up its view of the vGIC. So enable
- * it if disabled upon receiving any other interrupt.
- * Better than virtualising vGIC accesses on the TARGET
- * CPU.
- */
- if (hyp_timer_trigger)
- if (!
- (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
- (1 << HYP_TIMER_IRQ)))
- gic_enable_int(HYP_TIMER_IRQ);
-
- return;
+ /*
+ * The OS might have disabled the HYP timer interrupt
+ * while setting up its view of the vGIC. So enable
+ * it if disabled upon receiving any other interrupt.
+ * Better than virtualising vGIC accesses on the TARGET
+ * CPU.
+ */
+ if (hyp_timer_trigger)
+ if (!
+ (read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
+ (1 << HYP_TIMER_IRQ)))
+ gic_enable_int(HYP_TIMER_IRQ);
+
+ return;
}
void enable_trigger(unsigned tval)
{
- unsigned ctl = TIMER_ENABLE;
- unsigned platform = read32((KFSCB_BASE + KFS_ID) >> 20) & 0xf;
-
- /*
- * No need to lock this as its accessed by only one cpu
- * per cluster and that too one at a time.
- */
- static unsigned int rand_no = 0xdeadbeef;
- static struct _rand_state buffer;
-
- /*
- * Nothing needs to be done if physical local timers
- * are being used for doing a switchover.
- */
- if (hyp_timer_trigger == TRUE) {
- if (rand_async_switches) {
- _srand_r(&buffer, rand_no);
- rand_no = (unsigned)_rand_r(&buffer);
- }
-
- /* Enable timer and unmask interrupt */
- write_cnthp_ctl(ctl);
-
- if (rand_async_switches) {
- unsigned interval;
-
- /*
- * TODO: Assuming that the tval is always 12000000
- * Increment or decrement the timer value randomly
- * but never by more than a factor of 10
- */
- if (rand_no % 2)
- interval = tval * (rand_no % 10);
- else
- interval = tval / (rand_no % 10);
-
- write_cnthp_tval(interval);
-
- } else {
- /*
- * Program the timer to fire every 12000000 instructions
- * on the FastModel while 1500000 cycles on the Emulator
- */
- if (platform == 0x1)
- write_cnthp_tval(tval);
- else
- write_cnthp_tval(tval >> 3);
- }
-
- gic_enable_int(HYP_TIMER_IRQ);
- }
-
- return;
+ unsigned ctl = TIMER_ENABLE;
+ unsigned platform = read32((KFSCB_BASE + KFS_ID) >> 20) & 0xf;
+
+ /*
+ * No need to lock this as its accessed by only one cpu
+ * per cluster and that too one at a time.
+ */
+ static unsigned int rand_no = 0xdeadbeef;
+ static struct _rand_state buffer;
+
+ /*
+ * Nothing needs to be done if physical local timers
+ * are being used for doing a switchover.
+ */
+ if (hyp_timer_trigger == TRUE) {
+ if (rand_async_switches) {
+ _srand_r(&buffer, rand_no);
+ rand_no = (unsigned)_rand_r(&buffer);
+ }
+
+ /* Enable timer and unmask interrupt */
+ write_cnthp_ctl(ctl);
+
+ if (rand_async_switches) {
+ unsigned interval;
+
+ /*
+ * TODO: Assuming that the tval is always 12000000
+ * Increment or decrement the timer value randomly
+ * but never by more than a factor of 10
+ */
+ if (rand_no % 2)
+ interval = tval * (rand_no % 10);
+ else
+ interval = tval / (rand_no % 10);
+
+ write_cnthp_tval(interval);
+
+ } else {
+ /*
+ * Program the timer to fire every 12000000 instructions
+ * on the FastModel while 1500000 cycles on the Emulator
+ */
+ if (platform == 0x1)
+ write_cnthp_tval(tval);
+ else
+ write_cnthp_tval(tval >> 3);
+ }
+
+ gic_enable_int(HYP_TIMER_IRQ);
+ }
+
+ return;
}