summary refs log tree commit diff
diff options
context:
space:
mode:
authorDietmar Eggemann <dietmar.eggemann@arm.com>2012-03-15 14:34:01 +0000
committerDave Martin <dave.martin@linaro.org>2012-04-19 11:18:43 +0100
commit93a7cf52e9a962664a4055fa75ab0598cb8502ba (patch)
tree321a79d4138415a4907dbc7ab17b80298edb8390
parent4a4765859376cfe618600442cb854c877dc36cdc (diff)
Virqmig: Added virq migration support.
Signed-off-by: Achin Gupta <achin.gupta@arm.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
-rw-r--r--big-little/common/vgic_handle.c22
-rw-r--r--big-little/common/vgiclib.c201
-rw-r--r--big-little/include/events.h3
-rw-r--r--big-little/include/vgiclib.h24
-rw-r--r--big-little/secure_world/secure_resets.c2
-rw-r--r--big-little/virtualisor/vgic_trap_handler.c8
6 files changed, 207 insertions, 53 deletions
diff --git a/big-little/common/vgic_handle.c b/big-little/common/vgic_handle.c
index b6bc29c..0e20dcf 100644
--- a/big-little/common/vgic_handle.c
+++ b/big-little/common/vgic_handle.c
@@ -26,6 +26,7 @@
#include "misc.h"
#include "events.h"
#include "vgiclib.h"
+#include "ipi.h"
extern vm_context *trigger_entry(vm_context *, unsigned);
extern unsigned get_cpuinfo(unsigned);
@@ -99,7 +100,7 @@ vm_context *handle_interrupt(vm_context * context)
unsigned int_pri = 0;
unsigned cpu_if = get_cpuif(cluster_id, cpuid);
vm_context *ret_ctx = context;
- unsigned do_switch = 0, first_cpu = find_first_cpu();
+ unsigned first_cpu = find_first_cpu();
/*
* Get the interrupt #
@@ -122,10 +123,9 @@ vm_context *handle_interrupt(vm_context * context)
* Special case IPIs, since we need the source CPU ID
*/
if (i < 16) {
- src_cpu = (status >> 10) & INTACK_CPUID_MASK;
+ unsigned type = get_hyp_ipi(cpu_if, i);
- /* Check whether we have been requested to switchover */
- do_switch = check_switchover_ipi(cpu_if, i);
+ src_cpu = (status >> 10) & INTACK_CPUID_MASK;
/*
* SGI Ack actually returns the source cpu interface
@@ -146,16 +146,26 @@ vm_context *handle_interrupt(vm_context * context)
gic_eoi_int(status);
if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
gic_deactivate_int(status);
+ switch (type) {
+ /* Check whether we have been requested to switchover */
- if (do_switch) {
+ case IPI_CLUSTER_SWITCH:
/*
* switch_cluster() takes the first_cpu as its arg. Since
* all the cores are expected to power down, its reasonable
* to assume cpu0 is the first cpu and will take care of
- * saving all the global context.
+ * saving all the global context. This never returns.
*/
switch_cluster(first_cpu);
+ break;
+
+ case IPI_MIGRATE_VIRQS:
+ complete_virq_migration(src_cpu);
return ret_ctx;
+
+ default:
+ break;
+ /* Not a HYP ipi. Send it to the payload */
}
}
diff --git a/big-little/common/vgiclib.c b/big-little/common/vgiclib.c
index 2a27088..5f5735b 100644
--- a/big-little/common/vgiclib.c
+++ b/big-little/common/vgiclib.c
@@ -24,6 +24,7 @@
#include "misc.h"
#include "virt_helpers.h"
#include "int_master.h"
+#include "ipi.h"
/*
* Manage overflowints somehow.. static pool with recycling allocators.
@@ -36,6 +37,166 @@ static struct overflowint theoverflowints[NUM_CPUS][MAXOVERFLOWINTS];
static struct gic_cpuif cpuifs[NUM_CPUS];
static unsigned hv_lr_count[NUM_CPUS] = { 0 };
+static mig_irq_info migrated_irqs[NUM_CPUS][MAX_MIG_IRQS] = {0};
+
+/*
+ * The vGIC spec implements 64 list registers across two 32-bit status
+ * registers. Since all of the list registers may not be implemented,
+ * this function returns the maximum index we need to bother about.
+ */
+static inline unsigned elrsr_max_index(unsigned cpuid)
+{
+ return (hv_lr_count[cpuid] - 1) >> 5;
+}
+
+/*
+ * In a HYP view list register status register both active and unimplemented
+ * interrupts are represented by a 0 bit. This function returns a 32-bit value
+ * where each set bit represents an active list register. It's basically the
+ * inverse of what the elrsr returns while taking into account unimplemented
+ * interrupts.
+ */
+static unsigned get_elrsr_active_bits(unsigned index, unsigned cpuid,
+ unsigned max_index)
+{
+ unsigned elrsr =
+ ~(read32(VGIC_HV_PHY_BASE + GICH_ELRSR0 + (index << 2)));
+
+ if (index == max_index) {
+ /*
+ * Get the remainder, shift 1 left by the remainder and
+ * subtract 1 from the result to form the mask.
+ */
+ elrsr &= (1 << (hv_lr_count[cpuid] - (32 * max_index))) - 1;
+ } else if (index > max_index) {
+ /*
+ * There can never be active virqs when the list registers
+ * do not exist.
+ */
+ elrsr = 0;
+ }
+
+ return elrsr;
+}
+
+
+/*
+ * For a given interrupt and cpu id, this function will
+ * check whether its virq is not inactive and return the
+ * descriptor to the caller.
+ */
+static unsigned dequeue_virq(unsigned irq, unsigned cpu_id)
+{
+ unsigned virq_desc = 0, max_index = elrsr_max_index(cpu_id), ctr = 0;
+ unsigned cur_elrsr = 0, i = 0;
+
+ for (ctr = 0; ctr <= max_index; ctr++) {
+ cur_elrsr = get_elrsr_active_bits(ctr, cpu_id, max_index);
+
+ for (i = bitindex(cur_elrsr); ((int)i) >= 0; i = bitindex(cur_elrsr)) {
+ unsigned list_reg = 0, int_id = 0;
+
+ list_reg = read32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2));
+ int_id = (list_reg >> 10) & 0x3ff;
+
+ /* Clear the current bit */
+ cur_elrsr &= ~(1 << i);
+
+ if (irq == int_id) {
+ /*
+ * Invalidate the list register entry if the ids match and return
+ */
+ write32(VGIC_HV_PHY_BASE + GICH_LR_BASE + ((1 << 7) * ctr) + (i << 2), list_reg & ~(0x3 << 28));
+ return virq_desc;
+ }
+
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Given the original and current ICDIPTR & its offset
+ * find the ids of the interrupts that have been migrated.
+ * To do this:
+ * We XOR the two values to find which bytes differ (each
+ * byte corresponds to a single interrupt).
+ * The offset is from the start of the ICDIPTR map (0x800)
+ * & indicates the number of irqs covered so far.
+ * Adding it to the 'ctr' gives the exact irq id.
+ *
+ * Also find the previous and current cpu interface ids.
+ * Lastly, find if there is a virq pending for the migrated
+ * interrupt and return a cpu mask for asking other cpus to
+ * complete the migration.
+ */
+static unsigned set_mig_irq_info(unsigned orig, unsigned curr, unsigned icdiptr_offset)
+{
+ unsigned ctr, diff = orig ^ curr, cpu_mask = 0;
+ unsigned desc = 0, cpu_id = read_cpuid();
+
+ for (ctr = 0; ctr < MAX_MIG_IRQS; ctr++) {
+ if ((diff >> (ctr << 2)) & 0xff) {
+ migrated_irqs[cpu_id][ctr].id = icdiptr_offset + ctr;
+ migrated_irqs[cpu_id][ctr].src_cpuif = bitindex(orig);
+ migrated_irqs[cpu_id][ctr].dest_cpuif = bitindex(curr);
+ desc = dequeue_virq(migrated_irqs[cpu_id][ctr].id, cpu_id);
+ if (desc) {
+ migrated_irqs[cpu_id][ctr].desc = desc;
+ cpu_mask |= 1 << migrated_irqs[cpu_id][ctr].dest_cpuif;
+ }
+ }
+ }
+
+ return cpu_mask;
+}
+
+/*
+ * Find if any virqs are pending in the list registers for the physical interrupt(s)
+ * that have just been migrated. Save this information and ask the target cpu to
+ * enqueue them.
+ */
+unsigned start_virq_migration(unsigned orig, unsigned curr, unsigned icdiptr_offset)
+{
+ unsigned virq_mig_mask = 0;
+
+ /* Find the pending virqs */
+ virq_mig_mask = set_mig_irq_info(orig, curr, icdiptr_offset);
+
+ /*
+ * Ask the new target cpu interface to enqueue our
+ * pending virtual interrupts.
+ */
+ if (virq_mig_mask) {
+ send_hyp_ipi(virq_mig_mask, IPI_MIGRATE_VIRQS);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Pick up the migrated virqs from the cpu that sent them
+ */
+void complete_virq_migration(unsigned src_cpuid)
+{
+ unsigned ctr, cpu_id = read_cpuid();
+ unsigned cluster_id = read_clusterid();
+ unsigned dest_cpuif = get_cpuif(cluster_id, cpu_id);
+
+ for (ctr = 0; ctr < MAX_MIG_IRQS; ctr++) {
+ if (migrated_irqs[src_cpuid][ctr].desc &&
+ migrated_irqs[src_cpuid][ctr].dest_cpuif == dest_cpuif) {
+ enqueue_interrupt(migrated_irqs[src_cpuid][ctr].desc, cpu_id);
+ }
+ }
+
+// set_event(VIRQ_MIG_DONE, src_cpuid);
+ return;
+}
+
+
void dump_vgic_state()
{
unsigned int i;
@@ -153,46 +314,6 @@ static void set_vgic_queue_entry(struct gic_cpuif *cpuif, unsigned int descr)
*oflowh = oflowp;
}
-/*
- * The vGIC spec implements 64 list registers across two 32-bit status
- * registers. Since all of the list registers may not be implemented,
- * this function returns the maximum index we need to bother about.
- */
-static inline unsigned elrsr_max_index(unsigned cpuid)
-{
- return (hv_lr_count[cpuid] - 1) >> 5;
-}
-
-/*
- * In a HYP view list register status register both active and unimplemented
- * interrupts are represented by a 0 bit. This function returns a 32-bit value
- * where each set bit represents an active list register. Its basically the
- * inverse of what the elrsr returns while taking into account unimplemented
- * interrupts.
- */
-static unsigned get_elrsr_active_bits(unsigned index, unsigned cpuid,
- unsigned max_index)
-{
- unsigned elrsr =
- ~(read32(VGIC_HV_PHY_BASE + GICH_ELRSR0 + (index << 2)));
-
- if (index == max_index) {
- /*
- * Get the remainder, shift 1 times remainder and subtract 1
- * from it to form the mask.
- */
- elrsr &= (1 << (hv_lr_count[cpuid] - (32 * max_index))) - 1;
- } else if (index > max_index) {
- /*
- * There can never be active virqs when the list registers
- * do not exist.
- */
- elrsr = 0;
- }
-
- return elrsr;
-}
-
void vgic_savestate(unsigned int cpu)
{
struct gic_cpuif *cpuif = &(cpuifs[cpu]);
diff --git a/big-little/include/events.h b/big-little/include/events.h
index dc0025d..b31df03 100644
--- a/big-little/include/events.h
+++ b/big-little/include/events.h
@@ -30,7 +30,7 @@
/*
* Events for inter/intra-cluster sync
*/
-#define MAX_EVENTS 12
+#define MAX_EVENTS 13
/* Inter cluster events */
#define IB_CONTEXT_DONE 0
@@ -60,6 +60,7 @@
#define CACHE_GEOM_DONE 10
#define VID_REGS_DONE 11
+#define VIRQ_MIG_DONE 12
/* Defines for Secure events */
#define MAX_SEC_EVENTS 3
diff --git a/big-little/include/vgiclib.h b/big-little/include/vgiclib.h
index 0d5f461..5cbca56 100644
--- a/big-little/include/vgiclib.h
+++ b/big-little/include/vgiclib.h
@@ -23,6 +23,12 @@
#ifndef VGICLIB_H
#define VGICLIB_H
+/*
+ * The maximum number of virtual interrupts that can be
+ * migrated in response to a single ICDIPTR access.
+ */
+#define MAX_MIG_IRQS 4
+
#include "gic_registers.h"
struct overflowint {
@@ -42,10 +48,18 @@ struct gic_cpuif {
unsigned int freelist; /* Bitmask of which list entries are in use */
};
-void vgic_init(void);
-void vgic_savestate(unsigned int cpu);
-void vgic_loadstate(unsigned int cpu);
-void vgic_refresh(unsigned int cpu);
-void enqueue_interrupt(unsigned int descr, unsigned int cpu);
+typedef struct irq_info {
+ unsigned id;
+ unsigned src_cpuif;
+ unsigned dest_cpuif;
+ unsigned desc;
+} mig_irq_info;
+extern unsigned start_virq_migration(unsigned, unsigned, unsigned);
+extern void complete_virq_migration(unsigned);
+extern void vgic_init(void);
+extern void vgic_savestate(unsigned int cpu);
+extern void vgic_loadstate(unsigned int cpu);
+extern void vgic_refresh(unsigned int cpu);
+extern void enqueue_interrupt(unsigned int descr, unsigned int cpu);
#endif /* VGICLIB_H */
diff --git a/big-little/secure_world/secure_resets.c b/big-little/secure_world/secure_resets.c
index 8c59d4a..079c114 100644
--- a/big-little/secure_world/secure_resets.c
+++ b/big-little/secure_world/secure_resets.c
@@ -57,7 +57,7 @@ unsigned ve_reset_type[NUM_CPUS];
/*
* Allocate secure events in our device page
*/
-unsigned event[MAX_CORES][MAX_SEC_EVENTS]
+unsigned event[MAX_CORES][MAX_EVENTS]
__attribute__ ((section("BL_SEC_DV_PAGE")));
/*
diff --git a/big-little/virtualisor/vgic_trap_handler.c b/big-little/virtualisor/vgic_trap_handler.c
index 4e626d0..5e64eea 100644
--- a/big-little/virtualisor/vgic_trap_handler.c
+++ b/big-little/virtualisor/vgic_trap_handler.c
@@ -24,6 +24,7 @@
#include "gic_registers.h"
#include "misc.h"
#include "virt_helpers.h"
+#include "vgiclib.h"
/*
* Whether A15 or A7, the distributor accesses are virtualised in
@@ -38,6 +39,9 @@ void handle_vgic_distif_abort(unsigned pa, unsigned *data, unsigned write)
/* Access to Processor Target registers */
case (GICD_CPUS >> 7):
if (write) {
+ unsigned icdiptr_orig = read32(pa);
+ unsigned icdiptr_curr = 0;
+
/*
* OS is trying to reprogram the processor targets register.
* Find out the cpu interface mask for this cluster and use
@@ -45,6 +49,10 @@ void handle_vgic_distif_abort(unsigned pa, unsigned *data, unsigned write)
*/
value = get_cpuif_mask(*data);
write32(pa, value);
+ icdiptr_curr = value;
+
+ start_virq_migration(icdiptr_orig, icdiptr_curr, reg_offset - GICD_CPUS);
+
} else {
value = read32(pa);
*data = get_cpu_mask(value);