path: root/arch/arm/common/gic.c
Diffstat (limited to 'arch/arm/common/gic.c')
-rw-r--r--  arch/arm/common/gic.c  97
1 file changed, 93 insertions, 4 deletions
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index aa526998418..d108cc4e542 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -137,6 +137,86 @@ static inline unsigned int gic_irq(struct irq_data *d)
return d->hwirq;
}
+#ifdef CONFIG_OF
+static u32 gic_cpuif_logical_map[NR_CPUS];
+#define cpuif_logical_map(cpu) gic_cpuif_logical_map[cpu]
+
+/*
+ * Create a mapping of GIC CPU IF numbers to logical cpus through the device
+ * tree. GIC CPU IFs are linked to the respective cpu nodes through the "cpu"
+ * phandle.
+ */
+static void __init gic_init_if_maps(struct gic_chip_data *gic)
+{
+ struct device_node *ncpu, *gic_cpuif;
+ struct irq_domain *domain = gic->domain;
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ cpuif_logical_map(i) = cpu_logical_map(i);
+
+ if (!domain || !domain->of_node)
+ return;
+
+ for_each_child_of_node(domain->of_node, gic_cpuif) {
+ const u32 *cpuif_hwid, *mpidr;
+ int len;
+
+ if (!of_device_is_compatible(gic_cpuif, "arm,gic-cpuif"))
+ continue;
+
+ pr_debug(" * %s...\n", gic_cpuif->full_name);
+
+ ncpu = of_parse_phandle(gic_cpuif, "cpu", 0);
+
+ if (!ncpu) {
+ pr_err(" * %s missing cpu phandle\n",
+ gic_cpuif->full_name);
+ continue;
+ }
+
+ mpidr = of_get_property(ncpu, "reg", &len);
+
+ if (!mpidr || len != 4) {
+ pr_err(" * %s missing reg property\n",
+ ncpu->full_name);
+ continue;
+ }
+
+ cpuif_hwid = of_get_property(gic_cpuif, "cpuif-id", &len);
+
+ if (!cpuif_hwid || len != 4) {
+ pr_err(" * %s missing cpuif-id property\n",
+ gic_cpuif->full_name);
+ continue;
+ }
+
+ /*
+ * Do the logical enumeration once in arm_dt_init_cpu_maps and
+ * use it again here to avoid logical numbering mix-ups between
+ * cpu and interrupt controller ids
+ */
+
+ i = get_logical_index(be32_to_cpup(mpidr));
+
+ if (i < 0) {
+ pr_err(" * %s mpidr mismatch\n",
+ ncpu->full_name);
+ continue;
+ }
+
+ if (!i)
+ printk(KERN_INFO "Booting Linux on GIC CPU IF 0x%x\n",
+ be32_to_cpup(cpuif_hwid));
+
+ cpuif_logical_map(i) = be32_to_cpup(cpuif_hwid);
+ }
+}
+#else
+static inline void gic_init_if_maps(struct gic_chip_data *gic) {}
+#define cpuif_logical_map(cpu) cpu_logical_map(cpu)
+#endif
+
/*
* Routines to acknowledge, disable and enable interrupts
*/
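For reference, a hypothetical device-tree sketch of the layout gic_init_if_maps() above appears to expect: "arm,gic-cpuif" child nodes under the GIC node, each linking one CPU interface to a cpu node through the "cpu" phandle and carrying its hardware id in the "cpuif-id" cell. Node names, compatible string for the GIC itself, register addresses and the cpu0/cpu1 labels below are illustrative assumptions, not taken from this patch; the referenced cpu nodes are assumed to live under /cpus with a "reg" property holding the MPIDR that get_logical_index() matches against.

	gic: interrupt-controller@2c001000 {
		compatible = "arm,cortex-a9-gic";
		interrupt-controller;
		#interrupt-cells = <3>;
		reg = <0x2c001000 0x1000>,	/* distributor (illustrative) */
		      <0x2c000100 0x100>;	/* CPU interface (illustrative) */

		gic-cpuif0 {
			compatible = "arm,gic-cpuif";
			cpuif-id = <0>;
			cpu = <&cpu0>;	/* phandle to a /cpus node, reg = MPIDR */
		};

		gic-cpuif1 {
			compatible = "arm,gic-cpuif";
			cpuif-id = <1>;
			cpu = <&cpu1>;
		};
	};
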
@@ -242,7 +322,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return -EINVAL;
mask = 0xff << shift;
- bit = 1 << (cpu_logical_map(cpu) + shift);
+ bit = 1 << (cpuif_logical_map(cpu) + shift);
raw_spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
@@ -349,7 +429,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
u32 cpumask;
unsigned int gic_irqs = gic->gic_irqs;
void __iomem *base = gic_data_dist_base(gic);
- u32 cpu = cpu_logical_map(smp_processor_id());
+ u32 cpu = cpuif_logical_map(smp_processor_id());
cpumask = 1 << cpu;
cpumask |= cpumask << 8;
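As a worked example of the cpumask replication above (assuming the function continues with the usual cpumask |= cpumask << 16 step, which falls outside this hunk's context): with cpuif_logical_map(smp_processor_id()) == 2, cpumask starts as 0x00000004, becomes 0x00000404 after the << 8 step, and 0x04040404 after the << 16 step, i.e. the same CPU-targets byte replicated into each of the four per-interrupt fields of a 32-bit GIC_DIST_TARGET word.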
@@ -651,6 +731,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
BUG_ON(gic_nr >= MAX_GIC_NR);
gic = &gic_data[gic_nr];
+
#ifdef CONFIG_GIC_NON_BANKED
if (percpu_offset) { /* Frankein-GIC without banked registers... */
unsigned int cpu;
@@ -665,7 +746,8 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
}
for_each_possible_cpu(cpu) {
- unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+ unsigned long offset =
+ percpu_offset * cpuif_logical_map(cpu);
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
}
@@ -716,6 +798,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
if (WARN_ON(!gic->domain))
return;
+ /*
+ * create the logical/physical mapping just for the
+ * primary GIC
+ */
+ if (gic_nr == 0)
+ gic_init_if_maps(gic);
+
gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic);
gic_cpu_init(gic);
@@ -737,7 +826,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
- map |= 1 << cpu_logical_map(cpu);
+ map |= 1 << cpuif_logical_map(cpu);
/*
* Ensure that stores to Normal memory are visible to the