Diffstat (limited to 'arch/arm/kernel/perf_event_cpu.c')
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  148
1 file changed, 148 insertions, 0 deletions
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index dd9acc95ebc0..76227484baa9 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -59,6 +59,142 @@ int perf_num_counters(void)
}
EXPORT_SYMBOL_GPL(perf_num_counters);
+#ifdef CONFIG_SMP
+/*
+ * Workaround logic that is distributed to all cores if the PMU has only
+ * a single IRQ and the CPU receiving that IRQ cannot handle it. Its
+ * job is to try to service the interrupt on the current CPU. It will
+ * also enable the IRQ again if all the other CPUs have already tried to
+ * service it.
+ */
+static void cpu_pmu_do_percpu_work(struct irq_work *w)
+{
+	struct pmu_hw_events *hw_events =
+		container_of(w, struct pmu_hw_events, work);
+	struct arm_pmu *cpu_pmu = hw_events->percpu_pmu;
+
+	/* Ignore the return code; we can do nothing useful with it */
+	cpu_pmu->handle_irq(0, cpu_pmu);
+
+	if (atomic_dec_and_test(&cpu_pmu->remaining_irq_work))
+		enable_irq(cpu_pmu->muxed_spi_workaround_irq);
+}
+
+/*
+ * Issue work to the other CPUs. Must be called whilst we own the
+ * hotplug locks.
+ */
+static void cpu_pmu_queue_percpu_work(struct arm_pmu *cpu_pmu)
+{
+	int cpu;
+
+	atomic_add(num_online_cpus() - 1, &cpu_pmu->remaining_irq_work);
+
+	for_each_online_cpu(cpu) {
+		struct pmu_hw_events *hw_events =
+			per_cpu_ptr(cpu_pmu->hw_events, cpu);
+
+		if (cpu == smp_processor_id())
+			continue;
+
+		/*
+		 * We assume that the IPI within irq_work_queue_on()
+		 * implies a full memory barrier making the value of
+		 * cpu_pmu->remaining_irq_work visible to the target.
+		 */
+		if (!irq_work_queue_on(&hw_events->work, cpu))
+			if (atomic_dec_and_test(&cpu_pmu->remaining_irq_work))
+				enable_irq(cpu_pmu->muxed_spi_workaround_irq);
+	}
+}
+
+void cpu_pmu_muxed_spi_workaround_worker(struct work_struct *work)
+{
+	struct arm_pmu *cpu_pmu =
+		container_of(work, struct arm_pmu, muxed_spi_workaround_work);
+
+	get_online_cpus();
+	cpu_pmu_queue_percpu_work(cpu_pmu);
+	put_online_cpus();
+}
+
+/*
+ * Called when the main interrupt handler cannot determine the source
+ * of the interrupt. It will deploy a workaround if we are running on an
+ * SMP platform with only a single muxed SPI.
+ *
+ * The workaround disables the interrupt and distributes irqwork to all
+ * other processors in the system. Hopefully one of them will clear the
+ * interrupt...
+ */
+static irqreturn_t cpu_pmu_handle_irq_none(int irq_num, struct arm_pmu *cpu_pmu)
+{
+	if (irq_num != cpu_pmu->muxed_spi_workaround_irq)
+		return IRQ_NONE;
+
+	disable_irq_nosync(cpu_pmu->muxed_spi_workaround_irq);
+
+	if (try_get_online_cpus()) {
+		cpu_pmu_queue_percpu_work(cpu_pmu);
+		put_online_cpus();
+	} else {
+		/*
+		 * There is a CPU hotplug operation in flight making it
+		 * unsafe for us to queue the percpu work. The PMU is
+		 * already silenced, so we'll leave it like that and
+		 * schedule some work to tidy things up.
+		 *
+		 * Taking this code path should be very rare, which is
+		 * good because the latencies involved here are way too
+		 * long for good profiling.
+		 */
+		schedule_work(&cpu_pmu->muxed_spi_workaround_work);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int cpu_pmu_muxed_spi_workaround_init(struct arm_pmu *cpu_pmu)
+{
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *hw_events =
+			per_cpu_ptr(cpu_pmu->hw_events, cpu);
+
+		init_irq_work(&hw_events->work, cpu_pmu_do_percpu_work);
+	}
+
+	INIT_WORK(&cpu_pmu->muxed_spi_workaround_work,
+		  cpu_pmu_muxed_spi_workaround_worker);
+	atomic_set(&cpu_pmu->remaining_irq_work, 0);
+	cpu_pmu->muxed_spi_workaround_irq = platform_get_irq(pmu_device, 0);
+
+	return 0;
+}
+
+static void cpu_pmu_muxed_spi_workaround_term(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->muxed_spi_workaround_irq = 0;
+}
+#else /* CONFIG_SMP */
+static int cpu_pmu_muxed_spi_workaround_init(struct arm_pmu *cpu_pmu)
+{
+	return 0;
+}
+
+static void cpu_pmu_muxed_spi_workaround_term(struct arm_pmu *cpu_pmu)
+{
+}
+
+static irqreturn_t cpu_pmu_handle_irq_none(int irq_num, struct arm_pmu *cpu_pmu)
+{
+	return IRQ_NONE;
+}
+#endif /* CONFIG_SMP */
+
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
@@ -98,6 +234,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
			if (irq >= 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
		}
+
+		cpu_pmu_muxed_spi_workaround_term(cpu_pmu);
	}
}
@@ -155,6 +293,16 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
		}
+
+		/*
+		 * If we are running SMP and have only one interrupt source,
+		 * then get ready to share that single IRQ among the cores.
+		 */
+		if (nr_cpu_ids > 1 && irqs == 1) {
+			err = cpu_pmu_muxed_spi_workaround_init(cpu_pmu);
+			if (err)
+				return err;
+		}
	}

	return 0;
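
The core of the patch is the fan-out pattern split between cpu_pmu_queue_percpu_work() and cpu_pmu_do_percpu_work(): pre-charge an atomic counter with the number of peer CPUs, have each peer decrement it after attempting to service the interrupt, and let whichever peer brings the counter to zero re-enable the muxed IRQ. Below is a minimal standalone sketch of that pattern, not part of the patch: a userspace analogue using C11 atomics and pthreads, with purely illustrative names (NR_PEERS, remaining_work, irq_enabled, peer) that do not appear in the kernel code above.

/*
 * Userspace sketch of the "fan out, last finisher re-arms" pattern:
 * the counter is pre-charged with the number of peers, each peer
 * decrements it after doing its share of the work, and the peer that
 * brings it to zero "re-enables" the simulated interrupt line.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_PEERS 4

static atomic_int remaining_work;
static atomic_int irq_enabled;

static void handle_irq(int cpu)
{
	printf("cpu%d: trying to service the muxed interrupt\n", cpu);
}

static void *peer(void *arg)
{
	int cpu = (int)(long)arg;

	handle_irq(cpu);

	/* Analogue of atomic_dec_and_test(): the last peer re-arms the IRQ. */
	if (atomic_fetch_sub(&remaining_work, 1) == 1) {
		atomic_store(&irq_enabled, 1);
		printf("cpu%d: last to finish, re-enabling the IRQ\n", cpu);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_PEERS];
	int i;

	/* The "IRQ fires": mask it, then fan the work out to the peers. */
	atomic_store(&irq_enabled, 0);
	atomic_store(&remaining_work, NR_PEERS);

	for (i = 0; i < NR_PEERS; i++)
		pthread_create(&t[i], NULL, peer, (void *)(long)i);
	for (i = 0; i < NR_PEERS; i++)
		pthread_join(t[i], NULL);

	printf("irq_enabled = %d\n", atomic_load(&irq_enabled));
	return 0;
}

As in the patch, the re-enable is tied to the counter reaching zero, so the interrupt line cannot be unmasked while any peer still has work outstanding.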