author     Sumit Garg <sumit.garg@linaro.org>    2020-04-22 16:29:59 +0530
committer  Sumit Garg <sumit.garg@linaro.org>    2020-05-01 13:32:50 +0100
commit     25c96663126264ec758c49a4a01a9c285f4ccc61 (patch)
tree       6b1f733692eafeebf96a2b0e11e2be363cc8e257
parent     0846c33ea6b2c10f775fe04e50f9150378faf743 (diff)
irqchip/gic-v3: Setup arch specific IPI as pseudo NMI
Add support to mark an arch specific IPI as a pseudo NMI. Currently it is
used to allow the arm64 specific IPI_CALL_NMI_FUNC to be marked as a
pseudo NMI.

Brief description of changes:
- Update NMI setup/teardown routines for SGIs.
- Enable NMI support prior to gic_smp_init().
- Set up a custom flow handler for an SGI configured as an NMI.
- Request, prepare and enable the arch specific IPI as a per-CPU NMI
  using the common APIs.

Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
-rw-r--r--  arch/arm64/kernel/smp.c        5
-rw-r--r--  drivers/irqchip/irq-gic-v3.c   81
2 files changed, 82 insertions, 4 deletions
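
The driver-side changes below lean entirely on the generic per-CPU NMI API
(request_percpu_nmi(), prepare_percpu_nmi(), enable_percpu_nmi()) rather
than anything GIC specific. A minimal sketch of that sequence follows; the
handler name, function name and irq argument are placeholders, not code
from this patch:

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Placeholder handler: runs in NMI context, so keep it short and do not
 * sleep or take regular spinlocks. */
static irqreturn_t my_nmi_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int setup_my_percpu_nmi(unsigned int irq)
{
	int err;

	/* Register the handler once, from task context */
	err = request_percpu_nmi(irq, my_nmi_handler, "my_nmi", NULL);
	if (err)
		return err;

	/* Prepare and enable for the calling CPU; every other CPU must
	 * do the same, e.g. from a CPU hotplug callback. */
	err = prepare_percpu_nmi(irq);
	if (!err)
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	return err;
}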
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4bf5a26efdc6..529f937d75f3 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -802,6 +802,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_CALL_NMI_FUNC, "NMI function call interrupts"),
};
+int arch_get_ipinr_nmi(void)
+{
+ return IPI_CALL_NMI_FUNC;
+}
+
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
trace_ipi_raise(target, ipi_types[ipinr]);
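
Further down, the GIC driver adds a __weak arch_get_ipinr_nmi() that
returns -1, so an architecture opts in simply by overriding it, exactly as
the arm64 hunk above does. A sketch of what another architecture's opt-in
could look like, with IPI_MY_NMI as a hypothetical IPI enumerator:

/* Hypothetical override of the weak hook added by this patch */
int arch_get_ipinr_nmi(void)
{
	return IPI_MY_NMI;	/* that arch's NMI-capable IPI slot */
}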
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 33a7e9478f65..9c0534f76fa9 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -57,6 +57,7 @@ struct gic_chip_data {
bool has_rss;
unsigned int ppi_nr;
struct partition_desc **ppi_descs;
+ int virq_ipi_nmi;
};
static struct gic_chip_data gic_data __read_mostly;
@@ -471,6 +472,11 @@ static int gic_irq_nmi_setup(struct irq_data *d)
if (WARN_ON(gic_irq(d) >= 8192))
return -EINVAL;
+ if (get_intid_range(d) == SGI_RANGE) {
+ gic_irq_set_prio(d, GICD_INT_NMI_PRI);
+ return 0;
+ }
+
/* desc lock should already be held */
if (gic_irq_in_rdist(d)) {
u32 idx = gic_get_ppi_index(d);
@@ -508,6 +514,11 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
if (WARN_ON(gic_irq(d) >= 8192))
return;
+ if (get_intid_range(d) == SGI_RANGE) {
+ gic_irq_set_prio(d, GICD_INT_DEF_PRI);
+ return;
+ }
+
/* desc lock should already be held */
if (gic_irq_in_rdist(d)) {
u32 idx = gic_get_ppi_index(d);
@@ -1060,6 +1071,10 @@ static int gic_starting_cpu(unsigned int cpu)
if (gic_dist_supports_lpis())
its_cpu_init();
+ if (gic_data.virq_ipi_nmi >= 0)
+ if (!prepare_percpu_nmi(gic_data.virq_ipi_nmi))
+ enable_percpu_nmi(gic_data.virq_ipi_nmi, IRQ_TYPE_NONE);
+
return 0;
}
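
gic_starting_cpu() runs on each CPU as it comes online, so every secondary
CPU prepares and enables the NMI for itself (the boot CPU is handled in
gic_smp_init() below). The patch adds no teardown path; for orientation, a
sketch of the inverse calls the generic per-CPU NMI API provides, assuming
the same irq:

/* Sketch: inverse operations from the generic per-CPU NMI API */
disable_percpu_nmi(irq);	/* on the CPU going offline */
teardown_percpu_nmi(irq);	/* undo prepare_percpu_nmi() */
free_percpu_nmi(irq, NULL);	/* once, after all CPUs have torn down */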
@@ -1150,12 +1165,47 @@ static void sgi_flow_handler(struct irq_desc *desc)
do_handle_IPI(d->hwirq);
}
+int __weak arch_get_ipinr_nmi(void)
+{
+ return -1;
+}
+
+static void sgi_nmi_flow_handler(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ struct irqaction *action = desc->action;
+
+ /*
+ * Contrarily to the normal IRQ flow, we need to EOI SGIs
+ * early, as they may result in a context switch.
+ */
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_eoimode1_eoi_irq(d);
+ else
+ gic_eoi_irq(d);
+
+ if (likely(action))
+ action->handler(d->hwirq, NULL);
+}
+
+static irqreturn_t sgi_nmi_action_handler(int hwirq, void *dev_id)
+{
+ int ipi_nmi = arch_get_ipinr_nmi();
+
+ if (hwirq != ipi_nmi)
+ return IRQ_NONE;
+
+ do_handle_IPI(hwirq);
+
+ return IRQ_HANDLED;
+}
+
static void gic_smp_init(void)
{
struct irq_fwspec sgi_fwspec = {
.fwnode = gic_data.fwnode,
};
- int base_sgi, i;
+ int base_sgi, i, ipi_nmi;
if (is_of_node(gic_data.fwnode)) {
/* DT */
@@ -1176,9 +1226,33 @@ static void gic_smp_init(void)
if (WARN_ON(base_sgi <= 0))
return;
- for (i = 0; i < NR_IPI; i++)
+ gic_data.virq_ipi_nmi = -1;
+
+ ipi_nmi = arch_get_ipinr_nmi();
+ if (ipi_nmi >= 0 && ipi_nmi < NR_IPI) {
+ int ret;
+
+ irq_set_handler(base_sgi + ipi_nmi, sgi_nmi_flow_handler);
+ ret = request_percpu_nmi(base_sgi + ipi_nmi,
+ sgi_nmi_action_handler, "ipi_nmi",
+ NULL);
+ if (!ret) {
+ ret = prepare_percpu_nmi(base_sgi + ipi_nmi);
+ if (!ret) {
+ enable_percpu_nmi(base_sgi + ipi_nmi,
+ IRQ_TYPE_NONE);
+ gic_data.virq_ipi_nmi = base_sgi + ipi_nmi;
+ }
+ }
+ }
+
+ for (i = 0; i < NR_IPI; i++) {
+ if (gic_data.virq_ipi_nmi == base_sgi + i)
+ continue;
+
irq_set_chained_handler_and_data(base_sgi + i, sgi_flow_handler,
NULL);
+ }
set_smp_cross_call(gic_raise_softirq);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
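
If request_percpu_nmi() or prepare_percpu_nmi() fails here, virq_ipi_nmi
stays at -1 and the loop above installs the normal chained sgi_flow_handler
for that SGI too, so the IPI degrades gracefully to a regular interrupt. On
the sending side nothing changes either way; a sketch using the arm64
helper from the first hunk:

/* Hypothetical caller in arm64 smp.c: raise the pseudo-NMI IPI */
smp_cross_call(cpu_online_mask, IPI_CALL_NMI_FUNC);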
@@ -1677,6 +1751,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_dist_init();
gic_cpu_init();
+ gic_enable_nmi_support();
gic_smp_init();
gic_cpu_pm_init();
@@ -1688,8 +1763,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gicv2m_init(handle, gic_data.domain);
}
- gic_enable_nmi_support();
-
return 0;
out_free:
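
The last two hunks move gic_enable_nmi_support() from after gicv2m_init()
to before gic_smp_init(): the SGI NMI setup above can only succeed once
pseudo NMI support has been detected and enabled. The resulting order,
abridged from gic_init_bases():

/* Init order after this patch (abridged) */
gic_dist_init();
gic_cpu_init();
gic_enable_nmi_support();	/* moved earlier by this patch */
gic_smp_init();			/* can now set the SGI up as an NMI */
gic_cpu_pm_init();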