author	Daniel Thompson <daniel.thompson@linaro.org>	2014-09-25 15:50:17 +0100
committer	Daniel Thompson <daniel.thompson@linaro.org>	2015-01-13 15:40:37 +0000
commit	a861e098dd83a6e698342a7cb8ebae9f8ce39c79 (patch)
tree	2a824d9cd54b1806e2df4edec2fc06546a833c74 /kernel
parent	f6190b863f77c3c9ff4e1acc54e56736831d5267 (diff)
irq: Allow interrupts to be routed to NMI (or similar)
Some combinations of architectures and interrupt controllers make it possible for arbitrary interrupt signals to be selectively made immune to masking by local_irq_disable(). For example, on ARM platforms many interrupt controllers allow interrupts to be routed to FIQ rather than IRQ.

These features could be exploited to implement debug and tracing facilities that, on x86 platforms, are implemented using NMI (perf, hard lockup detection, kgdb).

This patch assumes that management of the NMI handler itself will be architecture specific (perhaps a notifier list as on x86, hard coded as on ARM, or something else entirely). The generic layer still manages the irq as normal (affinity, enable/disable, free) but is not responsible for dispatching it.

Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
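As an illustration only (not part of this patch), the sketch below shows how an irq_chip might implement the irq_set_nmi_routing callback that the core calls in the hunks further down. The foo_ names, the register offsets and the FIQ steering registers are invented; only the callback signature and the "return 1 when the routing change took effect" convention are taken from the patch.

/*
 * Illustrative sketch only: an irq_chip implementing the
 * irq_set_nmi_routing callback used by __irq_set_nmi_routing() below.
 * Register layout and foo_ names are assumptions.
 */
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>

#define FOO_FIQ_ENABLE_SET	0x10	/* assumed register offsets */
#define FOO_FIQ_ENABLE_CLR	0x14

static void __iomem *foo_base;		/* mapped elsewhere in the driver */

static int foo_irq_set_nmi_routing(struct irq_data *d, unsigned int nmi)
{
	/* Steer this hwirq to FIQ (NMI-like) or back to a normal IRQ. */
	if (nmi)
		writel_relaxed(BIT(d->hwirq), foo_base + FOO_FIQ_ENABLE_SET);
	else
		writel_relaxed(BIT(d->hwirq), foo_base + FOO_FIQ_ENABLE_CLR);

	return 1;	/* 1 reports that the routing change took effect */
}

static struct irq_chip foo_chip = {
	.name			= "FOO",
	.irq_set_nmi_routing	= foo_irq_set_nmi_routing,
	/* .irq_mask/.irq_unmask etc. as for any other chip */
};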
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/irq/manage.c	29
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 80692373abd6..8e669051759d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -571,6 +571,17 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
return canrequest;
}
+int __irq_set_nmi_routing(struct irq_desc *desc, unsigned int irq,
+ unsigned int nmi)
+{
+ struct irq_chip *chip = desc->irq_data.chip;
+
+ if (!chip || !chip->irq_set_nmi_routing)
+ return -EINVAL;
+
+ return chip->irq_set_nmi_routing(&desc->irq_data, nmi);
+}
+
int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags)
{
@@ -1058,11 +1069,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* the same type (level, edge, polarity). So both flag
* fields must have IRQF_SHARED set and the bits which
* set the trigger type must match. Also all must
- * agree on ONESHOT.
+ * agree on ONESHOT and NMI.
*/
if (!((old->flags & new->flags) & IRQF_SHARED) ||
((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
- ((old->flags ^ new->flags) & IRQF_ONESHOT))
+ ((old->flags ^ new->flags) & IRQF_ONESHOT) ||
+ ((old->flags ^ new->flags) & __IRQF_NMI))
goto mismatch;
/* All handlers must agree on per-cpuness */
@@ -1153,6 +1165,19 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
init_waitqueue_head(&desc->wait_for_threads);
+ if (new->flags & __IRQF_NMI) {
+ ret = __irq_set_nmi_routing(desc, irq, true);
+ if (ret != 1)
+ goto out_mask;
+ } else {
+ ret = __irq_set_nmi_routing(desc, irq, false);
+ if (ret == 1) {
+ pr_err("Failed to disable NMI routing for irq %d\n",
+ irq);
+ goto out_mask;
+ }
+ }
+
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc, irq,
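
Finally, a hedged sketch of how a caller might sit on top of this change and request an interrupt routed to NMI/FIQ. The watchdog naming and IRQ number are invented, and it is assumed that the __IRQF_NMI flag tested in __setup_irq() above can be set by the requester (directly or via a public alias); as the commit message notes, dispatch on the NMI path is architecture specific, so the handler below only covers the normal-IRQ fallback.

/*
 * Illustrative use only: requesting an interrupt that should be routed
 * to NMI/FIQ so that the new code in __setup_irq() calls the chip's
 * irq_set_nmi_routing callback.
 */
#include <linux/interrupt.h>
#include <linux/printk.h>

static irqreturn_t hyp_wdt_handler(int irq, void *dev_id)
{
	/* Normal-IRQ fallback path; NMI dispatch is architecture specific. */
	return IRQ_HANDLED;
}

static int hyp_wdt_setup(unsigned int irq)
{
	int ret;

	/* __IRQF_NMI asks the core to route this line to NMI/FIQ. */
	ret = request_irq(irq, hyp_wdt_handler, __IRQF_NMI,
			  "hyp-wdt", NULL);
	if (ret)
		pr_err("hyp-wdt: cannot route irq %u to NMI (%d)\n",
		       irq, ret);

	return ret;
}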