From d911aed8adf74e1fae88d082b8474b2175b7f1da Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 17 Nov 2005 16:27:02 -0500
Subject: [PARISC] Fix our interrupts not to use smp_call_function

Fix our interrupts not to use smp_call_function

On K and D class smp, the generic code calls this under an irq
spinlock, which causes the WARN_ON() message in smp_call_function()
(and is also illegal because it could deadlock).

The fix is to use a new scheme based on the IPI_NOP.

Signed-off-by: James Bottomley
Signed-off-by: Kyle McMartin
---
 arch/parisc/kernel/irq.c | 26 +++++++++++++++++---------
 arch/parisc/kernel/smp.c | 20 ++++++++++++++------
 2 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 21a9c5ad580..3998c0cb925 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -43,26 +43,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
 */
 static volatile unsigned long cpu_eiem = 0;
 
-static void cpu_set_eiem(void *info)
-{
-        set_eiem((unsigned long) info);
-}
-
-static inline void cpu_disable_irq(unsigned int irq)
+static void cpu_disable_irq(unsigned int irq)
 {
         unsigned long eirr_bit = EIEM_MASK(irq);
 
         cpu_eiem &= ~eirr_bit;
-        on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+        /* Do nothing on the other CPUs.  If they get this interrupt,
+         * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
+         * handle it, and the set_eiem() at the bottom will ensure it
+         * then gets disabled */
 }
 
 static void cpu_enable_irq(unsigned int irq)
 {
         unsigned long eirr_bit = EIEM_MASK(irq);
 
-        mtctl(eirr_bit, 23);    /* clear EIRR bit before unmasking */
         cpu_eiem |= eirr_bit;
-        on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+
+        /* FIXME: while our interrupts aren't nested, we cannot reset
+         * the eiem mask if we're already in an interrupt.  Once we
+         * implement nested interrupts, this can go away
+         */
+        if (!in_interrupt())
+                set_eiem(cpu_eiem);
+
+        /* This is just a simple NOP IPI.  But what it does is cause
+         * all the other CPUs to do a set_eiem(cpu_eiem) at the end
+         * of the interrupt handler */
+        smp_send_all_nop();
 }
 
 static unsigned int cpu_startup_irq(unsigned int irq)
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 268b0f2a328..ce89da0f654 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         while (ops) {
                 unsigned long which = ffz(~ops);
 
+                ops &= ~(1 << which);
+
                 switch (which) {
+                case IPI_NOP:
+#if (kDEBUG>=100)
+                        printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
+#endif /* kDEBUG */
+                        break;
+
                 case IPI_RESCHEDULE:
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_RESCHEDULE);
                         /*
                          * Reschedule callback.  Everything to be
                          * done is done by the interrupt return path.
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CALL_FUNC);
                         {
                                 volatile struct smp_call_struct *data;
                                 void (*func)(void *info);
@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CPU_START);
 #ifdef ENTRY_SYS_CPUS
                         p->state = STATE_RUNNING;
 #endif
@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CPU_STOP);
 #ifdef ENTRY_SYS_CPUS
 #else
                         halt_processor();
@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
                         printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
 #endif /* kDEBUG */
-                        ops &= ~(1 << IPI_CPU_TEST);
                         break;
 
                 default:
                         printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
                                 this_cpu, which);
-                        ops &= ~(1 << which);
                         return IRQ_NONE;
                 } /* Switch */
         } /* while (ops) */
@@ -312,6 +314,12 @@ smp_send_start(void)   { send_IPI_allbutself(IPI_CPU_START); }
 void 
 smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
 
+void
+smp_send_all_nop(void)
+{
+        send_IPI_allbutself(IPI_NOP);
+}
+
 /**
  * Run a function on all other CPUs.
--
cgit v1.2.3
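
As background for the scheme above, here is a minimal, self-contained C sketch of why a NOP IPI is enough to propagate an EIEM change.  It is an illustration only, not the kernel's code: the fake_* names, the plain variables standing in for the EIRR/EIEM control registers, and the simplified handler are all assumptions; the real logic lives in do_cpu_irq_mask() and the PA-RISC set_eiem() accessor, which the diff does not show.

/* Stand-alone sketch (assumption: NOT the actual kernel code).
 * hw_eiem/hw_eirr are plain variables standing in for the PA-RISC
 * EIEM/EIRR control registers, and fake_do_cpu_irq_mask() is a loose
 * paraphrase of the handler exit path the patch comments refer to. */
#include <stdio.h>

#define EIEM_MASK(irq)  (1UL << (irq))

static volatile unsigned long cpu_eiem;  /* software copy of the irq mask  */
static unsigned long hw_eiem;            /* stand-in for the EIEM register */
static unsigned long hw_eirr;            /* stand-in for the EIRR register */

static void fake_set_eiem(unsigned long val)
{
        hw_eiem = val;
}

/* Disabling an irq only clears the software copy; no cross-CPU function
 * call is made.  Every CPU picks the change up the next time it leaves
 * its own interrupt handler, so this is safe under an irq spinlock. */
static void fake_cpu_disable_irq(unsigned int irq)
{
        cpu_eiem &= ~EIEM_MASK(irq);
}

/* Paraphrase of the per-CPU handler: pending bits are ANDed with
 * cpu_eiem before dispatch (the "& cpu_eiem" in the patch comment), and
 * the hardware mask is rewritten from cpu_eiem on the way out (the
 * "set_eiem() at the bottom").  A NOP IPI therefore just forces a CPU
 * through this path so it observes any mask change. */
static void fake_do_cpu_irq_mask(void)
{
        unsigned long pending = hw_eirr & cpu_eiem;

        while (pending) {
                unsigned int irq = (unsigned int)__builtin_ctzl(pending);

                pending &= ~EIEM_MASK(irq);
                printf("handled irq %u\n", irq);
        }
        hw_eirr = 0;
        fake_set_eiem(cpu_eiem);
}

int main(void)
{
        cpu_eiem = EIEM_MASK(3) | EIEM_MASK(5);
        hw_eiem = cpu_eiem;

        fake_cpu_disable_irq(5);                /* mask irq 5, no IPI wait   */
        hw_eirr = EIEM_MASK(3) | EIEM_MASK(5);  /* both lines raised         */
        fake_do_cpu_irq_mask();                 /* only irq 3 is handled     */

        printf("hardware mask is now %#lx\n", hw_eiem);
        return 0;
}

The point of the scheme is that masking an interrupt never waits on another CPU: each CPU converges on the new cpu_eiem on its own the next time it exits its interrupt handler, which is why cpu_disable_irq()/cpu_enable_irq() can be called with an irq spinlock held, where smp_call_function() would warn and could deadlock.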