author	Linus Torvalds <torvalds@linux-foundation.org>	2012-02-19 11:46:36 -0800
committer	Ingo Molnar <mingo@elte.hu>	2012-02-20 09:30:18 +0100
commit	986cb48c5a4de0085db94d343b4e7dcf54355ec1 (patch)
tree	a25140de28f6b73dca92600273f589719af090b3	/arch/x86/kernel/irq_32.c
parent	b01543dfe67bb1d191998e90d20534dc354de059 (diff)
x86-32/irq: Don't switch to irq stack for a user-mode irq
If the irq happens in user mode, our kernel stack is empty (apart from
the pt_regs themselves, of course), so there's no need or advantage to
switch.

And it really doesn't save any stack space, quite the reverse: it means
that a nested interrupt cannot switch irq stacks. So instead of saving
kernel stack space, it actually causes the potential for *more* stack
usage.

Also simplify the preemption count copy when we do switch stacks: just
copy the whole preemption count, rather than just the softirq parts of
it. There is no advantage to the partial copy: it is more effort to get
a less correct result.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1202191139260.10000@i5.linux-foundation.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
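As a rough illustration of the preempt_count change described above, here is a small stand-alone C program comparing the old masked merge with the new whole-value copy. The SOFTIRQ_MASK value and the sample counts are made up for the example; only the bit arithmetic mirrors the lines removed and added in the diff below.

#include <stdio.h>

/* Made-up mask purely for illustration; the kernel's real SOFTIRQ_MASK
 * comes from its preempt_count layout and differs from this value. */
#define SOFTIRQ_MASK 0x0000ff00u

int main(void)
{
	unsigned int cur = 0x00010a03u;	/* task context's preempt_count (sample) */
	unsigned int irq = 0x00000000u;	/* irq context's preempt_count (sample)  */

	/* Old behaviour: merge only the softirq bits into the irq context. */
	unsigned int partial = (irq & ~SOFTIRQ_MASK) | (cur & SOFTIRQ_MASK);

	/* New behaviour: copy the whole preemption count. */
	unsigned int full = cur;

	printf("partial copy: 0x%08x\n", partial);	/* 0x00000a00 */
	printf("full copy:    0x%08x\n", full);		/* 0x00010a03 */
	return 0;
}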
Diffstat (limited to 'arch/x86/kernel/irq_32.c')
-rw-r--r--	arch/x86/kernel/irq_32.c	11
1 files changed, 3 insertions, 8 deletions
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 40fc86161d92..58b7f27cb3e9 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -100,13 +100,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	irqctx->tinfo.task = curctx->tinfo.task;
 	irqctx->tinfo.previous_esp = current_stack_pointer;
 
-	/*
-	 * Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context.
-	 */
-	irqctx->tinfo.preempt_count =
-		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
@@ -196,7 +191,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 	if (unlikely(!desc))
 		return false;
 
-	if (!execute_on_irq_stack(overflow, desc, irq)) {
+	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		desc->handle_irq(irq, desc);
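For context, a minimal self-contained C sketch of the resulting handle_irq() decision: an irq taken in user mode is handled on the current (nearly empty) kernel stack, everything else still tries the dedicated irq stack first. The struct pt_regs, handle_on_irq_stack() and handle_in_place() below are illustrative stand-ins, not the kernel's real user_mode_vm()/execute_on_irq_stack() API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins only: this models the post-patch decision in
 * handle_irq(), not the real x86-32 irq entry code. */
struct pt_regs {
	bool from_user_mode;	/* stand-in for user_mode_vm(regs) */
};

static bool handle_on_irq_stack(void)
{
	/* Pretend the switch to the per-CPU hard-irq stack succeeded. */
	printf("handled on the dedicated irq stack\n");
	return true;
}

static void handle_in_place(void)
{
	printf("handled on the current kernel stack\n");
}

/* Post-patch flow: an irq from user mode finds an almost empty kernel
 * stack, so it is handled in place, leaving the irq stack available to
 * a possible nested interrupt. */
static void handle_irq(struct pt_regs *regs)
{
	if (regs->from_user_mode || !handle_on_irq_stack())
		handle_in_place();
}

int main(void)
{
	struct pt_regs from_user = { .from_user_mode = true };
	struct pt_regs from_kernel = { .from_user_mode = false };

	handle_irq(&from_user);		/* stays on the task stack */
	handle_irq(&from_kernel);	/* switches to the irq stack */
	return 0;
}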