Diffstat (limited to 'arch/ia64/kernel/smp.c')
-rw-r--r--  arch/ia64/kernel/smp.c | 14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 2ea4199d9c5..5230eaafd83 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -225,6 +225,7 @@ smp_send_reschedule (int cpu)
 {
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 /*
  * Called with preemption disabled.
@@ -300,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
+	smp_call_function_mask(mm->cpu_vm_mask,
+		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	local_irq_disable();
+	local_finish_flush_tlb_mm(mm);
+	local_irq_enable();
 	preempt_enable();
-	/*
-	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
-	 * have been running in the address space. It's not clear that this is worth the
-	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
-	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
-	 * rather trivial.
-	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
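For context, a minimal sketch of how the patched flush path reads. Only the tail of the function (the mask-targeted IPI plus the local flush) is taken from the hunk above; the elided early-return fast path and the exact surrounding declarations are assumptions about the rest of arch/ia64/kernel/smp.c, not part of the visible diff.

/*
 * Sketch of smp_flush_tlb_mm() after this change.  Instead of the old
 * on_each_cpu() broadcast, only CPUs recorded in mm->cpu_vm_mask are
 * interrupted; the calling CPU then flushes its own context locally.
 */
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	preempt_disable();

	/* ... early single-user fast path elided (the "return;" context above) ... */

	/*
	 * Raise the flush IPI only on CPUs that have run in this address
	 * space.  The last argument (wait = 1) makes the caller block until
	 * every targeted CPU has completed local_finish_flush_tlb_mm().
	 */
	smp_call_function_mask(mm->cpu_vm_mask,
		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);

	/*
	 * smp_call_function_mask() does not run the function on the calling
	 * CPU, so flush the local context separately, with interrupts off to
	 * mirror the conditions the IPI handler runs under remotely.
	 */
	local_irq_disable();
	local_finish_flush_tlb_mm(mm);
	local_irq_enable();

	preempt_enable();
}

The design point of the patch is the same trade-off the deleted comment discussed: tracking mm->cpu_vm_mask lets the sender skip CPUs that never touched the address space, at the cost of having to perform the local flush explicitly on the sending CPU.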