Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--  kernel/irq_work.c  69
1 file changed, 39 insertions(+), 30 deletions(-)
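This is the PREEMPT_RT flavour of the irq_work split: the single per-CPU irq_work_list and its irq_work_raised flag are replaced by two per-CPU lists. Items queued without IRQ_WORK_LAZY go on raised_list and are run from the irq_work interrupt; lazy items go on lazy_list and are picked up by the new irq_work_tick() on the next timer tick (the interrupt is still raised if the tick is stopped). On PREEMPT_RT_FULL, anything not flagged IRQ_WORK_HARD_IRQ is likewise diverted to the lazy list and handed to TIMER_SOFTIRQ, so only callbacks explicitly marked hard-irq-safe run in hard interrupt context. A caller-side sketch follows the diff.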
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 55fcce6065cf..af8ceafc94e4 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -16,11 +16,12 @@
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
+#include <linux/interrupt.h>
#include <asm/processor.h>
-static DEFINE_PER_CPU(struct llist_head, irq_work_list);
-static DEFINE_PER_CPU(int, irq_work_raised);
+static DEFINE_PER_CPU(struct llist_head, raised_list);
+static DEFINE_PER_CPU(struct llist_head, lazy_list);
/*
* Claim the entry so that no one else will poke at it.
@@ -63,6 +64,9 @@ void __weak arch_irq_work_raise(void)
*/
void irq_work_queue(struct irq_work *work)
{
+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+ struct llist_head *list;
+
/* Only queue if not already pending */
if (!irq_work_claim(work))
return;
@@ -70,15 +74,16 @@ void irq_work_queue(struct irq_work *work)
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
- llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+ /* If the work is "lazy", handle it from next tick if any */
+ lazy_work = work->flags & IRQ_WORK_LAZY;
- /*
- * If the work is not "lazy" or the tick is stopped, raise the irq
- * work interrupt (if supported by the arch), otherwise, just wait
- * for the next tick.
- */
- if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
- if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+ list = this_cpu_ptr(&lazy_list);
+ else
+ list = this_cpu_ptr(&raised_list);
+
+ if (llist_add(&work->llnode, list)) {
+ if (!lazy_work || tick_nohz_tick_stopped())
arch_irq_work_raise();
}
@@ -88,10 +93,11 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
bool irq_work_needs_cpu(void)
{
- struct llist_head *this_list;
+ struct llist_head *raised, *lazy;
- this_list = &__get_cpu_var(irq_work_list);
- if (llist_empty(this_list))
+ raised = &__get_cpu_var(raised_list);
+ lazy = &__get_cpu_var(lazy_list);
+ if (llist_empty(raised) && llist_empty(lazy))
return false;
/* All work should have been flushed before going offline */
@@ -100,28 +106,18 @@ bool irq_work_needs_cpu(void)
return true;
}
-static void __irq_work_run(void)
+static void irq_work_run_list(struct llist_head *list)
{
unsigned long flags;
struct irq_work *work;
- struct llist_head *this_list;
struct llist_node *llnode;
+ BUG_ON_NONRT(!irqs_disabled());
- /*
- * Reset the "raised" state right before we check the list because
- * an NMI may enqueue after we find the list empty from the runner.
- */
- __this_cpu_write(irq_work_raised, 0);
- barrier();
-
- this_list = &__get_cpu_var(irq_work_list);
- if (llist_empty(this_list))
+ if (llist_empty(list))
return;
- BUG_ON(!irqs_disabled());
-
- llnode = llist_del_all(this_list);
+ llnode = llist_del_all(list);
while (llnode != NULL) {
work = llist_entry(llnode, struct irq_work, llnode);
@@ -152,11 +148,24 @@ static void __irq_work_run(void)
*/
void irq_work_run(void)
{
- BUG_ON(!in_irq());
- __irq_work_run();
+ irq_work_run_list(this_cpu_ptr(&raised_list));
+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
+ if (!llist_empty(this_cpu_ptr(&lazy_list)))
+ raise_softirq(TIMER_SOFTIRQ);
+ } else
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);
+void irq_work_tick(void)
+{
+ struct llist_head *raised = this_cpu_ptr(&raised_list);
+
+ if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
+ irq_work_run_list(raised);
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
/*
* Synchronize against the irq_work @entry, ensures the entry is not
* currently in use.
@@ -181,7 +190,7 @@ static int irq_work_cpu_notify(struct notifier_block *self,
/* Called from stop_machine */
if (WARN_ON_ONCE(cpu != smp_processor_id()))
break;
- __irq_work_run();
+ irq_work_tick();
break;
default:
break;
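For illustration, a minimal caller-side sketch of the API as it behaves after this change. The demo_* names are hypothetical; irq_work_queue(), struct irq_work and IRQ_WORK_LAZY are the stock kernel API, while IRQ_WORK_HARD_IRQ is the flag the RT series adds in include/linux/irq_work.h (not part of this file's diff).

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Hypothetical callback; runs with interrupts disabled, either from the
 * irq_work interrupt (raised_list) or from the tick / TIMER_SOFTIRQ
 * (lazy_list), depending on how the queued item is flagged. */
static void demo_irq_work_func(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

/* Lazy item: lands on lazy_list and is coalesced into the next tick;
 * the irq_work interrupt is only raised if the tick is stopped. */
static struct irq_work demo_lazy_work = {
	.func  = demo_irq_work_func,
	.flags = IRQ_WORK_LAZY,
};

/* Hard-irq item: on PREEMPT_RT_FULL this is the only kind that stays on
 * raised_list and runs from hard interrupt context; everything else is
 * deferred to the lazy list. */
static struct irq_work demo_hard_work = {
	.func  = demo_irq_work_func,
	.flags = IRQ_WORK_HARD_IRQ,
};

static void demo_queue_both(void)
{
	irq_work_queue(&demo_lazy_work);
	irq_work_queue(&demo_hard_work);
}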