Diffstat (limited to 'kernel/rcutiny.c')
-rw-r--r--  kernel/rcutiny.c  149
1 file changed, 130 insertions(+), 19 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 636af6d9c6e5..977296dca0a4 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,31 +53,137 @@ static void __call_rcu(struct rcu_head *head,
#include "rcutiny_plugin.h"
-#ifdef CONFIG_NO_HZ
+static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
-static long rcu_dynticks_nesting = 1;
+/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
+static void rcu_idle_enter_common(long long oldval)
+{
+ if (rcu_dynticks_nesting) {
+ RCU_TRACE(trace_rcu_dyntick("--=",
+ oldval, rcu_dynticks_nesting));
+ return;
+ }
+ RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+ oldval, rcu_dynticks_nesting));
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+ rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+}
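
A note for readers: is_idle_task() is new in this same patch series. A minimal sketch of it, assuming the definition include/linux/sched.h carried in this era (the per-CPU idle tasks are the only tasks with PID 0):

	/* Sketch, not the authoritative definition. */
	static inline bool is_idle_task(const struct task_struct *p)
	{
		return p->pid == 0;	/* idle tasks all carry PID 0 */
	}

Once rcu_dynticks_nesting has dropped to zero, the only context that may legitimately be running is that idle task, which is why rcu_idle_enter_common() dumps the ftrace buffer and fires WARN_ONCE() for anything else.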
/*
- * Enter dynticks-idle mode, which is an extended quiescent state
- * if we have fully entered that mode (i.e., if the new value of
- * dynticks_nesting is zero).
+ * Enter idle, which is an extended quiescent state if we have fully
+ * entered that mode (i.e., if the new value of dynticks_nesting is zero).
*/
-void rcu_enter_nohz(void)
+void rcu_idle_enter(void)
{
- if (--rcu_dynticks_nesting == 0)
- rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ rcu_dynticks_nesting = 0;
+ rcu_idle_enter_common(oldval);
+ local_irq_restore(flags);
}
/*
- * Exit dynticks-idle mode, so that we are no longer in an extended
- * quiescent state.
+ * Exit an interrupt handler towards idle.
*/
-void rcu_exit_nohz(void)
+void rcu_irq_exit(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ rcu_dynticks_nesting--;
+ WARN_ON_ONCE(rcu_dynticks_nesting < 0);
+ rcu_idle_enter_common(oldval);
+ local_irq_restore(flags);
+}
+
+/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
+static void rcu_idle_exit_common(long long oldval)
{
+ if (oldval) {
+ RCU_TRACE(trace_rcu_dyntick("++=",
+ oldval, rcu_dynticks_nesting));
+ return;
+ }
+ RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+ oldval, rcu_dynticks_nesting));
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+}
+
+/*
+ * Exit idle, so that we are no longer in an extended quiescent state.
+ */
+void rcu_idle_exit(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ WARN_ON_ONCE(oldval != 0);
+ rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
+ rcu_idle_exit_common(oldval);
+ local_irq_restore(flags);
+}
+
+/*
+ * Enter an interrupt handler, moving away from idle.
+ */
+void rcu_irq_enter(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
rcu_dynticks_nesting++;
+ WARN_ON_ONCE(rcu_dynticks_nesting == 0);
+ rcu_idle_exit_common(oldval);
+ local_irq_restore(flags);
+}
+
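
Taken together, rcu_idle_enter(), rcu_irq_exit(), rcu_idle_exit() and rcu_irq_enter() implement a small state machine over rcu_dynticks_nesting. Below is a minimal user-space model of just the counter moves; the value used for DYNTICK_TASK_NESTING is a placeholder rather than the kernel's, the WARN_ON_ONCE() checks become asserts, and tracing plus quiescent-state reporting are reduced to a printf:

	/* Minimal user-space model of the rcu_dynticks_nesting state machine.
	 * Build: cc -o nesting nesting.c && ./nesting
	 */
	#include <assert.h>
	#include <stdio.h>

	/* Placeholder for the kernel constant; the real value is a large
	 * positive bias defined in rcupdate.h, not necessarily this one. */
	#define DYNTICK_TASK_NESTING (1LL << 61)

	static long long nesting = DYNTICK_TASK_NESTING;

	static void show(const char *ev)
	{
		printf("%-10s nesting=%lld%s\n", ev, nesting,
		       nesting == 0 ? "  <- extended quiescent state" : "");
	}

	static void idle_enter(void)
	{
		nesting = 0;			/* forced to zero, not decremented */
		show("idle_enter");
	}

	static void idle_exit(void)
	{
		assert(nesting == 0);		/* kernel: WARN_ON_ONCE(oldval != 0) */
		nesting = DYNTICK_TASK_NESTING;
		show("idle_exit");
	}

	static void irq_enter(void)
	{
		nesting++;
		assert(nesting != 0);		/* kernel: WARN_ON_ONCE(... == 0) */
		show("irq_enter");
	}

	static void irq_exit(void)
	{
		nesting--;
		assert(nesting >= 0);		/* kernel: WARN_ON_ONCE(... < 0) */
		show("irq_exit");
	}

	int main(void)
	{
		idle_enter();	/* task level -> idle                      */
		irq_enter();	/* interrupt from idle:  0 -> 1            */
		irq_enter();	/* nested interrupt:     1 -> 2            */
		irq_exit();	/* back to first level:  2 -> 1            */
		irq_exit();	/* interrupt returns:    1 -> 0, idle      */
		idle_exit();	/* idle loop schedules:  task level again  */
		return 0;
	}

Note the asymmetry the model preserves: idle entry forces the counter to zero rather than decrementing it, and task level sits at a large positive bias, so interrupt entry/exit arithmetic can only reach zero from inside the idle loop.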
+#ifdef CONFIG_PROVE_RCU
+
+/*
+ * Test whether RCU thinks that the current CPU is idle.
+ */
+int rcu_is_cpu_idle(void)
+{
+ return !rcu_dynticks_nesting;
}
+EXPORT_SYMBOL(rcu_is_cpu_idle);
+
+#endif /* #ifdef CONFIG_PROVE_RCU */
-#endif /* #ifdef CONFIG_NO_HZ */
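
Under CONFIG_PROVE_RCU the new predicate feeds lockdep-RCU. A hedged sketch of the kind of consumer this enables, modeled on the rcu_read_lock_held() checks added elsewhere in this patch series (a sketch, not a quote of that code):

	int rcu_read_lock_held(void)
	{
		if (!debug_lockdep_rcu_enabled())
			return 1;
		if (rcu_is_cpu_idle())
			return 0;	/* RCU ignores idle CPUs: read-side here is illegal */
		return lock_is_held(&rcu_lock_map);
	}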
+/*
+ * Test whether the current CPU was interrupted from idle. Nested
+ * interrupts don't count, we must be running at the first interrupt
+ * level.
+ */
+int rcu_is_cpu_rrupt_from_idle(void)
+{
+ return rcu_dynticks_nesting <= 0;
+}
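
The two predicates read the same counter but draw the line differently. Tabulated as code comments (values only; where the irq hooks run relative to a given check site determines which row applies there):

	/* rcu_dynticks_nesting       rcu_is_cpu_idle()   rcu_is_cpu_rrupt_from_idle()
	 * DYNTICK_TASK_NESTING      false               false    normal task level
	 * 0                         true                true     fully idle
	 * 1, 2, ...                 false               false    irq nesting above idle
	 */

A negative count would also satisfy the <= 0 test, but rcu_irq_exit() warns before an underrun can persist, so in practice the extra slack is defensive.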
/*
* Helper function for rcu_sched_qs() and rcu_bh_qs().
@@ -126,14 +232,13 @@ void rcu_bh_qs(int cpu)
/*
* Check to see if the scheduling-clock interrupt came from an extended
- * quiescent state, and, if so, tell RCU about it.
+ * quiescent state, and, if so, tell RCU about it. This function must
+ * be called from hardirq context. It is normally called from the
+ * scheduling-clock interrupt.
*/
void rcu_check_callbacks(int cpu, int user)
{
- if (user ||
- (idle_cpu(cpu) &&
- !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT)))
+ if (user || rcu_is_cpu_rrupt_from_idle())
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
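
The open-coded test deleted by this hunk approximated "scheduling-clock interrupt taken directly from the idle loop" using the preempt count. Annotated for reference (the clause meanings are standard kernel semantics; the grouping is from the removed lines):

	/*
	 * user                                 - tick interrupted user mode
	 * idle_cpu(cpu)                        - the CPU was running its idle task
	 * !in_softirq()                        - not interrupting softirq work
	 * hardirq_count() <= (1 << HARDIRQ_SHIFT)
	 *                                      - at most one hardirq level on the
	 *                                        preempt count, i.e. not a nested
	 *                                        interrupt
	 */

The replacement delegates to rcu_is_cpu_rrupt_from_idle(), which consults the rcu_dynticks_nesting counter maintained by the hooks above instead of inferring idleness from the preempt count.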
@@ -154,7 +259,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
/* If no RCU callbacks ready to invoke, just return. */
if (&rcp->rcucblist == rcp->donetail) {
RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
+ ACCESS_ONCE(rcp->rcucblist),
+ need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread()));
return;
}
@@ -183,7 +292,9 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
RCU_TRACE(cb_count++);
}
RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread()));
}
static void rcu_process_callbacks(struct softirq_action *unused)
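
One detail worth noting in the enlarged tracepoint call: ACCESS_ONCE() forces the compiler to emit exactly one load of rcp->rcucblist instead of caching or re-reading a location that can change concurrently. In kernels of this era it was defined in include/linux/compiler.h essentially as:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

The extra arguments (remaining callback-list head, need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()) let the rcu_batch_end trace event record the conditions under which callback invocation stopped, not just how many callbacks ran.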