author     Linus Torvalds <torvalds@linux-foundation.org>   2013-11-12 10:00:04 +0900
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-11-12 10:00:04 +0900
commit     70fdcb83db15c85a0495b07dc55d9347a4c2efd9
tree       5a7f073e87e14aacfc77a21a3bf37bc470143871 /include
parent     edae583a6d4d1ad2eb73981787790993fef1bbad
parent     0e95c69bde1a5bf22acd53b356fe10d7bec6e2be
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
 "The main RCU changes in this cycle are:

   - Idle entry/exit changes, to throttle callback execution and other
     refinements to speed up kbuild, primarily to address performance
     issues located by Tibor Billes.

   - Grace-period related changes, primarily to aid in debugging,
     inspired by an -rt debugging session.

   - Code reorganization moving RCU's source files into its own
     kernel/rcu/ directory.

   - RCU documentation updates.

   - Miscellaneous fixes.

  Note, the following commit:

    5c889690aa08 mm: Place preemption point in do_mlockall() loop

  is identical to the commit already in your tree via email:

    22356f447ceb mm: Place preemption point in do_mlockall() loop

  [ Your version of the changelog nicely demonstrates how kernel oops
    messages should be trimmed properly :-/ ]"

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
  rcu: Move RCU-related source code to kernel/rcu directory
  rcu: Fix occurrence of "the the" in checklist.txt
  kthread: Add pointer to vmstat-avoidance patch
  rcu: Update stall-warning documentation
  rcu: Consistent rcu_is_watching() naming
  rcu: Change EXPORT_SYMBOL() to EXPORT_SYMBOL_GPL()
  rcu: Is it safe to enter an RCU read-side critical section?
  rcu: Throttle invoke_rcu_core() invocations due to non-lazy callbacks
  rcu: Throttle rcu_try_advance_all_cbs() execution
  rcu: Remove redundant code from rcu_cleanup_after_idle()
  rcu: Fix CONFIG_RCU_NOCB_CPU_ALL panic on machines with sparse CPU mask
  rcu: Avoid sparse warnings in rcu_nocb_wake trace event
  rcu: Track rcu_nocb_kthread()'s sleeping and awakening
  rcu: Distinguish between NOCB and non-NOCB rcu_callback trace events
  rcu: Add tracing for rcuo no-CBs CPU wakeup handshake
  rcu: Add tracing of normal (non-NOCB) grace-period requests
  rcu: Add tracing to rcu_gp_kthread()
  rcu: Flag lockless access to ->gp_flags with ACCESS_ONCE()
  rcu: Prevent spurious-wakeup DoS attack on rcu_gp_kthread()
  rcu: Improve grace-period start logic
  ...
Diffstat (limited to 'include')
 -rw-r--r--  include/linux/rculist.h      23
 -rw-r--r--  include/linux/rcupdate.h     24
 -rw-r--r--  include/linux/rcutiny.h      17
 -rw-r--r--  include/linux/rcutree.h       2
 -rw-r--r--  include/trace/events/rcu.h   80
 5 files changed, 118 insertions, 28 deletions
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4106721c4e5e..45a0a9e81478 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -19,6 +19,21 @@
*/
/*
+ * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
+ * @list: list to be initialized
+ *
+ * You should instead use INIT_LIST_HEAD() for normal initialization and
+ * cleanup tasks, when readers have no access to the list being initialized.
+ * However, if the list being initialized is visible to readers, you
+ * need to keep the compiler from being too mischievous.
+ */
+static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
+{
+ ACCESS_ONCE(list->next) = list;
+ ACCESS_ONCE(list->prev) = list;
+}
+
+/*
* return the ->next pointer of a list_head in an rcu safe
* way, we must not access it directly
*/
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list,
if (list_empty(list))
return;
- /* "first" and "last" tracking list, so initialize it. */
+ /*
+ * "first" and "last" tracking list, so initialize it. RCU readers
+ * have access to this list, so we must use INIT_LIST_HEAD_RCU()
+ * instead of INIT_LIST_HEAD().
+ */
- INIT_LIST_HEAD(list);
+ INIT_LIST_HEAD_RCU(list);
/*
* At this point, the list body still points to the source list.
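
A minimal usage sketch, not part of this patch (the list name and updater function below are made up): the new helper is for reinitializing a list that RCU readers may still be traversing, where the ACCESS_ONCE() stores keep each pointer update whole rather than letting the compiler tear, fuse, or redo it.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

static LIST_HEAD(shared_list);		/* assumed to be visible to RCU readers */

static void reset_shared_list(void)	/* hypothetical updater */
{
	/* Concurrent readers see either the old contents or an empty list. */
	INIT_LIST_HEAD_RCU(&shared_list);

	/* Wait for pre-existing readers before reusing the old entries. */
	synchronize_rcu();
}
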
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc39346b..39cbb889e20d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
rcu_irq_exit(); \
} while (0)
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern bool __rcu_is_watching(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
if (!debug_lockdep_rcu_enabled())
return 1;
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
}
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map);
__release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
}
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
}
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
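
For context, a sketch (not from this series; the function and the data it reads are hypothetical) of how code that might run from the idle loop can use rcu_is_watching() directly, mirroring the reworked lockdep assertions above:

#include <linux/rcupdate.h>

static bool try_read_shared_state(void)	/* hypothetical caller */
{
	/* RCU is not watching this CPU (e.g. idle), so reading is unsafe. */
	if (!rcu_is_watching())
		return false;

	rcu_read_lock();
	/* ... rcu_dereference() the protected data here ... */
	rcu_read_unlock();
	return true;
}
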
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339e..09ebcbe9fd78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void)
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
+
+static inline bool rcu_is_watching(void)
+{
+ return __rcu_is_watching();
+}
+
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+ return true;
+}
+
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d1bd2b..4b9c81548742 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
+extern bool rcu_is_watching(void);
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index ee2376cfaab3..aca382266411 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -39,15 +39,26 @@ TRACE_EVENT(rcu_utilization,
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
/*
- * Tracepoint for grace-period events: starting and ending a grace
- * period ("start" and "end", respectively), a CPU noting the start
- * of a new grace period or the end of an old grace period ("cpustart"
- * and "cpuend", respectively), a CPU passing through a quiescent
- * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
- * and "cpuofl", respectively), a CPU being kicked for being too
- * long in dyntick-idle mode ("kick"), a CPU accelerating its new
- * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU
- * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").
+ * Tracepoint for grace-period events. Takes a string identifying the
+ * RCU flavor, the grace-period number, and a string identifying the
+ * grace-period-related event as follows:
+ *
+ * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
+ * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
+ * "newreq": Request a new grace period.
+ * "start": Start a grace period.
+ * "cpustart": CPU first notices a grace-period start.
+ * "cpuqs": CPU passes through a quiescent state.
+ * "cpuonl": CPU comes online.
+ * "cpuofl": CPU goes offline.
+ * "reqwait": GP kthread sleeps waiting for grace-period request.
+ * "reqwaitsig": GP kthread awakened by signal from reqwait state.
+ * "fqswait": GP kthread waiting until time to force quiescent states.
+ * "fqsstart": GP kthread starts forcing quiescent states.
+ * "fqsend": GP kthread done forcing quiescent states.
+ * "fqswaitsig": GP kthread awakened by signal from fqswait state.
+ * "end": End a grace period.
+ * "cpuend": CPU first notices a grace-period end.
*/
TRACE_EVENT(rcu_grace_period,
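
As a call-site sketch (the emitting function is hypothetical; the flavor name and event string are taken from the list above), the tracepoint takes the flavor name, the grace-period number, and one of the event strings:

#include <trace/events/rcu.h>

static void note_new_gp_request(unsigned long gpnum)	/* hypothetical */
{
	/* Flavor name, grace-period number, then the event string. */
	trace_rcu_grace_period("rcu_sched", gpnum, "newreq");
}
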
@@ -161,6 +172,46 @@ TRACE_EVENT(rcu_grace_period_init,
);
/*
+ * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
+ * to assist debugging of these handoffs.
+ *
+ * The first argument is the name of the RCU flavor, and the second is
+ * the number of the offloaded CPU.  The third and final argument is a
+ * string, as follows:
+ *
+ * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ * "WakeOvf": Wake rcuo kthread, CB list is huge.
+ * "WakeNot": Don't wake rcuo kthread.
+ * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "Poll": Start of new polling cycle for rcu_nocb_poll.
+ * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
+ * "WokeEmpty": rcuo kthread woke to find empty list.
+ * "WokeNonEmpty": rcuo kthread woke to find non-empty list.
+ * "WaitQueue": Enqueue partially done, timed wait for it to complete.
+ * "WokeQueue": Partial enqueue now complete.
+ */
+TRACE_EVENT(rcu_nocb_wake,
+
+ TP_PROTO(const char *rcuname, int cpu, const char *reason),
+
+ TP_ARGS(rcuname, cpu, reason),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(int, cpu)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->cpu = cpu;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
+);
+
+/*
* Tracepoint for tasks blocking within preemptible-RCU read-side
* critical sections. Track the type of RCU (which one day might
* include SRCU), the grace-period number that the task is blocking
@@ -540,17 +591,17 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
TRACE_EVENT(rcu_batch_end,
TP_PROTO(const char *rcuname, int callbacks_invoked,
- bool cb, bool nr, bool iit, bool risk),
+ char cb, char nr, char iit, char risk),
TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(int, callbacks_invoked)
- __field(bool, cb)
- __field(bool, nr)
- __field(bool, iit)
- __field(bool, risk)
+ __field(char, cb)
+ __field(char, nr)
+ __field(char, iit)
+ __field(char, risk)
),
TP_fast_assign(
@@ -656,6 +707,7 @@ TRACE_EVENT(rcu_barrier,
#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
level, grplo, grphi, event) \
do { } while (0)
+#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
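
Lastly, a sketch of a call site for the new rcu_nocb_wake event (the wrapper below is hypothetical; the prototype and reason strings are those shown earlier, and the do { } while (0) stub above keeps such calls compiling when tracing is configured out):

#include <trace/events/rcu.h>

static void report_nocb_wake(const char *rcuname, int cpu)	/* hypothetical */
{
	/* First callback landed on an empty list, so the rcuo kthread was woken. */
	trace_rcu_nocb_wake(rcuname, cpu, "WakeEmpty");
}
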