Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001/*
2 * Read-Copy Update mechanism for mutual exclusion
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
23 *
24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26 *
27 * For detailed explanation of Read-Copy Update mechanism see -
Paul E. McKenneya71fca52009-09-18 10:28:19 -070028 * Documentation/RCU
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010029 */
30#include <linux/types.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp.h>
35#include <linux/rcupdate.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
Ingo Molnarc1dc0b92009-08-02 11:28:21 +020038#include <linux/nmi.h>
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010039#include <asm/atomic.h>
40#include <linux/bitops.h>
41#include <linux/module.h>
42#include <linux/completion.h>
43#include <linux/moduleparam.h>
44#include <linux/percpu.h>
45#include <linux/notifier.h>
46#include <linux/cpu.h>
47#include <linux/mutex.h>
48#include <linux/time.h>
Paul E. McKenneybbad9372010-04-02 16:17:17 -070049#include <linux/kernel_stat.h>
Anson Huangceb8fa12011-09-27 18:20:19 +080050#include <linux/tick.h>
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010051
Paul E. McKenney9f77da92009-08-22 13:56:45 -070052#include "rcutree.h"
53
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010054/* Data structures. */
55
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -080056static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
Peter Zijlstra88b91c72009-10-26 10:24:31 -070057
Paul E. McKenney4300aa62010-04-13 16:18:22 -070058#define RCU_STATE_INITIALIZER(structname) { \
59 .level = { &structname.node[0] }, \
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010060 .levelcnt = { \
61 NUM_RCU_LVL_0, /* root of hierarchy. */ \
62 NUM_RCU_LVL_1, \
63 NUM_RCU_LVL_2, \
Paul E. McKenneycf244dc2009-12-02 12:10:14 -080064 NUM_RCU_LVL_3, \
65 NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010066 }, \
Paul E. McKenney83f5b012009-10-28 08:14:49 -070067 .signaled = RCU_GP_IDLE, \
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010068 .gpnum = -300, \
69 .completed = -300, \
Paul E. McKenney4300aa62010-04-13 16:18:22 -070070 .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
Paul E. McKenney4300aa62010-04-13 16:18:22 -070071 .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010072 .n_force_qs = 0, \
73 .n_force_qs_ngp = 0, \
Paul E. McKenney4300aa62010-04-13 16:18:22 -070074 .name = #structname, \
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010075}
76
Paul E. McKenneyd6714c22009-08-22 13:56:46 -070077struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
78DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +010079
Ingo Molnar6258c4f2009-03-25 16:42:24 +010080struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
81DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
Ingo Molnarb1f77b02009-03-13 03:20:49 +010082
Paul E. McKenneybbad9372010-04-02 16:17:17 -070083int rcu_scheduler_active __read_mostly;
84EXPORT_SYMBOL_GPL(rcu_scheduler_active);
85
Ingo Molnarb1f77b02009-03-13 03:20:49 +010086/*
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -070087 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
88 * permit this function to be invoked without holding the root rcu_node
89 * structure's ->lock, but of course results can be subject to change.
90 */
91static int rcu_gp_in_progress(struct rcu_state *rsp)
92{
93 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
94}
95
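/*
 * Editor's worked example (illustration only, not part of rcutree.c):
 * ->gpnum is advanced when a grace period starts and ->completed catches
 * up only when that grace period ends, so inequality means "in progress".
 * Starting from the -300/-300 initializers above:
 *
 *      gpnum == -300, completed == -300  =>  idle, returns 0
 *      gpnum == -299, completed == -300  =>  grace period in flight, returns 1
 *      gpnum == -299, completed == -299  =>  idle again, returns 0
 */
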
96/*
Paul E. McKenneyd6714c22009-08-22 13:56:46 -070097 * Note a quiescent state. Because we do not need to know
Ingo Molnarb1f77b02009-03-13 03:20:49 +010098 * how many quiescent states passed, just if there was at least
Paul E. McKenneyd6714c22009-08-22 13:56:46 -070099 * one since the start of the grace period, this just sets a flag.
Ingo Molnarb1f77b02009-03-13 03:20:49 +0100100 */
Paul E. McKenneyd6714c22009-08-22 13:56:46 -0700101void rcu_sched_qs(int cpu)
Ingo Molnarb1f77b02009-03-13 03:20:49 +0100102{
Paul E. McKenney25502a62010-04-01 17:37:01 -0700103 struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700104
Paul E. McKenneyc64ac3c2009-11-10 13:37:22 -0800105 rdp->passed_quiesc_completed = rdp->gpnum - 1;
Paul E. McKenneyc3422be2009-09-13 09:15:10 -0700106 barrier();
107 rdp->passed_quiesc = 1;
Ingo Molnarb1f77b02009-03-13 03:20:49 +0100108}
109
Paul E. McKenneyd6714c22009-08-22 13:56:46 -0700110void rcu_bh_qs(int cpu)
Ingo Molnarb1f77b02009-03-13 03:20:49 +0100111{
Paul E. McKenney25502a62010-04-01 17:37:01 -0700112 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700113
Paul E. McKenneyc64ac3c2009-11-10 13:37:22 -0800114 rdp->passed_quiesc_completed = rdp->gpnum - 1;
Paul E. McKenneyc3422be2009-09-13 09:15:10 -0700115 barrier();
116 rdp->passed_quiesc = 1;
Ingo Molnarb1f77b02009-03-13 03:20:49 +0100117}
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100118
Paul E. McKenney25502a62010-04-01 17:37:01 -0700119/*
120 * Note a context switch. This is a quiescent state for RCU-sched,
121 * and requires special handling for preemptible RCU.
122 */
123void rcu_note_context_switch(int cpu)
124{
125 rcu_sched_qs(cpu);
126 rcu_preempt_note_context_switch(cpu);
127}
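
/*
 * Editor's sketch (illustration only, not part of rcutree.c): a context
 * switch is a quiescent state for RCU-sched because RCU-sched readers run
 * with preemption disabled and therefore cannot span one.  A hypothetical
 * reader showing that contract, using only long-standing RCU primitives
 * ("struct foo" and example_gp are made-up names for the illustration):
 */
#if 0   /* illustration only */
struct foo {                            /* made-up structure */
        int val;
};

static struct foo *example_gp;          /* made-up RCU-protected pointer */

static void example_sched_reader(void)
{
        struct foo *p;

        rcu_read_lock_sched();                  /* disables preemption */
        p = rcu_dereference_sched(example_gp);  /* fetch RCU-protected pointer */
        if (p != NULL)
                pr_info("val=%d\n", p->val);    /* no context switch can occur here */
        rcu_read_unlock_sched();                /* context switches legal again */
}
#endif  /* illustration only */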
128
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100129#ifdef CONFIG_NO_HZ
Paul E. McKenney90a4d2c2009-01-04 11:41:11 -0800130DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
131 .dynticks_nesting = 1,
132 .dynticks = 1,
133};
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100134#endif /* #ifdef CONFIG_NO_HZ */
135
136static int blimit = 10; /* Maximum callbacks per softirq. */
137static int qhimark = 10000; /* If this many pending, ignore blimit. */
138static int qlowmark = 100; /* Once only this many pending, use blimit. */
139
Paul E. McKenney3d76c082009-09-28 07:46:32 -0700140module_param(blimit, int, 0);
141module_param(qhimark, int, 0);
142module_param(qlowmark, int, 0);
143
Paul E. McKenney742734e2010-06-30 11:43:52 -0700144#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
Paul E. McKenney910b1b72010-07-21 08:05:56 -0700145int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
Paul E. McKenneyf2e0dd72010-07-14 14:38:30 -0700146module_param(rcu_cpu_stall_suppress, int, 0644);
Paul E. McKenney742734e2010-06-30 11:43:52 -0700147#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
148
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100149static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
Paul E. McKenneya1572292009-08-22 13:56:51 -0700150static int rcu_pending(int cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100151
152/*
Paul E. McKenneyd6714c22009-08-22 13:56:46 -0700153 * Return the number of RCU-sched batches processed thus far for debug & stats.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100154 */
Paul E. McKenneyd6714c22009-08-22 13:56:46 -0700155long rcu_batches_completed_sched(void)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100156{
Paul E. McKenneyd6714c22009-08-22 13:56:46 -0700157 return rcu_sched_state.completed;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100158}
Paul E. McKenneyd6714c22009-08-22 13:56:46 -0700159EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100160
161/*
162 * Return the number of RCU BH batches processed thus far for debug & stats.
163 */
164long rcu_batches_completed_bh(void)
165{
166 return rcu_bh_state.completed;
167}
168EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
169
170/*
Paul E. McKenneybf66f182010-01-04 15:09:10 -0800171 * Force a quiescent state for RCU BH.
172 */
173void rcu_bh_force_quiescent_state(void)
174{
175 force_quiescent_state(&rcu_bh_state, 0);
176}
177EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
178
179/*
180 * Force a quiescent state for RCU-sched.
181 */
182void rcu_sched_force_quiescent_state(void)
183{
184 force_quiescent_state(&rcu_sched_state, 0);
185}
186EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
187
188/*
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100189 * Does the CPU have callbacks ready to be invoked?
190 */
191static int
192cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
193{
194 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
195}
196
197/*
 198 * Does the current CPU require an as-yet-unscheduled grace period?
199 */
200static int
201cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
202{
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700203 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100204}
205
206/*
207 * Return the root node of the specified rcu_state structure.
208 */
209static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
210{
211 return &rsp->node[0];
212}
213
214#ifdef CONFIG_SMP
215
216/*
217 * If the specified CPU is offline, tell the caller that it is in
218 * a quiescent state. Otherwise, whack it with a reschedule IPI.
219 * Grace periods can end up waiting on an offline CPU when that
220 * CPU is in the process of coming online -- it will be added to the
221 * rcu_node bitmasks before it actually makes it online. The same thing
 222 * can happen while a CPU is in the process of going offline. Because this
223 * race is quite rare, we check for it after detecting that the grace
224 * period has been delayed rather than checking each and every CPU
225 * each and every time we start a new grace period.
226 */
227static int rcu_implicit_offline_qs(struct rcu_data *rdp)
228{
229 /*
230 * If the CPU is offline, it is in a quiescent state. We can
231 * trust its state not to change because interrupts are disabled.
232 */
233 if (cpu_is_offline(rdp->cpu)) {
234 rdp->offline_fqs++;
235 return 1;
236 }
237
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700238 /* If preemptible RCU, no point in sending reschedule IPI. */
239 if (rdp->preemptable)
240 return 0;
241
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100242 /* The CPU is online, so send it a reschedule IPI. */
243 if (rdp->cpu != smp_processor_id())
244 smp_send_reschedule(rdp->cpu);
245 else
246 set_need_resched();
247 rdp->resched_ipi++;
248 return 0;
249}
250
251#endif /* #ifdef CONFIG_SMP */
252
253#ifdef CONFIG_NO_HZ
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100254
255/**
256 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
257 *
258 * Enter nohz mode, in other words, -leave- the mode in which RCU
259 * read-side critical sections can occur. (Though RCU read-side
260 * critical sections can occur in irq handlers in nohz mode, a possibility
261 * handled by rcu_irq_enter() and rcu_irq_exit()).
262 */
263void rcu_enter_nohz(void)
264{
265 unsigned long flags;
266 struct rcu_dynticks *rdtp;
267
268 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
269 local_irq_save(flags);
270 rdtp = &__get_cpu_var(rcu_dynticks);
271 rdtp->dynticks++;
272 rdtp->dynticks_nesting--;
Paul E. McKenney86848962009-08-27 15:00:12 -0700273 WARN_ON_ONCE(rdtp->dynticks & 0x1);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100274 local_irq_restore(flags);
275}
276
277/*
278 * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
279 *
280 * Exit nohz mode, in other words, -enter- the mode in which RCU
281 * read-side critical sections normally occur.
282 */
283void rcu_exit_nohz(void)
284{
285 unsigned long flags;
286 struct rcu_dynticks *rdtp;
287
288 local_irq_save(flags);
289 rdtp = &__get_cpu_var(rcu_dynticks);
290 rdtp->dynticks++;
291 rdtp->dynticks_nesting++;
Paul E. McKenney86848962009-08-27 15:00:12 -0700292 WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100293 local_irq_restore(flags);
294 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
295}
296
297/**
298 * rcu_nmi_enter - inform RCU of entry to NMI context
299 *
300 * If the CPU was idle with dynamic ticks active, and there is no
301 * irq handler running, this updates rdtp->dynticks_nmi to let the
302 * RCU grace-period handling know that the CPU is active.
303 */
304void rcu_nmi_enter(void)
305{
306 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
307
308 if (rdtp->dynticks & 0x1)
309 return;
310 rdtp->dynticks_nmi++;
Paul E. McKenney86848962009-08-27 15:00:12 -0700311 WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100312 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
313}
314
315/**
316 * rcu_nmi_exit - inform RCU of exit from NMI context
317 *
318 * If the CPU was idle with dynamic ticks active, and there is no
319 * irq handler running, this updates rdtp->dynticks_nmi to let the
320 * RCU grace-period handling know that the CPU is no longer active.
321 */
322void rcu_nmi_exit(void)
323{
324 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
325
326 if (rdtp->dynticks & 0x1)
327 return;
328 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
329 rdtp->dynticks_nmi++;
Paul E. McKenney86848962009-08-27 15:00:12 -0700330 WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100331}
332
333/**
334 * rcu_irq_enter - inform RCU of entry to hard irq context
335 *
336 * If the CPU was idle with dynamic ticks active, this updates the
337 * rdtp->dynticks to let the RCU handling know that the CPU is active.
338 */
339void rcu_irq_enter(void)
340{
341 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
342
343 if (rdtp->dynticks_nesting++)
344 return;
345 rdtp->dynticks++;
Paul E. McKenney86848962009-08-27 15:00:12 -0700346 WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100347 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
348}
349
350/**
351 * rcu_irq_exit - inform RCU of exit from hard irq context
352 *
 353 * If the CPU was idle with dynamic ticks active, update rdtp->dynticks
 354 * to let the RCU handling know that the CPU is going back to idle
355 * with no ticks.
356 */
357void rcu_irq_exit(void)
358{
359 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
360
361 if (--rdtp->dynticks_nesting)
362 return;
363 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
364 rdtp->dynticks++;
Paul E. McKenney86848962009-08-27 15:00:12 -0700365 WARN_ON_ONCE(rdtp->dynticks & 0x1);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100366
367 /* If the interrupt queued a callback, get out of dyntick mode. */
Christoph Lameter909ea962010-12-08 16:22:55 +0100368 if (__this_cpu_read(rcu_sched_data.nxtlist) ||
369 __this_cpu_read(rcu_bh_data.nxtlist))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100370 set_need_resched();
371}
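
/*
 * Editor's worked example (illustration only, not part of rcutree.c):
 * only the outermost irq transitions touch ->dynticks.  Starting from
 * dyntick-idle (dynticks_nesting == 0, ->dynticks even):
 *
 *      rcu_irq_enter()  nesting 0 -> 1, ->dynticks++ (odd: CPU non-idle)
 *      rcu_irq_enter()  nesting 1 -> 2, ->dynticks unchanged (nested irq)
 *      rcu_irq_exit()   nesting 2 -> 1, ->dynticks unchanged
 *      rcu_irq_exit()   nesting 1 -> 0, ->dynticks++ (even: idle again)
 */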
372
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100373#ifdef CONFIG_SMP
374
375/*
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100376 * Snapshot the specified CPU's dynticks counter so that we can later
377 * credit them with an implicit quiescent state. Return 1 if this CPU
Paul E. McKenney1eba8f82009-09-23 09:50:42 -0700378 * is in dynticks idle mode, which is an extended quiescent state.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100379 */
380static int dyntick_save_progress_counter(struct rcu_data *rdp)
381{
382 int ret;
383 int snap;
384 int snap_nmi;
385
386 snap = rdp->dynticks->dynticks;
387 snap_nmi = rdp->dynticks->dynticks_nmi;
388 smp_mb(); /* Order sampling of snap with end of grace period. */
389 rdp->dynticks_snap = snap;
390 rdp->dynticks_nmi_snap = snap_nmi;
391 ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
392 if (ret)
393 rdp->dynticks_fqs++;
394 return ret;
395}
396
397/*
398 * Return true if the specified CPU has passed through a quiescent
 399 * state by virtue of being in or having passed through a dynticks
400 * idle state since the last call to dyntick_save_progress_counter()
401 * for this same CPU.
402 */
403static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
404{
405 long curr;
406 long curr_nmi;
407 long snap;
408 long snap_nmi;
409
410 curr = rdp->dynticks->dynticks;
411 snap = rdp->dynticks_snap;
412 curr_nmi = rdp->dynticks->dynticks_nmi;
413 snap_nmi = rdp->dynticks_nmi_snap;
414 smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
415
416 /*
417 * If the CPU passed through or entered a dynticks idle phase with
418 * no active irq/NMI handlers, then we can safely pretend that the CPU
419 * already acknowledged the request to pass through a quiescent
420 * state. Either way, that CPU cannot possibly be in an RCU
421 * read-side critical section that started before the beginning
422 * of the current RCU grace period.
423 */
424 if ((curr != snap || (curr & 0x1) == 0) &&
425 (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
426 rdp->dynticks_fqs++;
427 return 1;
428 }
429
430 /* Go check for the CPU being offline. */
431 return rcu_implicit_offline_qs(rdp);
432}
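
/*
 * Editor's worked example (illustration only, not part of rcutree.c):
 * because ->dynticks is incremented on every transition into and out of
 * dynticks-idle, an even value means "idle" and an odd value means
 * "non-idle".  If the snapshot taken by dyntick_save_progress_counter()
 * was even, the CPU was already idle at snapshot time; if the current
 * value merely differs from the snapshot, the CPU has passed through
 * idle (or an irq/NMI handler) since then.  Either case is an extended
 * quiescent state.  Only curr == snap with snap odd forces the fallback
 * to the CPU-offline check above.
 */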
433
434#endif /* #ifdef CONFIG_SMP */
435
436#else /* #ifdef CONFIG_NO_HZ */
437
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100438#ifdef CONFIG_SMP
439
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100440static int dyntick_save_progress_counter(struct rcu_data *rdp)
441{
442 return 0;
443}
444
445static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
446{
447 return rcu_implicit_offline_qs(rdp);
448}
449
450#endif /* #ifdef CONFIG_SMP */
451
452#endif /* #else #ifdef CONFIG_NO_HZ */
453
454#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
455
Paul E. McKenney742734e2010-06-30 11:43:52 -0700456int rcu_cpu_stall_suppress __read_mostly;
Paul E. McKenneyc68de202010-04-15 10:12:40 -0700457
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100458static void record_gp_stall_check_time(struct rcu_state *rsp)
459{
460 rsp->gp_start = jiffies;
461 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
462}
463
464static void print_other_cpu_stall(struct rcu_state *rsp)
465{
466 int cpu;
467 long delta;
468 unsigned long flags;
469 struct rcu_node *rnp = rcu_get_root(rsp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100470
471 /* Only let one CPU complain about others per time interval. */
472
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800473 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100474 delta = jiffies - rsp->jiffies_stall;
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700475 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800476 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100477 return;
478 }
479 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -0700480
481 /*
482 * Now rat on any tasks that got kicked up to the root rcu_node
483 * due to CPU offlining.
484 */
485 rcu_print_task_stall(rnp);
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800486 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100487
Paul E. McKenney8cdd32a2010-08-09 14:23:03 -0700488 /*
489 * OK, time to rat on our buddy...
490 * See Documentation/RCU/stallwarn.txt for info on how to debug
491 * RCU CPU stall warnings.
492 */
Paul E. McKenney4300aa62010-04-13 16:18:22 -0700493 printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
494 rsp->name);
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -0700495 rcu_for_each_leaf_node(rsp, rnp) {
Paul E. McKenney3acd9eb2010-02-22 17:05:03 -0800496 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700497 rcu_print_task_stall(rnp);
Paul E. McKenney3acd9eb2010-02-22 17:05:03 -0800498 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -0700499 if (rnp->qsmask == 0)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100500 continue;
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -0700501 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
502 if (rnp->qsmask & (1UL << cpu))
503 printk(" %d", rnp->grplo + cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100504 }
Paul E. McKenney4300aa62010-04-13 16:18:22 -0700505 printk("} (detected by %d, t=%ld jiffies)\n",
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100506 smp_processor_id(), (long)(jiffies - rsp->gp_start));
Ingo Molnarc1dc0b92009-08-02 11:28:21 +0200507 trigger_all_cpu_backtrace();
508
Paul E. McKenney1ed509a2010-02-22 17:05:05 -0800509 /* If so configured, complain about tasks blocking the grace period. */
510
511 rcu_print_detail_task_stall(rsp);
512
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100513 force_quiescent_state(rsp, 0); /* Kick them all. */
514}
515
516static void print_cpu_stall(struct rcu_state *rsp)
517{
518 unsigned long flags;
519 struct rcu_node *rnp = rcu_get_root(rsp);
520
Paul E. McKenney8cdd32a2010-08-09 14:23:03 -0700521 /*
522 * OK, time to rat on ourselves...
523 * See Documentation/RCU/stallwarn.txt for info on how to debug
524 * RCU CPU stall warnings.
525 */
Paul E. McKenney4300aa62010-04-13 16:18:22 -0700526 printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
527 rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
Ingo Molnarc1dc0b92009-08-02 11:28:21 +0200528 trigger_all_cpu_backtrace();
529
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800530 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenney20133cf2010-02-22 17:05:01 -0800531 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100532 rsp->jiffies_stall =
533 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800534 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Ingo Molnarc1dc0b92009-08-02 11:28:21 +0200535
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100536 set_need_resched(); /* kick ourselves to get things going. */
537}
538
539static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
540{
541 long delta;
542 struct rcu_node *rnp;
543
Paul E. McKenney742734e2010-06-30 11:43:52 -0700544 if (rcu_cpu_stall_suppress)
Paul E. McKenneyc68de202010-04-15 10:12:40 -0700545 return;
Dongdong Deng4ee0a602010-09-28 16:32:43 +0800546 delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100547 rnp = rdp->mynode;
Dongdong Deng4ee0a602010-09-28 16:32:43 +0800548 if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100549
550 /* We haven't checked in, so go dump stack. */
551 print_cpu_stall(rsp);
552
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700553 } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100554
555 /* They had two time units to dump stack, so complain. */
556 print_other_cpu_stall(rsp);
557 }
558}
559
Paul E. McKenneyc68de202010-04-15 10:12:40 -0700560static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
561{
Paul E. McKenney742734e2010-06-30 11:43:52 -0700562 rcu_cpu_stall_suppress = 1;
Paul E. McKenneyc68de202010-04-15 10:12:40 -0700563 return NOTIFY_DONE;
564}
565
Paul E. McKenney53d84e02010-08-10 14:28:53 -0700566/**
567 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
568 *
569 * Set the stall-warning timeout way off into the future, thus preventing
570 * any RCU CPU stall-warning messages from appearing in the current set of
571 * RCU grace periods.
572 *
573 * The caller must disable hard irqs.
574 */
575void rcu_cpu_stall_reset(void)
576{
577 rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
578 rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
579 rcu_preempt_stall_reset();
580}
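
/*
 * Editor's note (illustration only, not part of rcutree.c): the
 * "jiffies + ULONG_MAX / 2" setting is the farthest-future timeout
 * expressible under the wraparound-safe comparison used by the stall
 * detector: ULONG_CMP_GE(a, b) treats "a" as at-or-after "b" only when
 * the unsigned difference a - b is at most ULONG_MAX / 2.  Immediately
 * after the assignments above, jiffies - jiffies_stall is roughly
 * ULONG_MAX / 2 + 1, so the stall-check comparisons against
 * ->jiffies_stall stay quiet for about half the counter space, which
 * comfortably outlives the current grace period.
 */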
581
Paul E. McKenneyc68de202010-04-15 10:12:40 -0700582static struct notifier_block rcu_panic_block = {
583 .notifier_call = rcu_panic,
584};
585
586static void __init check_cpu_stall_init(void)
587{
588 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
589}
590
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100591#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
592
593static void record_gp_stall_check_time(struct rcu_state *rsp)
594{
595}
596
597static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
598{
599}
600
Paul E. McKenney53d84e02010-08-10 14:28:53 -0700601void rcu_cpu_stall_reset(void)
602{
603}
604
Paul E. McKenneyc68de202010-04-15 10:12:40 -0700605static void __init check_cpu_stall_init(void)
606{
607}
608
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100609#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
610
611/*
612 * Update CPU-local rcu_data state to record the newly noticed grace period.
613 * This is used both when we started the grace period and when we notice
Paul E. McKenney91603062009-11-02 13:52:29 -0800614 * that someone else started the grace period. The caller must hold the
615 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
616 * and must have irqs disabled.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100617 */
Paul E. McKenney91603062009-11-02 13:52:29 -0800618static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
619{
620 if (rdp->gpnum != rnp->gpnum) {
Paul E. McKenney121dfc42010-12-10 15:02:47 -0800621 /*
622 * If the current grace period is waiting for this CPU,
623 * set up to detect a quiescent state, otherwise don't
624 * go looking for one.
625 */
Paul E. McKenney91603062009-11-02 13:52:29 -0800626 rdp->gpnum = rnp->gpnum;
Paul E. McKenney121dfc42010-12-10 15:02:47 -0800627 if (rnp->qsmask & rdp->grpmask) {
628 rdp->qs_pending = 1;
629 rdp->passed_quiesc = 0;
630 } else
631 rdp->qs_pending = 0;
Paul E. McKenney91603062009-11-02 13:52:29 -0800632 }
633}
634
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100635static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
636{
Paul E. McKenney91603062009-11-02 13:52:29 -0800637 unsigned long flags;
638 struct rcu_node *rnp;
639
640 local_irq_save(flags);
641 rnp = rdp->mynode;
642 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800643 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
Paul E. McKenney91603062009-11-02 13:52:29 -0800644 local_irq_restore(flags);
645 return;
646 }
647 __note_new_gpnum(rsp, rnp, rdp);
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800648 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100649}
650
651/*
 652 * Did someone else start a new RCU grace period since we last
653 * checked? Update local state appropriately if so. Must be called
654 * on the CPU corresponding to rdp.
655 */
656static int
657check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
658{
659 unsigned long flags;
660 int ret = 0;
661
662 local_irq_save(flags);
663 if (rdp->gpnum != rsp->gpnum) {
664 note_new_gpnum(rsp, rdp);
665 ret = 1;
666 }
667 local_irq_restore(flags);
668 return ret;
669}
670
671/*
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800672 * Advance this CPU's callbacks, but only if the current grace period
673 * has ended. This may be called only from the CPU to whom the rdp
674 * belongs. In addition, the corresponding leaf rcu_node structure's
675 * ->lock must be held by the caller, with irqs disabled.
676 */
677static void
678__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
679{
680 /* Did another grace period end? */
681 if (rdp->completed != rnp->completed) {
682
683 /* Advance callbacks. No harm if list empty. */
684 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
685 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
686 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
687
688 /* Remember that we saw this grace-period completion. */
689 rdp->completed = rnp->completed;
Frederic Weisbecker20377f32010-12-10 22:11:10 +0100690
691 /*
Frederic Weisbecker5ff8e6f2010-12-10 22:11:11 +0100692 * If we were in an extended quiescent state, we may have
Paul E. McKenney121dfc42010-12-10 15:02:47 -0800693 * missed some grace periods that other CPUs handled on
Frederic Weisbecker5ff8e6f2010-12-10 22:11:11 +0100694 * our behalf. Catch up with this state to avoid noting
Paul E. McKenney121dfc42010-12-10 15:02:47 -0800695 * spurious new grace periods. If another grace period
696 * has started, then rnp->gpnum will have advanced, so
697 * we will detect this later on.
Frederic Weisbecker5ff8e6f2010-12-10 22:11:11 +0100698 */
Paul E. McKenney121dfc42010-12-10 15:02:47 -0800699 if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
Frederic Weisbecker5ff8e6f2010-12-10 22:11:11 +0100700 rdp->gpnum = rdp->completed;
701
702 /*
Paul E. McKenney121dfc42010-12-10 15:02:47 -0800703 * If RCU does not need a quiescent state from this CPU,
704 * then make sure that this CPU doesn't go looking for one.
Frederic Weisbecker20377f32010-12-10 22:11:10 +0100705 */
Paul E. McKenney121dfc42010-12-10 15:02:47 -0800706 if ((rnp->qsmask & rdp->grpmask) == 0)
Frederic Weisbecker20377f32010-12-10 22:11:10 +0100707 rdp->qs_pending = 0;
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800708 }
709}
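
/*
 * Editor's illustration (not part of rcutree.c): ->nxttail[] holds four
 * tail pointers that partition the single ->nxtlist into segments:
 *
 *      [ DONE | WAIT | NEXT_READY | NEXT ]
 *
 * The three assignments above slide the segment boundaries when a grace
 * period completes: callbacks that were waiting on it (WAIT) become
 * ready to invoke (DONE), NEXT_READY becomes WAIT, and recently queued
 * callbacks (NEXT) become NEXT_READY, all without touching the
 * callbacks themselves.
 */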
710
711/*
712 * Advance this CPU's callbacks, but only if the current grace period
713 * has ended. This may be called only from the CPU to whom the rdp
714 * belongs.
715 */
716static void
717rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
718{
719 unsigned long flags;
720 struct rcu_node *rnp;
721
722 local_irq_save(flags);
723 rnp = rdp->mynode;
724 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800725 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800726 local_irq_restore(flags);
727 return;
728 }
729 __rcu_process_gp_end(rsp, rnp, rdp);
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800730 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800731}
732
733/*
734 * Do per-CPU grace-period initialization for running CPU. The caller
735 * must hold the lock of the leaf rcu_node structure corresponding to
736 * this CPU.
737 */
738static void
739rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
740{
741 /* Prior grace period ended, so advance callbacks for current CPU. */
742 __rcu_process_gp_end(rsp, rnp, rdp);
743
744 /*
745 * Because this CPU just now started the new grace period, we know
746 * that all of its callbacks will be covered by this upcoming grace
747 * period, even the ones that were registered arbitrarily recently.
748 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
749 *
750 * Other CPUs cannot be sure exactly when the grace period started.
751 * Therefore, their recently registered callbacks must pass through
752 * an additional RCU_NEXT_READY stage, so that they will be handled
753 * by the next RCU grace period.
754 */
755 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
756 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
Paul E. McKenney91603062009-11-02 13:52:29 -0800757
758 /* Set state so that this CPU will detect the next quiescent state. */
759 __note_new_gpnum(rsp, rnp, rdp);
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800760}
761
762/*
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100763 * Start a new RCU grace period if warranted, re-initializing the hierarchy
764 * in preparation for detecting the next grace period. The caller must hold
765 * the root node's ->lock, which is released before return. Hard irqs must
766 * be disabled.
767 */
768static void
769rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
770 __releases(rcu_get_root(rsp)->lock)
771{
Lai Jiangshan394f99a2010-06-28 16:25:04 +0800772 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100773 struct rcu_node *rnp = rcu_get_root(rsp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100774
Paul E. McKenney07079d52010-01-04 15:09:02 -0800775 if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
Paul E. McKenney46a1e342010-01-04 15:09:09 -0800776 if (cpu_needs_another_gp(rsp, rdp))
777 rsp->fqs_need_gp = 1;
Paul E. McKenneyb32e9eb2009-11-12 22:35:03 -0800778 if (rnp->completed == rsp->completed) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800779 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenneyb32e9eb2009-11-12 22:35:03 -0800780 return;
781 }
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800782 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
Paul E. McKenneyb32e9eb2009-11-12 22:35:03 -0800783
784 /*
785 * Propagate new ->completed value to rcu_node structures
786 * so that other CPUs don't have to wait until the start
787 * of the next grace period to process their callbacks.
788 */
789 rcu_for_each_node_breadth_first(rsp, rnp) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800790 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
Paul E. McKenneyb32e9eb2009-11-12 22:35:03 -0800791 rnp->completed = rsp->completed;
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800792 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
Paul E. McKenneyb32e9eb2009-11-12 22:35:03 -0800793 }
794 local_irq_restore(flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100795 return;
796 }
797
798 /* Advance to a new grace period and initialize state. */
799 rsp->gpnum++;
Paul E. McKenneyc3422be2009-09-13 09:15:10 -0700800 WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100801 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
802 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100803 record_gp_stall_check_time(rsp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100804
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100805 /* Special-case the common single-level case. */
806 if (NUM_RCU_NODES == 1) {
Paul E. McKenneyb0e165c2009-09-13 09:15:09 -0700807 rcu_preempt_check_blocked_tasks(rnp);
Paul E. McKenney28ecd582009-09-18 09:50:17 -0700808 rnp->qsmask = rnp->qsmaskinit;
Paul E. McKenneyde078d82009-09-08 15:54:36 -0700809 rnp->gpnum = rsp->gpnum;
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800810 rnp->completed = rsp->completed;
Paul E. McKenneyc12172c2009-01-04 20:30:06 -0800811 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800812 rcu_start_gp_per_cpu(rsp, rnp, rdp);
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800813 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100814 return;
815 }
816
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800817 raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100818
819
820 /* Exclude any concurrent CPU-hotplug operations. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800821 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100822
823 /*
Paul E. McKenneyb835db12009-09-08 15:54:37 -0700824 * Set the quiescent-state-needed bits in all the rcu_node
825 * structures for all currently online CPUs in breadth-first
826 * order, starting from the root rcu_node structure. This
827 * operation relies on the layout of the hierarchy within the
828 * rsp->node[] array. Note that other CPUs will access only
829 * the leaves of the hierarchy, which still indicate that no
830 * grace period is in progress, at least until the corresponding
831 * leaf node has been initialized. In addition, we have excluded
832 * CPU-hotplug operations.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100833 *
834 * Note that the grace period cannot complete until we finish
835 * the initialization process, as there will be at least one
836 * qsmask bit set in the root node until that time, namely the
Paul E. McKenneyb835db12009-09-08 15:54:37 -0700837 * one corresponding to this CPU, due to the fact that we have
838 * irqs disabled.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100839 */
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -0700840 rcu_for_each_node_breadth_first(rsp, rnp) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800841 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
Paul E. McKenneyb0e165c2009-09-13 09:15:09 -0700842 rcu_preempt_check_blocked_tasks(rnp);
Paul E. McKenney49e29122009-09-18 09:50:19 -0700843 rnp->qsmask = rnp->qsmaskinit;
Paul E. McKenneyde078d82009-09-08 15:54:36 -0700844 rnp->gpnum = rsp->gpnum;
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -0800845 rnp->completed = rsp->completed;
846 if (rnp == rdp->mynode)
847 rcu_start_gp_per_cpu(rsp, rnp, rdp);
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800848 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100849 }
850
Paul E. McKenney83f5b012009-10-28 08:14:49 -0700851 rnp = rcu_get_root(rsp);
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800852 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100853 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800854 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
855 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100856}
857
858/*
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800859 * Report a full set of quiescent states to the specified rcu_state
860 * data structure. This involves cleaning up after the prior grace
861 * period and letting rcu_start_gp() start up the next grace period
862 * if one is needed. Note that the caller must hold rnp->lock, as
863 * required by rcu_start_gp(), which will release it.
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700864 */
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800865static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700866 __releases(rcu_get_root(rsp)->lock)
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700867{
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700868 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700869 rsp->completed = rsp->gpnum;
Paul E. McKenney83f5b012009-10-28 08:14:49 -0700870 rsp->signaled = RCU_GP_IDLE;
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700871 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
872}
873
874/*
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800875 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
876 * Allows quiescent states for a group of CPUs to be reported at one go
877 * to the specified rcu_node structure, though all the CPUs in the group
878 * must be represented by the same rcu_node structure (which need not be
879 * a leaf rcu_node structure, though it often will be). That structure's
880 * lock must be held upon entry, and it is released before return.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100881 */
882static void
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800883rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
884 struct rcu_node *rnp, unsigned long flags)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100885 __releases(rnp->lock)
886{
Paul E. McKenney28ecd582009-09-18 09:50:17 -0700887 struct rcu_node *rnp_c;
888
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100889 /* Walk up the rcu_node hierarchy. */
890 for (;;) {
891 if (!(rnp->qsmask & mask)) {
892
893 /* Our bit has already been cleared, so done. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800894 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100895 return;
896 }
897 rnp->qsmask &= ~mask;
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700898 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100899
900 /* Other bits still set at this level, so done. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800901 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100902 return;
903 }
904 mask = rnp->grpmask;
905 if (rnp->parent == NULL) {
906
907 /* No more levels. Exit loop holding root lock. */
908
909 break;
910 }
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800911 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney28ecd582009-09-18 09:50:17 -0700912 rnp_c = rnp;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100913 rnp = rnp->parent;
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800914 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenney28ecd582009-09-18 09:50:17 -0700915 WARN_ON_ONCE(rnp_c->qsmask);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100916 }
917
918 /*
919 * Get here if we are the last CPU to pass through a quiescent
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800920 * state for this grace period. Invoke rcu_report_qs_rsp()
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700921 * to clean up and start the next grace period if one is needed.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100922 */
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800923 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100924}
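
/*
 * Editor's illustration (not part of rcutree.c): with a two-level tree,
 * say four leaf rcu_node structures of 16 CPUs each, the last CPU of a
 * leaf to report clears that leaf's ->qsmask to zero and then retries
 * one level up with mask set to the leaf's ->grpmask.  Only when the
 * root's ->qsmask also reaches zero does the loop above break out and
 * report the full set of quiescent states via rcu_report_qs_rsp().
 */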
925
926/*
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800927 * Record a quiescent state for the specified CPU to that CPU's rcu_data
928 * structure. This must be either called from the specified CPU, or
929 * called when the specified CPU is known to be offline (and when it is
930 * also known that no other CPU is concurrently trying to help the offline
931 * CPU). The lastcomp argument is used to make sure we are still in the
932 * grace period of interest. We don't want to end the current grace period
933 * based on quiescent states detected in an earlier grace period!
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100934 */
935static void
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800936rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100937{
938 unsigned long flags;
939 unsigned long mask;
940 struct rcu_node *rnp;
941
942 rnp = rdp->mynode;
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800943 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenney560d4bc2009-11-13 19:51:38 -0800944 if (lastcomp != rnp->completed) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100945
946 /*
947 * Someone beat us to it for this grace period, so leave.
948 * The race with GP start is resolved by the fact that we
949 * hold the leaf rcu_node lock, so that the per-CPU bits
950 * cannot yet be initialized -- so we would simply find our
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800951 * CPU's bit already cleared in rcu_report_qs_rnp() if this
952 * race occurred.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100953 */
954 rdp->passed_quiesc = 0; /* try again later! */
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800955 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100956 return;
957 }
958 mask = rdp->grpmask;
959 if ((rnp->qsmask & mask) == 0) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800960 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100961 } else {
962 rdp->qs_pending = 0;
963
964 /*
965 * This GP can't end until cpu checks in, so all of our
966 * callbacks can be processed during the next GP.
967 */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100968 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
969
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800970 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100971 }
972}
973
974/*
975 * Check to see if there is a new grace period of which this CPU
976 * is not yet aware, and if so, set up local rcu_data state for it.
977 * Otherwise, see if this CPU has just passed through its first
978 * quiescent state for this grace period, and record that fact if so.
979 */
980static void
981rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
982{
983 /* If there is now a new grace period, record and return. */
984 if (check_for_new_grace_period(rsp, rdp))
985 return;
986
987 /*
988 * Does this CPU still need to do its part for current grace period?
989 * If no, return and let the other CPUs do their part as well.
990 */
991 if (!rdp->qs_pending)
992 return;
993
994 /*
995 * Was there a quiescent state since the beginning of the grace
996 * period? If no, then exit and wait for the next call.
997 */
998 if (!rdp->passed_quiesc)
999 return;
1000
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08001001 /*
1002 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
1003 * judge of that).
1004 */
1005 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001006}
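
/*
 * Editor's illustration (not part of rcutree.c): the per-CPU reporting
 * chain tied together above is, in order:
 *
 *      rcu_sched_qs()/rcu_bh_qs() set ->passed_quiesc from the
 *          scheduler/softirq paths;
 *      rcu_check_quiescent_state() notices the flag and calls
 *          rcu_report_qs_rdp();
 *      rcu_report_qs_rdp() hands the CPU's ->grpmask bit to
 *          rcu_report_qs_rnp(), which clears it in the leaf ->qsmask
 *          and walks up the tree;
 *      rcu_report_qs_rsp() ends the grace period once the root's
 *          ->qsmask is empty.
 */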
1007
1008#ifdef CONFIG_HOTPLUG_CPU
1009
1010/*
Lai Jiangshan29494be2010-10-20 14:13:06 +08001011 * Move a dying CPU's RCU callbacks to an online CPU's callback list.
1012 * Synchronization is not required because this function executes
1013 * in stop_machine() context.
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001014 */
Lai Jiangshan29494be2010-10-20 14:13:06 +08001015static void rcu_send_cbs_to_online(struct rcu_state *rsp)
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001016{
1017 int i;
Lai Jiangshan29494be2010-10-20 14:13:06 +08001018 /* current DYING CPU is cleared in the cpu_online_mask */
1019 int receive_cpu = cpumask_any(cpu_online_mask);
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001020 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
Lai Jiangshan29494be2010-10-20 14:13:06 +08001021 struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001022
1023 if (rdp->nxtlist == NULL)
1024 return; /* irqs disabled, so comparison is stable. */
Lai Jiangshan29494be2010-10-20 14:13:06 +08001025
1026 *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
1027 receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
1028 receive_rdp->qlen += rdp->qlen;
1029 receive_rdp->n_cbs_adopted += rdp->qlen;
1030 rdp->n_cbs_orphaned += rdp->qlen;
1031
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001032 rdp->nxtlist = NULL;
1033 for (i = 0; i < RCU_NEXT_SIZE; i++)
1034 rdp->nxttail[i] = &rdp->nxtlist;
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001035 rdp->qlen = 0;
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001036}
1037
1038/*
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001039 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
1040 * and move all callbacks from the outgoing CPU to the current one.
1041 */
1042static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1043{
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001044 unsigned long flags;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001045 unsigned long mask;
Paul E. McKenneyd9a3da02009-12-02 12:10:15 -08001046 int need_report = 0;
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001047 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001048 struct rcu_node *rnp;
1049
1050 /* Exclude any attempts to start a new grace period. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001051 raw_spin_lock_irqsave(&rsp->onofflock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001052
1053 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
Paul E. McKenney28ecd582009-09-18 09:50:17 -07001054 rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001055 mask = rdp->grpmask; /* rnp->grplo is constant. */
1056 do {
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001057 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001058 rnp->qsmaskinit &= ~mask;
1059 if (rnp->qsmaskinit != 0) {
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -08001060 if (rnp != rdp->mynode)
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001061 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001062 break;
1063 }
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -08001064 if (rnp == rdp->mynode)
Paul E. McKenneyd9a3da02009-12-02 12:10:15 -08001065 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -08001066 else
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001067 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001068 mask = rnp->grpmask;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001069 rnp = rnp->parent;
1070 } while (rnp != NULL);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001071
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -08001072 /*
1073 * We still hold the leaf rcu_node structure lock here, and
1074 * irqs are still disabled. The reason for this subterfuge is
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08001075 * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
1076 * held leads to deadlock.
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -08001077 */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001078 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -08001079 rnp = rdp->mynode;
Paul E. McKenneyd9a3da02009-12-02 12:10:15 -08001080 if (need_report & RCU_OFL_TASKS_NORM_GP)
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08001081 rcu_report_unblock_qs_rnp(rnp, flags);
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -08001082 else
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001083 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenneyd9a3da02009-12-02 12:10:15 -08001084 if (need_report & RCU_OFL_TASKS_EXP_GP)
1085 rcu_report_exp_rnp(rsp, rnp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001086}
1087
1088/*
1089 * Remove the specified CPU from the RCU hierarchy and move any pending
1090 * callbacks that it might have to the current CPU. This code assumes
1091 * that at least one CPU in the system will remain running at all times.
1092 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
1093 */
1094static void rcu_offline_cpu(int cpu)
1095{
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001096 __rcu_offline_cpu(cpu, &rcu_sched_state);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001097 __rcu_offline_cpu(cpu, &rcu_bh_state);
Paul E. McKenney33f76142009-08-24 09:42:01 -07001098 rcu_preempt_offline_cpu(cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001099}
1100
1101#else /* #ifdef CONFIG_HOTPLUG_CPU */
1102
Lai Jiangshan29494be2010-10-20 14:13:06 +08001103static void rcu_send_cbs_to_online(struct rcu_state *rsp)
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001104{
1105}
1106
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001107static void rcu_offline_cpu(int cpu)
1108{
1109}
1110
1111#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1112
1113/*
1114 * Invoke any RCU callbacks that have made it to the end of their grace
 1115 * period. Throttle as specified by rdp->blimit.
1116 */
Paul E. McKenney37c72e52009-10-14 10:15:55 -07001117static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001118{
1119 unsigned long flags;
1120 struct rcu_head *next, *list, **tail;
1121 int count;
1122
1123 /* If no callbacks are ready, just return.*/
1124 if (!cpu_has_callbacks_ready_to_invoke(rdp))
1125 return;
1126
1127 /*
1128 * Extract the list of ready callbacks, disabling to prevent
1129 * races with call_rcu() from interrupt handlers.
1130 */
1131 local_irq_save(flags);
1132 list = rdp->nxtlist;
1133 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1134 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
1135 tail = rdp->nxttail[RCU_DONE_TAIL];
1136 for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
1137 if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
1138 rdp->nxttail[count] = &rdp->nxtlist;
1139 local_irq_restore(flags);
1140
1141 /* Invoke callbacks. */
1142 count = 0;
1143 while (list) {
1144 next = list->next;
1145 prefetch(next);
Mathieu Desnoyers551d55a2010-04-17 08:48:42 -04001146 debug_rcu_head_unqueue(list);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001147 list->func(list);
1148 list = next;
1149 if (++count >= rdp->blimit)
1150 break;
1151 }
1152
1153 local_irq_save(flags);
1154
1155 /* Update count, and requeue any remaining callbacks. */
1156 rdp->qlen -= count;
Paul E. McKenney269dcc12010-09-07 14:23:09 -07001157 rdp->n_cbs_invoked += count;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001158 if (list != NULL) {
1159 *tail = rdp->nxtlist;
1160 rdp->nxtlist = list;
1161 for (count = 0; count < RCU_NEXT_SIZE; count++)
1162 if (&rdp->nxtlist == rdp->nxttail[count])
1163 rdp->nxttail[count] = tail;
1164 else
1165 break;
1166 }
1167
1168 /* Reinstate batch limit if we have worked down the excess. */
1169 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
1170 rdp->blimit = blimit;
1171
Paul E. McKenney37c72e52009-10-14 10:15:55 -07001172 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
1173 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
1174 rdp->qlen_last_fqs_check = 0;
1175 rdp->n_force_qs_snap = rsp->n_force_qs;
1176 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
1177 rdp->qlen_last_fqs_check = rdp->qlen;
1178
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001179 local_irq_restore(flags);
1180
1181 /* Re-raise the RCU softirq if there are callbacks remaining. */
1182 if (cpu_has_callbacks_ready_to_invoke(rdp))
1183 raise_softirq(RCU_SOFTIRQ);
1184}
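
/*
 * Editor's sketch (illustration only, not part of rcutree.c): the
 * callbacks invoked by list->func(list) above are the rcu_head
 * structures queued by call_rcu() and friends.  A typical (hypothetical)
 * user looks like the following; once a grace period elapses, its
 * rcu_head migrates to the RCU_DONE_TAIL segment and rcu_do_batch()
 * invokes it:
 */
#if 0   /* illustration only */
struct example_node {                   /* made-up structure */
        int key;
        struct rcu_head rcu;
};

static void example_free_cb(struct rcu_head *head)
{
        kfree(container_of(head, struct example_node, rcu));
}

static void example_retire(struct example_node *p)
{
        /* Unlink p from reader-visible structures first, then: */
        call_rcu(&p->rcu, example_free_cb);
}
#endif  /* illustration only */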
1185
1186/*
1187 * Check to see if this CPU is in a non-context-switch quiescent state
1188 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
1189 * Also schedule the RCU softirq handler.
1190 *
1191 * This function must be called with hardirqs disabled. It is normally
1192 * invoked from the scheduling-clock interrupt. If rcu_pending returns
1193 * false, there is no point in invoking rcu_check_callbacks().
1194 */
1195void rcu_check_callbacks(int cpu, int user)
1196{
1197 if (user ||
Paul E. McKenneya6826042009-02-25 18:03:42 -08001198 (idle_cpu(cpu) && rcu_scheduler_active &&
1199 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001200
1201 /*
1202 * Get here if this CPU took its interrupt from user
1203 * mode or from the idle loop, and if this is not a
1204 * nested interrupt. In this case, the CPU is in
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001205 * a quiescent state, so note it.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001206 *
1207 * No memory barrier is required here because both
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001208 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
1209 * variables that other CPUs neither access nor modify,
1210 * at least not while the corresponding CPU is online.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001211 */
1212
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001213 rcu_sched_qs(cpu);
1214 rcu_bh_qs(cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001215
1216 } else if (!in_softirq()) {
1217
1218 /*
1219 * Get here if this CPU did not take its interrupt from
1220 * softirq, in other words, if it is not interrupting
1221 * a rcu_bh read-side critical section. This is an _bh
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001222 * critical section, so note it.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001223 */
1224
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001225 rcu_bh_qs(cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001226 }
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001227 rcu_preempt_check_callbacks(cpu);
Paul E. McKenneyd21670a2010-04-14 17:39:26 -07001228 if (rcu_pending(cpu))
1229 raise_softirq(RCU_SOFTIRQ);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001230}
1231
1232#ifdef CONFIG_SMP
1233
1234/*
1235 * Scan the leaf rcu_node structures, processing dyntick state for any that
1236 * have not yet encountered a quiescent state, using the function specified.
Paul E. McKenneyee47eb92010-01-04 15:09:07 -08001237 * The caller must have suppressed start of new grace periods.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001238 */
Paul E. McKenney45f014c2010-01-04 15:09:08 -08001239static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001240{
1241 unsigned long bit;
1242 int cpu;
1243 unsigned long flags;
1244 unsigned long mask;
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -07001245 struct rcu_node *rnp;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001246
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -07001247 rcu_for_each_leaf_node(rsp, rnp) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001248 mask = 0;
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001249 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenneyee47eb92010-01-04 15:09:07 -08001250 if (!rcu_gp_in_progress(rsp)) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001251 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney0f10dc822010-01-04 15:09:06 -08001252 return;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001253 }
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -07001254 if (rnp->qsmask == 0) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001255 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001256 continue;
1257 }
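		/*
		 * Scan the CPUs covered by this leaf rcu_node structure
		 * that have not yet reported a quiescent state, applying
		 * f() to each; CPUs for which f() returns nonzero are
		 * collected in "mask" for quiescent-state reporting below.
		 */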
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -07001258 cpu = rnp->grplo;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001259 bit = 1;
Paul E. McKenneya0b6c9a2009-09-28 07:46:33 -07001260 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001261 if ((rnp->qsmask & bit) != 0 &&
1262 f(per_cpu_ptr(rsp->rda, cpu)))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001263 mask |= bit;
1264 }
Paul E. McKenney45f014c2010-01-04 15:09:08 -08001265 if (mask != 0) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001266
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -08001267 /* rcu_report_qs_rnp() releases rnp->lock. */
1268 rcu_report_qs_rnp(mask, rsp, rnp, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001269 continue;
1270 }
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001271 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001272 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001273}
1274
1275/*
1276 * Force quiescent states on reluctant CPUs, and also detect which
1277 * CPUs are in dyntick-idle mode.
1278 */
1279static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1280{
1281 unsigned long flags;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001282 struct rcu_node *rnp = rcu_get_root(rsp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001283
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -07001284 if (!rcu_gp_in_progress(rsp))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001285 return; /* No grace period in progress, nothing to force. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001286 if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001287 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
1288 return; /* Someone else is already on the job. */
1289 }
Paul E. McKenney20133cf2010-02-22 17:05:01 -08001290 if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
Paul E. McKenneyf96e9232010-01-04 15:09:00 -08001291 goto unlock_fqs_ret; /* no emergency and done recently. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001292 rsp->n_force_qs++;
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001293 raw_spin_lock(&rnp->lock); /* irqs already disabled */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001294 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
Paul E. McKenney560d4bc2009-11-13 19:51:38 -08001295	if (!rcu_gp_in_progress(rsp)) {
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001296 rsp->n_force_qs_ngp++;
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001297 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
Paul E. McKenneyf96e9232010-01-04 15:09:00 -08001298 goto unlock_fqs_ret; /* no GP in progress, time updated. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001299 }
Paul E. McKenney07079d52010-01-04 15:09:02 -08001300 rsp->fqs_active = 1;
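	/*
	 * Forcing proceeds in two phases: first snapshot each holdout
	 * CPU's dyntick counters (RCU_SAVE_DYNTICK), then, on a later
	 * invocation, check those counters for evidence of quiescent
	 * states and send resched IPIs to any remaining holdouts
	 * (RCU_FORCE_QS).
	 */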
Paul E. McKenneyf3a8b5c2010-01-04 15:09:03 -08001301 switch (rsp->signaled) {
Paul E. McKenney83f5b012009-10-28 08:14:49 -07001302 case RCU_GP_IDLE:
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001303 case RCU_GP_INIT:
1304
Paul E. McKenney83f5b012009-10-28 08:14:49 -07001305 break; /* grace period idle or initializing, ignore. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001306
1307 case RCU_SAVE_DYNTICK:
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001308 if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
1309 break; /* So gcc recognizes the dead code. */
1310
Lai Jiangshanf2614142010-03-28 11:15:20 +08001311 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1312
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001313 /* Record dyntick-idle state. */
Paul E. McKenney45f014c2010-01-04 15:09:08 -08001314 force_qs_rnp(rsp, dyntick_save_progress_counter);
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001315 raw_spin_lock(&rnp->lock); /* irqs already disabled */
Paul E. McKenneyee47eb92010-01-04 15:09:07 -08001316 if (rcu_gp_in_progress(rsp))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001317 rsp->signaled = RCU_FORCE_QS;
Paul E. McKenneyee47eb92010-01-04 15:09:07 -08001318 break;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001319
1320 case RCU_FORCE_QS:
1321
1322 /* Check dyntick-idle state, send IPI to laggarts. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001323 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
Paul E. McKenney45f014c2010-01-04 15:09:08 -08001324 force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001325
1326 /* Leave state in case more forcing is required. */
1327
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001328 raw_spin_lock(&rnp->lock); /* irqs already disabled */
Paul E. McKenneyf96e9232010-01-04 15:09:00 -08001329 break;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001330 }
Paul E. McKenney07079d52010-01-04 15:09:02 -08001331 rsp->fqs_active = 0;
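	/*
	 * If a grace-period start was deferred because forcing was in
	 * progress (see rcu_start_gp()), start that grace period now.
	 */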
Paul E. McKenney46a1e342010-01-04 15:09:09 -08001332 if (rsp->fqs_need_gp) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001333 raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
Paul E. McKenney46a1e342010-01-04 15:09:09 -08001334 rsp->fqs_need_gp = 0;
1335 rcu_start_gp(rsp, flags); /* releases rnp->lock */
1336 return;
1337 }
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001338 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
Paul E. McKenneyf96e9232010-01-04 15:09:00 -08001339unlock_fqs_ret:
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001340 raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001341}
1342
1343#else /* #ifdef CONFIG_SMP */
1344
1345static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1346{
1347 set_need_resched();
1348}
1349
1350#endif /* #else #ifdef CONFIG_SMP */
1351
1352/*
1353 * This does the RCU processing work from softirq context for the
1354 * specified rcu_state and rcu_data structures. This may be called
1355 * only from the CPU to whom the rdp belongs.
1356 */
1357static void
1358__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1359{
1360 unsigned long flags;
1361
Paul E. McKenney2e597552009-08-15 09:53:48 -07001362 WARN_ON_ONCE(rdp->beenonline == 0);
1363
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001364 /*
1365 * If an RCU GP has gone long enough, go check for dyntick
1366 * idle CPUs and, if needed, send resched IPIs.
1367 */
Paul E. McKenney20133cf2010-02-22 17:05:01 -08001368 if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001369 force_quiescent_state(rsp, 1);
1370
1371 /*
1372 * Advance callbacks in response to end of earlier grace
1373 * period that some other CPU ended.
1374 */
1375 rcu_process_gp_end(rsp, rdp);
1376
1377 /* Update RCU state based on any recent quiescent states. */
1378 rcu_check_quiescent_state(rsp, rdp);
1379
1380 /* Does this CPU require a not-yet-started grace period? */
1381 if (cpu_needs_another_gp(rsp, rdp)) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001382 raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001383 rcu_start_gp(rsp, flags); /* releases above lock */
1384 }
1385
1386 /* If there are callbacks ready, invoke them. */
Paul E. McKenney37c72e52009-10-14 10:15:55 -07001387 rcu_do_batch(rsp, rdp);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001388}
1389
1390/*
1391 * Do softirq processing for the current CPU.
1392 */
1393static void rcu_process_callbacks(struct softirq_action *unused)
1394{
1395 /*
1396 * Memory references from any prior RCU read-side critical sections
1397 * executed by the interrupted code must be seen before any RCU
1398 * grace-period manipulations below.
1399 */
1400 smp_mb(); /* See above block comment. */
1401
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001402 __rcu_process_callbacks(&rcu_sched_state,
1403 &__get_cpu_var(rcu_sched_data));
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001404 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001405 rcu_preempt_process_callbacks();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001406
1407 /*
1408 * Memory references from any later RCU read-side critical sections
1409 * executed by the interrupted code must be seen after any RCU
1410 * grace-period manipulations above.
1411 */
1412 smp_mb(); /* See above block comment. */
Paul E. McKenneya47cd882010-02-26 16:38:56 -08001413
1414 /* If we are last CPU on way to dyntick-idle mode, accelerate it. */
1415 rcu_needs_cpu_flush();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001416}

Anson Huangceb8fa12011-09-27 18:20:19 +08001417static atomic_t rcu_barrier_cpu_count;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001418
1419static void
1420__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1421 struct rcu_state *rsp)
1422{
1423 unsigned long flags;
1424 struct rcu_data *rdp;
1425
Mathieu Desnoyers551d55a2010-04-17 08:48:42 -04001426 debug_rcu_head_queue(head);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001427 head->func = func;
1428 head->next = NULL;
1429
1430 smp_mb(); /* Ensure RCU update seen before callback registry. */
1431
1432 /*
1433 * Opportunistically note grace-period endings and beginnings.
1434 * Note that we might see a beginning right after we see an
1435 * end, but never vice versa, since this CPU has to pass through
1436 * a quiescent state betweentimes.
1437 */
1438 local_irq_save(flags);
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001439 rdp = this_cpu_ptr(rsp->rda);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001440
1441 /* Add the callback to our list. */
1442 *rdp->nxttail[RCU_NEXT_TAIL] = head;
1443 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1444
Anson Huangceb8fa12011-09-27 18:20:19 +08001445	/* Workaround for a reboot issue: check rcu_barrier_cpu_count to
1446	 * see whether a _rcu_barrier() is in progress, and if so restart
1447	 * the scheduling-clock tick via tick_nohz_restart_sched_tick().
1448	 * Once we enqueue an RCU callback, the CPU tick must stay alive
1449	 * until the callback is handled by the appropriate grace period. */
1450 if (atomic_read(&rcu_barrier_cpu_count) != 0)
1451 tick_nohz_restart_sched_tick();
1452
Paul E. McKenney37c72e52009-10-14 10:15:55 -07001453 /*
1454 * Force the grace period if too many callbacks or too long waiting.
1455 * Enforce hysteresis, and don't invoke force_quiescent_state()
1456 * if some other CPU has recently done so. Also, don't bother
1457 * invoking force_quiescent_state() if the newly enqueued callback
1458 * is the only one waiting for a grace period to complete.
1459 */
1460 if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
Paul E. McKenneyb52573d2010-12-14 17:36:02 -08001461
1462 /* Are we ignoring a completed grace period? */
1463 rcu_process_gp_end(rsp, rdp);
1464 check_for_new_grace_period(rsp, rdp);
1465
1466 /* Start a new grace period if one not already started. */
1467 if (!rcu_gp_in_progress(rsp)) {
1468 unsigned long nestflag;
1469 struct rcu_node *rnp_root = rcu_get_root(rsp);
1470
1471 raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
1472 rcu_start_gp(rsp, nestflag); /* rlses rnp_root->lock */
1473 } else {
1474 /* Give the grace period a kick. */
1475 rdp->blimit = LONG_MAX;
1476 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1477 *rdp->nxttail[RCU_DONE_TAIL] != head)
1478 force_quiescent_state(rsp, 0);
1479 rdp->n_force_qs_snap = rsp->n_force_qs;
1480 rdp->qlen_last_fqs_check = rdp->qlen;
1481 }
Paul E. McKenney20133cf2010-02-22 17:05:01 -08001482 } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001483 force_quiescent_state(rsp, 1);
1484 local_irq_restore(flags);
1485}
1486
1487/*
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001488 * Queue an RCU-sched callback for invocation after a grace period.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001489 */
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001490void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001491{
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001492 __call_rcu(head, func, &rcu_sched_state);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001493}
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001494EXPORT_SYMBOL_GPL(call_rcu_sched);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001495
1496/*
1497 * Queue an RCU callback for invocation after a quicker grace period.
1498 */
1499void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1500{
1501 __call_rcu(head, func, &rcu_bh_state);
1502}
1503EXPORT_SYMBOL_GPL(call_rcu_bh);
1504
Paul E. McKenney6ebb2372009-11-22 08:53:50 -08001505/**
1506 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
1507 *
1508 * Control will return to the caller some time after a full rcu-sched
1509 * grace period has elapsed, in other words after all currently executing
1510 * rcu-sched read-side critical sections have completed. These read-side
1511 * critical sections are delimited by rcu_read_lock_sched() and
1512 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
1513 * local_irq_disable(), and so on may be used in place of
1514 * rcu_read_lock_sched().
1515 *
1516 * This means that all preempt_disable code sequences, including NMI and
1517 * hardware-interrupt handlers, in progress on entry will have completed
1518 * before this primitive returns. However, this does not guarantee that
1519 * softirq handlers will have completed, since in some kernels, these
1520 * handlers can run in process context, and can block.
1521 *
1522 * This primitive provides the guarantees made by the (now removed)
1523 * synchronize_kernel() API. In contrast, synchronize_rcu() only
1524 * guarantees that rcu_read_lock() sections will have completed.
1525 * In "classic RCU", these two guarantees happen to be one and
1526 * the same, but can differ in realtime RCU implementations.
1527 */
1528void synchronize_sched(void)
1529{
1530 struct rcu_synchronize rcu;
1531
1532 if (rcu_blocking_is_gp())
1533 return;
1534
Paul E. McKenney72d5a9f2010-05-10 17:12:17 -07001535 init_rcu_head_on_stack(&rcu.head);
Paul E. McKenney6ebb2372009-11-22 08:53:50 -08001536 init_completion(&rcu.completion);
1537 /* Will wake me after RCU finished. */
1538 call_rcu_sched(&rcu.head, wakeme_after_rcu);
1539 /* Wait for it. */
1540 wait_for_completion(&rcu.completion);
Paul E. McKenney72d5a9f2010-05-10 17:12:17 -07001541 destroy_rcu_head_on_stack(&rcu.head);
Paul E. McKenney6ebb2372009-11-22 08:53:50 -08001542}
1543EXPORT_SYMBOL_GPL(synchronize_sched);
1544
1545/**
1546 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
1547 *
1548 * Control will return to the caller some time after a full rcu_bh grace
1549 * period has elapsed, in other words after all currently executing rcu_bh
1550 * read-side critical sections have completed. RCU read-side critical
1551 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
1552 * and may be nested.
1553 */
1554void synchronize_rcu_bh(void)
1555{
1556 struct rcu_synchronize rcu;
1557
1558 if (rcu_blocking_is_gp())
1559 return;
1560
Paul E. McKenney72d5a9f2010-05-10 17:12:17 -07001561 init_rcu_head_on_stack(&rcu.head);
Paul E. McKenney6ebb2372009-11-22 08:53:50 -08001562 init_completion(&rcu.completion);
1563 /* Will wake me after RCU finished. */
1564 call_rcu_bh(&rcu.head, wakeme_after_rcu);
1565 /* Wait for it. */
1566 wait_for_completion(&rcu.completion);
Paul E. McKenney72d5a9f2010-05-10 17:12:17 -07001567 destroy_rcu_head_on_stack(&rcu.head);
Paul E. McKenney6ebb2372009-11-22 08:53:50 -08001568}
1569EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
1570
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001571/*
1572 * Check to see if there is any immediate RCU-related work to be done
1573 * by the current CPU, for the specified type of RCU, returning 1 if so.
1574 * The checks are in order of increasing expense: checks that can be
1575 * carried out against CPU-local state are performed first. However,
1576 * we must check for CPU stalls first, else we might not get a chance.
1577 */
1578static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1579{
Paul E. McKenney2f51f982009-11-13 19:51:39 -08001580 struct rcu_node *rnp = rdp->mynode;
1581
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001582 rdp->n_rcu_pending++;
1583
1584 /* Check for CPU stalls, if enabled. */
1585 check_cpu_stall(rsp, rdp);
1586
1587 /* Is the RCU core waiting for a quiescent state from this CPU? */
Paul E. McKenneyd21670a2010-04-14 17:39:26 -07001588 if (rdp->qs_pending && !rdp->passed_quiesc) {
Paul E. McKenneyd25eb942010-03-18 21:36:51 -07001589
1590 /*
1591		 * If force_quiescent_state() is coming soon and this CPU
1592 * needs a quiescent state, and this is either RCU-sched
1593 * or RCU-bh, force a local reschedule.
1594 */
Paul E. McKenneyd21670a2010-04-14 17:39:26 -07001595 rdp->n_rp_qs_pending++;
Paul E. McKenneyd25eb942010-03-18 21:36:51 -07001596 if (!rdp->preemptable &&
1597 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
1598 jiffies))
1599 set_need_resched();
Paul E. McKenneyd21670a2010-04-14 17:39:26 -07001600 } else if (rdp->qs_pending && rdp->passed_quiesc) {
1601 rdp->n_rp_report_qs++;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001602 return 1;
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001603 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001604
1605 /* Does this CPU have callbacks ready to invoke? */
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001606 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
1607 rdp->n_rp_cb_ready++;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001608 return 1;
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001609 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001610
1611 /* Has RCU gone idle with this CPU needing another grace period? */
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001612 if (cpu_needs_another_gp(rsp, rdp)) {
1613 rdp->n_rp_cpu_needs_gp++;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001614 return 1;
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001615 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001616
1617 /* Has another RCU grace period completed? */
Paul E. McKenney2f51f982009-11-13 19:51:39 -08001618 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001619 rdp->n_rp_gp_completed++;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001620 return 1;
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001621 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001622
1623 /* Has a new RCU grace period started? */
Paul E. McKenney2f51f982009-11-13 19:51:39 -08001624 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001625 rdp->n_rp_gp_started++;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001626 return 1;
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001627 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001628
1629 /* Has an RCU GP gone long enough to send resched IPIs &c? */
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -07001630 if (rcu_gp_in_progress(rsp) &&
Paul E. McKenney20133cf2010-02-22 17:05:01 -08001631 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001632 rdp->n_rp_need_fqs++;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001633 return 1;
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001634 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001635
1636 /* nothing to do */
Paul E. McKenney7ba5c842009-04-13 21:31:17 -07001637 rdp->n_rp_need_nothing++;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001638 return 0;
1639}
1640
1641/*
1642 * Check to see if there is any immediate RCU-related work to be done
1643 * by the current CPU, returning 1 if so. This function is part of the
1644 * RCU implementation; it is -not- an exported member of the RCU API.
1645 */
Paul E. McKenneya1572292009-08-22 13:56:51 -07001646static int rcu_pending(int cpu)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001647{
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001648 return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001649 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
1650 rcu_preempt_pending(cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001651}
1652
1653/*
1654 * Check to see if any future RCU-related work will need to be done
1655 * by the current CPU, even if none need be done immediately, returning
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001656 * 1 if so.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001657 */
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001658static int rcu_needs_cpu_quick_check(int cpu)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001659{
1660 /* RCU callbacks either ready or pending? */
Paul E. McKenneyd6714c22009-08-22 13:56:46 -07001661 return per_cpu(rcu_sched_data, cpu).nxtlist ||
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001662 per_cpu(rcu_bh_data, cpu).nxtlist ||
1663 rcu_preempt_needs_cpu(cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001664}
1665
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001666static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001667static DEFINE_MUTEX(rcu_barrier_mutex);
1668static struct completion rcu_barrier_completion;
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001669
1670static void rcu_barrier_callback(struct rcu_head *notused)
1671{
1672 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1673 complete(&rcu_barrier_completion);
1674}
1675
1676/*
1677 * Called with preemption disabled, and from cross-cpu IRQ context.
1678 */
1679static void rcu_barrier_func(void *type)
1680{
1681 int cpu = smp_processor_id();
1682 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
1683 void (*call_rcu_func)(struct rcu_head *head,
1684 void (*func)(struct rcu_head *head));
1685
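	/*
	 * Queue one rcu_barrier_callback() on this CPU.  Callbacks are
	 * invoked in queue order, so this callback will not be invoked
	 * until all callbacks previously queued on this CPU for this
	 * RCU flavor have been invoked.
	 */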
1686 atomic_inc(&rcu_barrier_cpu_count);
1687 call_rcu_func = type;
1688 call_rcu_func(head, rcu_barrier_callback);
1689}
1690
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001691/*
1692 * Orchestrate the specified type of RCU barrier, waiting for all
1693 * RCU callbacks of the specified type to complete.
1694 */
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001695static void _rcu_barrier(struct rcu_state *rsp,
1696 void (*call_rcu_func)(struct rcu_head *head,
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001697 void (*func)(struct rcu_head *head)))
1698{
1699 BUG_ON(in_interrupt());
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001700 /* Take mutex to serialize concurrent rcu_barrier() requests. */
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001701 mutex_lock(&rcu_barrier_mutex);
1702 init_completion(&rcu_barrier_completion);
1703 /*
1704 * Initialize rcu_barrier_cpu_count to 1, then invoke
1705 * rcu_barrier_func() on each CPU, so that each CPU also has
1706 * incremented rcu_barrier_cpu_count. Only then is it safe to
1707 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
1708 * might complete its grace period before all of the other CPUs
1709 * did their increment, causing this function to return too
Paul E. McKenney2d999e02010-10-20 12:06:18 -07001710 * early. Note that on_each_cpu() disables irqs, which prevents
1711 * any CPUs from coming online or going offline until each online
1712 * CPU has queued its RCU-barrier callback.
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001713 */
1714 atomic_set(&rcu_barrier_cpu_count, 1);
1715 on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
1716 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1717 complete(&rcu_barrier_completion);
1718 wait_for_completion(&rcu_barrier_completion);
1719 mutex_unlock(&rcu_barrier_mutex);
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001720}
1721
1722/**
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001723 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
1724 */
1725void rcu_barrier_bh(void)
1726{
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001727 _rcu_barrier(&rcu_bh_state, call_rcu_bh);
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001728}
1729EXPORT_SYMBOL_GPL(rcu_barrier_bh);
1730
1731/**
1732 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
1733 */
1734void rcu_barrier_sched(void)
1735{
Paul E. McKenneye74f4c42009-10-06 21:48:17 -07001736 _rcu_barrier(&rcu_sched_state, call_rcu_sched);
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001737}
1738EXPORT_SYMBOL_GPL(rcu_barrier_sched);
1739
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001740/*
Paul E. McKenney27569622009-08-15 09:53:46 -07001741 * Do boot-time initialization of a CPU's per-CPU RCU data.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001742 */
Paul E. McKenney27569622009-08-15 09:53:46 -07001743static void __init
1744rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001745{
1746 unsigned long flags;
1747 int i;
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001748 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
Paul E. McKenney27569622009-08-15 09:53:46 -07001749 struct rcu_node *rnp = rcu_get_root(rsp);
1750
1751 /* Set up local state, ensuring consistent view of global state. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001752 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenney27569622009-08-15 09:53:46 -07001753 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1754 rdp->nxtlist = NULL;
1755 for (i = 0; i < RCU_NEXT_SIZE; i++)
1756 rdp->nxttail[i] = &rdp->nxtlist;
1757 rdp->qlen = 0;
1758#ifdef CONFIG_NO_HZ
1759 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1760#endif /* #ifdef CONFIG_NO_HZ */
1761 rdp->cpu = cpu;
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001762 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney27569622009-08-15 09:53:46 -07001763}
1764
1765/*
1766 * Initialize a CPU's per-CPU RCU data. Note that only one online or
1767 * offline event can be happening at a given time. Note also that we
1768 * can accept some slop in the rsp->completed access due to the fact
1769 * that this CPU cannot possibly have any RCU callbacks in flight yet.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001770 */
1771static void __cpuinit
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001772rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001773{
1774 unsigned long flags;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001775 unsigned long mask;
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001776 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001777 struct rcu_node *rnp = rcu_get_root(rsp);
1778
1779 /* Set up local state, ensuring consistent view of global state. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001780 raw_spin_lock_irqsave(&rnp->lock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001781 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1782 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1783 rdp->beenonline = 1; /* We have now been online. */
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001784 rdp->preemptable = preemptable;
Paul E. McKenney37c72e52009-10-14 10:15:55 -07001785 rdp->qlen_last_fqs_check = 0;
1786 rdp->n_force_qs_snap = rsp->n_force_qs;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001787 rdp->blimit = blimit;
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001788 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001789
1790 /*
1791 * A new grace period might start here. If so, we won't be part
1792 * of it, but that is OK, as we are currently in a quiescent state.
1793 */
1794
1795 /* Exclude any attempts to start a new GP on large systems. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001796 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001797
1798 /* Add CPU to rcu_node bitmasks. */
1799 rnp = rdp->mynode;
1800 mask = rdp->grpmask;
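	/*
	 * Walk up from this CPU's leaf rcu_node structure, setting the
	 * appropriate bit in each ->qsmaskinit field, and stopping once
	 * an ancestor already records this subtree as having online CPUs.
	 */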
1801 do {
1802 /* Exclude any attempts to start a new GP on small systems. */
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001803 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001804 rnp->qsmaskinit |= mask;
1805 mask = rnp->grpmask;
Paul E. McKenneyd09b62d2009-11-02 13:52:28 -08001806 if (rnp == rdp->mynode) {
1807 rdp->gpnum = rnp->completed; /* if GP in progress... */
1808 rdp->completed = rnp->completed;
1809 rdp->passed_quiesc_completed = rnp->completed - 1;
1810 }
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001811 raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001812 rnp = rnp->parent;
1813 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
1814
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001815 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001816}
1817
1818static void __cpuinit rcu_online_cpu(int cpu)
1819{
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001820 rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
1821 rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
1822 rcu_preempt_init_percpu_data(cpu);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001823}
1824
1825/*
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001826 * Handle CPU online/offline notification events.
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001827 */
Paul E. McKenney9f680ab2009-11-22 08:53:49 -08001828static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1829 unsigned long action, void *hcpu)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001830{
1831 long cpu = (long)hcpu;
1832
1833 switch (action) {
1834 case CPU_UP_PREPARE:
1835 case CPU_UP_PREPARE_FROZEN:
1836 rcu_online_cpu(cpu);
1837 break;
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001838 case CPU_DYING:
1839 case CPU_DYING_FROZEN:
1840 /*
Paul E. McKenney2d999e02010-10-20 12:06:18 -07001841 * The whole machine is "stopped" except this CPU, so we can
1842 * touch any data without introducing corruption. We send the
1843 * dying CPU's callbacks to an arbitrarily chosen online CPU.
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001844 */
Lai Jiangshan29494be2010-10-20 14:13:06 +08001845 rcu_send_cbs_to_online(&rcu_bh_state);
1846 rcu_send_cbs_to_online(&rcu_sched_state);
1847 rcu_preempt_send_cbs_to_online();
Paul E. McKenneyd0ec7742009-10-06 21:48:16 -07001848 break;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001849 case CPU_DEAD:
1850 case CPU_DEAD_FROZEN:
1851 case CPU_UP_CANCELED:
1852 case CPU_UP_CANCELED_FROZEN:
1853 rcu_offline_cpu(cpu);
1854 break;
1855 default:
1856 break;
1857 }
1858 return NOTIFY_OK;
1859}
1860
1861/*
Paul E. McKenneybbad9372010-04-02 16:17:17 -07001862 * This function is invoked towards the end of the scheduler's initialization
1863 * process. Before this is called, the idle task might contain
1864 * RCU read-side critical sections (during which time, this idle
1865 * task is booting the system). After this function is called, the
1866 * idle tasks are prohibited from containing RCU read-side critical
1867 * sections. This function also enables RCU lockdep checking.
1868 */
1869void rcu_scheduler_starting(void)
1870{
1871 WARN_ON(num_online_cpus() != 1);
1872 WARN_ON(nr_context_switches() > 0);
1873 rcu_scheduler_active = 1;
1874}
1875
1876/*
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001877 * Compute the per-level fanout, either using the exact fanout specified
1878 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
1879 */
1880#ifdef CONFIG_RCU_FANOUT_EXACT
1881static void __init rcu_init_levelspread(struct rcu_state *rsp)
1882{
1883 int i;
1884
Paul E. McKenney0209f642010-12-14 16:07:52 -08001885 for (i = NUM_RCU_LVLS - 1; i > 0; i--)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001886 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
Paul E. McKenney0209f642010-12-14 16:07:52 -08001887 rsp->levelspread[0] = RCU_FANOUT_LEAF;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001888}
1889#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
1890static void __init rcu_init_levelspread(struct rcu_state *rsp)
1891{
1892 int ccur;
1893 int cprv;
1894 int i;
1895
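	/*
	 * Work up from the leaf level, computing each level's fanout as
	 * the number of entities at the level below divided by the number
	 * of rcu_node structures at this level, rounded up.  This spreads
	 * CPUs as evenly as possible across the tree.
	 */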
1896 cprv = NR_CPUS;
1897 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
1898 ccur = rsp->levelcnt[i];
1899 rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
1900 cprv = ccur;
1901 }
1902}
1903#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
1904
1905/*
1906 * Helper function for rcu_init() that initializes one rcu_state structure.
1907 */
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001908static void __init rcu_init_one(struct rcu_state *rsp,
1909 struct rcu_data __percpu *rda)
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001910{
Paul E. McKenneyb6407e82010-01-04 16:04:02 -08001911 static char *buf[] = { "rcu_node_level_0",
1912 "rcu_node_level_1",
1913 "rcu_node_level_2",
1914 "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001915 int cpustride = 1;
1916 int i;
1917 int j;
1918 struct rcu_node *rnp;
1919
Paul E. McKenneyb6407e82010-01-04 16:04:02 -08001920 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
1921
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001922 /* Initialize the level-tracking arrays. */
1923
1924 for (i = 1; i < NUM_RCU_LVLS; i++)
1925 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
1926 rcu_init_levelspread(rsp);
1927
1928 /* Initialize the elements themselves, starting from the leaves. */
1929
1930 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
1931 cpustride *= rsp->levelspread[i];
1932 rnp = rsp->level[i];
1933 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -08001934 raw_spin_lock_init(&rnp->lock);
Paul E. McKenneyb6407e82010-01-04 16:04:02 -08001935 lockdep_set_class_and_name(&rnp->lock,
1936 &rcu_node_class[i], buf[i]);
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001937 rnp->gpnum = 0;
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001938 rnp->qsmask = 0;
1939 rnp->qsmaskinit = 0;
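			/*
			 * Each rcu_node structure at this level covers a
			 * contiguous block of cpustride CPU numbers, with
			 * grphi clamped to the largest legal CPU number.
			 */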
1940 rnp->grplo = j * cpustride;
1941 rnp->grphi = (j + 1) * cpustride - 1;
1942 if (rnp->grphi >= NR_CPUS)
1943 rnp->grphi = NR_CPUS - 1;
1944 if (i == 0) {
1945 rnp->grpnum = 0;
1946 rnp->grpmask = 0;
1947 rnp->parent = NULL;
1948 } else {
1949 rnp->grpnum = j % rsp->levelspread[i - 1];
1950 rnp->grpmask = 1UL << rnp->grpnum;
1951 rnp->parent = rsp->level[i - 1] +
1952 j / rsp->levelspread[i - 1];
1953 }
1954 rnp->level = i;
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001955 INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
1956 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
Paul E. McKenneyd9a3da02009-12-02 12:10:15 -08001957 INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
1958 INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001959 }
1960 }
Lai Jiangshan0c340292010-03-28 11:12:30 +08001961
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001962 rsp->rda = rda;
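	/*
	 * Point each possible CPU's rcu_data structure at the leaf
	 * rcu_node structure that covers it, then perform boot-time
	 * initialization of that rcu_data structure.
	 */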
Lai Jiangshan0c340292010-03-28 11:12:30 +08001963 rnp = rsp->level[NUM_RCU_LVLS - 1];
1964 for_each_possible_cpu(i) {
Paul E. McKenney4a90a062010-04-14 16:48:11 -07001965 while (i > rnp->grphi)
Lai Jiangshan0c340292010-03-28 11:12:30 +08001966 rnp++;
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001967 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
Lai Jiangshan0c340292010-03-28 11:12:30 +08001968 rcu_boot_init_percpu_data(i, rsp);
1969 }
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001970}
1971
Paul E. McKenney9f680ab2009-11-22 08:53:49 -08001972void __init rcu_init(void)
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001973{
Paul E. McKenney017c4262010-01-14 16:10:58 -08001974 int cpu;
Paul E. McKenney9f680ab2009-11-22 08:53:49 -08001975
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001976 rcu_bootup_announce();
Lai Jiangshan394f99a2010-06-28 16:25:04 +08001977 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
1978 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001979 __rcu_init_preempt();
Paul E. McKenney2e597552009-08-15 09:53:48 -07001980 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
Paul E. McKenney9f680ab2009-11-22 08:53:49 -08001981
1982 /*
1983 * We don't need protection against CPU-hotplug here because
1984 * this is called early in boot, before either interrupts
1985 * or the scheduler are operational.
1986 */
1987 cpu_notifier(rcu_cpu_notify, 0);
Paul E. McKenney017c4262010-01-14 16:10:58 -08001988 for_each_online_cpu(cpu)
1989 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
Paul E. McKenneyc68de202010-04-15 10:12:40 -07001990 check_cpu_stall_init();
Paul E. McKenney64db4cf2008-12-18 21:55:32 +01001991}
1992
Paul E. McKenney1eba8f82009-09-23 09:50:42 -07001993#include "rcutree_plugin.h"