author    Thomas Gleixner <tglx@linutronix.de>        2011-10-05 11:59:38 -0700
committer Anders Roxell <anders.roxell@linaro.org>    2015-07-26 11:08:21 +0200
commit    0e8f25a5c4642d14898e1309c083fac1b5df130b (patch)
tree      e839109cd81e434d1768e30e132bc07d4ee22463 /kernel/rcu
parent    e01a1d9d8cd454c4e7c9ddc1ca639146e3f5383b (diff)
rcu: Merge RCU-bh into RCU-preempt
The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, which include RCU-bh read-side critical sections being non-preemptible. This patch therefore arranges for RCU-bh to be implemented in terms of RCU-preempt for CONFIG_PREEMPT_RT_FULL=y.

This has the downside of defeating the purpose of RCU-bh, namely, handling the case where the system is subjected to a network-based denial-of-service attack that keeps at least one CPU doing full-time softirq processing. This issue will be fixed by a later commit.

The current commit will need some work to make it appropriate for mainline use, for example, it needs to be extended to cover Tiny RCU.

[ paulmck: Added a useful changelog ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--   kernel/rcu/tree.c     16
-rw-r--r--   kernel/rcu/update.c    2
2 files changed, 18 insertions(+), 0 deletions(-)
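Note that the diffstat is limited to kernel/rcu, so the diff below only shows the RCU-bh-specific definitions being compiled out under CONFIG_PREEMPT_RT_FULL. The header-side aliasing that actually redirects the RCU-bh API onto RCU-preempt is not part of this diff; a minimal sketch of that mapping, assuming the usual include/linux/rcupdate.h approach taken by the RT series (simplified, not the verbatim hunk), looks roughly like this:

/*
 * Sketch (assumption): header-side counterpart of this change, outside
 * the kernel/rcu diffstat above. Under CONFIG_PREEMPT_RT_FULL the
 * RCU-bh update-side API is aliased to the preemptible RCU flavour.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define call_rcu_bh                    call_rcu
# define rcu_barrier_bh                 rcu_barrier
# define synchronize_rcu_bh             synchronize_rcu
# define rcu_bh_force_quiescent_state   rcu_force_quiescent_state

/*
 * The read side still disables softirqs, but takes a preemptible RCU
 * read lock instead of relying on non-preemptible bh context for the
 * grace-period guarantee.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	rcu_read_lock();
}

static inline void rcu_read_unlock_bh(void)
{
	rcu_read_unlock();
	local_bh_enable();
}
#endif /* CONFIG_PREEMPT_RT_FULL */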
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8cf7304b2867..65aecd8681c2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -220,6 +220,7 @@ void rcu_sched_qs(void)
}
}
+#ifndef CONFIG_PREEMPT_RT_FULL
void rcu_bh_qs(void)
{
if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
@@ -229,6 +230,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
}
}
+#endif
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
@@ -404,6 +406,7 @@ unsigned long rcu_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
@@ -431,6 +434,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+#else
+void rcu_force_quiescent_state(void)
+{
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+#endif
+
/*
* Force a quiescent state for RCU-sched.
*/
@@ -3040,6 +3050,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
@@ -3048,6 +3059,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
/*
* Queue an RCU callback for lazy invocation after a grace period.
@@ -3139,6 +3151,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
@@ -3165,6 +3178,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
/**
* get_state_synchronize_rcu - Snapshot current RCU state
@@ -3677,6 +3691,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
mutex_unlock(&rsp->barrier_mutex);
}
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
@@ -3685,6 +3700,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 1f133350da01..1718c4fe9bce 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -227,6 +227,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
@@ -253,6 +254,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+#endif
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */