path: root/kernel/sched/core.c
diff options
authorFrederic Weisbecker <fweisbec@gmail.com>2013-05-03 03:39:05 +0200
committerFrederic Weisbecker <fweisbec@gmail.com>2013-05-04 08:32:02 +0200
commit265f22a975c1e4cc3a4d1f94a3ec53ffbb6f5b9f (patch)
treec5b7ec6b64fc31e879e730d2edf8e836cfaf7e9b /kernel/sched/core.c
parent73c30828771acafb0a5e3a1c4cf75e6c5dc5f98a (diff)
sched: Keep at least 1 tick per second for active dynticks tasks
The scheduler doesn't yet fully support environments with a single task running without a periodic tick. In order to ensure we still maintain the duties of scheduler_tick(), keep at least 1 tick per second. This makes sure that we keep the progression of various scheduler accounting and background maintenance even with a very low granularity. Examples include cpu load, sched average, CFS entity vruntime, avenrun and events such as load balancing, amongst other details handled in sched_class::task_tick(). This limitation will be removed in the future once we get these individual items to work in full dynticks CPUs. Suggested-by: Ingo Molnar <mingo@kernel.org> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Christoph Lameter <cl@linux.com> Cc: Hakan Akkan <hakanakkan@gmail.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Kevin Hilman <khilman@linaro.org> Cc: Li Zhong <zhong@linux.vnet.ibm.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Paul Gortmaker <paul.gortmaker@windriver.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched/core.c')
1 files changed, 30 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e94842d4400..3bdf986a091 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2736,8 +2736,35 @@ void scheduler_tick(void)
rq->idle_balance = idle_cpu(cpu);
trigger_load_balance(rq, cpu);
+ rq_last_tick_reset(rq);
+ * scheduler_tick_max_deferment
+ *
+ * Keep at least one tick per second when a single
+ * active task is running because the scheduler doesn't
+ * yet completely support full dynticks environment.
+ *
+ * This makes sure that uptime, CFS vruntime, load
+ * balancing, etc... continue to move forward, even
+ * with a very low granularity.
+ */
+u64 scheduler_tick_max_deferment(void)
+ struct rq *rq = this_rq();
+ unsigned long next, now = ACCESS_ONCE(jiffies);
+ next = rq->last_sched_tick + HZ;
+ if (time_before_eq(next, now))
+ return 0;
+ return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
notrace unsigned long get_parent_ip(unsigned long addr)
if (in_lock_functions(addr)) {
@@ -6993,6 +7020,9 @@ void __init sched_init(void)
rq->nohz_flags = 0;
+ rq->last_sched_tick = 0;
atomic_set(&rq->nr_iowait, 0);