path: root/kernel/sched/fair.c
author		Suresh Siddha <suresh.b.siddha@intel.com>	2011-12-01 17:07:32 -0800
committer	Ingo Molnar <mingo@elte.hu>			2011-12-06 09:06:30 +0100
commit		1c792db7f7957e2e34b9a164f08200e36a25dfd0 (patch)
tree		d1ab8d9e860e12b31f8553309b79cc65ff380fa5 /kernel/sched/fair.c
parent		5b680fd61388abb9059fbc8e7a2f60f602bfce15 (diff)
sched, nohz: Introduce nohz_flags in 'struct rq'
Introduce nohz_flags in 'struct rq', which will track these two flags for now.

NOHZ_TICK_STOPPED keeps track of the tick stopped status that gets set when
the tick is stopped. It will be used to update the nohz idle load balancer
data structures during the first busy tick after the tick is restarted. At
this first busy tick after tickless idle, the NOHZ_TICK_STOPPED flag will be
reset. This minimizes the nohz idle load balancer status updates that
currently happen on every tickless exit, making it more scalable when many
logical cpus enter and exit idle frequently.

NOHZ_BALANCE_KICK will track the need for nohz idle load balance on this rq.
It replaces nohz_balance_kick in the rq, which was not being updated
atomically.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20111202010832.499438999@sbsiddha-desk.sc.intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
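The fair.c hunks below rely on flag bits and an accessor that are defined
outside this file (the diffstat is limited to kernel/sched/fair.c). A minimal
sketch of the declarations they assume, reconstructed only from the calls
visible in the diff; the exact definitions live elsewhere in the scheduler
code and are not shown here:

/* Sketch of the assumed declarations; not part of this diffstat. */
enum {
	NOHZ_TICK_STOPPED,	/* tick was stopped while this rq was idle */
	NOHZ_BALANCE_KICK,	/* nohz idle load balance requested on this rq */
};

/*
 * 'struct rq' presumably gains an 'unsigned long nohz_flags' member in
 * place of the old 'int nohz_balance_kick', so that the bits can be
 * updated atomically with set_bit()/clear_bit()/test_and_set_bit().
 */
#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)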
Diffstat (limited to 'kernel/sched/fair.c')
 kernel/sched/fair.c | 48 +++++++++++++++++++++++++++---------------------
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 81ccb811afb..50c06b0e9fa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4889,18 +4889,15 @@ static void nohz_balancer_kick(int cpu)
 		return;
 	}
 
-	if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
-		cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
-
-		smp_mb();
-		/*
-		 * Use smp_send_reschedule() instead of resched_cpu().
-		 * This way we generate a sched IPI on the target cpu which
-		 * is idle. And the softirq performing nohz idle load balance
-		 * will be run before returning from the IPI.
-		 */
-		smp_send_reschedule(ilb_cpu);
-	}
+	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+		return;
+	/*
+	 * Use smp_send_reschedule() instead of resched_cpu().
+	 * This way we generate a sched IPI on the target cpu which
+	 * is idle. And the softirq performing nohz idle load balance
+	 * will be run before returning from the IPI.
+	 */
+	smp_send_reschedule(ilb_cpu);
 	return;
 }
 
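The old sequence above ('if (!...nohz_balance_kick) { ...nohz_balance_kick = 1;
smp_mb(); ... }') is a non-atomic read-modify-write: two CPUs kicking
concurrently could both see the flag clear and both send an IPI.
test_and_set_bit() closes that window by making the test and the set a single
atomic operation. A minimal userspace analogue using C11 atomics; every name
here is illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

#define NOHZ_BALANCE_KICK	1	/* bit number, as in the patch */

static atomic_ulong nohz_flags;		/* models rq->nohz_flags */

/* Atomic read-modify-write: returns nonzero if the bit was already set. */
static int test_and_set_bit_ulong(int nr, atomic_ulong *addr)
{
	unsigned long old = atomic_fetch_or(addr, 1UL << nr);
	return (old >> nr) & 1;
}

static void nohz_kick(int caller)
{
	if (test_and_set_bit_ulong(NOHZ_BALANCE_KICK, &nohz_flags)) {
		printf("caller %d: kick already pending, no IPI\n", caller);
		return;
	}
	printf("caller %d: wins the race, sends the one IPI\n", caller);
}

int main(void)
{
	nohz_kick(0);	/* bit was clear: sends the IPI */
	nohz_kick(1);	/* bit now set: backs off */
	return 0;
}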
@@ -4964,6 +4961,8 @@ void select_nohz_load_balancer(int stop_tick)
 			}
 			return;
 		}
+
+		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	} else {
 		if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 			return;
@@ -5079,8 +5078,9 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 	struct rq *rq;
 	int balance_cpu;
 
-	if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
-		return;
+	if (idle != CPU_IDLE ||
+	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
 		if (balance_cpu == this_cpu)
@@ -5091,10 +5091,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 		 * work being done for other cpus. Next load
 		 * balancing owner will pick it up.
 		 */
-		if (need_resched()) {
-			this_rq->nohz_balance_kick = 0;
+		if (need_resched())
 			break;
-		}
 
 		raw_spin_lock_irq(&this_rq->lock);
 		update_rq_clock(this_rq);
@@ -5108,7 +5106,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 			this_rq->next_balance = rq->next_balance;
 	}
 	nohz.next_balance = this_rq->next_balance;
-	this_rq->nohz_balance_kick = 0;
+end:
+	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
@@ -5129,10 +5128,17 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	int ret;
 	int first_pick_cpu, second_pick_cpu;
 
-	if (time_before(now, nohz.next_balance))
+	if (unlikely(idle_cpu(cpu)))
 		return 0;
 
-	if (idle_cpu(cpu))
+	/*
+	 * We may have recently been in ticked or tickless idle mode. At the
+	 * first busy tick after returning from idle, we update the busy stats.
+	 */
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))))
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+	if (time_before(now, nohz.next_balance))
 		return 0;
 
 	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
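With the last hunk below changing 'else if' to 'if' in trigger_load_balance(),
nohz_kick_needed() is now evaluated on every busy tick, presumably so that
NOHZ_TICK_STOPPED gets cleared even on ticks where the regular rebalance
softirq also fires. The effect described in the changelog is that the nohz
busy-stats update runs once per idle period (at the first busy tick) instead
of on every tickless exit. A tiny illustrative model of that lifecycle, with
hypothetical names, not kernel code:

#include <stdio.h>

static unsigned int tick_stopped;	/* models the NOHZ_TICK_STOPPED bit */

static void tick_stop(void)		/* tickless idle entry */
{
	tick_stopped = 1;
}

static void busy_tick(void)		/* scheduler tick on a busy cpu */
{
	if (tick_stopped) {
		tick_stopped = 0;	/* first busy tick after idle */
		printf("update nohz busy stats (once per idle period)\n");
	}
}

int main(void)
{
	tick_stop();	/* the cpu may stop/restart its tick many times... */
	tick_stop();
	busy_tick();	/* ...but the stats update happens only here */
	busy_tick();	/* later busy ticks do nothing extra */
	return 0;
}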
@@ -5196,7 +5202,7 @@ void trigger_load_balance(struct rq *rq, int cpu)
 	    likely(!on_null_domain(cpu)))
 		raise_softirq(SCHED_SOFTIRQ);
 #ifdef CONFIG_NO_HZ
-	else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
+	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
 		nohz_balancer_kick(cpu);
 #endif
 }