author    Chris Redpath <chris.redpath@arm.com>  2013-06-17 16:20:37 +0100
committer Jon Medhurst <tixy@linaro.org>         2013-07-17 11:32:30 +0100
commit    6eada0087366d8aec6bc38348a68f721f538cc5c (patch)
tree      b3dfc779d09d3f81f87a72fb4a14e73739824a42
parent    954978dd2cff81cc15745b9e581a1709e238f8ef (diff)
sched: Restrict nohz balance kicks to stay in the HMP domain
There is little point in doing a nohz balance kick on a CPU from a different HMP domain, since the unset SD_LOAD_BALANCE flag at the CPU-domain level prevents tasks from being balanced across clusters except through the per-task, load-driven hmp_migrate/hmp_offload paths.

Further, the nohz balance kick is actively harmful to power usage if all the tasks fit into the little domain, since it causes the big domain to wake up and do a lot of calculation only to determine that there is nothing to do.

A more generic solution would be to walk the sched domain tree and determine the intersection of potential idle-balance CPUs with visibility of tasks on the current CPU; however, HMP domains are more easily accessible.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
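The core of the change is a cpumask intersection: rather than kicking the first nohz-idle CPU system-wide, the target is the first CPU that is both nohz-idle and in the caller's HMP domain, and the kick is skipped when that intersection is empty. The stand-alone C sketch below models only that selection logic; the bitmask representation, the first_cpu()/first_cpu_and() helpers and the example CPU layout are illustrative assumptions standing in for cpumask_first_and(), nohz.idle_cpus_mask and hmp_cpu_domain(), not kernel code.

/*
 * User-space model of the HMP-restricted ILB pick (illustrative only).
 * Assumed layout: CPUs 0-3 form the little HMP domain, CPUs 4-7 the
 * big one; each cpumask is modelled as one bit per CPU in an unsigned int.
 */
#include <stdio.h>

#define NR_CPUS 8

/* First set bit (CPU id) in mask, or NR_CPUS if the mask is empty. */
static int first_cpu(unsigned int mask)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

/* Model of cpumask_first_and(): first CPU present in both masks. */
static int first_cpu_and(unsigned int a, unsigned int b)
{
	return first_cpu(a & b);
}

int main(void)
{
	unsigned int little    = 0x0f;	/* CPUs 0-3 */
	unsigned int big       = 0xf0;	/* CPUs 4-7 */
	unsigned int nohz_idle = 0x60;	/* CPUs 5 and 6 are nohz-idle */

	/* Unrestricted pick targets CPU 5 even for a little-domain caller. */
	printf("unrestricted ilb: %d\n", first_cpu(nohz_idle));

	/*
	 * Restricted picks: the little-domain intersection is empty
	 * (result >= NR_CPUS, so no kick); the big-domain pick is CPU 5.
	 */
	printf("little-domain ilb: %d\n", first_cpu_and(nohz_idle, little));
	printf("big-domain ilb:    %d\n", first_cpu_and(nohz_idle, big));

	return 0;
}

In the patch itself the same comparison against nr_cpu_ids serves both hunks: find_new_ilb() only returns a CPU when the result is below nr_cpu_ids, and nohz_kick_needed() bails out when the intersection yields no CPU below it.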
 kernel/sched/fair.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1b784eea661..c849d68a9b7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6028,7 +6028,11 @@ static struct {
 static inline int find_new_ilb(int call_cpu)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
-
+#ifdef CONFIG_SCHED_HMP
+	/* restrict nohz balancing to occur in the same hmp domain */
+	ilb = cpumask_first_and(nohz.idle_cpus_mask,
+			&((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus);
+#endif
 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
 		return ilb;
@@ -6307,6 +6311,18 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	if (time_before(now, nohz.next_balance))
 		return 0;
 
+#ifdef CONFIG_SCHED_HMP
+	/*
+	 * Bail out if there are no nohz CPUs in our
+	 * HMP domain, since we will move tasks between
+	 * domains through wakeup and force balancing
+	 * as necessary based upon task load.
+	 */
+	if (cpumask_first_and(nohz.idle_cpus_mask,
+		&((struct hmp_domain *)hmp_cpu_domain(cpu))->cpus) >= nr_cpu_ids)
+		return 0;
+#endif
+
 	if (rq->nr_running >= 2)
 		goto need_kick;