aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Redpath <chris.redpath@arm.com>2017-02-28 17:27:28 +0000
committerAmit Pundir <amit.pundir@linaro.org>2017-06-21 16:37:44 +0530
commita815601bfb125eccf50295930f4ba86cb32451f3 (patch)
treef1ff85c2367c9ece340557893ef74868e23a670e
parentf25df5eba6e86f2214112873ff5d2874fb681ea0 (diff)
sched/fair: remove task util from own cpu when placing waking task
When we place a waking task with find_best_target, we calculate the
existing and new utilisation of each candidate cpu. However, we do not
remove any blocked load resulting from the waking task on the previous
cpu which might cause unnecessary migrations.

Switch to using cpu_util_wake which does this for us, which requires
moving cpu_util_wake a few functions earlier.

Also, we have multiple potential cpu utilization signals here, so
update the necessary bits to allow WALT to work properly (including
not subtracting task util for WALT).

When WALT is in use, cpu utilization is the utilization in the
previous completed window, whilst the task utilization ignores fully
idle windows. There seems to be no way to have a decently accurate
estimate of how much (if any) utilization from this task remains on
the prev cpu. Instead, just return cpu_util when we're using WALT.

Change-Id: I448203ab98ffb5c020dfb6b218581eef1f5601f7
Reported-by: Patrick Bellasi <patrick.bellasi@arm.com>
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
-rw-r--r-- kernel/sched/fair.c | 48 ++++++++++++++++++++-------------------
1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f972df2115d7..27a6f5d6c86c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6147,6 +6147,34 @@ done:
return target;
}
+/*
+ * cpu_util_wake: Compute cpu utilization with any contributions from
+ * the waking task p removed.
+ */
+static int cpu_util_wake(int cpu, struct task_struct *p)
+{
+ unsigned long util, capacity;
+
+#ifdef CONFIG_SCHED_WALT
+ /*
+ * WALT does not decay idle tasks in the same manner
+ * as PELT, so it makes little sense to subtract task
+ * utilization from cpu utilization. Instead just use
+ * cpu_util for this case.
+ */
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ return cpu_util(cpu);
+#endif
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
+ return cpu_util(cpu);
+
+ capacity = capacity_orig_of(cpu);
+ util = max_t(long, cpu_util(cpu) - task_util(p), 0);
+
+ return (util >= capacity) ? capacity : util;
+}
+
static int start_cpu(bool boosted)
{
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -6203,7 +6231,7 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
* so prev_cpu will receive a negative bias due to the double
* accounting. However, the blocked utilization may be zero.
*/
- new_util = cpu_util(i) + task_util(p);
+ new_util = cpu_util_wake(i, p) + task_util(p);
/*
* Ensure minimum capacity to grant the required boost.
@@ -6273,24 +6301,6 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
}
/*
- * cpu_util_wake: Compute cpu utilization with any contributions from
- * the waking task p removed.
- */
-static int cpu_util_wake(int cpu, struct task_struct *p)
-{
- unsigned long util, capacity;
-
- /* Task has no contribution or is new */
- if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
- return cpu_util(cpu);
-
- capacity = capacity_orig_of(cpu);
- util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0);
-
- return (util >= capacity) ? capacity : util;
-}
-
-/*
* Disable WAKE_AFFINE in the case where task @p doesn't fit in the
* capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
*