aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPatrick Bellasi <patrick.bellasi@arm.com>2015-06-22 19:05:34 +0100
committerJuri Lelli <juri.lelli@arm.com>2015-10-05 12:11:29 +0100
commita105bf083cc51fdb0d304d2c030f5921c7515bfd (patch)
tree3dfb2ac4d4d3d8f1a8febe3586a2e267ed5e1ac9
parent8083529768a58662ed4859cd98b3cac2d99c0b1e (diff)
WIP: sched/fair: add boosted task utilization
The task utilization signal, which is derived from PELT signals and properly scaled to be architecture and frequency invariant, is used by EAS as an estimation of the task requirements in terms of CPU bandwidth. This signal affects both the CPU selection as well as, when SchedDVFS (the scheduler controlled CPUFreq governor) is in use, the selection of the current operating points (OPP) for the CPU. A convenient way to bias these decisions, which is also minimally intrusive, is to boost the task utilization signal each time it must be used to support them. This patch introduces the new function: boosted_task_utilization(task) which returns a boosted value for the utilization of the specified task. The margin added to the original utilization is: 1. computed by the boosting strategy introduced by a previous patch 2. proportional to the system-wide boost value defined by the sysctl interface, which was also introduced by a previous patch. The boosted signal is used by EAS a. transparently, via its integration into the task_fits() function b. explicitly, in the energy-aware wakeup path Change-Id: I032041cfd4406bb050bb2190c0332935ece5592a Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
-rw-r--r--kernel/sched/fair.c49
1 files changed, 47 insertions, 2 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 05c54ba4d012..a3c2f5c5610f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4874,13 +4874,58 @@ schedtune_margin(unsigned long signal, unsigned long boost)
}
+static unsigned long
+schedtune_task_margin(struct task_struct *task)
+{
+ unsigned int boost;
+ unsigned long utilization;
+ unsigned long margin;
+
+ boost = get_sysctl_sched_cfs_boost();
+ if (boost == 0)
+ return 0;
+
+ utilization = task_utilization(task);
+ margin = schedtune_margin(utilization, boost);
+
+ return margin;
+}
+
+static unsigned long
+boosted_task_utilization(struct task_struct *task)
+{
+ unsigned long utilization;
+ unsigned long margin = 0;
+
+ utilization = task_utilization(task);
+
+ /*
+ * Boosting of task utilization is enabled only when the scheduler is
+ * working in energy-aware mode.
+ */
+ if (!task_rq(task)->rd->overutilized)
+ margin = schedtune_task_margin(task);
+
+ utilization += margin;
+
+ return utilization;
+}
+
+#else /* CONFIG_SCHED_TUNE */
+
+static unsigned long
+boosted_task_utilization(struct task_struct *task)
+{
+ return task_utilization(task);
+}
+
#endif /* CONFIG_SCHED_TUNE */
static inline bool __task_fits(struct task_struct *p, int cpu, int usage)
{
unsigned long capacity = capacity_of(cpu);
- usage += task_utilization(p);
+ usage += boosted_task_utilization(p);
return (capacity * 1024) > (usage * capacity_margin);
}
@@ -5139,7 +5184,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target)
* so prev_cpu will receive a negative bias due the double
* accouting. However, the blocked utilization may be zero.
*/
- int new_usage = get_cpu_usage(i) + task_utilization(p);
+ int new_usage = get_cpu_usage(i) + boosted_task_utilization(p);
if (new_usage > capacity_orig_of(i))
continue;