author      Juri Lelli <juri.lelli@arm.com>    2015-06-25 14:11:26 +0100
committer   Juri Lelli <juri.lelli@arm.com>    2015-10-05 12:09:35 +0100
commit      1a5533735ba9e21918546224531b98120333ca49 (patch)
tree        9e8c13344eceb4e3ea784989f898c75bc1cedf46
parent      e70396e242a20ca020ec47f4cd34798493559304 (diff)
FROMLIST: sched/fair: add triggers for OPP change requests
Each time a task is {en,de}queued we might need to adapt the current
frequency to the new usage. Add triggers on {en,de}queue_task_fair() for
this purpose. Only trigger a freq request if we are effectively waking up
or going to sleep. Filter out load balancing related calls to reduce the
number of triggers.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
(am from https://patchwork.kernel.org/patch/6738171)
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Change-Id: I167b610e0042c1921c043c1157476ebd0d86b08a
-rw-r--r--    kernel/sched/fair.c    42
1 file changed, 40 insertions, 2 deletions
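Editor's note on the arithmetic used throughout this patch: with
SCHED_CAPACITY_SHIFT = 10, a capacity_margin of 1280 scales a usage value by
1280/1024 = 1.25, so every request asks for 25% more capacity than is
currently used; equivalently, the requested capacity is only ~80% utilized,
matching the ~20% tipping-point margin used by cpu_overutilized(). A minimal
user-space sketch of that computation (illustrative only, not kernel code):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10

static unsigned int capacity_margin = 1280;     /* ~20% margin */

int main(void)
{
        unsigned long usage = 512;      /* current usage of a 1024-capacity CPU */
        unsigned long req_cap;

        /* Same expression as the patch: scale usage by 1280/1024 = 1.25. */
        req_cap = usage * capacity_margin >> SCHED_CAPACITY_SHIFT;
        printf("usage=%lu -> req_cap=%lu\n", usage, req_cap);  /* 512 -> 640 */
        return 0;
}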
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 761e0271513b..f0262ae63f90 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4050,7 +4050,10 @@ static inline void hrtick_update(struct rq *rq)
 }
 #endif
 
+static unsigned int capacity_margin = 1280; /* ~20% margin */
+
 static bool cpu_overutilized(int cpu);
+static unsigned long get_cpu_usage(int cpu);
 
 struct static_key __sched_energy_freq __read_mostly = STATIC_KEY_INIT_FALSE;
 /*
@@ -4101,6 +4104,26 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 if (!task_new && !rq->rd->overutilized &&
                     cpu_overutilized(rq->cpu))
                         rq->rd->overutilized = true;
+                /*
+                 * We want to trigger a freq switch request only for tasks that
+                 * are waking up; this is because we get here also during
+                 * load balancing, but in these cases it seems wise to trigger
+                 * a single request after load balancing is done.
+                 *
+                 * XXX: how about fork()? Do we need a special flag/something
+                 * to tell if we are here after a fork() (wakeup_task_new)?
+                 *
+                 * Also, we add a margin (same ~20% used for the tipping point)
+                 * to our request to provide some head room if p's utilization
+                 * further increases.
+                 */
+                if (sched_energy_freq() && !task_new) {
+                        unsigned long req_cap = get_cpu_usage(cpu_of(rq));
+
+                        req_cap = req_cap * capacity_margin
+                                        >> SCHED_CAPACITY_SHIFT;
+                        cpufreq_sched_set_cap(cpu_of(rq), req_cap);
+                }
         }
         hrtick_update(rq);
 }
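The hunk above fires a request only for a genuine wakeup (!task_new), so the
enqueues performed while pulling tasks during load balancing stay silent and
at most a single request need follow once balancing is done. A toy user-space
model of that filtering policy (enqueue and set_cap_request are hypothetical
stand-ins, not kernel interfaces):

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10

static unsigned int capacity_margin = 1280;

/* Stand-in for cpufreq_sched_set_cap(): just report the request. */
static void set_cap_request(int cpu, unsigned long cpu_usage)
{
        unsigned long req_cap = cpu_usage * capacity_margin
                                        >> SCHED_CAPACITY_SHIFT;
        printf("cpu%d: capacity request %lu\n", cpu, req_cap);
}

/* Mirror of the enqueue-side filter: only real wakeups trigger. */
static void enqueue(int cpu, unsigned long cpu_usage, bool wakeup)
{
        if (wakeup)
                set_cap_request(cpu, cpu_usage);
}

int main(void)
{
        enqueue(0, 600, true);  /* wakeup: prints "cpu0: capacity request 750" */
        enqueue(1, 600, false); /* load-balance move: no request issued */
        return 0;
}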
@@ -4162,6 +4185,23 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         if (!se) {
                 sub_nr_running(rq, 1);
                 update_rq_runnable_avg(rq, 1);
+                /*
+                 * We want to trigger a freq switch request only for tasks that
+                 * are going to sleep; this is because we get here also during
+                 * load balancing, but in these cases it seems wise to trigger
+                 * a single request after load balancing is done.
+                 *
+                 * Also, we add a margin (same ~20% used for the tipping point)
+                 * to our request to provide some head room if p's utilization
+                 * further increases.
+                 */
+                if (sched_energy_freq() && task_sleep) {
+                        unsigned long req_cap = get_cpu_usage(cpu_of(rq));
+
+                        req_cap = req_cap * capacity_margin
+                                        >> SCHED_CAPACITY_SHIFT;
+                        cpufreq_sched_set_cap(cpu_of(rq), req_cap);
+                }
         }
         hrtick_update(rq);
 }
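The dequeue side is the mirror image: task_sleep restricts the trigger to
tasks actually going to sleep, so migration-driven dequeues stay silent for
the same reason as on the enqueue path, while a genuine sleep still produces
a request, presumably so the governor can lower the OPP once usage drops.
Since get_cpu_usage() is sampled after the bookkeeping above, the margin
should apply to the usage that remains on the CPU rather than to the
departing task's contribution.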
@@ -4545,8 +4585,6 @@ static int find_new_capacity(struct energy_env *eenv,
         return idx;
 }
 
-static unsigned int capacity_margin = 1280; /* ~20% margin */
-
 static bool cpu_overutilized(int cpu)
 {
         return (capacity_of(cpu) * 1024) <