author     Chris Redpath <chris.redpath@arm.com>    2013-08-19 15:06:17 +0100
committer  Jon Medhurst <tixy@linaro.org>           2013-08-19 15:41:36 +0100
commit     5fad81c7739db3a1fc6380dcc3d7902666ed5ee8 (patch)
tree       42cb39cda257ee7ac497cd501c366bd00ce1ea99
parent     aaba2453951b3743d003be228102e1da63c75326 (diff)
sched: track per-rq 'last migration time'
Track when migrations were performed to runqueues. Use this to decide
between runqueues as migration targets when runqueues in an hmp domain
have equal load. The intention is to spread migration load amongst CPUs
more fairly.

When all CPUs in an hmp domain are fully loaded, the existing code
always selects the last CPU as a migration target - this is unfair and
little better than doing no selection at all.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <liviu.dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
-rw-r--r--    kernel/sched/fair.c    40
1 file changed, 33 insertions(+), 7 deletions(-)
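In outline, the policy this patch implements is: pick the least-loaded
runqueue in the domain, and when two runqueues are equally loaded,
prefer the one that has gone longest without receiving a migration. A
minimal standalone sketch of that selection loop follows; the struct,
field names, and the pick_target_cpu() helper are illustrative
assumptions, not the kernel's own types (the real state lives in
struct rq and struct sched_avg, as the diff below shows):

#include <limits.h>

/* Simplified per-runqueue state; in the kernel these live in
 * struct rq and struct sched_avg (see hmp_last_up_migration /
 * hmp_last_down_migration in the diff below). */
struct rq_stats {
	unsigned long runnable_load;        /* load estimate for this runqueue */
	unsigned long long last_migration;  /* time of the last migration to it */
};

/* Hypothetical helper: pick the least-loaded CPU, breaking ties in
 * favour of the runqueue that has waited longest since a migration. */
static int pick_target_cpu(const struct rq_stats rqs[], int nr_cpus)
{
	unsigned long min_load = ULONG_MAX;
	unsigned long long oldest_migration = ULLONG_MAX;
	int target = -1;
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (rqs[cpu].runnable_load < min_load ||
		    (rqs[cpu].runnable_load == min_load &&
		     rqs[cpu].last_migration < oldest_migration)) {
			min_load = rqs[cpu].runnable_load;
			oldest_migration = rqs[cpu].last_migration;
			target = cpu;
		}
	}
	return target;
}

With all runqueues equally loaded, successive calls rotate the target
across the domain as each migration refreshes the chosen runqueue's
timestamp, instead of repeatedly picking the same CPU.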
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b801eb0330e..62302a37279 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3764,17 +3764,21 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
{
struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-
- se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq);
+ u64 now = cfs_rq_clock_task(cfs_rq);
+ se->avg.hmp_last_up_migration = now;
se->avg.hmp_last_down_migration = 0;
+ cpu_rq(cpu)->avg.hmp_last_up_migration = now;
+ cpu_rq(cpu)->avg.hmp_last_down_migration = 0;
}
static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
{
struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-
- se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
+ u64 now = cfs_rq_clock_task(cfs_rq);
+ se->avg.hmp_last_down_migration = now;
se->avg.hmp_last_up_migration = 0;
+ cpu_rq(cpu)->avg.hmp_last_down_migration = now;
+ cpu_rq(cpu)->avg.hmp_last_up_migration = 0;
}
#ifdef CONFIG_HMP_VARIABLE_SCALE
@@ -3946,15 +3950,37 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
{
int cpu;
int min_cpu_runnable_temp = NR_CPUS;
+ u64 min_target_last_migration = ULLONG_MAX;
+ u64 curr_last_migration;
unsigned long min_runnable_load = INT_MAX;
- unsigned long contrib;
+ unsigned long scaled_min_runnable_load = INT_MAX;
+ unsigned long contrib, scaled_contrib;
+ struct sched_avg *avg;
for_each_cpu_mask(cpu, hmpd->cpus) {
+ avg = &cpu_rq(cpu)->avg;
+ /* used for both up and down migration */
+ curr_last_migration = avg->hmp_last_up_migration ?
+ avg->hmp_last_up_migration : avg->hmp_last_down_migration;
+
/* don't use the divisor in the loop, just at the end */
- contrib = cpu_rq(cpu)->avg.runnable_avg_sum * scale_load_down(1024);
- if (contrib < min_runnable_load) {
+ contrib = avg->runnable_avg_sum * scale_load_down(1024);
+ scaled_contrib = contrib >> 22;
+
+ if ((contrib < min_runnable_load) ||
+ (scaled_contrib == scaled_min_runnable_load &&
+ curr_last_migration < min_target_last_migration)) {
+ /*
+ * If the load is the same, target the CPU with
+ * the longest time since a migration. This
+ * spreads migration load between members of a
+ * domain more evenly when the domain is fully
+ * loaded.
+ */
min_runnable_load = contrib;
+ scaled_min_runnable_load = scaled_contrib;
min_cpu_runnable_temp = cpu;
+ min_target_last_migration = curr_last_migration;
}
}
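A subtlety in the hunk above: the ordering test still uses the exact
contrib value, but the equal-load tiebreak compares scaled_contrib =
contrib >> 22, so two runqueues whose raw contributions differ by less
than 2^22 count as equally loaded for the purpose of the timestamp
comparison. If runnable_avg_sum saturates at LOAD_AVG_MAX (47742 in
this era's per-entity load tracking), contrib tops out near
47742 * 1024, so scaled_contrib spans only roughly 0..11. A small
standalone demonstration, with made-up load values:

#include <stdio.h>

int main(void)
{
	/* Two hypothetical raw load contributions that differ slightly. */
	unsigned long a = 40UL << 20;               /* 41943040 */
	unsigned long b = (40UL << 20) + 123456UL;  /* 42066496 */

	/* The exact ordering test still tells them apart... */
	printf("exact:  a < b -> %d\n", a < b);            /* prints 1 */

	/* ...but the >> 22 coarsening used by the tiebreak puts both
	 * loads in the same bucket, so the migration-timestamp
	 * comparison decides between them. */
	printf("scaled: %lu vs %lu\n", a >> 22, b >> 22);  /* 10 vs 10 */
	return 0;
}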