author    | Morten Rasmussen <Morten.Rasmussen@arm.com> | 2012-09-14 14:38:17 +0100
committer | Viresh Kumar <viresh.kumar@linaro.org>      | 2013-03-12 14:54:41 +0530
commit    | b422ddf960f3dee1632136c66817004e7e29cea2 (patch)
tree      | 6367a6c638092bb0d0819e6f14f3ee56bb243a80
parent    | e08f2c8fc885bcfbd476f5df53833e71c7eedaf5 (diff)
sched: SCHED_HMP multi-domain task migration control
We need a way to prevent tasks that are migrating up and down the
hmp_domains from migrating straight on through before the load has
adapted to the new compute capacity of the CPU on the new hmp_domain.
This patch adds a next up/down migration delay that prevents the task
from doing another migration in the same direction until the delay
has expired.
Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
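
The delay works by stamping each migration with the per-entity load-tracking clock and refusing another migration in the same direction until that stamp is old enough: cfs_rq_clock_task() counts nanoseconds, the elapsed time is shifted right by 10 (divided by 1024, i.e. converted to roughly microseconds) and compared against hmp_next_up_threshold / hmp_next_down_threshold, whose default of 4096 therefore corresponds to about 4 ms. The user-space sketch below models only that arithmetic; it is an illustration under those assumptions, not kernel code, and the helper name up_migration_allowed() is invented for the example.

/*
 * Stand-alone model of the up-migration throttle check (illustration only,
 * not kernel code).  Timestamps are nanoseconds, as returned by
 * cfs_rq_clock_task(); ">> 10" divides by 1024, so a threshold of 4096
 * corresponds to roughly 4 ms.
 */
#include <stdio.h>

typedef unsigned long long u64;

static unsigned int next_up_threshold = 4096;	/* mirrors hmp_next_up_threshold */

/* Returns 0 while the task load is still settling, 1 once the delay expired. */
static int up_migration_allowed(u64 now_ns, u64 last_up_migration_ns)
{
	if (((now_ns - last_up_migration_ns) >> 10) < next_up_threshold)
		return 0;
	return 1;
}

int main(void)
{
	u64 last = 0;	/* task was up-migrated at t = 0 */

	printf("%d\n", up_migration_allowed(1000000ULL, last));	/* 1 ms later: 0 (blocked) */
	printf("%d\n", up_migration_allowed(5000000ULL, last));	/* 5 ms later: 1 (allowed) */
	return 0;
}
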
-rw-r--r-- | include/linux/sched.h |  4
-rw-r--r-- | kernel/sched/core.c   |  4
-rw-r--r-- | kernel/sched/fair.c   | 38
3 files changed, 46 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ade83a6a28..ea439abfdd3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1104,6 +1104,10 @@ struct sched_avg {
 	s64 decay_count;
 	unsigned long load_avg_contrib;
 	unsigned long load_avg_ratio;
+#ifdef CONFIG_SCHED_HMP
+	u64 hmp_last_up_migration;
+	u64 hmp_last_down_migration;
+#endif
 	u32 usage_avg_sum;
 };
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f12624a393..346862ee1de 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1569,6 +1569,10 @@ static void __sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
 	p->se.avg.runnable_avg_period = 0;
 	p->se.avg.runnable_avg_sum = 0;
+#ifdef CONFIG_SCHED_HMP
+	p->se.avg.hmp_last_up_migration = 0;
+	p->se.avg.hmp_last_down_migration = 0;
+#endif
 #endif
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fe661dbf729..7aef90ad9b4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3379,12 +3379,16 @@ static int __init hmp_cpu_mask_setup(void)
  * tweaking suit particular needs.
  *
  * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
+ * hmp_next_up_threshold: Delay before next up migration (1024 ~= 1 ms)
+ * hmp_next_down_threshold: Delay before next down migration (1024 ~= 1 ms)
  */
 unsigned int hmp_up_threshold = 512;
 unsigned int hmp_down_threshold = 256;
 #ifdef CONFIG_SCHED_HMP_PRIO_FILTER
 unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
 #endif
+unsigned int hmp_next_up_threshold = 4096;
+unsigned int hmp_next_down_threshold = 4096;
 
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
@@ -3447,6 +3451,21 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
 						tsk_cpus_allowed(tsk));
 }
 
+static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
+{
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+	se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq);
+	se->avg.hmp_last_down_migration = 0;
+}
+
+static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
+{
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+
+	se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
+	se->avg.hmp_last_up_migration = 0;
+}
 #endif /* CONFIG_SCHED_HMP */
 
 /*
@@ -3550,11 +3569,13 @@ unlock:
 #ifdef CONFIG_SCHED_HMP
 	if (hmp_up_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+		hmp_next_up_delay(&p->se, new_cpu);
 		trace_sched_hmp_migrate(p, new_cpu, 0);
 		return new_cpu;
 	}
 	if (hmp_down_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+		hmp_next_down_delay(&p->se, new_cpu);
 		trace_sched_hmp_migrate(p, new_cpu, 0);
 		return new_cpu;
 	}
@@ -5827,6 +5848,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+	u64 now;
 
 	if (hmp_cpu_is_fastest(cpu))
 		return 0;
@@ -5837,6 +5860,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 		return 0;
 #endif
 
+	/* Let the task load settle before doing another up migration */
+	now = cfs_rq_clock_task(cfs_rq);
+	if (((now - se->avg.hmp_last_up_migration) >> 10)
+					< hmp_next_up_threshold)
+		return 0;
+
 	if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
 					tsk_cpus_allowed(p))
 		&& se->avg.load_avg_ratio > hmp_up_threshold) {
@@ -5849,6 +5878,8 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+	u64 now;
 
 	if (hmp_cpu_is_slowest(cpu))
 		return 0;
@@ -5859,6 +5890,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 		return 1;
 #endif
 
+	/* Let the task load settle before doing another down migration */
+	now = cfs_rq_clock_task(cfs_rq);
+	if (((now - se->avg.hmp_last_down_migration) >> 10)
+					< hmp_next_down_threshold)
+		return 0;
+
 	if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
 					tsk_cpus_allowed(p))
 		&& se->avg.load_avg_ratio < hmp_down_threshold) {
@@ -6049,6 +6086,7 @@ static void hmp_force_up_migration(int this_cpu)
 			target->migrate_task = p;
 			force = 1;
 			trace_sched_hmp_migrate(p, target->push_cpu, 1);
+			hmp_next_up_delay(&p->se, target->push_cpu);
 		}
 	}
 	raw_spin_unlock_irqrestore(&target->lock, flags);
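
A design point worth noting in hmp_next_up_delay() and hmp_next_down_delay() above: each records a fresh timestamp for its own direction and zeroes the opposite one, so a task that has just been up-migrated is throttled from moving up again until hmp_next_up_threshold worth of time has passed, but can be migrated back down immediately if the placement turns out to be wrong (and vice versa). The user-space sketch below models that behaviour; the struct and function names are invented for illustration, and it makes the same nanosecond-clock assumption as the sketch above.

/* Illustration only: models the per-direction timestamps kept in sched_avg. */
#include <stdio.h>

typedef unsigned long long u64;

static const unsigned int next_threshold = 4096;	/* ~4 ms in (ns >> 10) units */

struct hmp_stamps {
	u64 last_up;	/* models se->avg.hmp_last_up_migration */
	u64 last_down;	/* models se->avg.hmp_last_down_migration */
};

/* Mirrors hmp_next_up_delay(): arm the up throttle, disarm the down one. */
static void record_up_migration(struct hmp_stamps *s, u64 now_ns)
{
	s->last_up = now_ns;
	s->last_down = 0;
}

static int up_allowed(const struct hmp_stamps *s, u64 now_ns)
{
	return ((now_ns - s->last_up) >> 10) >= next_threshold;
}

static int down_allowed(const struct hmp_stamps *s, u64 now_ns)
{
	return ((now_ns - s->last_down) >> 10) >= next_threshold;
}

int main(void)
{
	struct hmp_stamps s = { 0, 0 };
	u64 t = 100000000ULL;	/* arbitrary point in time, in ns */

	record_up_migration(&s, t);
	printf("up allowed:   %d\n", up_allowed(&s, t + 1000000ULL));	/* 0: still settling */
	printf("down allowed: %d\n", down_allowed(&s, t + 1000000ULL));	/* 1: can undo the move */
	return 0;
}
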