From 6cd288fa2f4ca4887705fbf48718f12d09a4ac5d Mon Sep 17 00:00:00 2001
From: Vincent Guittot
Date: Tue, 23 Jul 2013 11:15:06 +0200
Subject: sched: differentiate idle cpu

The cost of waking up a core varies according to its current idle
state. This includes the C-state and any intermediate state entered
when some synchronization between cores is required to reach a deep
C-state. Waking up a CPU that sits in a deep C-state to run a short
task is not efficient from either a power or a performance point of
view. We should take the wake-up latency of an idle CPU into account
when the scheduler looks for the best CPU to use for a waking task.
The wake-up latency of a CPU is converted into a load that can be
directly compared with the task load and the load of other CPUs.

Signed-off-by: Vincent Guittot
---
 kernel/sched/fair.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ad8b99ae24b7..4863dad4f856 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -394,6 +394,20 @@ static int check_nohz_packing(int cpu)
 	return false;
 }
 
+int sched_get_idle_load(int cpu)
+{
+	struct sched_pm *stat = &per_cpu(sched_stat, cpu);
+	int latency = atomic_read(&(stat->wake_latency));
+
+	/*
+	 * Transform the current wakeup latency (us) into an idle load
+	 * that will be compared with the task load to decide whether
+	 * it's worth waking up the cpu. The formula is quite simple
+	 * but gives a good approximation in the range [0:10ms]
+	 */
+	return (latency * 21) >> 10;
+}
+
 #else /* CONFIG_SCHED_PACKING_TASKS */
 
 static inline bool is_packing_cpu(int cpu)
@@ -416,6 +430,10 @@ static inline int check_nohz_packing(int cpu)
 	return false;
 }
 
+static inline int sched_get_idle_load(int cpu)
+{
+	return 0;
+}
 #endif /* CONFIG_SCHED_PACKING_TASKS */
 
 #endif /* CONFIG_SMP */
@@ -3207,6 +3225,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 /* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(const int cpu)
 {
+	if (idle_cpu(cpu))
+		return sched_get_idle_load(cpu);
 	return cpu_rq(cpu)->cfs.runnable_load_avg;
 }
 
@@ -3655,6 +3675,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 			if (i == target || !idle_cpu(i) ||
 			    !is_packing_cpu(i))
 				goto next;
+			if (weighted_cpuload(i) > p->se.avg.load_avg_contrib)
+				goto next;
 		}
 
 		target = cpumask_first_and(sched_group_cpus(sg),
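
For illustration of the conversion above: (latency * 21) >> 10 is a
fixed-point approximation of latency_us * 21 / 1024, i.e. roughly
latency_us / 49, so a 10 ms (10000 us) wakeup latency maps to an idle
load of about 205, on the same scale as task loads (NICE_0_LOAD is
1024). Below is a minimal userspace sketch of just this conversion;
idle_load_from_latency() and the sample latencies are made up for the
example, only the formula itself comes from the patch.

#include <stdio.h>

/*
 * Same fixed-point conversion as sched_get_idle_load() above:
 * idle load ~= latency_us * 21 / 1024 (roughly latency_us / 49).
 */
static int idle_load_from_latency(int latency_us)
{
	return (latency_us * 21) >> 10;
}

int main(void)
{
	/* Illustrative wakeup latencies in microseconds, not measured data */
	int samples[] = { 0, 100, 1000, 5000, 10000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("latency %6d us -> idle load %3d\n",
		       samples[i], idle_load_from_latency(samples[i]));
	return 0;
}

With these inputs the mapping is 0 -> 0, 100 us -> 2, 1 ms -> 20,
5 ms -> 102 and 10 ms -> 205, so the new check in select_idle_sibling()
skips an idle CPU whose wakeup cost exceeds the waking task's
load_avg_contrib.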