From 2cf897f4c832b7de2717bb74e1711b275dce49df Mon Sep 17 00:00:00 2001
From: Alex Shi <alex.shi@linaro.org>
Date: Wed, 17 Feb 2016 16:35:15 +0800
Subject: sched: use the idle cpu in interrupting

Using an idle cpu that is currently handling an interrupt, instead of
the shallowest idle cpu, could save cpu power. Once a task is sent to
that cpu, the cpu is no longer idle, so after the irq it will not
enter cpuidle again; instead it exits cpuidle and tries to wake up
the task on its rq.

The latency change still needs to be measured, though, since the
shallowest idle cpu's wakeup time could compete with the interrupt
work.
---
 kernel/sched/fair.c      | 15 +++++++++++++++
 kernel/time/tick-sched.c |  2 +-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56b7d4b83947..2d4d059bd7a0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -34,6 +34,7 @@
 #include <linux/task_work.h>
 
 #include "sched.h"
+#include "../time/tick-sched.h"
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -4922,6 +4923,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 /*
  * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
+DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
 static int
 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
@@ -4930,6 +4932,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	u64 latest_idle_timestamp = 0;
 	int least_loaded_cpu = this_cpu;
 	int shallowest_idle_cpu = -1;
+	int interrupted_idle = -1;
 	int i;
 
 	/* Traverse only the allowed CPUs */
@@ -4937,6 +4940,15 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 		if (idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
+
+#ifdef CONFIG_NO_HZ_COMMON
+			struct tick_sched *ts = &per_cpu(tick_cpu_sched, i);
+			if (ts->inidle && !ts->idle_active) {
+				/* idle cpu doing irq */
+				interrupted_idle = i;
+				break;
+			}
+#endif
 			if (idle && idle->exit_latency < min_exit_latency) {
 				/*
 				 * We give priority to a CPU whose idle state
@@ -4965,6 +4977,9 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 		}
 	}
 
+	if (interrupted_idle != -1)
+		return interrupted_idle;
+
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0b17424349eb..d127467293e6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -34,7 +34,7 @@
 /*
  * Per cpu nohz control structure
  */
-static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 struct tick_sched *tick_get_tick_sched(int cpu)
 {
-- 
cgit v1.2.3
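
The pick order the fair.c hunks implement reads cleanly in isolation:
an idle cpu that is already handling an interrupt wins outright,
otherwise the idle cpu with the shallowest cpuidle exit latency,
otherwise the least-loaded busy cpu. Below is a minimal standalone
user-space sketch of that policy; the struct, its fields, and the
sample data are mock stand-ins invented for illustration, not the
kernel's rq/cpuidle/tick_sched state.

#include <stdio.h>

struct mock_cpu {
	int idle;			/* idle_cpu(i) would be true */
	int in_irq;			/* idle, but servicing an interrupt */
	unsigned int exit_latency;	/* cpuidle exit latency, in us */
	unsigned long load;		/* rq load, for busy cpus */
};

static int pick_cpu(const struct mock_cpu *cpu, int n)
{
	unsigned int min_exit_latency = ~0U;
	unsigned long min_load = ~0UL;
	int shallowest_idle = -1, least_loaded = -1;
	int i;

	for (i = 0; i < n; i++) {
		if (cpu[i].idle) {
			/* the patch's new fast path: interrupted idle cpu */
			if (cpu[i].in_irq)
				return i;
			if (cpu[i].exit_latency < min_exit_latency) {
				min_exit_latency = cpu[i].exit_latency;
				shallowest_idle = i;
			}
		} else if (cpu[i].load < min_load) {
			min_load = cpu[i].load;
			least_loaded = i;
		}
	}

	return shallowest_idle != -1 ? shallowest_idle : least_loaded;
}

int main(void)
{
	struct mock_cpu cpus[] = {
		{ .idle = 0, .load = 100 },		/* busy */
		{ .idle = 1, .exit_latency = 200 },	/* deep idle */
		{ .idle = 1, .in_irq = 1 },		/* idle, in an irq */
		{ .idle = 1, .exit_latency = 10 },	/* shallow idle */
	};

	printf("picked cpu %d\n", pick_cpu(cpus, 4));
	return 0;
}

Run as-is, it prints "picked cpu 2": the interrupted idle cpu beats
cpu 3 even though cpu 3 sits in a shallower idle state, which is
exactly the trade the changelog says still needs latency measurement.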
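
The tick-sched.c hunk exists only to give tick_cpu_sched external
linkage: with "static" dropped, fair.c can name the per-cpu variable
via DECLARE_PER_CPU and read it with per_cpu(), where DECLARE_PER_CPU
plays the role "extern" plays for ordinary globals. Here is a plain-C
sketch of the same define/declare split, collapsed into one file; the
plain globals and the two-field struct are illustrative stand-ins for
the kernel's per-cpu machinery:

/* tick-sched.c side: the defining translation unit. Removing
 * "static" gives the symbol external linkage. */
struct tick_sched {
	int inidle;		/* cpu has entered the idle loop */
	int idle_active;	/* idle time accounting is running */
};

struct tick_sched tick_cpu_sched;	/* was: static ... */

/* fair.c side: any other translation unit may now declare and
 * use the same object. */
extern struct tick_sched tick_cpu_sched;

/* The patch's test: in the idle loop but idle accounting stopped,
 * i.e. the cpu woke up to service an interrupt. */
int interrupted_idle(void)
{
	struct tick_sched *ts = &tick_cpu_sched;

	return ts->inidle && !ts->idle_active;
}

In the tick-sched code of this vintage, irq entry on an idle cpu
clears idle_active (tick_nohz_irq_enter() -> tick_nohz_stop_idle())
while inidle stays set until the idle loop is left, which is what
makes the conjunction a usable "idle cpu doing irq" test.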