author    Alex Shi <alex.shi@linaro.org>    2016-02-17 16:35:15 +0800
committer Alex Shi <alex.shi@linaro.org>    2016-03-21 11:09:29 +0800
commit    2cf897f4c832b7de2717bb74e1711b275dce49df (patch)
tree      be80d87679ff53825c12532605a41f1470d1e3c8
parent    b562e44f507e863c6792946e4e1b1449fbbac85d (diff)
sched: use the idle cpu in interrupting (idle-steal-from-int)
Use an idle cpu that is currently handling an interrupt instead of the one in the shallowest idle state; this can save cpu power. After a task is sent to that cpu, the cpu is no longer idle, so when the irq finishes it will not re-enter cpuidle: it exits the idle loop and wakes up the task on its rq. The latency change still needs to be measured, though, since the shallowest idle cpu's wakeup time could compete with the interrupt work.
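For illustration, here is a minimal stand-alone C model of the selection order the fair.c hunk below introduces (the struct fields and pick_cpu() are hypothetical stand-ins for the per-cpu state the kernel code reads, and the latest_idle_timestamp tie-break of the real find_idlest_cpu() is omitted): an idle cpu that is already servicing an irq wins outright, then the idle cpu with the shallowest idle state, then the least loaded cpu.

#include <limits.h>
#include <stdio.h>

/* Hypothetical snapshot of the per-cpu state the patched code inspects. */
struct cpu_state {
	int idle;                  /* idle_cpu(i) */
	int inidle;                /* ts->inidle: cpu entered the idle loop */
	int idle_active;           /* ts->idle_active: cleared while an irq runs */
	unsigned int exit_latency; /* exit latency of the cpuidle state */
	unsigned long load;        /* runqueue load, used as a last resort */
};

/* Simplified model of the patched find_idlest_cpu() selection order. */
static int pick_cpu(const struct cpu_state *cpus, int n)
{
	unsigned int min_exit_latency = UINT_MAX;
	unsigned long min_load = ULONG_MAX;
	int shallowest_idle_cpu = -1;
	int least_loaded_cpu = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (cpus[i].idle) {
			/* An idle cpu already woken by an irq wins outright. */
			if (cpus[i].inidle && !cpus[i].idle_active)
				return i;
			/* Otherwise prefer the shallowest idle state. */
			if (cpus[i].exit_latency < min_exit_latency) {
				min_exit_latency = cpus[i].exit_latency;
				shallowest_idle_cpu = i;
			}
		} else if (shallowest_idle_cpu == -1 && cpus[i].load < min_load) {
			min_load = cpus[i].load;
			least_loaded_cpu = i;
		}
	}
	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}

int main(void)
{
	struct cpu_state cpus[] = {
		{ .idle = 1, .inidle = 1, .idle_active = 1, .exit_latency = 10 },
		{ .idle = 1, .inidle = 1, .idle_active = 0, .exit_latency = 50 },
		{ .idle = 0, .load = 100 },
	};

	/* cpu 1 is idle but servicing an irq, so it is chosen over cpu 0. */
	printf("picked cpu %d\n", pick_cpu(cpus, 3));
	return 0;
}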
-rw-r--r-- kernel/sched/fair.c       15
-rw-r--r-- kernel/time/tick-sched.c   2
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56b7d4b83947..2d4d059bd7a0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -34,6 +34,7 @@
#include <trace/events/sched.h>
#include "sched.h"
+#include "../time/tick-sched.h"
/*
* Targeted preemption latency for CPU-bound tasks:
@@ -4922,6 +4923,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
/*
* find_idlest_cpu - find the idlest cpu among the cpus in group.
*/
+DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
@@ -4930,6 +4932,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
u64 latest_idle_timestamp = 0;
int least_loaded_cpu = this_cpu;
int shallowest_idle_cpu = -1;
+ int interrupted_idle = -1;
int i;
/* Traverse only the allowed CPUs */
@@ -4937,6 +4940,15 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
if (idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
+
+#ifdef CONFIG_NO_HZ_COMMON
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, i);
+ if (ts->inidle && !ts->idle_active) {
+ /* idle cpu doing irq */
+ interrupted_idle = i;
+ break;
+ }
+#endif
if (idle && idle->exit_latency < min_exit_latency) {
/*
* We give priority to a CPU whose idle state
@@ -4965,6 +4977,9 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
}
}
+ if (interrupted_idle != -1)
+ return interrupted_idle;
+
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0b17424349eb..d127467293e6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -34,7 +34,7 @@
/*
* Per cpu nohz control structure
*/
-static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
struct tick_sched *tick_get_tick_sched(int cpu)
{
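The tick-sched.c hunk above drops the static qualifier so that tick_cpu_sched gains external linkage, which is what lets the DECLARE_PER_CPU() added to fair.c resolve against it. In outline, DEFINE_PER_CPU()/DECLARE_PER_CPU() act like a definition/extern-declaration pair for per-cpu storage (these are kernel-only macros, so this sketch only builds in-tree; foo.c and bar.c are hypothetical files):

/* foo.c: owns the per-cpu storage (one instance per possible cpu). */
#include <linux/percpu.h>
#include "tick-sched.h"

DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/* bar.c: another translation unit declares the same variable ... */
DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);

/* ... and reads a given cpu's instance through per_cpu(). */
static bool cpu_is_in_irq_from_idle(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	return ts->inidle && !ts->idle_active;
}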