author		Chris Redpath <chris.redpath@arm.com>	2013-08-08 16:41:26 +0100
committer	Jon Medhurst <tixy@linaro.org>	2013-09-05 18:09:17 +0100
commit		1325a370daa4878e3153e877a68d29a0ab308d3b
tree		1c9edd94f625d2f209ecc53f2c10df625ae162a6
parent		70845269b55dd275c19d253bbb79b36fbf9a83d6
HMP: Implement idle pull for HMP

When an A15 goes idle, we should up-migrate any task that is above
the threshold and currently running on an A7. Reuses the HMP
force-migration spinlock, but adds its own new cpu stopper client.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <liviu.dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
 kernel/sched/fair.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 160 insertions(+), 2 deletions(-)
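The policy being added reduces to a simple rule: scan each little-cluster
runqueue and pick the heaviest currently-running task whose tracked load
exceeds the up-migration threshold. Below is a minimal standalone sketch of
that rule; NR_LITTLE_CPUS, LOAD_THRESHOLD, struct fake_task and little_rq
are hypothetical stand-ins, not kernel symbols. The real code compares
se->avg.load_avg_ratio against hmp_up_threshold across the CPUs of the
slower HMP domain, as hmp_idle_pull() does in the diff below.

/*
 * Standalone illustration of the idle-pull selection rule.
 * All names here are hypothetical stand-ins for kernel state.
 */
#include <stdio.h>

#define NR_LITTLE_CPUS 4
#define LOAD_THRESHOLD 512	/* stand-in for hmp_up_threshold */

struct fake_task { const char *name; unsigned int load; };

/* one "currently running" task per little CPU */
static struct fake_task little_rq[NR_LITTLE_CPUS] = {
	{ "kworker", 100 }, { "render", 900 }, { "audio", 300 }, { "idle", 0 },
};

/* pick the heaviest over-threshold task, mirroring hmp_idle_pull() */
static struct fake_task *select_idle_pull_candidate(void)
{
	struct fake_task *best = NULL;
	unsigned int best_load = 0;
	int cpu;

	for (cpu = 0; cpu < NR_LITTLE_CPUS; cpu++) {
		struct fake_task *p = &little_rq[cpu];
		if (p->load > LOAD_THRESHOLD && p->load > best_load) {
			best = p;
			best_load = p->load;
		}
	}
	return best;	/* NULL means nothing is worth pulling */
}

int main(void)
{
	struct fake_task *p = select_idle_pull_candidate();
	if (p)
		printf("idle big CPU pulls '%s' (load %u)\n", p->name, p->load);
	else
		printf("no task above threshold; stay idle\n");
	return 0;
}

Note that the real code only runs this as a fallback: idle_balance() tries a
normal load-balance pass first and calls hmp_idle_pull() only when nothing
was pulled, as the second hunk below shows.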
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7c41412f5e..afd76bf9433 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6003,7 +6003,9 @@ out_one_pinned:
 out:
 	return ld_moved;
 }
-
+#ifdef CONFIG_SCHED_HMP
+static unsigned int hmp_idle_pull(int this_cpu);
+#endif
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
@@ -6048,7 +6050,10 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 		}
 	}
 	rcu_read_unlock();
-
+#ifdef CONFIG_SCHED_HMP
+	if (!pulled_task)
+		pulled_task = hmp_idle_pull(this_cpu);
+#endif
 	raw_spin_lock(&this_rq->lock);
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
@@ -6694,6 +6699,79 @@ out_unlock:
 	return 0;
 }
+/*
+ * hmp_idle_pull_cpu_stop is run by the cpu stopper and used to
+ * migrate a specific task from one runqueue to another.
+ * hmp_idle_pull uses this to push a currently running task
+ * off a runqueue to a faster CPU.
+ * Locking is slightly different than usual.
+ * Based on active_load_balance_cpu_stop and can potentially be merged.
+ */
+static int hmp_idle_pull_cpu_stop(void *data)
+{
+	struct rq *busiest_rq = data;
+	struct task_struct *p = busiest_rq->migrate_task;
+	int busiest_cpu = cpu_of(busiest_rq);
+	int target_cpu = busiest_rq->push_cpu;
+	struct rq *target_rq = cpu_rq(target_cpu);
+	struct sched_domain *sd;
+
+	raw_spin_lock_irq(&busiest_rq->lock);
+
+	/* make sure the requested cpu hasn't gone down in the meantime */
+	if (unlikely(busiest_cpu != smp_processor_id() ||
+			!busiest_rq->active_balance))
+		goto out_unlock;
+
+	/* Is there any task to move? */
+	if (busiest_rq->nr_running <= 1)
+		goto out_unlock;
+
+	/* Task has migrated meanwhile, abort forced migration */
+	if (task_rq(p) != busiest_rq)
+		goto out_unlock;
+
+	/*
+	 * This condition is "impossible", if it occurs
+	 * we need to fix it. Originally reported by
+	 * Bjorn Helgaas on a 128-cpu setup.
+	 */
+	BUG_ON(busiest_rq == target_rq);
+
+	/* move a task from busiest_rq to target_rq */
+	double_lock_balance(busiest_rq, target_rq);
+
+	/* Search for an sd spanning us and the target CPU. */
+	rcu_read_lock();
+	for_each_domain(target_cpu, sd) {
+		if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
+			break;
+	}
+	if (likely(sd)) {
+		struct lb_env env = {
+			.sd = sd,
+			.dst_cpu = target_cpu,
+			.dst_rq = target_rq,
+			.src_cpu = busiest_rq->cpu,
+			.src_rq = busiest_rq,
+			.idle = CPU_IDLE,
+		};
+
+		schedstat_inc(sd, alb_count);
+
+		if (move_specific_task(&env, p))
+			schedstat_inc(sd, alb_pushed);
+		else
+			schedstat_inc(sd, alb_failed);
+	}
+	rcu_read_unlock();
+	double_unlock_balance(busiest_rq, target_rq);
+out_unlock:
+	busiest_rq->active_balance = 0;
+	raw_spin_unlock_irq(&busiest_rq->lock);
+	return 0;
+}
+
 static DEFINE_SPINLOCK(hmp_force_migration);
 /*
@@ -6766,6 +6844,86 @@ static void hmp_force_up_migration(int this_cpu)
 	}
 	spin_unlock(&hmp_force_migration);
 }
+/*
+ * hmp_idle_pull looks at little domain runqueues to see
+ * if a task should be pulled.
+ *
+ * Reuses hmp_force_migration spinlock.
+ *
+ */
+static unsigned int hmp_idle_pull(int this_cpu)
+{
+	int cpu;
+	struct sched_entity *curr, *orig;
+	struct hmp_domain *hmp_domain = NULL;
+	struct rq *target, *rq;
+	unsigned long flags, ratio = 0;
+	unsigned int force = 0;
+	struct task_struct *p = NULL;
+
+	if (!hmp_cpu_is_slowest(this_cpu))
+		hmp_domain = hmp_slower_domain(this_cpu);
+	if (!hmp_domain)
+		return 0;
+
+	if (!spin_trylock(&hmp_force_migration))
+		return 0;
+
+	/* first select a task */
+	for_each_cpu(cpu, &hmp_domain->cpus) {
+		rq = cpu_rq(cpu);
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		curr = rq->cfs.curr;
+		if (!curr) {
+			raw_spin_unlock_irqrestore(&rq->lock, flags);
+			continue;
+		}
+		if (!entity_is_task(curr)) {
+			struct cfs_rq *cfs_rq;
+
+			cfs_rq = group_cfs_rq(curr);
+			while (cfs_rq) {
+				curr = cfs_rq->curr;
+				if (!entity_is_task(curr))
+					cfs_rq = group_cfs_rq(curr);
+				else
+					cfs_rq = NULL;
+			}
+		}
+		orig = curr;
+		curr = hmp_get_heaviest_task(curr, 1);
+		if (curr->avg.load_avg_ratio > hmp_up_threshold &&
+				curr->avg.load_avg_ratio > ratio) {
+			p = task_of(curr);
+			target = rq;
+			ratio = curr->avg.load_avg_ratio;
+		}
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
+
+	if (!p)
+		goto done;
+
+	/* now we have a candidate */
+	raw_spin_lock_irqsave(&target->lock, flags);
+	if (!target->active_balance && task_rq(p) == target) {
+		target->active_balance = 1;
+		target->push_cpu = this_cpu;
+		target->migrate_task = p;
+		force = 1;
+		trace_sched_hmp_migrate(p, target->push_cpu, 3);
+		hmp_next_up_delay(&p->se, target->push_cpu);
+	}
+	raw_spin_unlock_irqrestore(&target->lock, flags);
+	if (force) {
+		stop_one_cpu_nowait(cpu_of(target),
+				hmp_idle_pull_cpu_stop,
+				target, &target->active_balance_work);
+	}
+done:
+	spin_unlock(&hmp_force_migration);
+	return force;
+}
 #else
 static void hmp_force_up_migration(int this_cpu) { }
 #endif /* CONFIG_SCHED_HMP */
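The hand-off between hmp_idle_pull() and hmp_idle_pull_cpu_stop() follows the
kernel's active-balance pattern: claim the busiest runqueue by setting
active_balance under its lock, queue the stopper work with
stop_one_cpu_nowait(), and have the callback re-validate every condition
before moving anything, because the world may change between queueing and
running. Below is a hedged userspace analogue of that claim-then-revalidate
pattern using pthreads; every name is illustrative and nothing here is a
kernel API.

/*
 * Userspace analogue of the claim-then-revalidate hand-off in
 * hmp_idle_pull()/hmp_idle_pull_cpu_stop(). All names are
 * illustrative; nothing here is a kernel interface.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_rq {
	pthread_mutex_t lock;
	int active_balance;	/* claimed for a pending migration? */
	int task_still_here;	/* may change before the worker runs */
};

static struct fake_rq rq = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.active_balance = 0,
	.task_still_here = 1,
};

/* plays the role of hmp_idle_pull_cpu_stop() */
static void *stopper_work(void *arg)
{
	struct fake_rq *busiest = arg;

	pthread_mutex_lock(&busiest->lock);
	/* conditions may have changed since we were queued: re-check */
	if (busiest->active_balance && busiest->task_still_here)
		printf("migrating task to the idle big CPU\n");
	else
		printf("conditions changed, aborting migration\n");
	busiest->active_balance = 0;	/* release the claim */
	pthread_mutex_unlock(&busiest->lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;
	int queued = 0;

	/* plays the role of hmp_idle_pull(): claim under the lock */
	pthread_mutex_lock(&rq.lock);
	if (!rq.active_balance) {
		rq.active_balance = 1;
		queued = 1;
	}
	pthread_mutex_unlock(&rq.lock);

	/* queue the worker outside the lock, like stop_one_cpu_nowait() */
	if (queued) {
		pthread_create(&worker, NULL, stopper_work, &rq);
		pthread_join(worker, NULL);
	}
	return 0;
}

The early bail-outs at the top of hmp_idle_pull_cpu_stop() are exactly this
re-check: the task may have migrated, or the runqueue may no longer be
claimed, by the time the stopper actually runs.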
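double_lock_balance() appears in the stopper because moving the task needs
both runqueue locks, and taking two locks in arbitrary order invites
deadlock. A simplified sketch of the ordering idea follows, again with
illustrative names; the real kernel helper additionally drops and re-takes
the first lock when a trylock on the second fails.

/*
 * Userspace sketch of the lock-ordering idea behind
 * double_lock_balance(): when two runqueue locks are needed,
 * always take them in a fixed global order to avoid deadlock.
 * All names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock[2] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

static void double_lock(int a, int b)
{
	/* lower-indexed lock first, mirroring the kernel's ordering rule */
	if (a < b) {
		pthread_mutex_lock(&rq_lock[a]);
		pthread_mutex_lock(&rq_lock[b]);
	} else {
		pthread_mutex_lock(&rq_lock[b]);
		pthread_mutex_lock(&rq_lock[a]);
	}
}

static void double_unlock(int a, int b)
{
	pthread_mutex_unlock(&rq_lock[a]);
	pthread_mutex_unlock(&rq_lock[b]);
}

int main(void)
{
	double_lock(1, 0);	/* argument order doesn't matter */
	printf("both runqueues locked; safe to move the task\n");
	double_unlock(1, 0);
	return 0;
}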