author     Mark Brown <broonie@linaro.org>  2014-01-22 12:33:23 +0000
committer  Mark Brown <broonie@linaro.org>  2014-01-22 12:33:23 +0000
commit     2305009118181260b45aab82f14bcbb659953ef1 (patch)
tree       0c2c9469d7ae37f38449116971bf16b10355e19c
parent     0e9b1909c3c660060d94a0a8a15c5299de97a4aa (diff)
parent     b30814c74c184bbb231e24d6c857699af338468b (diff)

Merge branch 'for-lsk' of git://git.linaro.org/arm/big.LITTLE/mp into lsk-v3.10-big.LITTLE  (v3.10/topic/big.LITTLE)
Conflicts:
        kernel/irq/irqdesc.c
        linaro/configs/big-LITTLE-MP.conf
-rw-r--r--  kernel/irq/irqdesc.c                 8
-rw-r--r--  kernel/sched/core.c                  5
-rw-r--r--  kernel/sched/fair.c                 63
-rw-r--r--  linaro/configs/big-LITTLE-MP.conf    1

4 files changed, 67 insertions(+), 10 deletions(-)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 473b2b6eccb..20ecfb0984e 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -36,12 +36,20 @@ static int __init irq_affinity_setup(char *str)
 }
 __setup("irqaffinity=", irq_affinity_setup);
 
+extern struct cpumask hmp_slow_cpu_mask;
+
 static void __init init_irq_default_affinity(void)
 {
 #ifdef CONFIG_CPUMASK_OFFSTACK
         if (!irq_default_affinity)
                 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
 #endif
+#ifdef CONFIG_SCHED_HMP
+        if (!cpumask_empty(&hmp_slow_cpu_mask)) {
+                cpumask_copy(irq_default_affinity, &hmp_slow_cpu_mask);
+                return;
+        }
+#endif
         if (cpumask_empty(irq_default_affinity))
                 cpumask_setall(irq_default_affinity);
 }
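
This hunk makes freshly initialised IRQ descriptors default their affinity to the HMP slow-CPU (LITTLE) mask when one is configured, and only fall back to all CPUs otherwise, so interrupt load stays off the big cluster unless explicitly requested. A minimal userspace sketch of the same fall-through logic, with cpumasks modelled as plain bitmasks (names are illustrative, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t cpumask_t;              /* one bit per CPU, 8 CPUs */
    #define CPU_MASK_ALL ((cpumask_t)0xff)

    static cpumask_t hmp_slow_cpu_mask;     /* LITTLE cores, set at boot */
    static cpumask_t irq_default_affinity;

    /* Same shape as init_irq_default_affinity(): prefer the slow mask,
       fall back to "all CPUs" when no slow mask was configured. */
    static void init_irq_default_affinity(void)
    {
        if (hmp_slow_cpu_mask) {                    /* !cpumask_empty() */
            irq_default_affinity = hmp_slow_cpu_mask;
            return;
        }
        if (!irq_default_affinity)                  /* cpumask_empty() */
            irq_default_affinity = CPU_MASK_ALL;    /* cpumask_setall() */
    }

    int main(void)
    {
        hmp_slow_cpu_mask = 0x0f;                   /* CPUs 0-3 = LITTLE */
        init_irq_default_affinity();
        printf("default IRQ affinity: 0x%02x\n",
               (unsigned)irq_default_affinity);
        return 0;
    }
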
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fb9b7b74a83..3e326f9208f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3842,8 +3842,11 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
                 p->sched_class = &rt_sched_class;
 #ifdef CONFIG_SCHED_HMP
                 if (!cpumask_empty(&hmp_slow_cpu_mask))
-                        if (cpumask_equal(&p->cpus_allowed, cpu_all_mask))
+                        if (cpumask_equal(&p->cpus_allowed, cpu_all_mask)) {
+                                p->nr_cpus_allowed =
+                                        cpumask_weight(&hmp_slow_cpu_mask);
                                 do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
+                        }
 #endif
         }
         else
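
The new assignment keeps p->nr_cpus_allowed in step with the narrowed p->cpus_allowed; scheduler paths use that cached weight (for instance to decide whether a task can be pushed to another CPU), so leaving it at the old value after confining an RT task to the slow mask would overstate its migration options. A small userspace model of the invariant (bitmask stand-ins, illustrative names):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t cpumask_t;          /* one bit per CPU */

    struct task {
        cpumask_t cpus_allowed;
        int nr_cpus_allowed;            /* cached weight of cpus_allowed */
    };

    static int cpumask_weight(cpumask_t m)
    {
        return __builtin_popcount(m);   /* bits set == CPUs allowed */
    }

    /* The invariant the hunk restores: narrowing the mask must also
       refresh the cached weight. */
    static void confine_to(struct task *p, cpumask_t new_mask)
    {
        p->nr_cpus_allowed = cpumask_weight(new_mask);
        p->cpus_allowed = new_mask;
    }

    int main(void)
    {
        struct task p = { .cpus_allowed = 0xff, .nr_cpus_allowed = 8 };

        confine_to(&p, 0x0f);           /* RT task now LITTLE-only */
        printf("allowed=0x%02x nr=%d\n",
               (unsigned)p.cpus_allowed, p.nr_cpus_allowed);
        return 0;
    }
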
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8a4a02740f0..43857fec77b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1453,12 +1453,9 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
         struct cfs_rq *cfs_rq = cfs_rq_of(se);
         u64 decays = atomic64_read(&cfs_rq->decay_counter);
 
         decays -= se->avg.decay_count;
-        if (!decays)
-                return 0;
-
-        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
+        if (decays)
+                se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
         se->avg.decay_count = 0;
-
         return decays;
 }
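
This is a behaviour fix dressed as a cleanup: the old early return skipped the se->avg.decay_count = 0 reset whenever zero decay periods had elapsed, leaving a stale non-zero decay_count behind, and callers such as the migration path treat a non-zero decay_count as "synchronisation still outstanding". A compressed model of the fixed helper, with a toy halving function standing in for decay_load():

    #include <stdint.h>
    #include <stdio.h>

    struct sched_avg { uint64_t decay_count, load_avg_contrib; };

    /* Toy stand-in for decay_load(): halve the load per elapsed period. */
    static uint64_t decay_load(uint64_t load, uint64_t periods)
    {
        return periods >= 64 ? 0 : load >> periods;
    }

    /* As in the fixed helper: decay_count is cleared even when no periods
       have elapsed, so "decay_count != 0" reliably means "not synced". */
    static uint64_t sync_entity_decay(struct sched_avg *a, uint64_t counter)
    {
        uint64_t decays = counter - a->decay_count;

        if (decays)
            a->load_avg_contrib = decay_load(a->load_avg_contrib, decays);
        a->decay_count = 0;
        return decays;
    }

    int main(void)
    {
        struct sched_avg a = { .decay_count = 5, .load_avg_contrib = 1024 };
        uint64_t d = sync_entity_decay(&a, 7);        /* 2 periods elapsed */

        printf("decays=%llu contrib=%llu count=%llu\n",
               (unsigned long long)d,
               (unsigned long long)a.load_avg_contrib, /* 256 */
               (unsigned long long)a.decay_count);     /* 0 */
        return 0;
    }
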
@@ -3679,12 +3676,13 @@ unsigned int hmp_next_up_threshold = 4096;
 unsigned int hmp_next_down_threshold = 4096;
 
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
-unsigned int hmp_packing_enabled = 1;
 #ifndef CONFIG_ARCH_VEXPRESS_TC2
+unsigned int hmp_packing_enabled = 1;
 unsigned int hmp_full_threshold = (NICE_0_LOAD * 9) / 8;
 #else
 /* TC2 has a sharp consumption curve @ around 800Mhz, so
    we aim to spread the load around that frequency. */
+unsigned int hmp_packing_enabled;
 unsigned int hmp_full_threshold = 650;  /* 80% of the 800Mhz freq * NICE_0_LOAD */
 #endif
 #endif
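
The TC2 value of 650 follows from the comment's arithmetic if one assumes (the patch does not say) a 1 GHz top frequency for the LITTLE cluster: with frequency-invariant scaling, a CPU saturated at 800 MHz contributes about NICE_0_LOAD * 800/1000 = 819, and 80% of that is 655, rounded down to 650. Note the hunk also moves hmp_packing_enabled into the two #ifdef arms, flipping the default to off on TC2 while keeping it on elsewhere. The arithmetic as a checkable snippet (NICE_0_LOAD is 1024 in v3.10):

    #include <stdio.h>

    #define NICE_0_LOAD 1024            /* v3.10 value */

    int main(void)
    {
        /* Assumption for illustration only: LITTLE max freq = 1000 MHz. */
        unsigned int load_at_800mhz = NICE_0_LOAD * 800 / 1000;  /* 819 */
        unsigned int threshold = load_at_800mhz * 80 / 100;      /* 655 */

        printf("%u, rounded down to 650 in the patch\n", threshold);
        return 0;
    }
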
@@ -4349,6 +4347,16 @@ unlock:
  * load-balance).
  */
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+#ifdef CONFIG_NO_HZ_COMMON
+static int nohz_test_cpu(int cpu);
+#else
+static inline int nohz_test_cpu(int cpu)
+{
+        return 0;
+}
+#endif
+
 /*
  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
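
The declaration-plus-stub pair above is the usual kernel idiom for config-dependent helpers: with CONFIG_NO_HZ_COMMON the real nohz_test_cpu(), defined further down next to the nohz bookkeeping, is used; without it the static inline stub returns 0 and the compiler discards the stale-clock branch added below. The general shape as a tiny self-contained example (illustrative names):

    #include <stdio.h>

    /* #define CONFIG_FEATURE 1 */      /* flip on to get the real helper */

    #ifdef CONFIG_FEATURE
    static int feature_active(int cpu) { return cpu == 0; }  /* real body */
    #else
    /* Stub: constant-folds to 0, so branches guarded by it vanish. */
    static inline int feature_active(int cpu) { (void)cpu; return 0; }
    #endif

    int main(void)
    {
        printf("feature_active(0) = %d\n", feature_active(0));
        return 0;
    }
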
@@ -4368,6 +4376,25 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
          * be negative here since on-rq tasks have decay-count == 0.
          */
         if (se->avg.decay_count) {
+                /*
+                 * If we migrate a sleeping task away from a CPU
+                 * which has the tick stopped, then both the clock_task
+                 * and decay_counter will be out of date for that CPU
+                 * and we will not decay load correctly.
+                 */
+                if (!se->on_rq && nohz_test_cpu(task_cpu(p))) {
+                        struct rq *rq = cpu_rq(task_cpu(p));
+                        unsigned long flags;
+                        /*
+                         * Current CPU cannot be holding rq->lock in this
+                         * circumstance, but another might be. We must hold
+                         * rq->lock before we go poking around in its clocks
+                         */
+                        raw_spin_lock_irqsave(&rq->lock, flags);
+                        update_rq_clock(rq);
+                        update_cfs_rq_blocked_load(cfs_rq, 0);
+                        raw_spin_unlock_irqrestore(&rq->lock, flags);
+                }
                 se->avg.decay_count = -__synchronize_entity_decay(se);
                 atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
         }
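
The hazard described in the new comment is a clock that only advances when somebody updates it: on a tick-stopped CPU, rq->clock_task and the decay counter go stale, so the blocked load of a task sleeping there would be decayed against outdated values once it migrates. The fix takes the remote runqueue's lock, refreshes its clock, and updates blocked load before synchronising the entity. A simplified pthread model of "refresh the remote clock under its lock before reading it" (illustrative names, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    /* Per-CPU runqueue stand-in: its clock only advances when refreshed. */
    struct rq {
        pthread_mutex_t lock;
        long clock_ms;                  /* stand-in for rq->clock_task */
    };

    static long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Mirrors the fix: take the remote rq's lock and bring its clock up
       to date before decaying anything against it. */
    static long read_fresh_clock(struct rq *rq)
    {
        long c;

        pthread_mutex_lock(&rq->lock);
        rq->clock_ms = now_ms();        /* update_rq_clock() */
        c = rq->clock_ms;               /* ...now safe to decay against */
        pthread_mutex_unlock(&rq->lock);
        return c;
    }

    int main(void)
    {
        struct rq rq = { PTHREAD_MUTEX_INITIALIZER, 0 };

        printf("fresh remote clock: %ld ms\n", read_fresh_clock(&rq));
        return 0;
    }
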
@@ -6300,6 +6327,18 @@ static struct {
         unsigned long next_balance;     /* in jiffy units */
 } nohz ____cacheline_aligned;
 
+/*
+ * nohz_test_cpu used when load tracking is enabled. FAIR_GROUP_SCHED
+ * dependency below may be removed when load tracking guards are
+ * removed.
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int nohz_test_cpu(int cpu)
+{
+        return cpumask_test_cpu(cpu, nohz.idle_cpus_mask);
+}
+#endif
+
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
 /*
  * Decide if the tasks on the busy CPUs in the
@@ -6312,6 +6351,10 @@ static int hmp_packing_ilb_needed(int cpu)
         if (!hmp_cpu_is_slowest(cpu))
                 return 1;
 
+        /* if disabled, use normal ILB behaviour */
+        if (!hmp_packing_enabled)
+                return 1;
+
         hmp = hmp_cpu_domain(cpu);
         for_each_cpu_and(cpu, &hmp->cpus, nohz.idle_cpus_mask) {
                 /* only idle balance if a CPU is loaded over threshold */
@@ -6984,13 +7027,13 @@ static void hmp_migrate_runnable_task(struct rq *rq)
          * with the source rq.
          */
         if (src_rq->active_balance)
-                return;
+                goto out;
 
         if (src_rq->nr_running <= 1)
-                return;
+                goto out;
 
         if (task_rq(p) != src_rq)
-                return;
+                goto out;
         /*
          * Not sure if this applies here but one can never
          * be too cautious
@@ -7025,6 +7068,8 @@ static void hmp_migrate_runnable_task(struct rq *rq)
         rcu_read_unlock();
         double_unlock_balance(src_rq, dst_rq);
 
+out:
+        put_task_struct(p);
 }
 
 static DEFINE_SPINLOCK(hmp_force_migration);
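
The goto rewrite fixes a task_struct reference leak: a reference is taken on p before the migration attempt, and the original early returns skipped the matching put_task_struct(), so every aborted migration pinned a task_struct for good. Routing all exits through one label guarantees the drop. The same take-once, release-on-every-path shape in a small userspace sketch (illustrative names):

    #include <stdlib.h>

    struct task { int refs; };

    static void get_task(struct task *p) { p->refs++; }

    static void put_task(struct task *p)
    {
        if (--p->refs == 0)
            free(p);                  /* last reference frees the task */
    }

    /* Single-exit shape from the hunk: every abort path funnels through
       "out", so the reference taken before the attempt is always dropped. */
    static void try_migrate(struct task *p, int src_busy, int nr_running)
    {
        if (src_busy)
            goto out;
        if (nr_running <= 1)
            goto out;
        /* ... lock both runqueues and move the task here ... */
    out:
        put_task(p);
    }

    int main(void)
    {
        struct task *p = calloc(1, sizeof(*p));

        if (!p)
            return 1;
        get_task(p);                  /* reference owned by this attempt */
        try_migrate(p, 1, 2);         /* aborts early, still drops it */
        return 0;
    }
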
diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf
index 0bbc603a13e..ced3cf974f1 100644
--- a/linaro/configs/big-LITTLE-MP.conf
+++ b/linaro/configs/big-LITTLE-MP.conf
@@ -9,3 +9,4 @@ CONFIG_HMP_FAST_CPU_MASK=""
 CONFIG_HMP_SLOW_CPU_MASK=""
 CONFIG_HMP_VARIABLE_SCALE=y
 CONFIG_HMP_FREQUENCY_INVARIANT_SCALE=y
+CONFIG_SCHED_HMP_LITTLE_PACKING=y
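
This fragment enables the little-packing code changed above at build time; combined with the new per-platform defaults, packing is compiled in everywhere but starts out disabled on TC2 and enabled on other platforms. Fragments under linaro/configs/ are typically folded into a full kernel .config with the kernel's own merge tool, e.g. scripts/kconfig/merge_config.sh .config linaro/configs/big-LITTLE-MP.conf.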