author    Juri Lelli <juri.lelli@arm.com>    2015-11-10 15:40:03 +0000
committer Juri Lelli <juri.lelli@arm.com>    2015-11-10 15:40:03 +0000
commit    d47adb96ef6311d77b15eda46c96eca31e10bc19 (patch)
tree      0dd4af003d87bbb9ad636c692d5fb757045da046
parent    b9ed5586638749771886164255c3259dc48eb4a2 (diff)
download  linux-linaro-stable-d47adb96ef6311d77b15eda46c96eca31e10bc19.tar.gz
sched/fair: move capacity_curr_of outside CONFIG_SMP
CONFIG_CPU_FREQ_GOV_SCHED configurations need to use capacity_curr_of; move it outside CONFIG_SMP regions. Once we do that, arch_scale_freq_capacity has to be changed as well, because struct sched_domain is not defined on !CONFIG_SMP. Luckily, the sd parameter is not used anywhere in that function, so we can simply clean it up.

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
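For reference, a minimal sketch of the two helpers as they end up after this patch (taken from the hunks below; the arch_scale_freq_capacity() body shown is only the generic fallback from sched.h, not an architecture-specific implementation such as the ARM one):

    /*
     * Generic fallback: report full capacity when the architecture does
     * not provide its own arch_scale_freq_capacity().
     */
    #ifndef arch_scale_freq_capacity
    static __always_inline
    unsigned long arch_scale_freq_capacity(int cpu)
    {
    	return SCHED_CAPACITY_SCALE;
    }
    #endif

    /*
     * Current capacity of a CPU: the original capacity scaled by the
     * current frequency, normalised back by SCHED_CAPACITY_SHIFT.
     */
    unsigned long capacity_curr_of(int cpu)
    {
    	return cpu_rq(cpu)->cpu_capacity_orig *
    	       arch_scale_freq_capacity(cpu)
    	       >> SCHED_CAPACITY_SHIFT;
    }

Because neither helper dereferences a struct sched_domain, both can live outside the CONFIG_SMP region and be used by CONFIG_CPU_FREQ_GOV_SCHED on !SMP builds.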
-rw-r--r--  arch/arm/include/asm/topology.h    2
-rw-r--r--  arch/arm/kernel/topology.c         2
-rw-r--r--  arch/arm64/include/asm/topology.h  2
-rw-r--r--  arch/arm64/kernel/topology.c       2
-rw-r--r--  kernel/sched/fair.c                24
-rw-r--r--  kernel/sched/sched.h               10
6 files changed, 21 insertions, 21 deletions
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index cf66aca3ba25..92d67815e1ef 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -27,7 +27,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
#define arch_scale_freq_capacity arm_arch_scale_freq_capacity
struct sched_domain;
extern
-unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long arm_arch_scale_freq_capacity(int cpu);
DECLARE_PER_CPU(atomic_long_t, cpu_freq_capacity);
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index da450709eaa9..d825c024a674 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -158,7 +158,7 @@ static void update_cpu_capacity(unsigned int cpu)
* compensates for frequency scaling (arch_scale_freq_capacity()). The scaling
* factor is updated in smp.c
*/
-unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arm_arch_scale_freq_capacity(int cpu)
{
unsigned long curr = atomic_long_read(&per_cpu(cpu_freq_capacity, cpu));
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index b3496efc17ba..db1c8cd32fbd 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -27,7 +27,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
#define arch_scale_freq_capacity arm_arch_scale_freq_capacity
struct sched_domain;
extern
-unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long arm_arch_scale_freq_capacity(int cpu);
DECLARE_PER_CPU(atomic_long_t, cpu_freq_capacity);
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index ade0419ee97d..ccb749e8b211 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -221,7 +221,7 @@ out:
* compensates for frequency scaling (arch_scale_freq_capacity()). The scaling
* factor is updated in smp.c
*/
-unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arm_arch_scale_freq_capacity(int cpu)
{
unsigned long curr = atomic_long_read(&per_cpu(cpu_freq_capacity, cpu));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0058830c8f0b..928cbb03213c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2327,7 +2327,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
u64 delta, scaled_delta, periods;
u32 runnable_contrib, scaled_runnable_contrib;
int delta_w, scaled_delta_w, decayed = 0;
- unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
+ unsigned long scale_freq = arch_scale_freq_capacity(cpu);
unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
trace_sched_contrib_scale_f(cpu, scale_freq, scale_cpu);
@@ -4238,6 +4238,17 @@ unsigned long capacity_orig_of(int cpu)
return cpu_rq(cpu)->cpu_capacity_orig;
}
+/*
+ * Returns the current capacity of cpu after applying both
+ * cpu and freq scaling.
+ */
+unsigned long capacity_curr_of(int cpu)
+{
+ return cpu_rq(cpu)->cpu_capacity_orig *
+ arch_scale_freq_capacity(cpu)
+ >> SCHED_CAPACITY_SHIFT;
+}
+
#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
@@ -4452,17 +4463,6 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
#endif
/*
- * Returns the current capacity of cpu after applying both
- * cpu and freq scaling.
- */
-unsigned long capacity_curr_of(int cpu)
-{
- return cpu_rq(cpu)->cpu_capacity_orig *
- arch_scale_freq_capacity(NULL, cpu)
- >> SCHED_CAPACITY_SHIFT;
-}
-
-/*
* get_cpu_usage returns the amount of capacity of a CPU that is used by CFS
* tasks. The unit of the return value must be the one of capacity so we can
* compare the usage with the capacity of the CPU that is available for CFS
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0d43a7255eb7..216d5a897491 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1382,17 +1382,17 @@ static inline int hrtick_enabled(struct rq *rq)
#endif /* CONFIG_SCHED_HRTICK */
-#ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
#ifndef arch_scale_freq_capacity
static __always_inline
-unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_freq_capacity(int cpu)
{
return SCHED_CAPACITY_SCALE;
}
#endif
+#ifdef CONFIG_SMP
+extern void sched_avg_update(struct rq *rq);
+
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1408,7 +1408,7 @@ unsigned long capacity_orig_of(int cpu);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
- rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
+ rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
sched_avg_update(rq);
}
#else