author		Chris Redpath <Chris.Redpath@arm.com>	2013-11-22 13:19:18 +0000
committer	Jon Medhurst <tixy@linaro.org>	2013-11-22 14:15:38 +0000
commit		9a0758156e5f7f2f609617eb342e476378ef63f2 (patch)
tree		116cc3ab60f540b86c5d58fdd11faa1dc63729d5
parent		f5be72980bc321f3491377861835c343cc27af0d (diff)

sched: hmp: Fix build breakage when not using CONFIG_SCHED_HMP (tags: big-LITTLE-MP-13.12, big-LITTLE-MP-13.11)

hmp_variable_scale_convert was used without guards in
__update_entity_runnable_avg. Guard it.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
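For context, a minimal standalone sketch of the failure mode and the fix follows; the names mirror fair.c but the example is illustrative, not the kernel source. When CONFIG_SCHED_HMP is not set, hmp_variable_scale_convert() is never compiled in, so an unguarded call to it breaks the build; wrapping the call site in the same #ifdef restores the !CONFIG_SCHED_HMP build.

/* Illustrative sketch only, not the actual kernel code. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#ifdef CONFIG_SCHED_HMP
#define HMP_VARIABLE_SCALE_SHIFT 16ULL
/* hmp_data_mult is a hypothetical stand-in for the HMP scaling tunable. */
static u64 hmp_data_mult = 1ULL << HMP_VARIABLE_SCALE_SHIFT;

/* Only compiled when CONFIG_SCHED_HMP is set, as in fair.c. */
static u64 hmp_variable_scale_convert(u64 delta)
{
	return (delta * hmp_data_mult) >> HMP_VARIABLE_SCALE_SHIFT;
}
#endif /* CONFIG_SCHED_HMP */

static int update_runnable_avg(u64 now, u64 last_update)
{
	u64 delta = now - last_update;
#ifdef CONFIG_SCHED_HMP
	/* Without this guard, builds with CONFIG_SCHED_HMP unset fail,
	 * because hmp_variable_scale_convert() is not defined there. */
	delta = hmp_variable_scale_convert(delta);
#endif
	return delta > 1024;
}

int main(void)
{
	/* Builds and runs whether or not CONFIG_SCHED_HMP is defined,
	 * since the conversion call is compiled out when it is unset. */
	printf("%d\n", update_runnable_avg(4096, 1024));
	return 0;
}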
-rw-r--r--	kernel/sched/fair.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7d808ee0a3..8a4a02740f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1210,6 +1210,7 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+#ifdef CONFIG_SCHED_HMP
 #define HMP_VARIABLE_SCALE_SHIFT 16ULL
 struct hmp_global_attr {
 	struct attribute attr;
@@ -1291,6 +1292,7 @@ struct cpufreq_extents {
 static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];
 #endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
 
+#endif /* CONFIG_SCHED_HMP */
 
 /* We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series. To do this we sub-divide our runnable
@@ -1336,8 +1338,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 #endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
 
 	delta = now - sa->last_runnable_update;
-
+#ifdef CONFIG_SCHED_HMP
 	delta = hmp_variable_scale_convert(delta);
+#endif
 	/*
 	 * This should only happen when time goes backwards, which it
 	 * unfortunately does during sched clock init when we swap over to TSC.
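A common alternative to guarding the call site (hypothetical here, not what this patch does) is to provide a pass-through stub when CONFIG_SCHED_HMP is unset, so callers stay unconditional. A minimal sketch, assuming the real conversion is defined elsewhere under the #ifdef branch:

#ifdef CONFIG_SCHED_HMP
/* Real implementation, compiled only for HMP kernels. */
static u64 hmp_variable_scale_convert(u64 delta);
#else
/* !CONFIG_SCHED_HMP: pass delta through unchanged so callers
 * need no #ifdef of their own. */
static inline u64 hmp_variable_scale_convert(u64 delta)
{
	return delta;
}
#endif /* CONFIG_SCHED_HMP */

The patch above takes the simpler route of guarding the single call site, which keeps the change minimal.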