author     Vincent Guittot <vincent.guittot@linaro.org>   2018-07-18 21:47:02 +0200
committer  Vincent Guittot <vincent.guittot@linaro.org>   2018-07-18 21:47:02 +0200
commit     3432afd041288048a3af2c0a2c08dfba53899a62 (patch)
tree       0cb2d4686971ca365d44cd030f70a48f262bb6cf
parent     af0fffd9300b97d8875aa745bc78e2f6fdb3c1f0 (diff)
sched/fair: remove #ifdefs from scale_rt_capacity (sched-fix-irq-warning)
Reuse cpu_util_irq(), which has been defined for schedutil, and set the irq util to 0 when !CONFIG_IRQ_TIME_ACCOUNTING. But the compiler is not able to optimize the sequence (at least with aarch64 GCC 7.2.1):

	free *= (max - irq);
	free /= max;

when irq is fixed to 0.

Add a new inline function scale_irq_capacity() that scales the utilization when irq time is accounted. Reuse this function in schedutil, which applies a similar formula.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
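As an aside, a minimal standalone sketch of the two compile-time variants of scale_irq_capacity() described above (simplified: the real header hunk below also covers CONFIG_PARAVIRT_TIME_ACCOUNTING and sits behind the SMP guard). It illustrates the point of the patch: the disabled variant reduces to a plain return and costs nothing, whereas the open-coded multiply/divide is kept by the compiler even when irq is known to be 0.

	#if defined(CONFIG_IRQ_TIME_ACCOUNTING)
	static inline
	unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
	{
		/* Scale 'util' by the fraction of capacity not consumed by IRQ time. */
		util *= (max - irq);
		util /= max;

		return util;
	}
	#else
	static inline
	unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
	{
		/* No IRQ time accounting: nothing to scale, this compiles away. */
		return util;
	}
	#endif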
-rw-r--r--  kernel/sched/cpufreq_schedutil.c |  3
-rw-r--r--  kernel/sched/fair.c              | 13
-rw-r--r--  kernel/sched/sched.h             | 20
3 files changed, 22 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 97dcd4472a0e..3fffad3bc8a8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -247,8 +247,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
* U' = irq + ------- * U
* max
*/
- util *= (max - irq);
- util /= max;
+ util = scale_irq_capacity(util, irq, max);
util += irq;
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d5f7d521e448..14c3fddf822a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7551,16 +7551,12 @@ static unsigned long scale_rt_capacity(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
unsigned long used, free;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
unsigned long irq;
-#endif
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
- irq = READ_ONCE(rq->avg_irq.util_avg);
+ irq = cpu_util_irq(rq);
if (unlikely(irq >= max))
return 1;
-#endif
used = READ_ONCE(rq->avg_rt.util_avg);
used += READ_ONCE(rq->avg_dl.util_avg);
@@ -7569,11 +7565,8 @@ static unsigned long scale_rt_capacity(int cpu)
return 1;
free = max - used;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
- free *= (max - irq);
- free /= max;
-#endif
- return free;
+
+ return scale_irq_capacity(free, irq, max);
}
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
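For reference, a sketch of how scale_rt_capacity() reads after this patch, reconstructed from the hunks above; the one line elided between the two hunks is filled in from the surrounding code and marked as an assumption:

	static unsigned long scale_rt_capacity(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
		unsigned long used, free;
		unsigned long irq;

		irq = cpu_util_irq(rq);

		/* IRQ time already eats the whole CPU: report minimal capacity. */
		if (unlikely(irq >= max))
			return 1;

		used = READ_ONCE(rq->avg_rt.util_avg);
		used += READ_ONCE(rq->avg_dl.util_avg);

		if (unlikely(used >= max))	/* assumed: elided by the diff context */
			return 1;

		free = max - used;

		return scale_irq_capacity(free, irq, max);
	}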
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ebb4b3c3ece7..b80c3fd9048a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2210,17 +2210,33 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
{
return READ_ONCE(rq->avg_rt.util_avg);
}
+#endif
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#if defined(CONFIG_SMP) \
+ && (defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING))
static inline unsigned long cpu_util_irq(struct rq *rq)
{
return rq->avg_irq.util_avg;
}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+ util *= (max - irq);
+ util /= max;
+
+ return util;
+}
#else
static inline unsigned long cpu_util_irq(struct rq *rq)
{
return 0;
}
-#endif
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+ return util;
+}
#endif
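To make the scaling concrete, a worked example of the formula quoted in sugov_get_util() above, with purely illustrative numbers (max = 1024, irq = 128, util = 512):

	scale_irq_capacity(512, 128, 1024) = 512 * (1024 - 128) / 1024 = 448
	util += irq                        -> 448 + 128 = 576

i.e. U' = irq + ((max - irq) / max) * U. With !CONFIG_IRQ_TIME_ACCOUNTING, cpu_util_irq() returns 0 and scale_irq_capacity() returns util unchanged, so U' == U and the scaling adds no cost.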