author     Dietmar Eggemann <dietmar.eggemann@arm.com>  2015-01-30 12:16:38 +0000
committer  Robin Randhawa <robin.randhawa@arm.com>      2015-04-09 12:26:12 +0100
commit     023c7a435c9675570f13f01343c290466b6f43f6
tree       d0f0b99c23cc28146e17f8203ef79459c478988f
parent     e925532d3eacdd0a174e44fb05de3adb25bdb2f0
sched: Make usage tracking cpu scale-invariant
Besides the existing frequency scale-invariance correction factor, apply a cpu scale-invariance correction factor to usage tracking.

Cpu scale-invariance takes into consideration cpu performance deviations due to micro-architectural differences (i.e. instructions per second) between cpus in HMP systems (e.g. big.LITTLE), as well as differences in the frequency value of the highest OPP between cpus in SMP systems.

Each segment of the sched_avg::running_avg_sum geometric series is now scaled by the cpu performance factor too, so the sched_avg::utilization_avg_contrib of each entity will be invariant from the particular cpu of the HMP/SMP system on which it is gathered. As a result, the usage level returned by get_cpu_usage() stays relative to the max cpu performance of the system.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
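Illustration only (not part of the patch): a minimal user-space sketch of the double scaling described above. scale_segment() is a hypothetical helper; the SCHED_CAPACITY_* constants mirror the kernel definitions.

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/* hypothetical helper: apply both invariance factors to one PELT segment */
static unsigned long scale_segment(unsigned long delta,
				   unsigned long scale_freq,
				   unsigned long scale_cpu)
{
	/* frequency scale-invariance (already applied before this patch) */
	delta = (delta * scale_freq) >> SCHED_CAPACITY_SHIFT;
	/* cpu scale-invariance (what this patch adds, running sum only) */
	delta = (delta * scale_cpu) >> SCHED_CAPACITY_SHIFT;
	return delta;
}

int main(void)
{
	/*
	 * A cpu at half the max capacity of the system (512/1024) running
	 * at ~80% of its own max frequency (819/1024): a 1000us busy
	 * segment contributes ~400us to running_avg_sum, i.e. usage is
	 * expressed relative to the fastest cpu at its highest OPP.
	 */
	printf("%lu\n", scale_segment(1000, 819, 512));
	return 0;
}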
 kernel/sched/fair.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 17f571acdb83..1d4e224594b9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2200,6 +2200,7 @@ static u32 __compute_runnable_contrib(u64 n)
 }
 
 unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu);
 
 /*
  * We can represent the historical contribution to runnable average as the
@@ -2238,6 +2239,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 	u32 runnable_contrib, scaled_runnable_contrib;
 	int delta_w, scaled_delta_w, decayed = 0;
 	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
+	unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
 
 	delta = now - sa->last_runnable_update;
 	/*
@@ -2274,6 +2276,10 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 
 		if (runnable)
 			sa->runnable_avg_sum += scaled_delta_w;
+
+		scaled_delta_w *= scale_cpu;
+		scaled_delta_w >>= SCHED_CAPACITY_SHIFT;
+
 		if (running)
 			sa->running_avg_sum += scaled_delta_w;
 		sa->avg_period += delta_w;
@@ -2298,6 +2304,10 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 
 		if (runnable)
 			sa->runnable_avg_sum += scaled_runnable_contrib;
+
+		scaled_runnable_contrib *= scale_cpu;
+		scaled_runnable_contrib >>= SCHED_CAPACITY_SHIFT;
+
 		if (running)
 			sa->running_avg_sum += scaled_runnable_contrib;
 		sa->avg_period += runnable_contrib;
@@ -2308,6 +2318,10 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
 
 	if (runnable)
 		sa->runnable_avg_sum += scaled_delta;
+
+	scaled_delta *= scale_cpu;
+	scaled_delta >>= SCHED_CAPACITY_SHIFT;
+
 	if (running)
 		sa->running_avg_sum += scaled_delta;
 	sa->avg_period += delta;
@@ -5601,7 +5615,7 @@ unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 
 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
 		return sd->smt_gain / sd->span_weight;
 
 	return SCHED_CAPACITY_SCALE;
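The sd check added in the last hunk matters because the new PELT call site above passes a NULL sched_domain to arch_scale_cpu_capacity(), presumably reaching this default helper through the weak fallback. A stand-alone sketch of the resulting behaviour (illustration only; the struct is reduced to the fields used here, the SD_SHARE_CPUCAPACITY value is made up, and 1178 is just a typical smt_gain value):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)
#define SD_SHARE_CPUCAPACITY	0x0001		/* made-up flag value */

struct sched_domain {				/* reduced to the fields used here */
	int flags;
	unsigned int span_weight;
	unsigned int smt_gain;
};

static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	/* without the sd check, the PELT call site (sd == NULL) would oops */
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}

int main(void)
{
	struct sched_domain smt = { SD_SHARE_CPUCAPACITY, 2, 1178 };

	printf("%lu\n", default_scale_cpu_capacity(NULL, 0));	/* 1024 */
	printf("%lu\n", default_scale_cpu_capacity(&smt, 0));	/* 589 */
	return 0;
}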