diff options
author | Vincent Guittot <vincent.guittot@linaro.org> | 2012-04-19 15:15:05 +0200 |
---|---|---|
committer | Viresh Kumar <viresh.kumar@linaro.org> | 2012-07-10 12:21:04 +0100 |
commit | 2f1229dfedff6b5894ef8bec7d735225d476d99c (patch) | |
tree | e652df93bdd98264cacdd9d9a5472b332ac4d726 | |
parent | 92dd7cba0cbcd4dcebb847beaba4b45051ee3f5d (diff) |
sched: use u32 instead of u64 for load avg tracking
The max value of runnable_avg_sum and runnable_avg_period is 47788
-rw-r--r-- | kernel/sched/fair.c | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3710fc09a621..f0098be8473e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1059,7 +1059,7 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
 static inline void __update_task_entity_contrib(struct sched_entity *se)
 {
-	se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
+	se->avg.load_avg_contrib = div_u64(se->avg.runnable_avg_sum *
 					se->load.weight,
 					se->avg.runnable_avg_period + 1);
 }
@@ -1075,10 +1075,10 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 {
 	long contrib, usage_contrib;
 
-	contrib = div64_u64((sa->runnable_avg_sum << 12),
+	contrib = div_u64((sa->runnable_avg_sum << 12),
 			(sa->runnable_avg_period + 1));
 	contrib -= cfs_rq->tg_runnable_contrib;
 
-	usage_contrib = div64_u64((sa->usage_avg_sum << 12),
+	usage_contrib = div_u64((sa->usage_avg_sum << 12),
 			(sa->runnable_avg_period + 1));
 	usage_contrib -= cfs_rq->tg_usage_contrib;
 
 	if ((abs(contrib) > cfs_rq->tg_runnable_contrib/64) ||