author		Peng Wang <rocking@linux.alibaba.com>	2020-06-16 14:04:07 +0800
committer	Peter Zijlstra <peterz@infradead.org>	2020-06-25 13:45:44 +0200
commit		423d02e1463b21109106f52d94f7396b63731f3b (patch)
tree		1480796f2df9165105ef7ef86ba167d52c7c2864 /kernel/sched
parent		aa93cd53bc1b91b5f99c7b55e3dcc1ac98e99558 (diff)
sched/fair: Optimize dequeue_task_fair()
While looking at enqueue_task_fair() and dequeue_task_fair(), it occurred to me that dequeue_task_fair() can also be optimized as Vincent described in commit 7d148be69e3a ("sched/fair: Optimize enqueue_task_fair()").

When we bail out to the dequeue_throttle label because a throttled cfs_rq was encountered, se is guaranteed not to be NULL and rq->nr_running remains unchanged, so the throttled path can also skip the early balance check.

Signed-off-by: Peng Wang <rocking@linux.alibaba.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/701eef9a40de93dcf5fe7063fd607bca5db38e05.1592287263.git.rocking@linux.alibaba.com
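For context, se cannot be NULL at the dequeue_throttle label because of how that label is reached. Below is an abridged sketch of dequeue_task_fair() around this series (bookkeeping and PELT updates elided with "..."; not a verbatim copy of kernel/sched/fair.c): both per-entity loops only jump to the label when a throttled cfs_rq is found, at which point se still refers to a valid sched_entity; se only becomes NULL when the loops run to completion and execution falls through to the root-level accounting.

static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_entity *se = &p->se;
	...

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		dequeue_entity(cfs_rq, se, flags);
		...
		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
			goto dequeue_throttle;	/* se is non-NULL here */
		...
	}

	for_each_sched_entity(se) {
		...
		if (cfs_rq_throttled(cfs_rq_of(se)))
			goto dequeue_throttle;	/* se is non-NULL here */
	}

	/*
	 * Only reached when both loops complete, i.e. se == NULL and the
	 * task really left the runqueue, so rq->nr_running must drop.
	 */
	sub_nr_running(rq, 1);
	...
}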
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a63f400013de..b9b9f19e80c1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5624,14 +5624,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	}
 
-dequeue_throttle:
-	if (!se)
-		sub_nr_running(rq, 1);
+	/* At this point se is NULL and we are at root level*/
+	sub_nr_running(rq, 1);
 
 	/* balance early to pull high priority tasks */
 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
 		rq->next_balance = jiffies;
 
+dequeue_throttle:
 	util_est_dequeue(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
 }