summaryrefslogtreecommitdiff
path: root/kernel/sched/deadline.c
diff options
context:
space:
mode:
authorJuri Lelli <juri.lelli@gmail.com>2013-11-07 14:43:47 +0100
committerIngo Molnar <mingo@kernel.org>2014-01-13 13:46:46 +0100
commit6bfd6d72f51c51177676f2b1ba113fe0a85fdae4 (patch)
tree8c3c4c49f18ba3218da4274623b50da0a317f2d6 /kernel/sched/deadline.c
parent332ac17ef5bfcff4766dfdfd3b4cdf10b8f8f155 (diff)
sched/deadline: speed up SCHED_DEADLINE pushes with a push-heap
Data from tests confirmed that the original active load balancing logic didn't scale neither in the number of CPU nor in the number of tasks (as sched_rt does). Here we provide a global data structure to keep track of deadlines of the running tasks in the system. The structure is composed by a bitmask showing the free CPUs and a max-heap, needed when the system is heavily loaded. The implementation and concurrent access scheme are kept simple by design. However, our measurements show that we can compete with sched_rt on large multi-CPUs machines [1]. Only the push path is addressed, the extension to use this structure also for pull decisions is straightforward. However, we are currently evaluating different (in order to decrease/avoid contention) data structures to solve possibly both problems. We are also going to re-run tests considering recent changes inside cpupri [2]. [1] http://retis.sssup.it/~jlelli/papers/Ospert11Lelli.pdf [2] http://www.spinics.net/lists/linux-rt-users/msg06778.html Signed-off-by: Juri Lelli <juri.lelli@gmail.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1383831828-15501-14-git-send-email-juri.lelli@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--kernel/sched/deadline.c53
1 file changed, 14 insertions, 39 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 802188fb633..0c6b1d089cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -16,6 +16,8 @@
*/
#include "sched.h"
+#include <linux/slab.h>
+
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -640,6 +642,7 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
*/
dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
dl_rq->earliest_dl.curr = deadline;
+ cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
} else if (dl_rq->earliest_dl.next == 0 ||
dl_time_before(deadline, dl_rq->earliest_dl.next)) {
/*
@@ -663,6 +666,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
if (!dl_rq->dl_nr_running) {
dl_rq->earliest_dl.curr = 0;
dl_rq->earliest_dl.next = 0;
+ cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
} else {
struct rb_node *leftmost = dl_rq->rb_leftmost;
struct sched_dl_entity *entry;
@@ -670,6 +674,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
dl_rq->earliest_dl.curr = entry->deadline;
dl_rq->earliest_dl.next = next_deadline(rq);
+ cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
}
}
@@ -855,9 +860,6 @@ static void yield_task_dl(struct rq *rq)
#ifdef CONFIG_SMP
static int find_later_rq(struct task_struct *task);
-static int latest_cpu_find(struct cpumask *span,
- struct task_struct *task,
- struct cpumask *later_mask);
static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
@@ -904,7 +906,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
- latest_cpu_find(rq->rd->span, rq->curr, NULL) == -1)
+ cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
return;
/*
@@ -912,7 +914,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
- latest_cpu_find(rq->rd->span, p, NULL) != -1)
+ cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
return;
resched_task(rq->curr);
@@ -1085,39 +1087,6 @@ next_node:
return NULL;
}
-static int latest_cpu_find(struct cpumask *span,
- struct task_struct *task,
- struct cpumask *later_mask)
-{
- const struct sched_dl_entity *dl_se = &task->dl;
- int cpu, found = -1, best = 0;
- u64 max_dl = 0;
-
- for_each_cpu(cpu, span) {
- struct rq *rq = cpu_rq(cpu);
- struct dl_rq *dl_rq = &rq->dl;
-
- if (cpumask_test_cpu(cpu, &task->cpus_allowed) &&
- (!dl_rq->dl_nr_running || dl_time_before(dl_se->deadline,
- dl_rq->earliest_dl.curr))) {
- if (later_mask)
- cpumask_set_cpu(cpu, later_mask);
- if (!best && !dl_rq->dl_nr_running) {
- best = 1;
- found = cpu;
- } else if (!best &&
- dl_time_before(max_dl,
- dl_rq->earliest_dl.curr)) {
- max_dl = dl_rq->earliest_dl.curr;
- found = cpu;
- }
- } else if (later_mask)
- cpumask_clear_cpu(cpu, later_mask);
- }
-
- return found;
-}
-
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
@@ -1134,7 +1103,8 @@ static int find_later_rq(struct task_struct *task)
if (task->nr_cpus_allowed == 1)
return -1;
- best_cpu = latest_cpu_find(task_rq(task)->rd->span, task, later_mask);
+ best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
+ task, later_mask);
if (best_cpu == -1)
return -1;
@@ -1510,6 +1480,9 @@ static void rq_online_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_set_overload(rq);
+
+ if (rq->dl.dl_nr_running > 0)
+ cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}
/* Assumes rq->lock is held */
@@ -1517,6 +1490,8 @@ static void rq_offline_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_clear_overload(rq);
+
+ cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}
void init_sched_dl_class(void)