about summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-09-14 19:37:39 +0200
committerIngo Molnar <mingo@elte.hu>2009-09-15 16:51:29 +0200
commit0763a660a84220cc3900fd32abdd7ad109e2278d (patch)
tree1b745884fb79627e05c026ee5bfb115b1f722db8 /kernel
parent8e6598af3f35629c37249a610cf13e73f70db279 (diff)
sched: Rename select_task_rq() argument
In order to be able to rename the sync argument, we need to rename the current flag argument.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/sched_fair.c     | 14
-rw-r--r-- kernel/sched_idletask.c |  2
-rw-r--r-- kernel/sched_rt.c       |  4
3 files changed, 10 insertions, 10 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 19593568031..b554e63c521 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1331,7 +1331,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
*
* preempt must be disabled.
*/
-static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
{
struct sched_domain *tmp, *sd = NULL;
int cpu = smp_processor_id();
@@ -1339,7 +1339,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
int new_cpu = cpu;
int want_affine = 0;
- if (flag & SD_BALANCE_WAKE) {
+ if (sd_flag & SD_BALANCE_WAKE) {
if (sched_feat(AFFINE_WAKEUPS))
want_affine = 1;
new_cpu = prev_cpu;
@@ -1368,7 +1368,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
break;
}
- switch (flag) {
+ switch (sd_flag) {
case SD_BALANCE_WAKE:
if (!sched_feat(LB_WAKEUP_UPDATE))
break;
@@ -1392,7 +1392,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
want_affine = 0;
}
- if (!(tmp->flags & flag))
+ if (!(tmp->flags & sd_flag))
continue;
sd = tmp;
@@ -1402,12 +1402,12 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
struct sched_group *group;
int weight;
- if (!(sd->flags & flag)) {
+ if (!(sd->flags & sd_flag)) {
sd = sd->child;
continue;
}
- group = find_idlest_group(sd, p, cpu, flag);
+ group = find_idlest_group(sd, p, cpu, sd_flag);
if (!group) {
sd = sd->child;
continue;
@@ -1427,7 +1427,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
for_each_domain(cpu, tmp) {
if (weight <= cpumask_weight(sched_domain_span(tmp)))
break;
- if (tmp->flags & flag)
+ if (tmp->flags & sd_flag)
sd = tmp;
}
/* while loop will break here if sd == NULL */
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 99b2f033760..9ff7697e5dc 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,7 @@
*/
#ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int flag, int sync)
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
{
return task_cpu(p); /* IDLE tasks as never migrated */
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 438380810ac..97c53f3f51a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -938,11 +938,11 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
-static int select_task_rq_rt(struct task_struct *p, int flag, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int sync)
{
struct rq *rq = task_rq(p);
- if (flag != SD_BALANCE_WAKE)
+ if (sd_flag != SD_BALANCE_WAKE)
return smp_processor_id();
/*