Diffstat (limited to 'kernel')
 kernel/sched.c      | 11 +++++++++++
 kernel/sched_fair.c |  5 +++--
 2 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f592ce6f861..a8845516ace 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5011,6 +5011,17 @@ recheck:
 		return -EINVAL;
 	}
 
+	/*
+	 * If not changing anything there's no need to proceed further:
+	 */
+	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
+			param->sched_priority == p->rt_priority))) {
+
+		__task_rq_unlock(rq);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		return 0;
+	}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (user) {
 		/*
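
The sched.c hunk above adds an early bail-out: when the requested policy equals the task's current policy and, for a real-time policy, the requested priority equals the task's current rt_priority, the call can drop the runqueue lock and p->pi_lock and return 0 right away instead of going through a full dequeue/requeue cycle. The following is a standalone userspace sketch of just that fast-path condition, using simplified stand-in types (fake_task, fake_param) rather than the kernel's task_struct and sched_param handling.

/* Userspace sketch of the fast-path test; types are simplified stand-ins. */
#include <stdbool.h>

#define SCHED_NORMAL	0
#define SCHED_FIFO	1
#define SCHED_RR	2

struct fake_task { int policy; int rt_priority; };
struct fake_param { int sched_priority; };

static bool rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

/* True when a sched_setscheduler() request would change nothing. */
static bool setscheduler_is_noop(const struct fake_task *p, int policy,
				 const struct fake_param *param)
{
	return policy == p->policy &&
	       (!rt_policy(policy) ||
		param->sched_priority == p->rt_priority);
}

For a non-RT policy the priority comparison is skipped, which is why the rt_priority check only applies when the requested policy is SCHED_FIFO or SCHED_RR.
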
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3f7ec9e27ee..c7ec5c8e7b4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,6 +22,7 @@
 
 #include <linux/latencytop.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -3850,8 +3851,8 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		interval = msecs_to_jiffies(interval);
 		if (unlikely(!interval))
 			interval = 1;
-		if (interval > HZ*NR_CPUS/10)
-			interval = HZ*NR_CPUS/10;
+		if (interval > HZ*num_online_cpus()/10)
+			interval = HZ*num_online_cpus()/10;
 
 		need_serialize = sd->flags & SD_SERIALIZE;
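
The sched_fair.c change clamps the per-domain rebalance interval against the number of CPUs actually online, num_online_cpus() (hence the added <linux/cpumask.h> include), instead of the compile-time maximum NR_CPUS, so a kernel built with a large NR_CPUS but booted on a small machine no longer gets an oversized cap. A rough userspace sketch of the clamp with made-up numbers (HZ and the CPU counts are hard-coded for illustration only):

/* Illustration of the interval clamp; not the kernel code itself. */
#include <stdio.h>

#define HZ 1000			/* assume a 1000 Hz tick for the example */

static unsigned long clamp_interval(unsigned long interval,
				    unsigned int online_cpus)
{
	if (!interval)
		interval = 1;
	if (interval > HZ * online_cpus / 10)
		interval = HZ * online_cpus / 10;
	return interval;
}

int main(void)
{
	/* With 4096 possible CPUs the old cap was 409600 jiffies, so a
	 * 100000-jiffy interval passed through; with 8 online CPUs the
	 * new cap is 800 jiffies. */
	printf("%lu\n", clamp_interval(100000, 4096));	/* prints 100000 */
	printf("%lu\n", clamp_interval(100000, 8));	/* prints 800 */
	return 0;
}
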