author      Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:04 +0200
committer   Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:04 +0200
commit      bf5c91ba8c629b84413c761f529627195fd0a935 (patch)
tree        8a795d6861ba4e13b90d2597a7306e1adb30bee8 /kernel
parent      e9acbff6484df51fd880e0f5fe0224e8be34c17b (diff)
sched: move sched_feat() definitions
move sched_feat() definitions so that it can be used sooner by generic code too.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
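As a rough illustration of what "used sooner by generic code" buys: with the definitions living in kernel/sched.c, core scheduler code can now test a feature bit by name. The call site below is hypothetical and not part of this patch; only the sched_feat() macro and the START_DEBIT bit come from the change:

        if (sched_feat(START_DEBIT)) {
                /* expands to: sysctl_sched_features & SCHED_FEAT_START_DEBIT */
                /* ... apply the start-debit placement rule here ... */
        }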
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/sched.c        | 31
-rw-r--r--    kernel/sched_fair.c   | 31
2 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8f80ebafacc..a5dd03522e3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -382,6 +382,37 @@ static void update_rq_clock(struct rq *rq)
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
/*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# define const_debug __read_mostly
+#else
+# define const_debug static const
+#endif
+
+/*
+ * Debugging: various feature bits
+ */
+enum {
+ SCHED_FEAT_FAIR_SLEEPERS = 1,
+ SCHED_FEAT_NEW_FAIR_SLEEPERS = 2,
+ SCHED_FEAT_SLEEPER_AVG = 4,
+ SCHED_FEAT_SLEEPER_LOAD_AVG = 8,
+ SCHED_FEAT_START_DEBIT = 16,
+ SCHED_FEAT_SKIP_INITIAL = 32,
+};
+
+const_debug unsigned int sysctl_sched_features =
+ SCHED_FEAT_FAIR_SLEEPERS *0 |
+ SCHED_FEAT_NEW_FAIR_SLEEPERS *1 |
+ SCHED_FEAT_SLEEPER_AVG *0 |
+ SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
+ SCHED_FEAT_START_DEBIT *1 |
+ SCHED_FEAT_SKIP_INITIAL *0;
+
+#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
+
+/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
* clock constructed from sched_clock():
*/
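To see what the block added above actually computes, here is a small stand-alone user-space re-creation of the same pattern; the enum values, the default mask, and the sched_feat() macro are copied from the hunk, while main(), the printouts, and the empty const_debug stub are purely illustrative (CONFIG_SCHED_DEBUG and __read_mostly do not exist outside the kernel tree):

#include <stdio.h>

/* Stand-in for the kernel's const_debug: with CONFIG_SCHED_DEBUG the
 * variable stays writable (__read_mostly); without it, the value is a
 * compile-time constant.  Here it is simply left empty. */
#define const_debug

/* Each feature is a distinct power-of-two bit. */
enum {
        SCHED_FEAT_FAIR_SLEEPERS        = 1,
        SCHED_FEAT_NEW_FAIR_SLEEPERS    = 2,
        SCHED_FEAT_SLEEPER_AVG          = 4,
        SCHED_FEAT_SLEEPER_LOAD_AVG     = 8,
        SCHED_FEAT_START_DEBIT          = 16,
        SCHED_FEAT_SKIP_INITIAL         = 32,
};

/* Multiplying each bit by 0 or 1 builds the default mask while keeping
 * every feature visible in the source, even the disabled ones. */
const_debug unsigned int sysctl_sched_features =
        SCHED_FEAT_FAIR_SLEEPERS        *0 |
        SCHED_FEAT_NEW_FAIR_SLEEPERS    *1 |
        SCHED_FEAT_SLEEPER_AVG          *0 |
        SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
        SCHED_FEAT_START_DEBIT          *1 |
        SCHED_FEAT_SKIP_INITIAL         *0;

#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
        /* 2 + 8 + 16 = 26: NEW_FAIR_SLEEPERS, SLEEPER_LOAD_AVG, START_DEBIT */
        printf("default mask: %u\n", sysctl_sched_features);
        printf("NEW_FAIR_SLEEPERS: %s\n",
               sched_feat(NEW_FAIR_SLEEPERS) ? "on" : "off");
        printf("SKIP_INITIAL:      %s\n",
               sched_feat(SKIP_INITIAL) ? "on" : "off");
        return 0;
}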
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a2af09cb6a7..a566a455816 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -21,15 +21,6 @@
*/
/*
- * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
- */
-#ifdef CONFIG_SCHED_DEBUG
-# define const_debug __read_mostly
-#else
-# define const_debug static const
-#endif
-
-/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 20ms, units: nanoseconds)
*
@@ -87,28 +78,6 @@ const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int sysctl_sched_runtime_limit __read_mostly;
-/*
- * Debugging: various feature bits
- */
-enum {
- SCHED_FEAT_FAIR_SLEEPERS = 1,
- SCHED_FEAT_NEW_FAIR_SLEEPERS = 2,
- SCHED_FEAT_SLEEPER_AVG = 4,
- SCHED_FEAT_SLEEPER_LOAD_AVG = 8,
- SCHED_FEAT_START_DEBIT = 16,
- SCHED_FEAT_SKIP_INITIAL = 32,
-};
-
-const_debug unsigned int sysctl_sched_features =
- SCHED_FEAT_FAIR_SLEEPERS *0 |
- SCHED_FEAT_NEW_FAIR_SLEEPERS *1 |
- SCHED_FEAT_SLEEPER_AVG *0 |
- SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
- SCHED_FEAT_START_DEBIT *1 |
- SCHED_FEAT_SKIP_INITIAL *0;
-
-#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
-
extern struct sched_class fair_sched_class;
/**************************************************************