author    Vincent Guittot <vincent.guittot@linaro.org>  2013-05-30 17:44:50 +0200
committer Vincent Guittot <vincent.guittot@linaro.org>  2013-10-17 22:10:57 +0200
commit    5a85c4fa523a6bd8d3bb4d288272aec2dcb75429 (patch)
tree      7e27abf6fc6aeded591821715cbc0d9b6e4f0030
parent    5484f39c8e5aba8fdb40d0367fa0d4abbf8b868f (diff)
sched: add a packing level knob
The knob is used to set an average load threshold that will trigger the
inclusion/removal of CPUs in the packing effort list.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
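For a concrete feel for the arithmetic this patch introduces: the level is a
percentage, and the threshold lives on the scheduler's 1024-based fixed-point
load scale, so the default level of 80 maps to (100 * 1024) / 80 = 1280. A
minimal standalone sketch of that conversion (illustrative only, not part of
the patch):

/* Mirrors the conversion done in kernel/sched/fair.c below:
 * sd_pack_threshold = (100 * 1024) / sysctl_sched_packing_level
 */
#include <stdio.h>

#define DEFAULT_PACKING_LEVEL	80

int main(void)
{
	unsigned int threshold = (100 * 1024) / DEFAULT_PACKING_LEVEL;

	/* 102400 / 80 = 1280, i.e. 125% of the 1024 load unit */
	printf("level %d%% -> threshold %u\n",
	       DEFAULT_PACKING_LEVEL, threshold);
	return 0;
}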
-rw-r--r--  include/linux/sched/sysctl.h |  9 +++++++++
-rw-r--r--  kernel/sched/fair.c          | 26 ++++++++++++++++++++++++++
-rw-r--r--  kernel/sysctl.c              | 17 +++++++++++++++++
3 files changed, 52 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index bf8086b2506e..f41afa52d2df 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -44,6 +44,14 @@ enum sched_tunable_scaling {
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+#ifdef CONFIG_SCHED_PACKING_TASKS
+extern int __read_mostly sysctl_sched_packing_level;
+
+int sched_proc_update_packing(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+#endif
+
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
@@ -61,6 +69,7 @@ extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);
+
#endif
#ifdef CONFIG_SCHED_DEBUG
static inline unsigned int get_sysctl_timer_migration(void)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7149f38f37fc..556898049ec4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -186,6 +186,32 @@ void sched_init_granularity(void)
*/
DEFINE_PER_CPU(int, sd_pack_buddy);
+/*
+ * The packing level of the scheduler
+ *
+ * This level defines the activity % above which we should add another CPU
+ * to participate in the packing effort of the tasks.
+ */
+#define DEFAULT_PACKING_LEVEL 80
+int __read_mostly sysctl_sched_packing_level = DEFAULT_PACKING_LEVEL;
+
+unsigned int sd_pack_threshold = (100 * 1024) / DEFAULT_PACKING_LEVEL;
+
+int sched_proc_update_packing(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		return ret;
+
+	/* A level of 0 would divide by zero; keep the previous threshold. */
+	if (sysctl_sched_packing_level)
+		sd_pack_threshold = (100 * 1024) / sysctl_sched_packing_level;
+
+	return 0;
+}
+
static inline bool is_packing_cpu(int cpu)
{
	int my_buddy = per_cpu(sd_pack_buddy, cpu);
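The patch only defines sd_pack_threshold; the code that consumes it is not
part of this diff. As a hypothetical illustration (the helper name and both
parameters are invented for this sketch), the scaling makes a division-free
test possible:

/*
 * Hypothetical consumer of sd_pack_threshold -- not part of this patch;
 * above_packing_level(), usage and capacity are illustrative names.
 *
 * With sd_pack_threshold = (100 * 1024) / level, the product test below
 * is true exactly when usage exceeds level% of capacity (up to integer
 * rounding of the threshold), with no division on the fast path.
 */
static bool above_packing_level(unsigned long usage, unsigned long capacity)
{
	return usage * sd_pack_threshold > capacity * 1024;
}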
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b2f06f3c6a3f..77383fc5032a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -255,11 +255,17 @@ static struct ctl_table sysctl_base_table[] = {
	{ }
};
+#ifdef CONFIG_SCHED_PACKING_TASKS
+static int min_sched_packing_level;
+static int max_sched_packing_level = 100;
+#endif /* CONFIG_SCHED_PACKING_TASKS */
+
#ifdef CONFIG_SCHED_DEBUG
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+
#ifdef CONFIG_SMP
static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
@@ -279,6 +285,17 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
+#ifdef CONFIG_SCHED_PACKING_TASKS
+	{
+		.procname	= "sched_packing_level",
+		.data		= &sysctl_sched_packing_level,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= sched_proc_update_packing,
+		.extra1		= &min_sched_packing_level,
+		.extra2		= &max_sched_packing_level,
+	},
+#endif
+#endif
#ifdef CONFIG_SCHED_DEBUG
	{
		.procname	= "sched_min_granularity_ns",
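
With CONFIG_SCHED_PACKING_TASKS enabled, the table entry above exposes the
knob as /proc/sys/kernel/sched_packing_level, writable by root (mode 0644)
and clamped to 0..100 by extra1/extra2. A minimal userspace sketch of
querying and setting it; nothing here goes beyond ordinary file I/O on the
path the table entry implies:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_packing_level", "r+");
	int level;

	if (!f) {
		perror("sched_packing_level");	/* needs root to write */
		return 1;
	}
	if (fscanf(f, "%d", &level) == 1)
		printf("current packing level: %d%%\n", level);

	rewind(f);
	fprintf(f, "90\n");	/* values outside 0..100 are rejected */
	fclose(f);
	return 0;
}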