From 6dcf64ce657c9110cf9d666c1052933f6ca1b9cd Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 10 Sep 2012 16:06:30 +0100 Subject: ARM: kernel: provide cluster to logical cpu mask mapping API Some device drivers like PMU require to retrieve the logical cpu mask that corresponds to a given cluster id. This patch provides a hook in the topology code that, given an existing cluster id as input, initializes the corresponding cpumask passed as a pointer, reusing all existing topology information required by sched domains in the kernel. Signed-off-by: Lorenzo Pieralisi --- arch/arm/include/asm/topology.h | 3 +++ arch/arm/kernel/topology.c | 27 +++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 58b8b84adcd..611edefaeaf 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -26,11 +26,14 @@ extern struct cputopo_arm cpu_topology[NR_CPUS]; void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); +int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask); #else static inline void init_cpu_topology(void) { } static inline void store_cpu_topology(unsigned int cpuid) { } +static inline int cluster_to_logical_mask(unsigned int socket_id, + cpumask_t *cluster_mask) { return -EINVAL; } #endif diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 79282ebcd93..4239ab2e87c 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -287,6 +287,33 @@ void store_cpu_topology(unsigned int cpuid) cpu_topology[cpuid].socket_id, mpidr); } +/* + * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster + * @socket_id: cluster HW identifier + * @cluster_mask: the cpumask location to be initialized, modified by the + * function only if return value == 0 + * + * Return: + * + * 0 on success + * -EINVAL if cluster_mask is NULL or there is no record matching socket_id + */ +int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask) +{ + int cpu; + + if (!cluster_mask) + return -EINVAL; + + for_each_online_cpu(cpu) + if (socket_id == topology_physical_package_id(cpu)) { + cpumask_copy(cluster_mask, topology_core_cpumask(cpu)); + return 0; + } + + return -EINVAL; +} + /* * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array -- cgit v1.2.3 From 95345dca40d61d0b2f79086e271271fa37a1efd3 Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Thu, 20 Sep 2012 17:53:42 +0100 Subject: ARM: perf: replace global CPU PMU pointer with per-cpu pointers A single global CPU PMU pointer is not useful in a system with multiple, heterogeneous CPU PMUs as we need to access the relevant PMU depending on the current CPU. This patch replaces the single global CPU PMU pointer with per-cpu pointers and changes the OProfile accessors to refer to the PMU affine to CPU0. Signed-off-by: Sudeep KarkadaNagesha Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event_cpu.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 1f2740e3dbc..280de37056a 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -31,7 +31,7 @@ #include /* Set at runtime when we know what CPU type we are. 
*/ -static struct arm_pmu *cpu_pmu; +static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu); static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); @@ -43,21 +43,22 @@ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); */ const char *perf_pmu_name(void) { - if (!cpu_pmu) + struct arm_pmu *pmu = per_cpu(cpu_pmu, 0); + if (!pmu) return NULL; - return cpu_pmu->name; + return pmu->name; } EXPORT_SYMBOL_GPL(perf_pmu_name); int perf_num_counters(void) { - int max_events = 0; + struct arm_pmu *pmu = per_cpu(cpu_pmu, 0); - if (cpu_pmu != NULL) - max_events = cpu_pmu->num_events; + if (!pmu) + return 0; - return max_events; + return pmu->num_events; } EXPORT_SYMBOL_GPL(perf_num_counters); @@ -160,11 +161,13 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) static int __cpuinit cpu_pmu_notify(struct notifier_block *b, unsigned long action, void *hcpu) { + struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu); + if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) return NOTIFY_DONE; - if (cpu_pmu && cpu_pmu->reset) - cpu_pmu->reset(cpu_pmu); + if (pmu && pmu->reset) + pmu->reset(pmu); else return NOTIFY_DONE; @@ -257,11 +260,7 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; struct arm_pmu *pmu; int ret = -ENODEV; - - if (cpu_pmu) { - pr_info("attempt to register multiple PMU devices!"); - return -ENOSPC; - } + int cpu; pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); if (!pmu) { @@ -281,10 +280,12 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) goto out_free; } - cpu_pmu = pmu; - cpu_pmu->plat_device = pdev; - cpu_pmu_init(cpu_pmu); - ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); + for_each_possible_cpu(cpu) + per_cpu(cpu_pmu, cpu) = pmu; + + pmu->plat_device = pdev; + cpu_pmu_init(pmu); + ret = armpmu_register(pmu, PERF_TYPE_RAW); if (!ret) return 0; -- cgit v1.2.3 From 1376bf8c40871e4c3c9084b234b559fcfb4849bb Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Tue, 25 Sep 2012 17:26:50 +0100 Subject: ARM: perf: register CPU PMUs with idr types In order to support multiple, heterogeneous CPU PMUs and distinguish them, they cannot be registered as PERF_TYPE_RAW type. Instead we can get perf core to allocate a new idr type id for each PMU. Userspace applications can refer sysfs entried to find a PMU's type, which can then be used in tracking events on individual PMUs. 
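As an illustration of how the allocated type can be consumed (this is not part of the patch, and the PMU directory name used below is only an assumption), a userspace program could read the type from sysfs and hand it to perf_event_open():

/*
 * Illustration only: resolve a CPU PMU's dynamically allocated type from
 * sysfs and open a raw event on it. PMU name and event number are examples.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_cpu_pmu_event(const char *pmu_name, __u64 config, int cpu)
{
	char path[256];
	unsigned int type;
	FILE *f;
	struct perf_event_attr attr;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", pmu_name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* idr type allocated by the perf core */
	attr.config = config;	/* raw event number */

	/* cpu-specific event: pid == -1, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}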
Signed-off-by: Sudeep KarkadaNagesha --- arch/arm/kernel/perf_event.c | 3 +++ arch/arm/kernel/perf_event_cpu.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 146157dfe27..81e3f852c2d 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c
@@ -81,6 +81,9 @@ armpmu_map_event(struct perf_event *event, return armpmu_map_cache_event(cache_map, config); case PERF_TYPE_RAW: return armpmu_map_raw_event(raw_event_mask, config); + default: + if (event->attr.type >= PERF_TYPE_MAX) + return armpmu_map_raw_event(raw_event_mask, config); } return -ENOENT;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 280de37056a..f62d6cb6524 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c
@@ -285,7 +285,7 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) pmu->plat_device = pdev; cpu_pmu_init(pmu); - ret = armpmu_register(pmu, PERF_TYPE_RAW); + ret = armpmu_register(pmu, -1); if (!ret) return 0;
-- cgit v1.2.3 From 5ca6103da785e90b1e7d7effd1219a2b3b50f2aa Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Tue, 25 Sep 2012 17:26:51 +0100 Subject: ARM: perf: set cpu affinity to support multiple PMUs
In a system with multiple, heterogeneous CPU PMUs, each PMU handles events on a subset of CPUs, probably belonging to the same cluster. This patch introduces a cpumask to track which CPUs each PMU supports. It also updates armpmu_event_init to reject cpu-specific events being initialised for unsupported CPUs. Since process-specific events can be initialised for all the CPU PMUs, armpmu_start/stop/add are modified to prevent events from being added on unsupported CPUs.
Signed-off-by: Sudeep KarkadaNagesha --- Documentation/devicetree/bindings/arm/pmu.txt | 3 +++ arch/arm/include/asm/pmu.h | 1 + arch/arm/kernel/perf_event.c | 16 ++++++++++++ arch/arm/kernel/perf_event_cpu.c | 36 +++++++++++++++++++++------ 4 files changed, 49 insertions(+), 7 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt index 343781b9f24..4ce82d045a6 100644 --- a/Documentation/devicetree/bindings/arm/pmu.txt +++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -16,6 +16,9 @@ Required properties: "arm,arm1176-pmu" "arm,arm1136-pmu" - interrupts : 1 combined interrupt or 1 per core. +- cluster : a phandle to the cluster to which it belongs If there are more than one cluster with same CPU type then there should be separate PMU nodes per cluster. 
Example: diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index f24edad26c7..3713e57a467 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -65,6 +65,7 @@ struct pmu_hw_events { struct arm_pmu { struct pmu pmu; cpumask_t active_irqs; + cpumask_t valid_cpus; char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); void (*enable)(struct perf_event *event); diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 81e3f852c2d..543ffd1af96 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -12,6 +12,7 @@ */ #define pr_fmt(fmt) "hw perfevents: " fmt +#include #include #include #include @@ -161,6 +162,8 @@ armpmu_stop(struct perf_event *event, int flags) struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus)) + return; /* * ARM pmu always has to update the counter, so ignore * PERF_EF_UPDATE, see comments in armpmu_start(). @@ -177,6 +180,8 @@ static void armpmu_start(struct perf_event *event, int flags) struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus)) + return; /* * ARM pmu always has to reprogram the period, so ignore * PERF_EF_RELOAD, see the comment below. @@ -204,6 +209,9 @@ armpmu_del(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus)) + return; + armpmu_stop(event, PERF_EF_UPDATE); hw_events->events[idx] = NULL; clear_bit(idx, hw_events->used_mask); @@ -220,6 +228,10 @@ armpmu_add(struct perf_event *event, int flags) int idx; int err = 0; + /* An event following a process won't be stopped earlier */ + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus)) + return 0; + perf_pmu_disable(event->pmu); /* If we don't have a space for the counter then finish early. */ @@ -416,6 +428,10 @@ static int armpmu_event_init(struct perf_event *event) int err = 0; atomic_t *active_events = &armpmu->active_events; + if (event->cpu != -1 && + !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus)) + return -ENOENT; + /* does not support taken branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index f62d6cb6524..725e225c84b 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -136,7 +136,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) static void cpu_pmu_init(struct arm_pmu *cpu_pmu) { int cpu; - for_each_possible_cpu(cpu) { + for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) { struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); events->events = per_cpu(hw_events, cpu); events->used_mask = per_cpu(used_mask, cpu); @@ -149,7 +149,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) /* Ensure the PMU has sane values out of reset. 
*/ if (cpu_pmu->reset) - on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); + on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1); } /* @@ -249,6 +249,9 @@ static int probe_current_pmu(struct arm_pmu *pmu) } } + /* assume PMU support all the CPUs in this case */ + cpumask_setall(&pmu->valid_cpus); + put_cpu(); return ret; } @@ -256,10 +259,9 @@ static int probe_current_pmu(struct arm_pmu *pmu) static int cpu_pmu_device_probe(struct platform_device *pdev) { const struct of_device_id *of_id; - int (*init_fn)(struct arm_pmu *); struct device_node *node = pdev->dev.of_node; struct arm_pmu *pmu; - int ret = -ENODEV; + int ret = 0; int cpu; pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); @@ -269,8 +271,28 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) } if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { - init_fn = of_id->data; - ret = init_fn(pmu); + smp_call_func_t init_fn = (smp_call_func_t)of_id->data; + struct device_node *ncluster; + int cluster = -1; + cpumask_t sibling_mask; + + ncluster = of_parse_phandle(node, "cluster", 0); + if (ncluster) { + int len; + const u32 *hwid; + hwid = of_get_property(ncluster, "reg", &len); + if (hwid && len == 4) + cluster = be32_to_cpup(hwid); + } + /* set sibling mask to all cpu mask if socket is not specified */ + if (cluster == -1 || + cluster_to_logical_mask(cluster, &sibling_mask)) + cpumask_setall(&sibling_mask); + + smp_call_function_any(&sibling_mask, init_fn, pmu, 1); + + /* now set the valid_cpus after init */ + cpumask_copy(&pmu->valid_cpus, &sibling_mask); } else { ret = probe_current_pmu(pmu); } @@ -280,7 +302,7 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) goto out_free; } - for_each_possible_cpu(cpu) + for_each_cpu_mask(cpu, pmu->valid_cpus) per_cpu(cpu_pmu, cpu) = pmu; pmu->plat_device = pdev; -- cgit v1.2.3 From f33aa240dd0ef9ce2fe8bdc3a6f2ed0416395bf0 Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Tue, 25 Sep 2012 17:30:45 +0100 Subject: ARM: perf: set cpu affinity for the irqs correctly This patch sets the cpu affinity for the perf IRQs in the logical order within the cluster. However interupts are assumed to be specified in the same logical order within the cluster. 
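In other words, the i-th interrupt resource of the PMU platform device is bound to the i-th CPU in the PMU's valid_cpus mask, rather than to logical CPU i as before.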
Signed-off-by: Sudeep KarkadaNagesha --- arch/arm/kernel/perf_event_cpu.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 725e225c84b..4b338b3bdac 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -76,11 +76,13 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) { int i, irq, irqs; struct platform_device *pmu_device = cpu_pmu->plat_device; + int cpu = -1; irqs = min(pmu_device->num_resources, num_possible_cpus()); for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs)) + cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus); + if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); if (irq >= 0) @@ -92,6 +94,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) { int i, err, irq, irqs; struct platform_device *pmu_device = cpu_pmu->plat_device; + int cpu = -1; if (!pmu_device) return -ENODEV; @@ -104,6 +107,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) for (i = 0; i < irqs; ++i) { err = 0; + cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus); irq = platform_get_irq(pmu_device, i); if (irq < 0) continue; @@ -113,7 +117,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) * assume that we're running on a uniprocessor machine and * continue. Otherwise, continue without this interrupt. */ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", irq, i); continue; @@ -127,7 +131,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return err; } - cpumask_set_cpu(i, &cpu_pmu->active_irqs); + cpumask_set_cpu(cpu, &cpu_pmu->active_irqs); } return 0; -- cgit v1.2.3 From 42a5e0bc8f6dec5a5b3382b2d160cec24d49fc70 Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Tue, 25 Sep 2012 17:36:12 +0100 Subject: ARM: perf: remove spaces in CPU PMU names The userspace perf tool provides options to specify PMU names from command line for the event. An example of pmu event syntax would be (//) However the parser in the perf tool breaks the tokens at spacesand fails to identify the PMU name with spaces correctly. This patch removes spaces in the ARMv7 CPU PMU names. 
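For example, after this change an event can be requested from the perf tool as, say, ARMv7_Cortex_A15/config=0x11/ using the pmu_name/config/ syntax (the event number here is purely illustrative).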
Signed-off-by: Sudeep KarkadaNagesha --- arch/arm/kernel/perf_event_v7.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 039cffb053a..16a0455d8cd 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1240,7 +1240,7 @@ static u32 armv7_read_num_pmnc_events(void) static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); - cpu_pmu->name = "ARMv7 Cortex-A8"; + cpu_pmu->name = "ARMv7_Cortex_A8"; cpu_pmu->map_event = armv7_a8_map_event; cpu_pmu->num_events = armv7_read_num_pmnc_events(); return 0; @@ -1249,7 +1249,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); - cpu_pmu->name = "ARMv7 Cortex-A9"; + cpu_pmu->name = "ARMv7_Cortex_A9"; cpu_pmu->map_event = armv7_a9_map_event; cpu_pmu->num_events = armv7_read_num_pmnc_events(); return 0; @@ -1258,7 +1258,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); - cpu_pmu->name = "ARMv7 Cortex-A5"; + cpu_pmu->name = "ARMv7_Cortex_A5"; cpu_pmu->map_event = armv7_a5_map_event; cpu_pmu->num_events = armv7_read_num_pmnc_events(); return 0; @@ -1267,7 +1267,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); - cpu_pmu->name = "ARMv7 Cortex-A15"; + cpu_pmu->name = "ARMv7_Cortex_A15"; cpu_pmu->map_event = armv7_a15_map_event; cpu_pmu->num_events = armv7_read_num_pmnc_events(); cpu_pmu->set_event_filter = armv7pmu_set_event_filter; @@ -1277,7 +1277,7 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); - cpu_pmu->name = "ARMv7 Cortex-A7"; + cpu_pmu->name = "ARMv7_Cortex_A7"; cpu_pmu->map_event = armv7_a7_map_event; cpu_pmu->num_events = armv7_read_num_pmnc_events(); cpu_pmu->set_event_filter = armv7pmu_set_event_filter; -- cgit v1.2.3 From e29faff93713e9aba5077d590b11d035372f56ee Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Tue, 25 Sep 2012 18:40:12 +0100 Subject: ARM: perf: save/restore pmu registers in pm notifier This adds core support for saving and restoring CPU PMU registers for suspend/resume support i.e. deeper C-states in cpuidle terms. This patch adds support only to ARMv7 PMU registers save/restore. It needs to be extended to xscale and ARMv6 if needed. 
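The state saved and restored corresponds to the fields of struct cpupmu_regs introduced below: PMCR, PMCNTENSET, PMUSERENR, PMINTENSET, and the per-counter event type and count registers.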
Signed-off-by: Sudeep KarkadaNagesha --- arch/arm/include/asm/pmu.h | 11 ++++++++++ arch/arm/kernel/perf_event_cpu.c | 36 +++++++++++++++++++++++++++++- arch/arm/kernel/perf_event_v7.c | 47 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 3713e57a467..0cd7824ca76 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -62,6 +62,15 @@ struct pmu_hw_events { raw_spinlock_t pmu_lock; }; +struct cpupmu_regs { + u32 pmc; + u32 pmcntenset; + u32 pmuseren; + u32 pmintenset; + u32 pmxevttype[8]; + u32 pmxevtcnt[8]; +}; + struct arm_pmu { struct pmu pmu; cpumask_t active_irqs; @@ -82,6 +91,8 @@ struct arm_pmu { int (*request_irq)(struct arm_pmu *, irq_handler_t handler); void (*free_irq)(struct arm_pmu *); int (*map_event)(struct perf_event *event); + void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *); + void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *); int num_events; atomic_t active_events; struct mutex reserve_mutex; diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 4b338b3bdac..0b48a38e3cf 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -19,6 +19,7 @@ #define pr_fmt(fmt) "CPU PMU: " fmt #include +#include #include #include #include @@ -37,6 +38,8 @@ static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); +static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs); + /* * Despite the names, these two functions are CPU-specific and are used * by the OProfile/perf code. @@ -178,10 +181,33 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b, return NOTIFY_OK; } +static int cpu_pmu_pm_notify(struct notifier_block *b, + unsigned long action, void *hcpu) +{ + int cpu = smp_processor_id(); + struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu); + struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu); + + if (!pmu) + return NOTIFY_DONE; + + if (action == CPU_PM_ENTER && pmu->save_regs) { + pmu->save_regs(pmu, pmuregs); + } else if (action == CPU_PM_EXIT && pmu->restore_regs) { + pmu->restore_regs(pmu, pmuregs); + } + + return NOTIFY_OK; +} + static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { .notifier_call = cpu_pmu_notify, }; +static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = { + .notifier_call = cpu_pmu_pm_notify, +}; + /* * PMU platform driver and devicetree bindings. 
*/ @@ -340,9 +366,17 @@ static int __init register_pmu_driver(void) if (err) return err; + err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier); + if (err) { + unregister_cpu_notifier(&cpu_pmu_hotplug_notifier); + return err; + } + err = platform_driver_register(&cpu_pmu_driver); - if (err) + if (err) { + cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier); unregister_cpu_notifier(&cpu_pmu_hotplug_notifier); + } return err; } diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 16a0455d8cd..654db5030c3 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -950,6 +950,51 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) } #endif +static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu, + struct cpupmu_regs *regs) +{ + unsigned int cnt; + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc)); + if (!(regs->pmc & ARMV7_PMNC_E)) + return; + + asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset)); + asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren)); + asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset)); + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0])); + for (cnt = ARMV7_IDX_COUNTER0; + cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { + armv7_pmnc_select_counter(cnt); + asm volatile("mrc p15, 0, %0, c9, c13, 1" + : "=r"(regs->pmxevttype[cnt])); + asm volatile("mrc p15, 0, %0, c9, c13, 2" + : "=r"(regs->pmxevtcnt[cnt])); + } + return; +} + +static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu, + struct cpupmu_regs *regs) +{ + unsigned int cnt; + if (!(regs->pmc & ARMV7_PMNC_E)) + return; + + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset)); + asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren)); + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset)); + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0])); + for (cnt = ARMV7_IDX_COUNTER0; + cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { + armv7_pmnc_select_counter(cnt); + asm volatile("mcr p15, 0, %0, c9, c13, 1" + : : "r"(regs->pmxevttype[cnt])); + asm volatile("mcr p15, 0, %0, c9, c13, 2" + : : "r"(regs->pmxevtcnt[cnt])); + } + asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc)); +} + static void armv7pmu_enable_event(struct perf_event *event) { unsigned long flags; @@ -1223,6 +1268,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->start = armv7pmu_start; cpu_pmu->stop = armv7pmu_stop; cpu_pmu->reset = armv7pmu_reset; + cpu_pmu->save_regs = armv7pmu_save_regs; + cpu_pmu->restore_regs = armv7pmu_restore_regs; cpu_pmu->max_period = (1LLU << 32) - 1; }; -- cgit v1.2.3 From c1057acd610e8af30ec9fe78679bcd2800a8f248 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Fri, 21 Sep 2012 13:27:51 -0700 Subject: sched: implement usage tracking With the frame-work for runnable tracking now fully in place. Per-entity usage tracking is a simple and low-overhead addition. 
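In rough terms (the notation here is descriptive rather than taken from the patch), usage_avg_sum accumulates the same geometrically decayed series as runnable_avg_sum, but only over periods in which the entity was actually running rather than merely runnable:

  usage_avg_sum    ~= sum over i of running_i  * y^i
  runnable_avg_sum ~= sum over i of runnable_i * y^i

The group usage contribution is then (usage_avg_sum << NICE_0_SHIFT) / (runnable_avg_period + 1), mirroring how the runnable contribution is computed.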
Signed-off-by: Paul Turner Reviewed-by: Ben Segall --- include/linux/sched.h | 1 + kernel/sched/debug.c | 3 +++ kernel/sched/fair.c | 33 ++++++++++++++++++++++++++++----- kernel/sched/sched.h | 4 ++-- 4 files changed, 34 insertions(+), 7 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index d35d2b6ddbf..1e0939c9901 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1097,6 +1097,7 @@ struct sched_avg { u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; + u32 usage_avg_sum; }; #ifdef CONFIG_SCHEDSTATS diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 75024a67352..fbd8caa83ef 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -94,6 +94,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group #ifdef CONFIG_SMP P(se->avg.runnable_avg_sum); P(se->avg.runnable_avg_period); + P(se->avg.usage_avg_sum); P(se->avg.load_avg_contrib); P(se->avg.decay_count); #endif @@ -223,6 +224,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) cfs_rq->tg_runnable_contrib); SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg", atomic_read(&cfs_rq->tg->runnable_avg)); + SEQ_printf(m, " .%-30s: %d\n", "tg->usage_avg", + atomic_read(&cfs_rq->tg->usage_avg)); #endif print_cfs_group_stats(m, cpu, cfs_rq->tg); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7a33e5986fc..f3b33d88633 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1230,7 +1230,8 @@ static u32 __compute_runnable_contrib(u64 n) */ static __always_inline int __update_entity_runnable_avg(u64 now, struct sched_avg *sa, - int runnable) + int runnable, + int running) { u64 delta, periods; u32 runnable_contrib; @@ -1269,6 +1270,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now, delta_w = 1024 - delta_w; if (runnable) sa->runnable_avg_sum += delta_w; + if (running) + sa->usage_avg_sum += delta_w; sa->runnable_avg_period += delta_w; delta -= delta_w; @@ -1281,17 +1284,22 @@ static __always_inline int __update_entity_runnable_avg(u64 now, periods + 1); sa->runnable_avg_period = decay_load(sa->runnable_avg_period, periods + 1); + sa->usage_avg_sum = decay_load(sa->usage_avg_sum, periods + 1); /* Efficiently calculate \sum (1..n_period) 1024*y^i */ runnable_contrib = __compute_runnable_contrib(periods); if (runnable) sa->runnable_avg_sum += runnable_contrib; + if (running) + sa->usage_avg_sum += runnable_contrib; sa->runnable_avg_period += runnable_contrib; } /* Remainder of delta accrued against u_0` */ if (runnable) sa->runnable_avg_sum += delta; + if (running) + sa->usage_avg_sum += delta; sa->runnable_avg_period += delta; return decayed; @@ -1337,16 +1345,28 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa, struct cfs_rq *cfs_rq) { struct task_group *tg = cfs_rq->tg; - long contrib; + long contrib, usage_contrib; /* The fraction of a cpu used by this cfs_rq */ contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT, sa->runnable_avg_period + 1); contrib -= cfs_rq->tg_runnable_contrib; - if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) { + usage_contrib = div_u64(sa->usage_avg_sum << NICE_0_SHIFT, + sa->runnable_avg_period + 1); + usage_contrib -= cfs_rq->tg_usage_contrib; + + /* + * contrib/usage at this point represent deltas, only update if they + * are substantive. 
+ */ + if ((abs(contrib) > cfs_rq->tg_runnable_contrib / 64) || + (abs(usage_contrib) > cfs_rq->tg_usage_contrib / 64)) { atomic_add(contrib, &tg->runnable_avg); cfs_rq->tg_runnable_contrib += contrib; + + atomic_add(usage_contrib, &tg->usage_avg); + cfs_rq->tg_usage_contrib += usage_contrib; } } @@ -1452,7 +1472,8 @@ static inline void update_entity_load_avg(struct sched_entity *se, else now = cfs_rq_clock_task(group_cfs_rq(se)); - if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq)) + if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq, + cfs_rq->curr == se)) return; contrib_delta = __update_entity_load_avg_contrib(se); @@ -1496,7 +1517,8 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) static inline void update_rq_runnable_avg(struct rq *rq, int runnable) { - __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable); + __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable, + runnable); __update_tg_runnable_avg(&rq->avg, &rq->cfs); } @@ -1864,6 +1886,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) */ update_stats_wait_end(cfs_rq, se); __dequeue_entity(cfs_rq, se); + update_entity_load_avg(se, 1); } update_stats_curr_start(cfs_rq, se); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index cc03cfdf469..64e3aff3dac 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -115,7 +115,7 @@ struct task_group { atomic_t load_weight; atomic64_t load_avg; - atomic_t runnable_avg; + atomic_t runnable_avg, usage_avg; #endif #ifdef CONFIG_RT_GROUP_SCHED @@ -245,7 +245,7 @@ struct cfs_rq { #endif /* CONFIG_FAIR_GROUP_SCHED */ /* These always depend on CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_FAIR_GROUP_SCHED - u32 tg_runnable_contrib; + u32 tg_runnable_contrib, tg_usage_contrib; u64 tg_load_contrib; #endif /* CONFIG_FAIR_GROUP_SCHED */ -- cgit v1.2.3 From 115472b3c6cf5240a0e54ecfc3612db0133f1e5c Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:08 +0100 Subject: sched: entity load-tracking load_avg_ratio This patch adds load_avg_ratio to each task. The load_avg_ratio is a variant of load_avg_contrib which is not scaled by the task priority. It is calculated like this: runnable_avg_sum * NICE_0_LOAD / (runnable_avg_period + 1). 
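For example (the numbers are for illustration only), a task that has been runnable for about half of its tracked period ends up with a load_avg_ratio of roughly 512 out of 1024 regardless of its nice value, whereas its load_avg_contrib is additionally scaled by the task's priority weight.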
Signed-off-by: Morten Rasmussen --- include/linux/sched.h | 1 + kernel/sched/fair.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e0939c9901..38970832a51 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1097,6 +1097,7 @@ struct sched_avg { u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; + unsigned long load_avg_ratio; u32 usage_avg_sum; }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f3b33d88633..31a56dcb444 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1427,6 +1427,9 @@ static inline void __update_task_entity_contrib(struct sched_entity *se) contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight); contrib /= (se->avg.runnable_avg_period + 1); se->avg.load_avg_contrib = scale_load(contrib); + contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD); + contrib /= (se->avg.runnable_avg_period + 1); + se->avg.load_avg_ratio = scale_load(contrib); } /* Compute the current contribution to load_avg by se, return any delta */ -- cgit v1.2.3 From 00d520269ed1cf7266b595312614e06569da0394 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:09 +0100 Subject: sched: Task placement for heterogeneous systems based on task load-tracking This patch introduces the basic SCHED_HMP infrastructure. Each class of cpus is represented by a hmp_domain and tasks will only be moved between these domains when their load profiles suggest it is beneficial. SCHED_HMP relies heavily on the task load-tracking introduced in Paul Turners fair group scheduling patch set: SCHED_HMP requires that the platform implements arch_get_hmp_domains() which should set up the platform specific list of hmp_domains. It is also assumed that the platform disables SD_LOAD_BALANCE for the appropriate sched_domains. Tasks placement takes place every time a task is to be inserted into a runqueue based on its load history. The task placement decision is based on load thresholds. There are no restrictions on the number of hmp_domains, however, multiple (>2) has not been tested and the up/down migration policy is rather simple. Signed-off-by: Morten Rasmussen --- arch/arm/Kconfig | 17 +++++ include/linux/sched.h | 6 ++ kernel/sched/fair.c | 168 ++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 6 ++ 4 files changed, 197 insertions(+) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5b714695b01..7abe7094def 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1582,6 +1582,23 @@ config SCHED_SMT MultiThreading at a cost of slightly increased overhead in some places. If unsure say N here. +config DISABLE_CPU_SCHED_DOMAIN_BALANCE + bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing" + help + Disables scheduler load-balancing at CPU sched domain level. + +config SCHED_HMP + bool "(EXPERIMENTAL) Heterogenous multiprocessor scheduling" + depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP + help + Experimental scheduler optimizations for heterogeneous platforms. + Attempts to introspectively select task affinity to optimize power + and performance. Basic support for multiple (>2) cpu types is in place, + but it has only been tested with two types of cpus. + There is currently no support for migration of task groups, hence + !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled + between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE). 
+ config HAVE_ARM_SCU bool help diff --git a/include/linux/sched.h b/include/linux/sched.h index 38970832a51..4ade83a6a28 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -985,6 +985,12 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); bool cpus_share_cache(int this_cpu, int that_cpu); +#ifdef CONFIG_SCHED_HMP +struct hmp_domain { + struct cpumask cpus; + struct list_head hmp_domains; +}; +#endif /* CONFIG_SCHED_HMP */ #else /* CONFIG_SMP */ struct sched_domain_attr; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 31a56dcb444..827034d1a48 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3318,6 +3318,125 @@ done: return target; } +#ifdef CONFIG_SCHED_HMP +/* + * Heterogenous multiprocessor (HMP) optimizations + * + * The cpu types are distinguished using a list of hmp_domains + * which each represent one cpu type using a cpumask. + * The list is assumed ordered by compute capacity with the + * fastest domain first. + */ +DEFINE_PER_CPU(struct hmp_domain *, hmp_cpu_domain); + +extern void __init arch_get_hmp_domains(struct list_head *hmp_domains_list); + +/* Setup hmp_domains */ +static int __init hmp_cpu_mask_setup(void) +{ + char buf[64]; + struct hmp_domain *domain; + struct list_head *pos; + int dc, cpu; + + pr_debug("Initializing HMP scheduler:\n"); + + /* Initialize hmp_domains using platform code */ + arch_get_hmp_domains(&hmp_domains); + if (list_empty(&hmp_domains)) { + pr_debug("HMP domain list is empty!\n"); + return 0; + } + + /* Print hmp_domains */ + dc = 0; + list_for_each(pos, &hmp_domains) { + domain = list_entry(pos, struct hmp_domain, hmp_domains); + cpulist_scnprintf(buf, 64, &domain->cpus); + pr_debug(" HMP domain %d: %s\n", dc, buf); + + for_each_cpu_mask(cpu, domain->cpus) { + per_cpu(hmp_cpu_domain, cpu) = domain; + } + dc++; + } + + return 1; +} + +/* + * Migration thresholds should be in the range [0..1023] + * hmp_up_threshold: min. load required for migrating tasks to a faster cpu + * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu + * The default values (512, 256) offer good responsiveness, but may need + * tweaking suit particular needs. 
+ */ +unsigned int hmp_up_threshold = 512; +unsigned int hmp_down_threshold = 256; + +static unsigned int hmp_up_migration(int cpu, struct sched_entity *se); +static unsigned int hmp_down_migration(int cpu, struct sched_entity *se); + +/* Check if cpu is in fastest hmp_domain */ +static inline unsigned int hmp_cpu_is_fastest(int cpu) +{ + struct list_head *pos; + + pos = &hmp_cpu_domain(cpu)->hmp_domains; + return pos == hmp_domains.next; +} + +/* Check if cpu is in slowest hmp_domain */ +static inline unsigned int hmp_cpu_is_slowest(int cpu) +{ + struct list_head *pos; + + pos = &hmp_cpu_domain(cpu)->hmp_domains; + return list_is_last(pos, &hmp_domains); +} + +/* Next (slower) hmp_domain relative to cpu */ +static inline struct hmp_domain *hmp_slower_domain(int cpu) +{ + struct list_head *pos; + + pos = &hmp_cpu_domain(cpu)->hmp_domains; + return list_entry(pos->next, struct hmp_domain, hmp_domains); +} + +/* Previous (faster) hmp_domain relative to cpu */ +static inline struct hmp_domain *hmp_faster_domain(int cpu) +{ + struct list_head *pos; + + pos = &hmp_cpu_domain(cpu)->hmp_domains; + return list_entry(pos->prev, struct hmp_domain, hmp_domains); +} + +/* + * Selects a cpu in previous (faster) hmp_domain + * Note that cpumask_any_and() returns the first cpu in the cpumask + */ +static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk, + int cpu) +{ + return cpumask_any_and(&hmp_faster_domain(cpu)->cpus, + tsk_cpus_allowed(tsk)); +} + +/* + * Selects a cpu in next (slower) hmp_domain + * Note that cpumask_any_and() returns the first cpu in the cpumask + */ +static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk, + int cpu) +{ + return cpumask_any_and(&hmp_slower_domain(cpu)->cpus, + tsk_cpus_allowed(tsk)); +} + +#endif /* CONFIG_SCHED_HMP */ + /* * sched_balance_self: balance the current task (running on cpu) in domains * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and @@ -3416,6 +3535,16 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) unlock: rcu_read_unlock(); +#ifdef CONFIG_SCHED_HMP + if (hmp_up_migration(prev_cpu, &p->se)) + return hmp_select_faster_cpu(p, prev_cpu); + if (hmp_down_migration(prev_cpu, &p->se)) + return hmp_select_slower_cpu(p, prev_cpu); + /* Make sure that the task stays in its previous hmp domain */ + if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus)) + return prev_cpu; +#endif + return new_cpu; } @@ -5676,6 +5805,41 @@ need_kick: static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } #endif +#ifdef CONFIG_SCHED_HMP +/* Check if task should migrate to a faster cpu */ +static unsigned int hmp_up_migration(int cpu, struct sched_entity *se) +{ + struct task_struct *p = task_of(se); + + if (hmp_cpu_is_fastest(cpu)) + return 0; + + if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus, + tsk_cpus_allowed(p)) + && se->avg.load_avg_ratio > hmp_up_threshold) { + return 1; + } + return 0; +} + +/* Check if task should migrate to a slower cpu */ +static unsigned int hmp_down_migration(int cpu, struct sched_entity *se) +{ + struct task_struct *p = task_of(se); + + if (hmp_cpu_is_slowest(cpu)) + return 0; + + if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus, + tsk_cpus_allowed(p)) + && se->avg.load_avg_ratio < hmp_down_threshold) { + return 1; + } + return 0; +} + +#endif /* CONFIG_SCHED_HMP */ + /* * run_rebalance_domains is triggered when needed from the scheduler tick. * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 
@@ -6186,6 +6350,10 @@ __init void init_sched_fair_class(void) zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); cpu_notifier(sched_ilb_notifier, 0); #endif + +#ifdef CONFIG_SCHED_HMP + hmp_cpu_mask_setup(); +#endif #endif /* SMP */ } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 64e3aff3dac..253144f500e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -549,6 +549,12 @@ DECLARE_PER_CPU(int, sd_llc_id); extern int group_balance_cpu(struct sched_group *sg); +#ifdef CONFIG_SCHED_HMP +static LIST_HEAD(hmp_domains); +DECLARE_PER_CPU(struct hmp_domain *, hmp_cpu_domain); +#define hmp_cpu_domain(cpu) (per_cpu(hmp_cpu_domain, (cpu))) +#endif /* CONFIG_SCHED_HMP */ + #endif /* CONFIG_SMP */ #include "stats.h" -- cgit v1.2.3 From bc4ed1b52249e43657b264dcf8e12384d4d33b4c Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:10 +0100 Subject: sched: Forced task migration on heterogeneous systems This patch introduces forced task migration for moving suitable currently running tasks between hmp_domains. Task behaviour is likely to change over time. Tasks running in a less capable hmp_domain may change to become more demanding and should therefore be migrated up. They are unlikely go through the select_task_rq_fair() path anytime soon and therefore need special attention. This patch introduces a period check (SCHED_TICK) of the currently running task on all runqueues and sets up a forced migration using stop_machine_no_wait() if the task needs to be migrated. Ideally, this should not be implemented by polling all runqueues. Signed-off-by: Morten Rasmussen --- kernel/sched/fair.c | 196 ++++++++++++++++++++++++++++++++++++++++++++++++++- kernel/sched/sched.h | 3 + 2 files changed, 198 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 827034d1a48..44891b55ce1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4071,7 +4071,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) * 1) task is cache cold, or * 2) too many balance attempts have failed. */ - tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd); if (!tsk_cache_hot || env->sd->nr_balance_failed > env->sd->cache_nice_tries) { @@ -5838,6 +5837,199 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se) return 0; } +/* + * hmp_can_migrate_task - may task p from runqueue rq be migrated to this_cpu? + * Ideally this function should be merged with can_migrate_task() to avoid + * redundant code. + */ +static int hmp_can_migrate_task(struct task_struct *p, struct lb_env *env) +{ + int tsk_cache_hot = 0; + + /* + * We do not migrate tasks that are: + * 1) running (obviously), or + * 2) cannot be migrated to this CPU due to cpus_allowed + */ + if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) { + schedstat_inc(p, se.statistics.nr_failed_migrations_affine); + return 0; + } + env->flags &= ~LBF_ALL_PINNED; + + if (task_running(env->src_rq, p)) { + schedstat_inc(p, se.statistics.nr_failed_migrations_running); + return 0; + } + + /* + * Aggressive migration if: + * 1) task is cache cold, or + * 2) too many balance attempts have failed. 
+ */ + + tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd); + if (!tsk_cache_hot || + env->sd->nr_balance_failed > env->sd->cache_nice_tries) { +#ifdef CONFIG_SCHEDSTATS + if (tsk_cache_hot) { + schedstat_inc(env->sd, lb_hot_gained[env->idle]); + schedstat_inc(p, se.statistics.nr_forced_migrations); + } +#endif + return 1; + } + + return 1; +} + +/* + * move_specific_task tries to move a specific task. + * Returns 1 if successful and 0 otherwise. + * Called with both runqueues locked. + */ +static int move_specific_task(struct lb_env *env, struct task_struct *pm) +{ + struct task_struct *p, *n; + + list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { + if (throttled_lb_pair(task_group(p), env->src_rq->cpu, + env->dst_cpu)) + continue; + + if (!hmp_can_migrate_task(p, env)) + continue; + /* Check if we found the right task */ + if (p != pm) + continue; + + move_task(p, env); + /* + * Right now, this is only the third place move_task() + * is called, so we can safely collect move_task() + * stats here rather than inside move_task(). + */ + schedstat_inc(env->sd, lb_gained[env->idle]); + return 1; + } + return 0; +} + +/* + * hmp_active_task_migration_cpu_stop is run by cpu stopper and used to + * migrate a specific task from one runqueue to another. + * hmp_force_up_migration uses this to push a currently running task + * off a runqueue. + * Based on active_load_balance_stop_cpu and can potentially be merged. + */ +static int hmp_active_task_migration_cpu_stop(void *data) +{ + struct rq *busiest_rq = data; + struct task_struct *p = busiest_rq->migrate_task; + int busiest_cpu = cpu_of(busiest_rq); + int target_cpu = busiest_rq->push_cpu; + struct rq *target_rq = cpu_rq(target_cpu); + struct sched_domain *sd; + + raw_spin_lock_irq(&busiest_rq->lock); + /* make sure the requested cpu hasn't gone down in the meantime */ + if (unlikely(busiest_cpu != smp_processor_id() || + !busiest_rq->active_balance)) { + goto out_unlock; + } + /* Is there any task to move? */ + if (busiest_rq->nr_running <= 1) + goto out_unlock; + /* Task has migrated meanwhile, abort forced migration */ + if (task_rq(p) != busiest_rq) + goto out_unlock; + /* + * This condition is "impossible", if it occurs + * we need to fix it. Originally reported by + * Bjorn Helgaas on a 128-cpu setup. + */ + BUG_ON(busiest_rq == target_rq); + + /* move a task from busiest_rq to target_rq */ + double_lock_balance(busiest_rq, target_rq); + + /* Search for an sd spanning us and the target CPU. */ + rcu_read_lock(); + for_each_domain(target_cpu, sd) { + if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) + break; + } + + if (likely(sd)) { + struct lb_env env = { + .sd = sd, + .dst_cpu = target_cpu, + .dst_rq = target_rq, + .src_cpu = busiest_rq->cpu, + .src_rq = busiest_rq, + .idle = CPU_IDLE, + }; + + schedstat_inc(sd, alb_count); + + if (move_specific_task(&env, p)) + schedstat_inc(sd, alb_pushed); + else + schedstat_inc(sd, alb_failed); + } + rcu_read_unlock(); + double_unlock_balance(busiest_rq, target_rq); +out_unlock: + busiest_rq->active_balance = 0; + raw_spin_unlock_irq(&busiest_rq->lock); + return 0; +} + +static DEFINE_SPINLOCK(hmp_force_migration); + +/* + * hmp_force_up_migration checks runqueues for tasks that need to + * be actively migrated to a faster cpu. 
+ */ +static void hmp_force_up_migration(int this_cpu) +{ + int cpu; + struct sched_entity *curr; + struct rq *target; + unsigned long flags; + unsigned int force; + struct task_struct *p; + + if (!spin_trylock(&hmp_force_migration)) + return; + for_each_online_cpu(cpu) { + force = 0; + target = cpu_rq(cpu); + raw_spin_lock_irqsave(&target->lock, flags); + curr = target->cfs.curr; + if (!curr || !entity_is_task(curr)) { + raw_spin_unlock_irqrestore(&target->lock, flags); + continue; + } + p = task_of(curr); + if (hmp_up_migration(cpu, curr)) { + if (!target->active_balance) { + target->active_balance = 1; + target->push_cpu = hmp_select_faster_cpu(p, cpu); + target->migrate_task = p; + force = 1; + } + } + raw_spin_unlock_irqrestore(&target->lock, flags); + if (force) + stop_one_cpu_nowait(cpu_of(target), + hmp_active_task_migration_cpu_stop, + target, &target->active_balance_work); + } + spin_unlock(&hmp_force_migration); +} +#else +static void hmp_force_up_migration(int this_cpu) { } #endif /* CONFIG_SCHED_HMP */ /* @@ -5851,6 +6043,8 @@ static void run_rebalance_domains(struct softirq_action *h) enum cpu_idle_type idle = this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE; + hmp_force_up_migration(this_cpu); + rebalance_domains(this_cpu, idle); /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 253144f500e..5affd9cf0a0 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -427,6 +427,9 @@ struct rq { int active_balance; int push_cpu; struct cpu_stop_work active_balance_work; +#ifdef CONFIG_SCHED_HMP + struct task_struct *migrate_task; +#endif /* cpu of this runqueue: */ int cpu; int online; -- cgit v1.2.3 From b46f8c8b364e03ea97e0807fe4e09d32c9dce69a Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:11 +0100 Subject: sched: Introduce priority-based task migration filter Introduces a priority threshold which prevents low priority task from migrating to faster hmp_domains (cpus). This is useful for user-space software which assigns lower task priority to background task. Signed-off-by: Morten Rasmussen --- arch/arm/Kconfig | 13 +++++++++++++ kernel/sched/fair.c | 17 +++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 7abe7094def..d24768c04be 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1599,6 +1599,19 @@ config SCHED_HMP !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE). +config SCHED_HMP_PRIO_FILTER + bool "(EXPERIMENTAL) Filter HMP migrations by task priority" + depends on SCHED_HMP + help + Enables task priority based HMP migration filter. Any task with + a NICE value above the threshold will always be on low-power cpus + with less compute capacity. + +config SCHED_HMP_PRIO_FILTER_VAL + int "NICE priority threshold" + default 5 + depends on SCHED_HMP_PRIO_FILTER + config HAVE_ARM_SCU bool help diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 44891b55ce1..715ac8b4f4b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3370,9 +3370,14 @@ static int __init hmp_cpu_mask_setup(void) * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu * The default values (512, 256) offer good responsiveness, but may need * tweaking suit particular needs. 
+ * + * hmp_up_prio: Only up migrate task with high priority (prio >= hmp_up_prio) + return 0; +#endif + if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus, tsk_cpus_allowed(p)) && se->avg.load_avg_ratio > hmp_up_threshold) { @@ -5829,6 +5840,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se) if (hmp_cpu_is_slowest(cpu)) return 0; +#ifdef CONFIG_SCHED_HMP_PRIO_FILTER + /* Filter by task priority */ + if (p->prio >= hmp_up_prio) + return 1; +#endif + if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus, tsk_cpus_allowed(p)) && se->avg.load_avg_ratio < hmp_down_threshold) { -- cgit v1.2.3 From bb7c9814e9cdeaccd05ad9c08764ed9249020273 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:12 +0100 Subject: ARM: Add HMP scheduling support for ARM architecture Adds Kconfig entries to enable HMP scheduling on ARM platforms. Currently, it disables CPU level sched_domain load-balacing in order to simplify things. This needs fixing in a later revision. HMP scheduling will do the load-balancing at this level instead. Signed-off-by: Morten Rasmussen --- arch/arm/Kconfig | 14 ++++++++++++++ arch/arm/include/asm/topology.h | 31 +++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d24768c04be..b6d20ffd0ec 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1612,6 +1612,20 @@ config SCHED_HMP_PRIO_FILTER_VAL default 5 depends on SCHED_HMP_PRIO_FILTER +config HMP_FAST_CPU_MASK + string "HMP scheduler fast CPU mask" + depends on SCHED_HMP + help + Specify the cpuids of the fast CPUs in the system as a list string, + e.g. cpuid 0+1 should be specified as 0-1. + +config HMP_SLOW_CPU_MASK + string "HMP scheduler slow CPU mask" + depends on SCHED_HMP + help + Specify the cpuids of the slow CPUs in the system as a list string, + e.g. cpuid 0+1 should be specified as 0-1. + config HAVE_ARM_SCU bool help diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 58b8b84adcd..5692ba11322 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -27,6 +27,37 @@ void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); +#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE +/* Common values for CPUs */ +#ifndef SD_CPU_INIT +#define SD_CPU_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 1, \ + .busy_idx = 2, \ + .idle_idx = 1, \ + .newidle_idx = 0, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ + \ + .flags = 0*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 0*SD_BALANCE_WAKE \ + | 1*SD_WAKE_AFFINE \ + | 0*SD_SHARE_CPUPOWER \ + | 0*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ +} +#endif +#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */ + #else static inline void init_cpu_topology(void) { } -- cgit v1.2.3 From 9731b49a4c5dc602164ca78f8bc75298c8a93ccc Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:13 +0100 Subject: ARM: sched: Use device-tree to provide fast/slow CPU list for HMP We can't rely on Kconfig options to set the fast and slow CPU lists for HMP scheduling if we want a single kernel binary to support multiple devices with different CPU topology. E.g. TC2 (ARM's Test-Chip-2 big.LITTLE system), Fast Models, or even non big.LITTLE devices. 
This patch adds the function arch_get_fast_and_slow_cpus() to generate the lists at run-time by parsing the CPU nodes in device-tree; it assumes slow cores are A7s and everything else is fast. The function still supports the old Kconfig options as this is useful for testing the HMP scheduler on devices without big.LITTLE. This patch is reuse of a patch by Jon Medhurst with a few bits left out. Signed-off-by: Morten Rasmussen --- arch/arm/Kconfig | 4 ++- arch/arm/kernel/topology.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b6d20ffd0ec..fa304b88e7c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1616,13 +1616,15 @@ config HMP_FAST_CPU_MASK string "HMP scheduler fast CPU mask" depends on SCHED_HMP help - Specify the cpuids of the fast CPUs in the system as a list string, + Leave empty to use device tree information. + Specify the cpuids of the fast CPUs in the system as a list string, e.g. cpuid 0+1 should be specified as 0-1. config HMP_SLOW_CPU_MASK string "HMP scheduler slow CPU mask" depends on SCHED_HMP help + Leave empty to use device tree information. Specify the cpuids of the slow CPUs in the system as a list string, e.g. cpuid 0+1 should be specified as 0-1. diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 79282ebcd93..eac7424d372 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -287,6 +287,75 @@ void store_cpu_topology(unsigned int cpuid) cpu_topology[cpuid].socket_id, mpidr); } + +#ifdef CONFIG_SCHED_HMP + +static const char * const little_cores[] = { + "arm,cortex-a7", + NULL, +}; + +static bool is_little_cpu(struct device_node *cn) +{ + const char * const *lc; + for (lc = little_cores; *lc; lc++) + if (of_device_is_compatible(cn, *lc)) + return true; + return false; +} + +void __init arch_get_fast_and_slow_cpus(struct cpumask *fast, + struct cpumask *slow) +{ + struct device_node *cn = NULL; + int cpu = 0; + + cpumask_clear(fast); + cpumask_clear(slow); + + /* + * Use the config options if they are given. This helps testing + * HMP scheduling on systems without a big.LITTLE architecture. + */ + if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) { + if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast)) + WARN(1, "Failed to parse HMP fast cpu mask!\n"); + if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow)) + WARN(1, "Failed to parse HMP slow cpu mask!\n"); + return; + } + + /* + * Else, parse device tree for little cores. + */ + while ((cn = of_find_node_by_type(cn, "cpu"))) { + + if (cpu >= num_possible_cpus()) + break; + + if (is_little_cpu(cn)) + cpumask_set_cpu(cpu, slow); + else + cpumask_set_cpu(cpu, fast); + + cpu++; + } + + if (!cpumask_empty(fast) && !cpumask_empty(slow)) + return; + + /* + * We didn't find both big and little cores so let's call all cores + * fast as this will keep the system running, with all cores being + * treated equal. + */ + cpumask_setall(fast); + cpumask_clear(slow); +} + +#endif /* CONFIG_SCHED_HMP */ + + /* * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array -- cgit v1.2.3 From 053663e6bc778b946ee7f70505ccf86f371c0180 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:14 +0100 Subject: ARM: sched: Setup SCHED_HMP domains SCHED_HMP requires the different cpu types to be represented by an ordered list of hmp_domains. 
Each hmp_domain represents all cpus of a particular type using a cpumask. The list is platform specific and therefore must be generated by platform code by implementing arch_get_hmp_domains(). Signed-off-by: Morten Rasmussen --- arch/arm/kernel/topology.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index eac7424d372..04271db020c 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -353,6 +353,28 @@ void __init arch_get_fast_and_slow_cpus(struct cpumask *fast, cpumask_clear(slow); } +void __init arch_get_hmp_domains(struct list_head *hmp_domains_list) +{ + struct cpumask hmp_fast_cpu_mask; + struct cpumask hmp_slow_cpu_mask; + struct hmp_domain *domain; + + arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask); + + /* + * Initialize hmp_domains + * Must be ordered with respect to compute capacity. + * Fastest domain at head of list. + */ + domain = (struct hmp_domain *) + kmalloc(sizeof(struct hmp_domain), GFP_KERNEL); + cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask); + list_add(&domain->hmp_domains, hmp_domains_list); + domain = (struct hmp_domain *) + kmalloc(sizeof(struct hmp_domain), GFP_KERNEL); + cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask); + list_add(&domain->hmp_domains, hmp_domains_list); +} #endif /* CONFIG_SCHED_HMP */ -- cgit v1.2.3 From 34504af6e20c12ce4db26a26417a360e7e149888 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:15 +0100 Subject: sched: Add ftrace events for entity load-tracking Adds ftrace events for key variables related to the entity load-tracking to help debugging scheduler behaviour. Allows tracing of load contribution and runqueue residency ratio for both entities and runqueues as well as entity CPU usage ratio. Signed-off-by: Morten Rasmussen --- include/trace/events/sched.h | 125 +++++++++++++++++++++++++++++++++++++++++++ kernel/sched/fair.c | 7 +++ 2 files changed, 132 insertions(+) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 5a8671e8a67..847eb76fc80 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -430,6 +430,131 @@ TRACE_EVENT(sched_pi_setprio, __entry->oldprio, __entry->newprio) ); +/* + * Tracepoint for showing tracked load contribution. + */ +TRACE_EVENT(sched_task_load_contrib, + + TP_PROTO(struct task_struct *tsk, unsigned long load_contrib), + + TP_ARGS(tsk, load_contrib), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(unsigned long, load_contrib) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->load_contrib = load_contrib; + ), + + TP_printk("comm=%s pid=%d load_contrib=%lu", + __entry->comm, __entry->pid, + __entry->load_contrib) +); + +/* + * Tracepoint for showing tracked task runnable ratio [0..1023]. + */ +TRACE_EVENT(sched_task_runnable_ratio, + + TP_PROTO(struct task_struct *tsk, unsigned long ratio), + + TP_ARGS(tsk, ratio), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(unsigned long, ratio) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->ratio = ratio; + ), + + TP_printk("comm=%s pid=%d ratio=%lu", + __entry->comm, __entry->pid, + __entry->ratio) +); + +/* + * Tracepoint for showing tracked rq runnable ratio [0..1023]. 
+ */ +TRACE_EVENT(sched_rq_runnable_ratio, + + TP_PROTO(int cpu, unsigned long ratio), + + TP_ARGS(cpu, ratio), + + TP_STRUCT__entry( + __field(int, cpu) + __field(unsigned long, ratio) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->ratio = ratio; + ), + + TP_printk("cpu=%d ratio=%lu", + __entry->cpu, + __entry->ratio) +); + +/* + * Tracepoint for showing tracked rq runnable load. + */ +TRACE_EVENT(sched_rq_runnable_load, + + TP_PROTO(int cpu, u64 load), + + TP_ARGS(cpu, load), + + TP_STRUCT__entry( + __field(int, cpu) + __field(u64, load) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->load = load; + ), + + TP_printk("cpu=%d load=%llu", + __entry->cpu, + __entry->load) +); + +/* + * Tracepoint for showing tracked task cpu usage ratio [0..1023]. + */ +TRACE_EVENT(sched_task_usage_ratio, + + TP_PROTO(struct task_struct *tsk, unsigned long ratio), + + TP_ARGS(tsk, ratio), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(unsigned long, ratio) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->ratio = ratio; + ), + + TP_printk("comm=%s pid=%d ratio=%lu", + __entry->comm, __entry->pid, + __entry->ratio) +); #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 715ac8b4f4b..b9a659cee8c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1427,9 +1427,11 @@ static inline void __update_task_entity_contrib(struct sched_entity *se) contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight); contrib /= (se->avg.runnable_avg_period + 1); se->avg.load_avg_contrib = scale_load(contrib); + trace_sched_task_load_contrib(task_of(se), se->avg.load_avg_contrib); contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD); contrib /= (se->avg.runnable_avg_period + 1); se->avg.load_avg_ratio = scale_load(contrib); + trace_sched_task_runnable_ratio(task_of(se), se->avg.load_avg_ratio); } /* Compute the current contribution to load_avg by se, return any delta */ @@ -1520,9 +1522,14 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) static inline void update_rq_runnable_avg(struct rq *rq, int runnable) { + u32 contrib; __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable, runnable); __update_tg_runnable_avg(&rq->avg, &rq->cfs); + contrib = rq->avg.runnable_avg_sum * scale_load_down(1024); + contrib /= (rq->avg.runnable_avg_period + 1); + trace_sched_rq_runnable_ratio(cpu_of(rq), scale_load(contrib)); + trace_sched_rq_runnable_load(cpu_of(rq), rq->cfs.runnable_load_avg); } /* Add the load generated by se into cfs_rq's child load-average */ -- cgit v1.2.3 From e08f2c8fc885bcfbd476f5df53833e71c7eedaf5 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:16 +0100 Subject: sched: Add HMP task migration ftrace event Adds ftrace event for tracing task migrations using HMP optimized scheduling. Signed-off-by: Morten Rasmussen --- include/trace/events/sched.h | 28 ++++++++++++++++++++++++++++ kernel/sched/fair.c | 15 +++++++++++---- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 847eb76fc80..501aa32eb2f 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -555,6 +555,34 @@ TRACE_EVENT(sched_task_usage_ratio, __entry->comm, __entry->pid, __entry->ratio) ); + +/* + * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations. 
+ */ +TRACE_EVENT(sched_hmp_migrate, + + TP_PROTO(struct task_struct *tsk, int dest, int force), + + TP_ARGS(tsk, dest, force), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(int, dest) + __field(int, force) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->dest = dest; + __entry->force = force; + ), + + TP_printk("comm=%s pid=%d dest=%d force=%d", + __entry->comm, __entry->pid, + __entry->dest, __entry->force) +); #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b9a659cee8c..fe661dbf729 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3548,10 +3548,16 @@ unlock: rcu_read_unlock(); #ifdef CONFIG_SCHED_HMP - if (hmp_up_migration(prev_cpu, &p->se)) - return hmp_select_faster_cpu(p, prev_cpu); - if (hmp_down_migration(prev_cpu, &p->se)) - return hmp_select_slower_cpu(p, prev_cpu); + if (hmp_up_migration(prev_cpu, &p->se)) { + new_cpu = hmp_select_faster_cpu(p, prev_cpu); + trace_sched_hmp_migrate(p, new_cpu, 0); + return new_cpu; + } + if (hmp_down_migration(prev_cpu, &p->se)) { + new_cpu = hmp_select_slower_cpu(p, prev_cpu); + trace_sched_hmp_migrate(p, new_cpu, 0); + return new_cpu; + } /* Make sure that the task stays in its previous hmp domain */ if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus)) return prev_cpu; @@ -6042,6 +6048,7 @@ static void hmp_force_up_migration(int this_cpu) target->push_cpu = hmp_select_faster_cpu(p, cpu); target->migrate_task = p; force = 1; + trace_sched_hmp_migrate(p, target->push_cpu, 1); } } raw_spin_unlock_irqrestore(&target->lock, flags); -- cgit v1.2.3 From b422ddf960f3dee1632136c66817004e7e29cea2 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 14 Sep 2012 14:38:17 +0100 Subject: sched: SCHED_HMP multi-domain task migration control We need a way to prevent tasks that are migrating up and down the hmp_domains from migrating straight on through before the load has adapted to the new compute capacity of the CPU on the new hmp_domain. This patch adds a next up/down migration delay that prevents the task from doing another migration in the same direction until the delay has expired. 
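As a rough user-space sketch of that settle check (the field names and the threshold value below are illustrative stand-ins for the per-entity hmp_last_*_migration timestamps and the hmp_next_up_threshold tunable), the comparison boils down to a right-shifted time delta measured against a threshold:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative threshold in ~microseconds; the >> 10 below approximates ns -> us. */
#define NEXT_UP_THRESHOLD_US 4096

struct task_hmp_state {
	uint64_t last_up_migration_ns;   /* stamped when the task last moved to a fast CPU */
	uint64_t last_down_migration_ns; /* stamped when the task last moved to a slow CPU */
};

/* Allow another up-migration only once enough time has passed for the
 * tracked load to have adapted to the new CPU's compute capacity. */
static bool up_migration_allowed(const struct task_hmp_state *st, uint64_t now_ns)
{
	return ((now_ns - st->last_up_migration_ns) >> 10) >= NEXT_UP_THRESHOLD_US;
}

/* On a successful up-migration, stamp the time and clear the opposite stamp,
 * mirroring what hmp_next_up_delay() does in the patch below. */
static void note_up_migration(struct task_hmp_state *st, uint64_t now_ns)
{
	st->last_up_migration_ns = now_ns;
	st->last_down_migration_ns = 0;
}

The down-migration side is symmetric, using the other timestamp and threshold.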
Signed-off-by: Morten Rasmussen --- include/linux/sched.h | 4 ++++ kernel/sched/core.c | 4 ++++ kernel/sched/fair.c | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 46 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ade83a6a28..ea439abfdd3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1104,6 +1104,10 @@ struct sched_avg { s64 decay_count; unsigned long load_avg_contrib; unsigned long load_avg_ratio; +#ifdef CONFIG_SCHED_HMP + u64 hmp_last_up_migration; + u64 hmp_last_down_migration; +#endif u32 usage_avg_sum; }; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7f12624a393..346862ee1de 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1569,6 +1569,10 @@ static void __sched_fork(struct task_struct *p) #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED) p->se.avg.runnable_avg_period = 0; p->se.avg.runnable_avg_sum = 0; +#ifdef CONFIG_SCHED_HMP + p->se.avg.hmp_last_up_migration = 0; + p->se.avg.hmp_last_down_migration = 0; +#endif #endif #ifdef CONFIG_SCHEDSTATS memset(&p->se.statistics, 0, sizeof(p->se.statistics)); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fe661dbf729..7aef90ad9b4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3379,12 +3379,16 @@ static int __init hmp_cpu_mask_setup(void) * tweaking suit particular needs. * * hmp_up_prio: Only up migrate task with high priority (cfs; + + se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq); + se->avg.hmp_last_down_migration = 0; +} + +static inline void hmp_next_down_delay(struct sched_entity *se, int cpu) +{ + struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; + + se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq); + se->avg.hmp_last_up_migration = 0; +} #endif /* CONFIG_SCHED_HMP */ /* @@ -3550,11 +3569,13 @@ unlock: #ifdef CONFIG_SCHED_HMP if (hmp_up_migration(prev_cpu, &p->se)) { new_cpu = hmp_select_faster_cpu(p, prev_cpu); + hmp_next_up_delay(&p->se, new_cpu); trace_sched_hmp_migrate(p, new_cpu, 0); return new_cpu; } if (hmp_down_migration(prev_cpu, &p->se)) { new_cpu = hmp_select_slower_cpu(p, prev_cpu); + hmp_next_down_delay(&p->se, new_cpu); trace_sched_hmp_migrate(p, new_cpu, 0); return new_cpu; } @@ -5827,6 +5848,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } static unsigned int hmp_up_migration(int cpu, struct sched_entity *se) { struct task_struct *p = task_of(se); + struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; + u64 now; if (hmp_cpu_is_fastest(cpu)) return 0; @@ -5837,6 +5860,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se) return 0; #endif + /* Let the task load settle before doing another up migration */ + now = cfs_rq_clock_task(cfs_rq); + if (((now - se->avg.hmp_last_up_migration) >> 10) + < hmp_next_up_threshold) + return 0; + if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus, tsk_cpus_allowed(p)) && se->avg.load_avg_ratio > hmp_up_threshold) { @@ -5849,6 +5878,8 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se) static unsigned int hmp_down_migration(int cpu, struct sched_entity *se) { struct task_struct *p = task_of(se); + struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; + u64 now; if (hmp_cpu_is_slowest(cpu)) return 0; @@ -5859,6 +5890,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se) return 1; #endif + /* Let the task load settle before doing another down migration */ + now = cfs_rq_clock_task(cfs_rq); + if (((now - se->avg.hmp_last_down_migration) >> 10) + < 
hmp_next_down_threshold) + return 0; + if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus, tsk_cpus_allowed(p)) && se->avg.load_avg_ratio < hmp_down_threshold) { @@ -6049,6 +6086,7 @@ static void hmp_force_up_migration(int this_cpu) target->migrate_task = p; force = 1; trace_sched_hmp_migrate(p, target->push_cpu, 1); + hmp_next_up_delay(&p->se, target->push_cpu); } } raw_spin_unlock_irqrestore(&target->lock, flags); -- cgit v1.2.3 From b193b65dd36c95059cbd04d391c58ff4582db146 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Wed, 10 Oct 2012 14:51:25 +0100 Subject: sched: Enable HMP priority filter by default This updates the ARM Kconfig to enable the HMP priority filter by default. Signed-off-by: Morten Rasmussen --- arch/arm/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index fa304b88e7c..8ae87dc3806 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1602,6 +1602,7 @@ config SCHED_HMP config SCHED_HMP_PRIO_FILTER bool "(EXPERIMENTAL) Filter HMP migrations by task priority" depends on SCHED_HMP + default y help Enables task priority based HMP migration filter. Any task with a NICE value above the threshold will always be on low-power cpus -- cgit v1.2.3 From 1a574871bfd002f4d7c7bcf004fd9835de9cea63 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Fri, 12 Oct 2012 13:45:35 +0100 Subject: ARM: sched: Avoid empty 'slow' HMP domain On homogeneous (non-heterogeneous) systems all CPUs will be declared 'fast' and the slow cpu list will be empty. In this situation we need to avoid adding an empty slow HMP domain otherwise the scheduler code will blow up when it attempts to move a task to the slow domain. Signed-off-by: Jon Medhurst --- arch/arm/kernel/topology.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 04271db020c..f487129ab7f 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -366,10 +366,12 @@ void __init arch_get_hmp_domains(struct list_head *hmp_domains_list) * Must be ordered with respect to compute capacity. * Fastest domain at head of list. */ - domain = (struct hmp_domain *) - kmalloc(sizeof(struct hmp_domain), GFP_KERNEL); - cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask); - list_add(&domain->hmp_domains, hmp_domains_list); + if(!cpumask_empty(&hmp_slow_cpu_mask)) { + domain = (struct hmp_domain *) + kmalloc(sizeof(struct hmp_domain), GFP_KERNEL); + cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask); + list_add(&domain->hmp_domains, hmp_domains_list); + } domain = (struct hmp_domain *) kmalloc(sizeof(struct hmp_domain), GFP_KERNEL); cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask); -- cgit v1.2.3 From ae50cc6c24965ee5239dfbbc6efa2f6370288f80 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 12 Oct 2012 15:25:02 +0100 Subject: sched: Only down migrate low priority tasks if allowed by affinity mask Adds an extra check intersection of the task affinity mask and the slower hmp_domain cpumask before down migrating low priority tasks. 
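In outline, and using a plain 64-bit word as a stand-in for the kernel's cpumask type (the priority cutoff below is likewise only illustrative), the tightened filter reads:

#include <stdbool.h>
#include <stdint.h>

#define HMP_UP_PRIO 120	/* illustrative priority cutoff, not the kernel's default */

/* Toy stand-in for cpumask_intersects(): CPUs are bits in a 64-bit word. */
static bool masks_intersect(uint64_t a, uint64_t b)
{
	return (a & b) != 0;
}

/* Force a low-priority task down only when at least one CPU of the slower
 * domain is also present in the task's allowed mask. */
static bool prio_filter_allows_down(int task_prio, uint64_t task_allowed,
				    uint64_t slower_domain_cpus)
{
	return task_prio >= HMP_UP_PRIO &&
	       masks_intersect(slower_domain_cpus, task_allowed);
}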
Signed-off-by: Morten Rasmussen --- kernel/sched/fair.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7aef90ad9b4..1c73975585b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5886,8 +5886,11 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se) #ifdef CONFIG_SCHED_HMP_PRIO_FILTER /* Filter by task priority */ - if (p->prio >= hmp_up_prio) + if ((p->prio >= hmp_up_prio) && + cpumask_intersects(&hmp_slower_domain(cpu)->cpus, + tsk_cpus_allowed(p))) { return 1; + } #endif /* Let the task load settle before doing another down migration */ -- cgit v1.2.3 From 161bebe64ce4264105c09e022deeab0b94b06ead Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Mon, 24 Sep 2012 14:07:20 +0100 Subject: sched: fix arch_get_fast_and_slow_cpus to get logical cpumask correctly The patch "sched: Use device-tree to provide fast/slow CPU list for HMP" depends on the ordering of CPU's in the device tree. It breaks to determine the logical mask correctly if the logical mask of the CPUs differ from physical ordering in the device tree. This patch fix the logic by depending on the mpidr in the device tree and mapping that mpidr to the logical cpu. Signed-off-by: Sudeep KarkadaNagesha Signed-off-by: Liviu Dudau --- arch/arm/kernel/topology.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index f487129ab7f..677325f4355 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -22,6 +22,7 @@ #include #include +#include #include /* @@ -308,7 +309,7 @@ void __init arch_get_fast_and_slow_cpus(struct cpumask *fast, struct cpumask *slow) { struct device_node *cn = NULL; - int cpu = 0; + int cpu; cpumask_clear(fast); cpumask_clear(slow); @@ -330,15 +331,26 @@ void __init arch_get_fast_and_slow_cpus(struct cpumask *fast, */ while ((cn = of_find_node_by_type(cn, "cpu"))) { - if (cpu >= num_possible_cpus()) + const u32 *mpidr; + int len; + + mpidr = of_get_property(cn, "reg", &len); + if (!mpidr || len != 4) { + pr_err("* %s missing reg property\n", cn->full_name); + continue; + } + + cpu = get_logical_index(be32_to_cpup(mpidr)); + if (cpu == -EINVAL) { + pr_err("couldn't get logical index for mpidr %x\n", + be32_to_cpup(mpidr)); break; + } if (is_little_cpu(cn)) cpumask_set_cpu(cpu, slow); else cpumask_set_cpu(cpu, fast); - - cpu++; } if (!cpumask_empty(fast) && !cpumask_empty(slow)) -- cgit v1.2.3 From 430219a653ec0e8bc2297db1a3402913bc2073b2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 25 May 2012 16:59:47 +0200 Subject: genirq: Add default affinity mask command line option If we isolate CPUs, then we don't want random device interrupts on them. Even w/o the user space irq balancer enabled we can end up with irqs on non boot cpus. Allow to restrict the default irq affinity mask. Signed-off-by: Thomas Gleixner --- Documentation/kernel-parameters.txt | 9 +++++++++ kernel/irq/irqdesc.c | 21 +++++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4609e81dbc3..32d3c0da4c8 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1189,6 +1189,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted. See comment before ip2_setup() in drivers/char/ip2/ip2base.c. 
+ irqaffinity= [SMP] Set the default irq affinity mask + Format: + ,..., + or + - + (must be a positive range in ascending order) + or a mixture + ,...,- + irqfixup [HW] When an interrupt is not handled search all handlers for it. Intended to get systems with badly broken diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 192a302d6cf..473b2b6eccb 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -23,10 +23,27 @@ static struct lock_class_key irq_desc_lock_class; #if defined(CONFIG_SMP) +static int __init irq_affinity_setup(char *str) +{ + zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); + cpulist_parse(str, irq_default_affinity); + /* + * Set at least the boot cpu. We don't want to end up with + * bugreports caused by random comandline masks + */ + cpumask_set_cpu(smp_processor_id(), irq_default_affinity); + return 1; +} +__setup("irqaffinity=", irq_affinity_setup); + static void __init init_irq_default_affinity(void) { - alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); - cpumask_setall(irq_default_affinity); +#ifdef CONFIG_CPUMASK_OFFSTACK + if (!irq_default_affinity) + zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); +#endif + if (cpumask_empty(irq_default_affinity)) + cpumask_setall(irq_default_affinity); } #else static void __init init_irq_default_affinity(void) -- cgit v1.2.3 From 4a81d7eed7d927fa6e9e336f9dd4ceb6084d338f Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 10 Jul 2012 14:47:10 +0100 Subject: configs: Add config fragments for big LITTLE MP This patch adds config fragments used to enable most of the features used by big LITTLE MP. Signed-off-by: Viresh Kumar --- linaro/configs/big-LITTLE-MP.conf | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 linaro/configs/big-LITTLE-MP.conf diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf new file mode 100644 index 00000000000..25768457406 --- /dev/null +++ b/linaro/configs/big-LITTLE-MP.conf @@ -0,0 +1,4 @@ +CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_NO_HZ=y -- cgit v1.2.3 From a84d1995cee346185355b9d9650dd1fb6d044314 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 11 Jul 2012 09:55:22 +0100 Subject: linaro/configs: Update big LITTLE MP fragment for task placement work CONFIG_HMP_FAST_CPU_MASK and CONFIG_HMP_SLOW_CPU_MASK must be set correctly by user platform. For now they are marked 0-1 and 2-3. Signed-off-by: Viresh Kumar --- linaro/configs/big-LITTLE-MP.conf | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf index 25768457406..df35474eff1 100644 --- a/linaro/configs/big-LITTLE-MP.conf +++ b/linaro/configs/big-LITTLE-MP.conf @@ -2,3 +2,8 @@ CONFIG_CGROUPS=y CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y CONFIG_NO_HZ=y +CONFIG_SCHED_MC=y +CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE=y +CONFIG_SCHED_HMP=y +CONFIG_HMP_FAST_CPU_MASK="0-1" +CONFIG_HMP_SLOW_CPU_MASK="2-3" -- cgit v1.2.3 From feff20f896e4808378466710fbf376c25debe1d8 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 12 Sep 2012 09:04:17 +0530 Subject: config-frag/big-LITTLE: Use device-tree to provide fast/slow CPU list for HMP Currently there are two ways of passing list of fast-slow CPU's to kernel. One via configs and other via DT. Code tries to get them via configs first an then try for DT. To make it configurable via DT by default, make config strings empty. 
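The precedence being relied on here can be sketched as follows; the macros stand in for the CONFIG_HMP_*_CPU_MASK strings that Kconfig compiles in, and leaving both empty defers to the device-tree parse of the CPU nodes:

#include <stdbool.h>
#include <string.h>

/* Illustrative stand-ins for the Kconfig-provided strings; the fragment
 * change sets both to "" so the device-tree path is taken by default. */
#define HMP_FAST_CPU_MASK ""
#define HMP_SLOW_CPU_MASK ""

static bool use_config_masks(void)
{
	/* Both strings must be non-empty for the config path to win;
	 * otherwise the fast/slow lists come from the device tree. */
	return strlen(HMP_FAST_CPU_MASK) && strlen(HMP_SLOW_CPU_MASK);
}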
Signed-off-by: Viresh Kumar Reported-by: Sudeep KarkadaNagesha --- linaro/configs/big-LITTLE-MP.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf index df35474eff1..df9cfa0554c 100644 --- a/linaro/configs/big-LITTLE-MP.conf +++ b/linaro/configs/big-LITTLE-MP.conf @@ -5,5 +5,5 @@ CONFIG_NO_HZ=y CONFIG_SCHED_MC=y CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE=y CONFIG_SCHED_HMP=y -CONFIG_HMP_FAST_CPU_MASK="0-1" -CONFIG_HMP_SLOW_CPU_MASK="2-3" +CONFIG_HMP_FAST_CPU_MASK="" +CONFIG_HMP_SLOW_CPU_MASK="" -- cgit v1.2.3 From 881c7ad8266a1394516891a5f15b0a1e346f3e2c Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Wed, 10 Oct 2012 14:51:25 +0100 Subject: linaro/configs: Enable HMP priority filter by default This updates linaro config fragments to enable the HMP priority filter by default. Signed-off-by: Morten Rasmussen --- linaro/configs/big-LITTLE-MP.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf index df9cfa0554c..d1c9da2354d 100644 --- a/linaro/configs/big-LITTLE-MP.conf +++ b/linaro/configs/big-LITTLE-MP.conf @@ -7,3 +7,5 @@ CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE=y CONFIG_SCHED_HMP=y CONFIG_HMP_FAST_CPU_MASK="" CONFIG_HMP_SLOW_CPU_MASK="" +CONFIG_SCHED_HMP_PRIO_FILTER=y +CONFIG_SCHED_HMP_PRIO_FILTER_VAL=5 -- cgit v1.2.3 From b52a8cfcf718262e408a41ff1e8d07e998f5037a Mon Sep 17 00:00:00 2001 From: Liviu Dudau Date: Fri, 16 Nov 2012 18:32:44 +0000 Subject: linaro/configs: big-LITTLE-MP: Enable the new tunable sysfs interface by default. Enable the new tunable sysfs interface for HMP scaling invariants. Signed-of-by: Liviu Dudau --- linaro/configs/big-LITTLE-MP.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/linaro/configs/big-LITTLE-MP.conf b/linaro/configs/big-LITTLE-MP.conf index d1c9da2354d..8cc2be049a4 100644 --- a/linaro/configs/big-LITTLE-MP.conf +++ b/linaro/configs/big-LITTLE-MP.conf @@ -7,5 +7,7 @@ CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE=y CONFIG_SCHED_HMP=y CONFIG_HMP_FAST_CPU_MASK="" CONFIG_HMP_SLOW_CPU_MASK="" +CONFIG_HMP_VARIABLE_SCALE=y +CONFIG_HMP_FREQUENCY_INVARIANT_SCALE=y CONFIG_SCHED_HMP_PRIO_FILTER=y CONFIG_SCHED_HMP_PRIO_FILTER_VAL=5 -- cgit v1.2.3 From 37380d0b7621d487b1d2885ad93227e880074ee5 Mon Sep 17 00:00:00 2001 From: Olivier Cozette Date: Wed, 17 Oct 2012 14:30:30 +0100 Subject: ARM: Change load tracking scale using sysfs These functions allow to change the load average period used in the task load average computation through /sys/kernel/hmp/load_avg_period_ms. This period is the time in ms to go from 0 to 0.5 load average while running or the time from 1 to 0.5 while sleeping. The default one used is 32 and gives the same load_avg_ratio computation than without this patch. These functions also allow to change the up and down threshold of HMP using /sys/kernel/hmp/{up,down}_threshold. Both must be between 0 and 1024. The thresholds are divided by 1024 before being compared to the load_avg_ratio. If /sys/kernel/hmp/load_avg_period_ms is 128 and /sys/kernel/hmp/up_threshold is 512, a task will be migrated to a bigger cluster after running for 128ms. Because after load_avg_period_ms the load average is 0.5 and real up_threshold us 512 / 1024 = 0.5. 
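The arithmetic behind that example can be checked with a small user-space sketch that mirrors the fixed-point conversion used by the patch (the overflow-safe high/low 64-bit split done in the kernel is omitted here for brevity):

#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_PERIOD 32		/* default load-average period in ms */
#define HMP_VARIABLE_SCALE_SHIFT 16	/* 16.16 fixed point */

/* Same mapping as hmp_period_tofrom_sysfs(): a load_avg_period_ms value
 * written through sysfs becomes a fixed-point time multiplier. */
static int period_to_multiplier(int load_avg_period_ms)
{
	return (LOAD_AVG_PERIOD << HMP_VARIABLE_SCALE_SHIFT) / load_avg_period_ms;
}

/* Scale a time delta the way hmp_variable_scale_convert() does. */
static uint64_t scale_delta(uint64_t delta, int multiplier)
{
	return (delta * (uint64_t)multiplier) >> HMP_VARIABLE_SCALE_SHIFT;
}

int main(void)
{
	/* With load_avg_period_ms = 128 the multiplier is 0.25 in 16.16 fixed
	 * point, so tracked time advances at a quarter rate and a busy task
	 * needs ~128ms to reach a 0.5 load average (up_threshold 512/1024). */
	int mult = period_to_multiplier(128);
	printf("multiplier = %d (%.2fx)\n", mult, mult / 65536.0);
	printf("a 1024us delta scales to %llu us\n",
	       (unsigned long long)scale_delta(1024, mult));
	return 0;
}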
Signed-off-by: Olivier Cozette Signed-off-by: Chris Redpath --- arch/arm/Kconfig | 23 +++++++ kernel/sched/fair.c | 183 +++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 204 insertions(+), 2 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 8ae87dc3806..42ea637002d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1629,6 +1629,29 @@ config HMP_SLOW_CPU_MASK Specify the cpuids of the slow CPUs in the system as a list string, e.g. cpuid 0+1 should be specified as 0-1. +config HMP_VARIABLE_SCALE + bool "Allows changing the load tracking scale through sysfs" + depends on SCHED_HMP + help + When turned on, this option exports the thresholds and load average + period value for the load tracking patches through sysfs. + The values can be modified to change the rate of load accumulation + and the thresholds used for HMP migration. + The load_avg_period_ms is the time in ms to reach a load average of + 0.5 for an idle task of 0 load average ratio that start a busy loop. + The up_threshold and down_threshold is the value to go to a faster + CPU or to go back to a slower cpu. + The {up,down}_threshold are devided by 1024 before being compared + to the load average. + For examples, with load_avg_period_ms = 128 and up_threshold = 512, + a running task with a load of 0 will be migrated to a bigger CPU after + 128ms, because after 128ms its load_avg_ratio is 0.5 and the real + up_threshold is 0.5. + This patch has the same behavior as changing the Y of the load + average computation to + (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms) + but it remove intermadiate overflows in computation. + config HAVE_ARM_SCU bool help diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1c73975585b..3c7102fe3a5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -31,6 +31,10 @@ #include #include +#ifdef CONFIG_HMP_VARIABLE_SCALE +#include +#include +#endif #include "sched.h" @@ -1200,8 +1204,10 @@ static u32 __compute_runnable_contrib(u64 n) return contrib + runnable_avg_yN_sum[n]; } -/* - * We can represent the historical contribution to runnable average as the +#ifdef CONFIG_HMP_VARIABLE_SCALE +static u64 hmp_variable_scale_convert(u64 delta); +#endif +/* We can represent the historical contribution to runnable average as the * coefficients of a geometric series. To do this we sub-divide our runnable * history into segments of approximately 1ms (1024us); label the segment that * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g. @@ -1238,6 +1244,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int delta_w, decayed = 0; delta = now - sa->last_runnable_update; +#ifdef CONFIG_HMP_VARIABLE_SCALE + delta = hmp_variable_scale_convert(delta); +#endif /* * This should only happen when time goes backwards, which it * unfortunately does during sched clock init when we swap over to TSC. @@ -3466,6 +3475,176 @@ static inline void hmp_next_down_delay(struct sched_entity *se, int cpu) se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq); se->avg.hmp_last_up_migration = 0; } + +#ifdef CONFIG_HMP_VARIABLE_SCALE +/* + * Heterogenous multiprocessor (HMP) optimizations + * + * These functions allow to change the growing speed of the load_avg_ratio + * by default it goes from 0 to 0.5 in LOAD_AVG_PERIOD = 32ms + * This can now be changed with /sys/kernel/hmp/load_avg_period_ms. + * + * These functions also allow to change the up and down threshold of HMP + * using /sys/kernel/hmp/{up,down}_threshold. 
+ * Both must be between 0 and 1023. The threshold that is compared + * to the load_avg_ratio is up_threshold/1024 and down_threshold/1024. + * + * For instance, if load_avg_period = 64 and up_threshold = 512, an idle + * task with a load of 0 will reach the threshold after 64ms of busy loop. + * + * Changing load_avg_periods_ms has the same effect than changing the + * default scaling factor Y=1002/1024 in the load_avg_ratio computation to + * (1002/1024.0)^(LOAD_AVG_PERIOD/load_avg_period_ms), but the last one + * could trigger overflows. + * For instance, with Y = 1023/1024 in __update_task_entity_contrib() + * "contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);" + * could be overflowed for a weight > 2^12 even is the load_avg_contrib + * should still be a 32bits result. This would not happen by multiplicating + * delta time by 1/22 and setting load_avg_period_ms = 706. + */ + +#define HMP_VARIABLE_SCALE_SHIFT 16ULL +struct hmp_global_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *a, struct attribute *b, + const char *c, size_t count); + int *value; + int (*to_sysfs)(int); + int (*from_sysfs)(int); +}; + +#define HMP_DATA_SYSFS_MAX 3 + +struct hmp_data_struct { + int multiplier; /* used to scale the time delta */ + struct attribute_group attr_group; + struct attribute *attributes[HMP_DATA_SYSFS_MAX + 1]; + struct hmp_global_attr attr[HMP_DATA_SYSFS_MAX]; +} hmp_data; + +/* + * By scaling the delta time it end-up increasing or decrease the + * growing speed of the per entity load_avg_ratio + * The scale factor hmp_data.multiplier is a fixed point + * number: (32-HMP_VARIABLE_SCALE_SHIFT).HMP_VARIABLE_SCALE_SHIFT + */ +static u64 hmp_variable_scale_convert(u64 delta) +{ + u64 high = delta >> 32ULL; + u64 low = delta & 0xffffffffULL; + low *= hmp_data.multiplier; + high *= hmp_data.multiplier; + return (low >> HMP_VARIABLE_SCALE_SHIFT) + + (high << (32ULL - HMP_VARIABLE_SCALE_SHIFT)); +} + +static ssize_t hmp_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + ssize_t ret = 0; + struct hmp_global_attr *hmp_attr = + container_of(attr, struct hmp_global_attr, attr); + int temp = *(hmp_attr->value); + if (hmp_attr->to_sysfs != NULL) + temp = hmp_attr->to_sysfs(temp); + ret = sprintf(buf, "%d\n", temp); + return ret; +} + +static ssize_t hmp_store(struct kobject *a, struct attribute *attr, + const char *buf, size_t count) +{ + int temp; + ssize_t ret = count; + struct hmp_global_attr *hmp_attr = + container_of(attr, struct hmp_global_attr, attr); + char *str = vmalloc(count + 1); + if (str == NULL) + return -ENOMEM; + memcpy(str, buf, count); + str[count] = 0; + if (sscanf(str, "%d", &temp) < 1) + ret = -EINVAL; + else { + if (hmp_attr->from_sysfs != NULL) + temp = hmp_attr->from_sysfs(temp); + if (temp < 0) + ret = -EINVAL; + else + *(hmp_attr->value) = temp; + } + vfree(str); + return ret; +} + +static int hmp_period_tofrom_sysfs(int value) +{ + return (LOAD_AVG_PERIOD << HMP_VARIABLE_SCALE_SHIFT) / value; +} + +/* max value for threshold is 1024 */ +static int hmp_theshold_from_sysfs(int value) +{ + if (value > 1024) + return -1; + return value; +} + +static void hmp_attr_add( + const char *name, + int *value, + int (*to_sysfs)(int), + int (*from_sysfs)(int)) +{ + int i = 0; + while (hmp_data.attributes[i] != NULL) { + i++; + if (i >= HMP_DATA_SYSFS_MAX) + return; + } + hmp_data.attr[i].attr.mode = 0644; + hmp_data.attr[i].show = hmp_show; + 
hmp_data.attr[i].store = hmp_store; + hmp_data.attr[i].attr.name = name; + hmp_data.attr[i].value = value; + hmp_data.attr[i].to_sysfs = to_sysfs; + hmp_data.attr[i].from_sysfs = from_sysfs; + hmp_data.attributes[i] = &hmp_data.attr[i].attr; + hmp_data.attributes[i + 1] = NULL; +} + +static int hmp_attr_init(void) +{ + int ret; + memset(&hmp_data, sizeof(hmp_data), 0); + /* by default load_avg_period_ms == LOAD_AVG_PERIOD + * meaning no change + */ + hmp_data.multiplier = hmp_period_tofrom_sysfs(LOAD_AVG_PERIOD); + + hmp_attr_add("load_avg_period_ms", + &hmp_data.multiplier, + hmp_period_tofrom_sysfs, + hmp_period_tofrom_sysfs); + hmp_attr_add("up_threshold", + &hmp_up_threshold, + NULL, + hmp_theshold_from_sysfs); + hmp_attr_add("down_threshold", + &hmp_down_threshold, + NULL, + hmp_theshold_from_sysfs); + + hmp_data.attr_group.name = "hmp"; + hmp_data.attr_group.attrs = hmp_data.attributes; + ret = sysfs_create_group(kernel_kobj, + &hmp_data.attr_group); + return 0; +} +late_initcall(hmp_attr_init); +#endif /* CONFIG_HMP_VARIABLE_SCALE */ #endif /* CONFIG_SCHED_HMP */ /* -- cgit v1.2.3 From ee74021b31619243c350396d721732ca38dbd8a4 Mon Sep 17 00:00:00 2001 From: Chris Redpath Date: Fri, 16 Nov 2012 10:03:00 +0000 Subject: ARM: Experimental Frequency-Invariant Load Scaling Patch Evaluation Patch to investigate using load as a representation of the amount of POTENTIAL cpu compute capacity used rather than a representation of the CURRENT cpu compute capacity. If CPUFreq is enabled, scales load in accordance with frequency. Powersave/performance CPUFreq governors are detected and scaling is disabled while these governors are in use. This is because when a single-frequency governor is in use, potential CPU capacity is static. So long as the governors and CPUFreq subsystem correctly report the frequencies available, the scaling should self tune. Adds an additional file to sysfs to allow this feature to be disabled for experimentation. /sys/kernel/hmp/frequency_invariant_load_scale write 0 to disable, 1 to enable. Signed-off-by: Chris Redpath --- arch/arm/Kconfig | 15 +++ kernel/sched/fair.c | 320 +++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 305 insertions(+), 30 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 42ea637002d..7b5392d14dc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1652,6 +1652,21 @@ config HMP_VARIABLE_SCALE (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms) but it remove intermadiate overflows in computation. +config HMP_FREQUENCY_INVARIANT_SCALE + bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP" + depends on HMP_VARIABLE_SCALE && CPU_FREQ + help + Scales the current load contribution in line with the frequency + of the CPU that the task was executed on. + In this version, we use a simple linear scale derived from the + maximum frequency reported by CPUFreq. + Restricting tracked load to be scaled by the CPU's frequency + represents the consumption of possible compute capacity + (rather than consumption of actual instantaneous capacity as + normal) and allows the HMP migration's simple threshold + migration strategy to interact more predictably with CPUFreq's + asynchronous compute capacity changes. 
+ config HAVE_ARM_SCU bool help diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3c7102fe3a5..0d6c21b1f4b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -34,10 +34,17 @@ #ifdef CONFIG_HMP_VARIABLE_SCALE #include #include -#endif +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE +/* Include cpufreq header to add a notifier so that cpu frequency + * scaling can track the current CPU frequency + */ +#include +#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ +#endif /* CONFIG_HMP_VARIABLE_SCALE */ #include "sched.h" + /* * Targeted preemption latency for CPU-bound tasks: * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) @@ -1205,8 +1212,93 @@ static u32 __compute_runnable_contrib(u64 n) } #ifdef CONFIG_HMP_VARIABLE_SCALE -static u64 hmp_variable_scale_convert(u64 delta); + +#define HMP_VARIABLE_SCALE_SHIFT 16ULL +struct hmp_global_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *a, struct attribute *b, + const char *c, size_t count); + int *value; + int (*to_sysfs)(int); + int (*from_sysfs)(int); +}; + +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE +#define HMP_DATA_SYSFS_MAX 4 +#else +#define HMP_DATA_SYSFS_MAX 3 +#endif + +struct hmp_data_struct { +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + int freqinvar_load_scale_enabled; #endif + int multiplier; /* used to scale the time delta */ + struct attribute_group attr_group; + struct attribute *attributes[HMP_DATA_SYSFS_MAX + 1]; + struct hmp_global_attr attr[HMP_DATA_SYSFS_MAX]; +} hmp_data; + +static u64 hmp_variable_scale_convert(u64 delta); +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE +/* Frequency-Invariant Load Modification: + * Loads are calculated as in PJT's patch however we also scale the current + * contribution in line with the frequency of the CPU that the task was + * executed on. + * In this version, we use a simple linear scale derived from the maximum + * frequency reported by CPUFreq. As an example: + * + * Consider that we ran a task for 100% of the previous interval. + * + * Our CPU was under asynchronous frequency control through one of the + * CPUFreq governors. + * + * The CPUFreq governor reports that it is able to scale the CPU between + * 500MHz and 1GHz. + * + * During the period, the CPU was running at 1GHz. + * + * In this case, our load contribution for that period is calculated as + * 1 * (number_of_active_microseconds) + * + * This results in our task being able to accumulate maximum load as normal. + * + * + * Consider now that our CPU was executing at 500MHz. + * + * We now scale the load contribution such that it is calculated as + * 0.5 * (number_of_active_microseconds) + * + * Our task can only record 50% maximum load during this period. + * + * This represents the task consuming 50% of the CPU's *possible* compute + * capacity. However the task did consume 100% of the CPU's *available* + * compute capacity which is the value seen by the CPUFreq governor and + * user-side CPU Utilization tools. + * + * Restricting tracked load to be scaled by the CPU's frequency accurately + * represents the consumption of possible compute capacity and allows the + * HMP migration's simple threshold migration strategy to interact more + * predictably with CPUFreq's asynchronous compute capacity changes. + */ +#define SCHED_FREQSCALE_SHIFT 10 +struct cpufreq_extents { + u32 curr_scale; + u32 min; + u32 max; + u32 flags; +}; +/* Flag set when the governor in use only allows one frequency. 
+ * Disables scaling. + */ +#define SCHED_LOAD_FREQINVAR_SINGLEFREQ 0x01 + +static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS]; +#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ +#endif /* CONFIG_HMP_VARIABLE_SCALE */ + /* We can represent the historical contribution to runnable average as the * coefficients of a geometric series. To do this we sub-divide our runnable * history into segments of approximately 1ms (1024us); label the segment that @@ -1237,11 +1329,18 @@ static u64 hmp_variable_scale_convert(u64 delta); static __always_inline int __update_entity_runnable_avg(u64 now, struct sched_avg *sa, int runnable, - int running) + int running, + int cpu) { u64 delta, periods; u32 runnable_contrib; int delta_w, decayed = 0; +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + u64 scaled_delta; + u32 scaled_runnable_contrib; + int scaled_delta_w; + u32 curr_scale = 1024; +#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ delta = now - sa->last_runnable_update; #ifdef CONFIG_HMP_VARIABLE_SCALE @@ -1265,6 +1364,12 @@ static __always_inline int __update_entity_runnable_avg(u64 now, return 0; sa->last_runnable_update = now; +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + /* retrieve scale factor for load */ + if (hmp_data.freqinvar_load_scale_enabled) + curr_scale = freq_scale[cpu].curr_scale; +#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ + /* delta_w is the amount already accumulated against our next period */ delta_w = sa->runnable_avg_period % 1024; if (delta + delta_w >= 1024) { @@ -1277,10 +1382,20 @@ static __always_inline int __update_entity_runnable_avg(u64 now, * period and accrue it. */ delta_w = 1024 - delta_w; + /* scale runnable time if necessary */ +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + scaled_delta_w = (delta_w * curr_scale) + >> SCHED_FREQSCALE_SHIFT; + if (runnable) + sa->runnable_avg_sum += scaled_delta_w; + if (running) + sa->usage_avg_sum += scaled_delta_w; +#else if (runnable) sa->runnable_avg_sum += delta_w; if (running) sa->usage_avg_sum += delta_w; +#endif /* #ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ sa->runnable_avg_period += delta_w; delta -= delta_w; @@ -1288,27 +1403,49 @@ static __always_inline int __update_entity_runnable_avg(u64 now, /* Figure out how many additional periods this update spans */ periods = delta / 1024; delta %= 1024; - + /* decay the load we have accumulated so far */ sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum, periods + 1); sa->runnable_avg_period = decay_load(sa->runnable_avg_period, periods + 1); sa->usage_avg_sum = decay_load(sa->usage_avg_sum, periods + 1); - + /* add the contribution from this period */ /* Efficiently calculate \sum (1..n_period) 1024*y^i */ runnable_contrib = __compute_runnable_contrib(periods); + /* Apply load scaling if necessary. 
+ * Note that multiplying the whole series is same as + * multiplying all terms + */ +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + scaled_runnable_contrib = (runnable_contrib * curr_scale) + >> SCHED_FREQSCALE_SHIFT; + if (runnable) + sa->runnable_avg_sum += scaled_runnable_contrib; + if (running) + sa->usage_avg_sum += scaled_runnable_contrib; +#else if (runnable) sa->runnable_avg_sum += runnable_contrib; if (running) sa->usage_avg_sum += runnable_contrib; +#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ sa->runnable_avg_period += runnable_contrib; } /* Remainder of delta accrued against u_0` */ + /* scale if necessary */ +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + scaled_delta = ((delta * curr_scale) >> SCHED_FREQSCALE_SHIFT); + if (runnable) + sa->runnable_avg_sum += scaled_delta; + if (running) + sa->usage_avg_sum += scaled_delta; +#else if (runnable) sa->runnable_avg_sum += delta; if (running) sa->usage_avg_sum += delta; +#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ sa->runnable_avg_period += delta; return decayed; @@ -1487,7 +1624,7 @@ static inline void update_entity_load_avg(struct sched_entity *se, now = cfs_rq_clock_task(group_cfs_rq(se)); if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq, - cfs_rq->curr == se)) + cfs_rq->curr == se, se->cfs_rq->rq->cpu)) return; contrib_delta = __update_entity_load_avg_contrib(se); @@ -1533,7 +1670,7 @@ static inline void update_rq_runnable_avg(struct rq *rq, int runnable) { u32 contrib; __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable, - runnable); + runnable, rq->cpu); __update_tg_runnable_avg(&rq->avg, &rq->cfs); contrib = rq->avg.runnable_avg_sum * scale_load_down(1024); contrib /= (rq->avg.runnable_avg_period + 1); @@ -3503,27 +3640,6 @@ static inline void hmp_next_down_delay(struct sched_entity *se, int cpu) * delta time by 1/22 and setting load_avg_period_ms = 706. 
*/ -#define HMP_VARIABLE_SCALE_SHIFT 16ULL -struct hmp_global_attr { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, - struct attribute *attr, char *buf); - ssize_t (*store)(struct kobject *a, struct attribute *b, - const char *c, size_t count); - int *value; - int (*to_sysfs)(int); - int (*from_sysfs)(int); -}; - -#define HMP_DATA_SYSFS_MAX 3 - -struct hmp_data_struct { - int multiplier; /* used to scale the time delta */ - struct attribute_group attr_group; - struct attribute *attributes[HMP_DATA_SYSFS_MAX + 1]; - struct hmp_global_attr attr[HMP_DATA_SYSFS_MAX]; -} hmp_data; - /* * By scaling the delta time it end-up increasing or decrease the * growing speed of the per entity load_avg_ratio @@ -3591,7 +3707,15 @@ static int hmp_theshold_from_sysfs(int value) return -1; return value; } - +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE +/* freqinvar control is only 0,1 off/on */ +static int hmp_freqinvar_from_sysfs(int value) +{ + if (value < 0 || value > 1) + return -1; + return value; +} +#endif static void hmp_attr_add( const char *name, int *value, @@ -3636,7 +3760,14 @@ static int hmp_attr_init(void) &hmp_down_threshold, NULL, hmp_theshold_from_sysfs); - +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + /* default frequency-invariant scaling ON */ + hmp_data.freqinvar_load_scale_enabled = 1; + hmp_attr_add("frequency_invariant_load_scale", + &hmp_data.freqinvar_load_scale_enabled, + NULL, + hmp_freqinvar_from_sysfs); +#endif hmp_data.attr_group.name = "hmp"; hmp_data.attr_group.attrs = hmp_data.attributes; ret = sysfs_create_group(kernel_kobj, @@ -6802,3 +6933,132 @@ __init void init_sched_fair_class(void) #endif /* SMP */ } + +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE +static u32 cpufreq_calc_scale(u32 min, u32 max, u32 curr) +{ + u32 result = curr / max; + return result; +} + +/* Called when the CPU Frequency is changed. + * Once for each CPU. + */ +static int cpufreq_callback(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = data; + int cpu = freq->cpu; + struct cpufreq_extents *extents; + + if (freq->flags & CPUFREQ_CONST_LOOPS) + return NOTIFY_OK; + + if (val != CPUFREQ_POSTCHANGE) + return NOTIFY_OK; + + /* if dynamic load scale is disabled, set the load scale to 1.0 */ + if (!hmp_data.freqinvar_load_scale_enabled) { + freq_scale[cpu].curr_scale = 1024; + return NOTIFY_OK; + } + + extents = &freq_scale[cpu]; + if (extents->flags & SCHED_LOAD_FREQINVAR_SINGLEFREQ) { + /* If our governor was recognised as a single-freq governor, + * use 1.0 + */ + extents->curr_scale = 1024; + } else { + extents->curr_scale = cpufreq_calc_scale(extents->min, + extents->max, freq->new); + } + + return NOTIFY_OK; +} + +/* Called when the CPUFreq governor is changed. + * Only called for the CPUs which are actually changed by the + * userspace. + */ +static int cpufreq_policy_callback(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_policy *policy = data; + struct cpufreq_extents *extents; + int cpu, singleFreq = 0; + static const char performance_governor[] = "performance"; + static const char powersave_governor[] = "powersave"; + + if (event == CPUFREQ_START) + return 0; + + if (event != CPUFREQ_INCOMPATIBLE) + return 0; + + /* CPUFreq governors do not accurately report the range of + * CPU Frequencies they will choose from. + * We recognise performance and powersave governors as + * single-frequency only. 
+ */ + if (!strncmp(policy->governor->name, performance_governor, + strlen(performance_governor)) || + !strncmp(policy->governor->name, powersave_governor, + strlen(powersave_governor))) + singleFreq = 1; + + /* Make sure that all CPUs impacted by this policy are + * updated since we will only get a notification when the + * user explicitly changes the policy on a CPU. + */ + for_each_cpu(cpu, policy->cpus) { + extents = &freq_scale[cpu]; + extents->max = policy->max >> SCHED_FREQSCALE_SHIFT; + extents->min = policy->min >> SCHED_FREQSCALE_SHIFT; + if (!hmp_data.freqinvar_load_scale_enabled) { + extents->curr_scale = 1024; + } else if (singleFreq) { + extents->flags |= SCHED_LOAD_FREQINVAR_SINGLEFREQ; + extents->curr_scale = 1024; + } else { + extents->flags &= ~SCHED_LOAD_FREQINVAR_SINGLEFREQ; + extents->curr_scale = cpufreq_calc_scale(extents->min, + extents->max, policy->cur); + } + } + + return 0; +} + +static struct notifier_block cpufreq_notifier = { + .notifier_call = cpufreq_callback, +}; +static struct notifier_block cpufreq_policy_notifier = { + .notifier_call = cpufreq_policy_callback, +}; + +static int __init register_sched_cpufreq_notifier(void) +{ + int ret = 0; + + /* init safe defaults since there are no policies at registration */ + for (ret = 0; ret < CONFIG_NR_CPUS; ret++) { + /* safe defaults */ + freq_scale[ret].max = 1024; + freq_scale[ret].min = 1024; + freq_scale[ret].curr_scale = 1024; + } + + pr_info("sched: registering cpufreq notifiers for scale-invariant loads\n"); + ret = cpufreq_register_notifier(&cpufreq_policy_notifier, + CPUFREQ_POLICY_NOTIFIER); + + if (ret != -EINVAL) + ret = cpufreq_register_notifier(&cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); + + return ret; +} + +core_initcall(register_sched_cpufreq_notifier); +#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */ -- cgit v1.2.3 From b43993f78abe5915674b1309407ea6ff7720654f Mon Sep 17 00:00:00 2001 From: Chris Redpath Date: Tue, 20 Nov 2012 11:04:49 +0530 Subject: ARM: Fix build breakage when big.LITTLE.conf is not used. Change-Id: I8641f5e930c65b5672130bd4a18d9868bb3ca594 Signed-off-by: Chris Redpath Signed-off-by: Liviu Dudau --- kernel/sched/fair.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0d6c21b1f4b..a35b1a53e80 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1613,7 +1613,11 @@ static inline void update_entity_load_avg(struct sched_entity *se, struct cfs_rq *cfs_rq = cfs_rq_of(se); long contrib_delta; u64 now; + int cpu = -1; /* not used in normal case */ +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + cpu = cfs_rq->rq->cpu; +#endif /* * For a group entity we need to use their owned cfs_rq_clock_task() in * case they are the parent of a throttled hierarchy. 
@@ -1624,7 +1628,7 @@ static inline void update_entity_load_avg(struct sched_entity *se, now = cfs_rq_clock_task(group_cfs_rq(se)); if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq, - cfs_rq->curr == se, se->cfs_rq->rq->cpu)) + cfs_rq->curr == se, cpu)) return; contrib_delta = __update_entity_load_avg_contrib(se); @@ -1669,8 +1673,13 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) static inline void update_rq_runnable_avg(struct rq *rq, int runnable) { u32 contrib; + int cpu = -1; /* not used in normal case */ + +#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE + cpu = rq->cpu; +#endif __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable, - runnable, rq->cpu); + runnable, cpu); __update_tg_runnable_avg(&rq->avg, &rq->cfs); contrib = rq->avg.runnable_avg_sum * scale_load_down(1024); contrib /= (rq->avg.runnable_avg_period + 1); -- cgit v1.2.3 From 3a2e9fb39687153e86ebd022446c5b9b983265b3 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Thu, 29 Nov 2012 15:41:50 +0000 Subject: sched: Basic global balancing support for HMP This patch introduces an extra-check at task up-migration to prevent overloading the cpus in the faster hmp_domain while the slower hmp_domain is not fully utilized. The patch also introduces a periodic balance check that can down-migrate tasks if the faster domain is oversubscribed and the slower is under-utilized. Signed-off-by: Morten Rasmussen --- kernel/sched/fair.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 97 insertions(+), 4 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a35b1a53e80..5ecfb1be889 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3785,6 +3785,80 @@ static int hmp_attr_init(void) } late_initcall(hmp_attr_init); #endif /* CONFIG_HMP_VARIABLE_SCALE */ + +static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd, + int *min_cpu) +{ + int cpu; + int min_load = INT_MAX; + int min_cpu_temp = NR_CPUS; + + for_each_cpu_mask(cpu, hmpd->cpus) { + if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) { + min_load = cpu_rq(cpu)->cfs.tg_load_contrib; + min_cpu_temp = cpu; + } + } + + if (min_cpu) + *min_cpu = min_cpu_temp; + + return min_load; +} + +/* + * Calculate the task starvation + * This is the ratio of actually running time vs. runnable time. + * If the two are equal the task is getting the cpu time it needs or + * it is alone on the cpu and the cpu is fully utilized. + */ +static inline unsigned int hmp_task_starvation(struct sched_entity *se) +{ + u32 starvation; + + starvation = se->avg.usage_avg_sum * scale_load_down(NICE_0_LOAD); + starvation /= (se->avg.runnable_avg_sum + 1); + + return scale_load(starvation); +} + +static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se) +{ + int min_usage; + int dest_cpu = NR_CPUS; + + if (hmp_cpu_is_slowest(cpu)) + return NR_CPUS; + + /* Is the current domain fully loaded? */ + /* load < ~94% */ + min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL); + if (min_usage < NICE_0_LOAD-64) + return NR_CPUS; + + /* Is the cpu oversubscribed? */ + /* load < ~194% */ + if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64) + return NR_CPUS; + + /* Is the task alone on the cpu? */ + if (cpu_rq(cpu)->cfs.nr_running < 2) + return NR_CPUS; + + /* Is the task actually starving? */ + if (hmp_task_starvation(se) > 768) /* <25% waiting */ + return NR_CPUS; + + /* Does the slower domain have spare cycles? 
*/ + min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu); + /* load > 50% */ + if (min_usage > NICE_0_LOAD/2) + return NR_CPUS; + + if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus)) + return dest_cpu; + return NR_CPUS; +} #endif /* CONFIG_SCHED_HMP */ /* @@ -6185,10 +6259,14 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se) < hmp_next_up_threshold) return 0; - if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus, - tsk_cpus_allowed(p)) - && se->avg.load_avg_ratio > hmp_up_threshold) { - return 1; + if (se->avg.load_avg_ratio > hmp_up_threshold) { + /* Target domain load < ~94% */ + if (hmp_domain_min_load(hmp_faster_domain(cpu), NULL) + > NICE_0_LOAD-64) + return 0; + if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus, + tsk_cpus_allowed(p))) + return 1; } return 0; } @@ -6411,6 +6489,21 @@ static void hmp_force_up_migration(int this_cpu) hmp_next_up_delay(&p->se, target->push_cpu); } } + if (!force && !target->active_balance) { + /* + * For now we just check the currently running task. + * Selecting the lightest task for offloading will + * require extensive book keeping. + */ + target->push_cpu = hmp_offload_down(cpu, curr); + if (target->push_cpu < NR_CPUS) { + target->active_balance = 1; + target->migrate_task = p; + force = 1; + trace_sched_hmp_migrate(p, target->push_cpu, 2); + hmp_next_down_delay(&p->se, target->push_cpu); + } + } raw_spin_unlock_irqrestore(&target->lock, flags); if (force) stop_one_cpu_nowait(cpu_of(target), -- cgit v1.2.3 From ab4d6dc62b986a9c113a29e931ef165f66c72752 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Wed, 13 Mar 2013 06:52:33 +0000 Subject: ARM: hw_breakpoint: Enable debug powerdown only if system supports 'has_ossr' Commit {9a6eb31 ARM: hw_breakpoint: Debug powerdown support for self-hosted debug} introduces debug powerdown support for self-hosted debug. While merging the patch 'has_ossr' check was removed which was needed for hardwares which doesn't support self-hosted debug. Pandaboard (A9) is one such hardware and Dietmar's orginial patch did mention this issue. Without that check on Panda with CPUIDLE enabled, a flood of below messages thrown. [ 3.597930] hw-breakpoint: CPU 0 failed to disable vector catch [ 3.597991] hw-breakpoint: CPU 1 failed to disable vector catch So restore that check back to avoid the mentioned issue. Cc: Dietmar Eggemann Cc: Will Deacon Reported-by: Santosh Shilimkar Acked-by: Santosh Shilimkar Signed-off-by: Lokesh Vutla --- arch/arm/kernel/hw_breakpoint.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 96093b75ab9..0ba062d7484 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -1049,7 +1049,8 @@ static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { static void __init pm_init(void) { - cpu_pm_register_notifier(&dbg_cpu_pm_nb); + if (has_ossr) + cpu_pm_register_notifier(&dbg_cpu_pm_nb); } #else static inline void pm_init(void) -- cgit v1.2.3
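The frequency-invariant load scaling introduced earlier reduces to a 0..1024 scale factor applied to every runnable-time delta before it is accumulated. A minimal user-space sketch of that math, following cpufreq_calc_scale() and the scaled-delta accumulation in the patch (the kHz figures are only examples), looks like this:

#include <stdint.h>
#include <stdio.h>

#define SCHED_FREQSCALE_SHIFT 10	/* scale factor is 0..1024, 1024 == 1.0 */

/* Scale factor for a CPU running at curr_khz out of a maximum of max_khz,
 * mirroring how curr_scale is derived from the CPUFreq policy extents. */
static uint32_t freq_scale_factor(uint32_t curr_khz, uint32_t max_khz)
{
	return curr_khz / (max_khz >> SCHED_FREQSCALE_SHIFT);
}

/* A runnable-time delta is derated by the scale factor before it is added
 * to runnable_avg_sum, so time spent at half the maximum frequency only
 * earns half of the possible load contribution. */
static uint32_t scaled_contribution(uint32_t delta_us, uint32_t scale)
{
	return (delta_us * scale) >> SCHED_FREQSCALE_SHIFT;
}

int main(void)
{
	uint32_t scale = freq_scale_factor(500000, 1000000);	/* 500MHz of 1GHz */
	printf("scale = %u/1024\n", scale);			/* ~512 */
	printf("1024us of runtime accrues %u\n", scaled_contribution(1024, scale));
	return 0;
}

With the performance or powersave governor in use the scale factor is pinned to 1024, matching the single-frequency special case in the policy notifier.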