Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                   2
-rw-r--r--  drivers/cpufreq/Kconfig.arm              10
-rw-r--r--  drivers/cpufreq/Makefile                  1
-rw-r--r--  drivers/cpufreq/arm_big_little.c        419
-rw-r--r--  drivers/cpufreq/arm_big_little.h         17
-rw-r--r--  drivers/cpufreq/cpufreq.c                23
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c   20
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c       11
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h        4
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c       20
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c          63
-rw-r--r--  drivers/cpufreq/highbank-cpufreq.c        3
-rw-r--r--  drivers/cpufreq/intel_pstate.c           27
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c      11
-rw-r--r--  drivers/cpufreq/vexpress_big_little.c    86
15 files changed, 611 insertions(+), 106 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 534fcb82515..a9c1324843e 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -201,7 +201,7 @@ source "drivers/cpufreq/Kconfig.x86"
endmenu
menu "ARM CPU frequency scaling drivers"
-depends on ARM
+depends on ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 6e57543fe0b..8327444b76c 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -4,7 +4,7 @@
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+ depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK && BIG_LITTLE
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -15,6 +15,14 @@ config ARM_DT_BL_CPUFREQ
This enables probing via DT for Generic CPUfreq driver for ARM
big.LITTLE platform. This gets frequency tables from DT.
+config ARM_VEXPRESS_BL_CPUFREQ
+ tristate "ARM Vexpress big LITTLE CPUfreq driver"
+ select ARM_BIG_LITTLE_CPUFREQ
+ depends on VEXPRESS_SPC
+ help
+ This enables the CPUfreq driver for ARM Vexpress big.LITTLE platform.
+ If in doubt, say N.
+
config ARM_EXYNOS_CPUFREQ
bool "SAMSUNG EXYNOS SoCs"
depends on ARCH_EXYNOS
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 315b9231feb..1db9b4929cf 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big
# LITTLE drivers, so that it is probed last.
+obj-$(CONFIG_ARM_VEXPRESS_BL_CPUFREQ) += vexpress_big_little.o
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 5d7f53fcd6f..076f25a59e0 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -24,27 +24,148 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
+#include <asm/bL_switcher.h>
#include "arm_big_little.h"
-/* Currently we support only two clusters */
-#define MAX_CLUSTERS 2
+#ifdef CONFIG_BL_SWITCHER
+bool bL_switching_enabled;
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) (((cluster) == A7_CLUSTER) ? (freq) << 1 : (freq))
+#define VIRT_FREQ(cluster, freq) (((cluster) == A7_CLUSTER) ? (freq) >> 1 : (freq))
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
-static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1] = {ATOMIC_INIT(0),
+ ATOMIC_INIT(0)};
+
+static unsigned int clk_big_min; /* Minimum clock frequency (Big) */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if ((cluster == per_cpu(physical_cluster, j)) &&
+ (max_freq < cpu_freq))
+ max_freq = cpu_freq;
+ }
-static unsigned int bL_cpufreq_get(unsigned int cpu)
+ pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
+ max_freq);
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
- u32 cur_cluster = cpu_to_cluster(cpu);
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A15 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
+ cur_cluster, rate);
- return clk_get_rate(clk[cur_cluster]) / 1000;
+ return rate;
+}
+
+static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled()) {
+ pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
+ cpu));
+
+ return per_cpu(cpu_last_req_freq, cpu);
+ } else {
+ return clk_get_cpu_rate(cpu);
+ }
+}
+
+static unsigned int
+bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
+ __func__, cpu, old_cluster, new_cluster, new_rate);
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (WARN_ON(ret)) {
+ pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
+ new_cluster);
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
+ __func__, cpu, old_cluster, new_cluster);
+
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
+ __func__, old_cluster, new_rate);
+
+ if (clk_set_rate(clk[old_cluster], new_rate * 1000))
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
}
/* Validate policy frequency range */
@@ -60,12 +181,14 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
struct cpufreq_freqs freqs;
- u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
+ u32 cpu = policy->cpu, freq_tab_idx, cur_cluster, new_cluster,
+ actual_cluster;
int ret = 0;
- cur_cluster = cpu_to_cluster(policy->cpu);
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
- freqs.old = bL_cpufreq_get(policy->cpu);
+ freqs.old = bL_cpufreq_get_rate(cpu);
/* Determine valid target frequency using freq_table */
cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
@@ -79,13 +202,21 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
+ if (is_bL_switching_enabled()) {
+ if ((actual_cluster == A15_CLUSTER) &&
+ (freqs.new < clk_big_min)) {
+ new_cluster = A7_CLUSTER;
+ } else if ((actual_cluster == A7_CLUSTER) &&
+ (freqs.new > clk_little_max)) {
+ new_cluster = A15_CLUSTER;
+ }
+ }
+
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
- if (ret) {
- pr_err("clk_set_rate failed: %d\n", ret);
+ ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs.new);
+ if (ret)
return ret;
- }
policy->cur = freqs.new;
@@ -94,7 +225,73 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
return ret;
}
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
+
+ return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t min_freq = ~0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency < min_freq)
+ min_freq = table[i].frequency;
+ return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t max_freq = 0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency > max_freq)
+ max_freq = table[i].frequency;
+ return max_freq;
+}
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ table[k].frequency = VIRT_FREQ(i,
+ freq_table[i][j].frequency);
+ pr_debug("%s: index: %d, freq: %d\n", __func__, k,
+ table[k].frequency);
+ k++;
+ }
+ }
+
+ table[k].index = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+ return 0;
+}
+
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
@@ -105,10 +302,35 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
}
}
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
- char name[14] = "cpu-cluster.";
+ int i;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev);
+
+ if (atomic_dec_return(&cluster_usage[MAX_CLUSTERS]))
+ return;
+
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
+ }
+
+ /* free virtual table */
+ kfree(freq_table[MAX_CLUSTERS]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ char name[14] = "cpu-cluster.X";
int ret;
if (atomic_inc_return(&cluster_usage[cluster]) != 1)
@@ -149,6 +371,62 @@ atomic_dec:
return ret;
}
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (cluster < MAX_CLUSTERS)
+ return _get_cluster_clk_and_freq_table(cpu_dev);
+
+ if (atomic_inc_return(&cluster_usage[MAX_CLUSTERS]) != 1)
+ return 0;
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ ret = _get_cluster_clk_and_freq_table(cdev);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+ /* Assuming 2 clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[0]);
+ clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
+
+ pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
+ __func__, cluster, clk_big_min, clk_little_max);
+
+ return 0;
+
+put_clusters:
+ while (i--) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
+ }
+
+ atomic_dec(&cluster_usage[MAX_CLUSTERS]);
+
+ return ret;
+}
+
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
@@ -177,37 +455,30 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
+
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
else
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = bL_cpufreq_get(policy->cpu);
+ policy->cur = clk_get_cpu_rate(policy->cpu);
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) = policy->cur;
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
}
-static int bL_cpufreq_exit(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev;
-
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- policy->cpu);
- return -ENODEV;
- }
-
- put_cluster_clk_and_freq_table(cpu_dev);
- dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
-
- return 0;
-}
-
/* Export freq_table to sysfs */
static struct freq_attr *bL_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
@@ -219,16 +490,47 @@ static struct cpufreq_driver bL_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
.verify = bL_cpufreq_verify_policy,
.target = bL_cpufreq_set_target,
- .get = bL_cpufreq_get,
+ .get = bL_cpufreq_get_rate,
.init = bL_cpufreq_init,
- .exit = bL_cpufreq_exit,
.have_governor_per_policy = true,
.attr = bL_cpufreq_attr,
};
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
- int ret;
+ int ret, i;
if (arm_bL_ops) {
pr_debug("%s: Already registered: %s, exiting\n", __func__,
@@ -243,16 +545,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = ops;
+ ret = bL_switcher_get_enabled();
+ set_switching_enabled(ret);
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
ret = cpufreq_register_driver(&bL_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
__func__, ops->name, ret);
arm_bL_ops = NULL;
} else {
- pr_info("%s: Registered platform driver: %s\n", __func__,
- ops->name);
+ ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ if (ret) {
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ arm_bL_ops = NULL;
+ } else {
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ops->name);
+ }
}
+ bL_switcher_put_enabled();
return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
@@ -265,9 +580,31 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
return;
}
+ bL_switcher_get_enabled();
+ bL_switcher_unregister_notifier(&bL_switcher_notifier);
cpufreq_unregister_driver(&bL_cpufreq_driver);
+ bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
arm_bL_ops->name);
+
+ /* For saving table get/put on every cpu in/out */
+ if (is_bL_switching_enabled()) {
+ put_cluster_clk_and_freq_table(get_cpu_device(0));
+ } else {
+ int i;
+
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n",
+ __func__, i);
+ return;
+ }
+
+ put_cluster_clk_and_freq_table(cdev);
+ }
+ }
+
arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
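The switcher support added above hinges on one mapping: with the bL switcher
active, the driver exposes a single virtual cluster on the A15 frequency
scale, halving A7 rates into it (VIRT_FREQ) and doubling them back on the way
to the clock (ACTUAL_FREQ), while clk_big_min and clk_little_max decide which
physical cluster should serve a request. A minimal standalone sketch of that
arithmetic, with illustrative thresholds not taken from any real platform:

#include <stdio.h>

#define A15_CLUSTER 0
#define A7_CLUSTER  1

/* Same mapping as the patch: A7 rates are halved into the virtual scale. */
#define ACTUAL_FREQ(cluster, freq) (((cluster) == A7_CLUSTER) ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)   (((cluster) == A7_CLUSTER) ? (freq) >> 1 : (freq))

int main(void)
{
	/* Illustrative values only, in kHz. */
	unsigned int clk_big_min = 500000;    /* lowest virtual freq the A15 serves */
	unsigned int clk_little_max = 600000; /* highest virtual freq the A7 serves */
	unsigned int cur_cluster = A15_CLUSTER;
	unsigned int requests[] = { 900000, 450000, 700000 };

	for (int i = 0; i < 3; i++) {
		unsigned int freq = requests[i], next = cur_cluster;

		/* Same decision bL_cpufreq_set_target() makes above. */
		if (cur_cluster == A15_CLUSTER && freq < clk_big_min)
			next = A7_CLUSTER;
		else if (cur_cluster == A7_CLUSTER && freq > clk_little_max)
			next = A15_CLUSTER;

		printf("virt %u kHz -> cluster %u, actual clk %u kHz\n",
		       freq, next, ACTUAL_FREQ(next, freq));
		cur_cluster = next;
	}
	return 0;
}

The 450000 kHz request migrates to the A7 with an actual 900000 kHz clock,
which is the same conversion bL_cpufreq_set_rate() applies before calling
clk_set_rate().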
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 79b2ce17884..4f5a03d3aef 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -23,6 +23,20 @@
#include <linux/device.h>
#include <linux/types.h>
+/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
+#define MAX_CLUSTERS 2
+
+#ifdef CONFIG_BL_SWITCHER
+extern bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#endif
+
struct cpufreq_arm_bL_ops {
char name[CPUFREQ_NAME_LEN];
int (*get_transition_latency)(struct device *cpu_dev);
@@ -36,7 +50,8 @@ struct cpufreq_arm_bL_ops {
static inline int cpu_to_cluster(int cpu)
{
- return topology_physical_package_id(cpu);
+ return is_bL_switching_enabled() ? MAX_CLUSTERS :
+        topology_physical_package_id(cpu);
}
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
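With switching enabled, cpu_to_cluster() above collapses every CPU onto one
extra index, which is why the arrays in arm_big_little.c grew to
MAX_CLUSTERS + 1 entries. A self-contained illustration (the topology stub
is hypothetical):

#include <stdio.h>
#include <stdbool.h>

#define MAX_CLUSTERS 2

static bool bL_switching_enabled = true; /* assume the switcher is active */

/* Hypothetical stand-in: cpus 0-1 on cluster 0, cpus 2-3 on cluster 1. */
static int topology_physical_package_id(int cpu)
{
	return cpu / 2;
}

static int cpu_to_cluster(int cpu)
{
	return bL_switching_enabled ? MAX_CLUSTERS :
		topology_physical_package_id(cpu);
}

int main(void)
{
	/* All four CPUs report index 2: the merged "virtual" cluster slot
	 * appended after the two physical ones in freq_table[]. */
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d -> cluster index %d\n", cpu, cpu_to_cluster(cpu));
	return 0;
}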
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d53f47d174..648554742a9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1075,14 +1075,11 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
__func__, cpu_dev->id, cpu);
}
- if ((cpus == 1) && (cpufreq_driver->target))
- __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
- pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
- cpufreq_cpu_put(data);
-
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
lock_policy_rwsem_read(cpu);
kobj = &data->kobj;
cmp = &data->kobj_unregister;
@@ -1103,9 +1100,13 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
free_cpumask_var(data->related_cpus);
free_cpumask_var(data->cpus);
kfree(data);
- } else if (cpufreq_driver->target) {
- __cpufreq_governor(data, CPUFREQ_GOV_START);
- __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ } else {
+ pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+ cpufreq_cpu_put(data);
+ if (cpufreq_driver->target) {
+ __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ }
}
per_cpu(cpufreq_policy_cpu, cpu) = -1;
@@ -1837,13 +1838,15 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
if (dev) {
switch (action) {
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
- case CPU_UP_CANCELED_FROZEN:
+ case CPU_DOWN_PREPARE_FROZEN:
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
cpufreq_add_dev(dev, NULL);
break;
}
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0ceb2eff5a7..f97cb3d8c5a 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input, j;
@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
- if (input == cs_tuners->ignore_nice) /* nothing to do */
+ if (input == cs_tuners->ignore_nice_load) /* nothing to do */
return count;
- cs_tuners->ignore_nice = input;
+ cs_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, 0);
- if (cs_tuners->ignore_nice)
+ if (cs_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice);
+show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);
@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_down_factor_gov_sys.attr,
&up_threshold_gov_sys.attr,
&down_threshold_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&freq_step_gov_sys.attr,
NULL
};
@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_down_factor_gov_pol.attr,
&up_threshold_gov_pol.attr,
&down_threshold_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&freq_step_gov_pol.attr,
NULL
};
@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
dbs_data->tuners = tuners;
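The ignore_nice -> ignore_nice_load rename ripples this far because the
governors' show/store macros token-paste the tuner field name into the
handler names and the sysfs attribute, so renaming the struct field is what
renames the sysfs file. A simplified model of that pattern, assuming a
cut-down macro rather than the real one in cpufreq_governor.h:

#include <stdio.h>

struct od_dbs_tuners { unsigned int ignore_nice_load; };

/* Token-pasting generates one show handler per tuner field. */
#define show_one(gov, file_name)					\
static void show_##file_name(struct od_dbs_tuners *t)			\
{									\
	printf(#file_name " = %u\n", t->file_name);			\
}

show_one(od, ignore_nice_load) /* generates show_ignore_nice_load() */

int main(void)
{
	struct od_dbs_tuners t = { .ignore_nice_load = 1 };
	show_ignore_nice_load(&t);
	return 0;
}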
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index dc9b72e25c1..a86ff72141f 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -26,7 +26,6 @@
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/cpu.h>
#include "cpufreq_governor.h"
@@ -92,9 +91,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
unsigned int j;
if (dbs_data->cdata->governor == GOV_ONDEMAND)
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
else
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
policy = cdbs->cur_policy;
@@ -181,10 +180,8 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
if (!all_cpus) {
__gov_queue_work(smp_processor_id(), dbs_data, delay);
} else {
- get_online_cpus();
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
- put_online_cpus();
}
}
EXPORT_SYMBOL_GPL(gov_queue_work);
@@ -339,12 +336,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
cs_tuners = dbs_data->tuners;
cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
} else {
od_tuners = dbs_data->tuners;
od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = od_tuners->sampling_rate;
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
od_ops = dbs_data->cdata->gov_ops;
io_busy = od_tuners->io_is_busy;
}
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index e16a96130cb..0d9e6befe1d 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
/* Per policy Governers sysfs tunables */
struct od_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
@@ -175,7 +175,7 @@ struct od_dbs_tuners {
};
struct cs_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cbcc1f..c087347d668 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
- if (input == od_tuners->ignore_nice) { /* nothing to do */
+ if (input == od_tuners->ignore_nice_load) { /* nothing to do */
return count;
}
- od_tuners->ignore_nice = input;
+ od_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- if (od_tuners->ignore_nice)
+ if (od_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice);
+show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);
@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_rate_gov_sys.attr,
&up_threshold_gov_sys.attr,
&sampling_down_factor_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&powersave_bias_gov_sys.attr,
&io_is_busy_gov_sys.attr,
NULL
@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_rate_gov_pol.attr,
&up_threshold_gov_pol.attr,
&sampling_down_factor_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&powersave_bias_gov_pol.attr,
&io_is_busy_gov_pol.attr,
NULL
@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data)
}
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
tuners->io_is_busy = should_io_be_busy();
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index fb65decffa2..66733f1d55d 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -21,6 +21,9 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <asm/cputime.h>
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+#endif
static spinlock_t cpufreq_stats_lock;
@@ -349,16 +352,15 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
cpufreq_update_policy(cpu);
break;
case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
cpufreq_stats_free_sysfs(cpu);
break;
case CPU_DEAD:
- cpufreq_stats_free_table(cpu);
- break;
- case CPU_UP_CANCELED_FROZEN:
- cpufreq_stats_free_sysfs(cpu);
+ case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
break;
}
@@ -379,7 +381,7 @@ static struct notifier_block notifier_trans_block = {
.notifier_call = cpufreq_stat_notifier_trans
};
-static int __init cpufreq_stats_init(void)
+static int cpufreq_stats_setup(void)
{
int ret;
unsigned int cpu;
@@ -407,7 +409,8 @@ static int __init cpufreq_stats_init(void)
return 0;
}
-static void __exit cpufreq_stats_exit(void)
+
+static void cpufreq_stats_cleanup(void)
{
unsigned int cpu;
@@ -422,6 +425,54 @@ static void __exit cpufreq_stats_exit(void)
}
}
+#ifdef CONFIG_BL_SWITCHER
+static int cpufreq_stats_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_stats_cleanup();
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ case BL_NOTIFY_POST_DISABLE:
+ cpufreq_stats_setup();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block switcher_notifier = {
+ .notifier_call = cpufreq_stats_switcher_notifier,
+};
+#endif
+
+static int __init cpufreq_stats_init(void)
+{
+ int ret;
+ spin_lock_init(&cpufreq_stats_lock);
+
+ ret = cpufreq_stats_setup();
+#ifdef CONFIG_BL_SWITCHER
+ if (!ret)
+ bL_switcher_register_notifier(&switcher_notifier);
+#endif
+ return ret;
+}
+
+static void __exit cpufreq_stats_exit(void)
+{
+#ifdef CONFIG_BL_SWITCHER
+ bL_switcher_unregister_notifier(&switcher_notifier);
+#endif
+ cpufreq_stats_cleanup();
+}
+
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
"through sysfs filesystem");
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index b61b5a3fad6..8c4771da1b8 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
struct device_node *np;
int ret;
- if (!of_machine_is_compatible("calxeda,highbank"))
+ if ((!of_machine_is_compatible("calxeda,highbank")) &&
+ (!of_machine_is_compatible("calxeda,ecx-2000")))
return -ENODEV;
for_each_child_of_node(of_find_node_by_path("/cpus"), np)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 07f2840ad80..a22fb3e4725 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@ struct pstate_adjust_policy {
static struct pstate_adjust_policy default_policy = {
.sample_rate_ms = 10,
.deadband = 0,
- .setpoint = 109,
- .p_gain_pct = 17,
+ .setpoint = 97,
+ .p_gain_pct = 20,
.d_gain_pct = 0,
- .i_gain_pct = 4,
+ .i_gain_pct = 0,
};
struct perf_limits {
@@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t busy_scaled;
- int32_t core_busy, turbo_pstate, current_pstate;
+ int32_t core_busy, max_pstate, current_pstate;
core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
- turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+ max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+ busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
return fp_toint(busy_scaled);
}
@@ -516,7 +516,8 @@ static void intel_pstate_timer_func(unsigned long __data)
}
#define ICPU(model, policy) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
+ (unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, default_policy),
@@ -543,6 +544,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
intel_pstate_get_cpu_pstates(cpu);
+ if (!cpu->pstate.current_pstate) {
+ all_cpu_data[cpunum] = NULL;
+ kfree(cpu);
+ return -ENODATA;
+ }
cpu->cpu = cpunum;
cpu->pstate_policy =
@@ -629,8 +635,8 @@ static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int rc, min_pstate, max_pstate;
struct cpudata *cpu;
+ int rc;
rc = intel_pstate_init_cpu(policy->cpu);
if (rc)
@@ -644,9 +650,8 @@ static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
- intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
- policy->min = min_pstate * 100000;
- policy->max = max_pstate * 100000;
+ policy->min = cpu->pstate.min_pstate * 100000;
+ policy->max = cpu->pstate.turbo_pstate * 100000;
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
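Scaling busyness against max_pstate instead of turbo_pstate means a core
running below its guaranteed maximum now reports proportionally higher load
to the controller. A standalone sketch of the fixed-point math, assuming the
driver's FRAC_BITS of 8 and made-up sample values:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
static int32_t int_tofp(int32_t x)          { return x << FRAC_BITS; }
static int32_t fp_toint(int32_t x)          { return x >> FRAC_BITS; }
static int32_t mul_fp(int32_t x, int32_t y) { return ((int64_t)x * y) >> FRAC_BITS; }
static int32_t div_fp(int32_t x, int32_t y) { return ((int64_t)x << FRAC_BITS) / y; }

int main(void)
{
	/* Hypothetical sample: core 60% busy, max P-state 24, current 16. */
	int32_t core_busy = int_tofp(60);
	int32_t max_pstate = int_tofp(24);
	int32_t current_pstate = int_tofp(16);

	/* Same formula as the patched intel_pstate_get_scaled_busy(). */
	int32_t busy_scaled = mul_fp(core_busy,
				     div_fp(max_pstate, current_pstate));

	printf("busy_scaled = %d%%\n", fp_toint(busy_scaled)); /* prints 90 */
	return 0;
}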
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index d5391276894..f92b02ae20b 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
clk_put(cpuclk);
return -EINVAL;
}
- ret = clk_set_rate(cpuclk, rate);
- if (ret) {
- clk_put(cpuclk);
- return ret;
- }
/* clock table init */
for (i = 2;
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+ ret = clk_set_rate(cpuclk, rate);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
+
policy->cur = loongson2_cpufreq_get(policy->cpu);
cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
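The loongson2 change only moves clk_set_rate() after the table-filling loop;
presumably the platform clock code looks the requested rate up in
loongson2_clockmod_table, so the table has to be populated first (an
inference from the ordering, not stated in the patch). The table arithmetic
itself is easy to verify in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long rate = 800000; /* hypothetical base rate, in kHz */

	/* Entries scale the base rate in eighths, as in the loop above. */
	for (int i = 2; i <= 7; i++)
		printf("level %d: %lu kHz\n", i, (rate * i) / 8);
	return 0;
}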
diff --git a/drivers/cpufreq/vexpress_big_little.c b/drivers/cpufreq/vexpress_big_little.c
new file mode 100644
index 00000000000..1abb883c051
--- /dev/null
+++ b/drivers/cpufreq/vexpress_big_little.c
@@ -0,0 +1,86 @@
+/*
+ * Vexpress big.LITTLE CPUFreq Interface driver
+ *
+ * It provides the necessary ops to the arm_big_little cpufreq driver and
+ * gets frequency information from the Vexpress SPC. Freq tables must be in kHz.
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/export.h>
+#include <linux/opp.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/vexpress.h>
+#include "arm_big_little.h"
+
+static int vexpress_init_opp_table(struct device *cpu_dev)
+{
+ int i = -1, count, cluster = cpu_to_cluster(cpu_dev->id);
+ u32 *table;
+ int ret;
+
+ count = vexpress_spc_get_freq_table(cluster, &table);
+ if (!table || !count) {
+ pr_err("SPC controller returned invalid freq table");
+ return -EINVAL;
+ }
+
+ while (++i < count) {
+ /* FIXME: Voltage value */
+ ret = opp_add(cpu_dev, table[i] * 1000, 900000);
+ if (ret) {
+ dev_warn(cpu_dev, "%s: Failed to add OPP %d, err: %d\n",
+ __func__, table[i] * 1000, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int vexpress_get_transition_latency(struct device *cpu_dev)
+{
+ /* 1 ms */
+ return 1000000;
+}
+
+static struct cpufreq_arm_bL_ops vexpress_bL_ops = {
+ .name = "vexpress-bL",
+ .get_transition_latency = vexpress_get_transition_latency,
+ .init_opp_table = vexpress_init_opp_table,
+};
+
+static int vexpress_bL_init(void)
+{
+ if (!vexpress_spc_check_loaded()) {
+ pr_info("%s: No SPC found\n", __func__);
+ return -ENOENT;
+ }
+
+ return bL_cpufreq_register(&vexpress_bL_ops);
+}
+module_init(vexpress_bL_init);
+
+static void vexpress_bL_exit(void)
+{
+ return bL_cpufreq_unregister(&vexpress_bL_ops);
+}
+module_exit(vexpress_bL_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("ARM Vexpress big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL");