author	Jon Medhurst <tixy@linaro.org>	2014-06-04 14:42:24 +0100
committer	Jon Medhurst <tixy@linaro.org>	2014-06-04 14:42:24 +0100
commit	d5e9d6365a0d64c8abe9a4feca57568def46a3c2 (patch)
tree	69cede49ef9e40c58615f4560d2b549fe39d6d60
parent	a59acfe11120b690df1976a815df1a67395100f2 (diff)
parent	2560930bfaab189fd4b8701a2ffd9d1ec9a07b56 (diff)
Merge branch 'tracking-armlt-juno-cpufreq' into integration-linaro-vexpress
(tags: tracking-integration-linaro-vexpress-ll-20140607.0, tracking-integration-linaro-vexpress-ll-20140605.0)
-rw-r--r--	Documentation/devicetree/bindings/clock/scpi.txt	34
-rw-r--r--	arch/arm64/Kconfig	1
-rw-r--r--	arch/arm64/include/asm/bL_switcher.h	54
-rw-r--r--	arch/arm64/include/asm/cputype.h	5
-rw-r--r--	arch/arm64/include/asm/topology.h	3
-rw-r--r--	arch/arm64/kernel/topology.c	411
-rw-r--r--	drivers/clk/Kconfig	10
-rw-r--r--	drivers/clk/Makefile	1
-rw-r--r--	drivers/clk/clk-scpi.c	309
-rw-r--r--	drivers/cpufreq/Kconfig.arm	12
-rw-r--r--	drivers/cpufreq/Makefile	1
-rw-r--r--	drivers/cpufreq/arm_big_little.c	4
-rw-r--r--	drivers/cpufreq/highbank-cpufreq.c	24
-rw-r--r--	drivers/cpufreq/scpi-cpufreq.c	99
-rw-r--r--	drivers/mailbox/Kconfig	27
-rw-r--r--	drivers/mailbox/Makefile	8
-rw-r--r--	drivers/mailbox/arm_mhu.c	336
-rw-r--r--	drivers/mailbox/arm_mhu.h	31
-rw-r--r--	drivers/mailbox/mailbox.c	592
-rw-r--r--	drivers/mailbox/pl320-ipc.c	198
-rw-r--r--	drivers/mailbox/pl320.c	214
-rw-r--r--	drivers/mailbox/scpi_protocol.c	354
-rw-r--r--	include/linux/mailbox.h	27
-rw-r--r--	include/linux/mailbox_client.h	48
-rw-r--r--	include/linux/mailbox_controller.h	85
-rw-r--r--	include/linux/scpi_protocol.h	30
26 files changed, 2684 insertions, 234 deletions
diff --git a/Documentation/devicetree/bindings/clock/scpi.txt b/Documentation/devicetree/bindings/clock/scpi.txt
new file mode 100644
index 000000000000..b2b7035018f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/scpi.txt
@@ -0,0 +1,34 @@
+Device Tree Clock bindings for the clocks based on
+System Control and Power Interface (SCPI) Message Protocol
+
+This binding uses the common clock binding[1].
+
+Required properties:
+- compatible : shall be one of the following:
+ "arm,scpi-clks" - for the container node with all the clocks
+ based on the SCPI protocol
+ "arm,scpi-clk-indexed" - all the clocks that are variable and index
+ based. These clocks don't provide the full range between the
+ limits but only discrete points within the range. The firmware
+ provides the mapping for each such operating frequency and the
+ index associated with it.
+ "arm,scpi-clk-range" - all the clocks that are variable and provide
+ full range within the specified range
+
+Required properties for all clocks (all from the common clock binding):
+- #clock-cells : shall be set to 0 or 1 depending on whether the node has a
+ single or multiple clock outputs.
+- clock-output-names : shall be the corresponding names of the outputs.
+- clock-indices: The identifying number for the clocks in the node as expected
+ by the firmware. It can be non-linear and hence provides the mapping
+ of identifiers into the clock-output-names array.
+- frequency-range: The allowed range of clock frequencies supported, specified
+ as minimum and maximum limits (two u32 fields).
+ This is required only if compatible is "arm,scpi-clk-range"
+
+Clock consumers should specify the desired clocks they use with a
+"clocks" phandle cell. Consumers should also provide an additional ID
+in their clock property. This ID refers to the specific clock in the clock
+provider list.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
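
For illustration only (this example is not part of the patch), a provider node
using these bindings, together with a consumer, might look like the following
sketch; the node names, clock indices, frequency limits and output names are
hypothetical:

	scpi-clks {
		compatible = "arm,scpi-clks";

		scpi_dvfs: scpi-dvfs {
			compatible = "arm,scpi-clk-indexed";
			#clock-cells = <1>;
			clock-indices = <0>, <1>;
			clock-output-names = "atlclk", "aplclk";
		};

		scpi_clk: scpi-clk {
			compatible = "arm,scpi-clk-range";
			#clock-cells = <1>;
			clock-indices = <3>;
			frequency-range = <23000000 210000000>;
			clock-output-names = "pxlclk";
		};
	};

	hdlcd {
		/* 0 = first entry in the provider's clock list */
		clocks = <&scpi_clk 0>;
	};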
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 503906aea5fb..e4c03a7da6fd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_OPP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/arm64/include/asm/bL_switcher.h b/arch/arm64/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..2bee500b7f54
--- /dev/null
+++ b/arch/arm64/include/asm/bL_switcher.h
@@ -0,0 +1,54 @@
+/*
+ * Based on the stubs for the ARM implementation which is:
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+static inline int bL_switch_request(unsigned int cpu,
+ unsigned int new_cluster_id)
+{
+ return -ENOTSUPP;
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+
+#endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index c404fb0df3a6..b3b3287ca3fe 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -18,6 +18,8 @@
#define INVALID_HWID ULONG_MAX
+#define MPIDR_UP_BITMASK (0x1 << 30)
+#define MPIDR_MT_BITMASK (0x1 << 24)
#define MPIDR_HWID_BITMASK 0xff00ffffff
#define MPIDR_LEVEL_BITS_SHIFT 3
@@ -30,6 +32,9 @@
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+#define MPIDR_AFF_MASK(level) \
+ ((u64)MPIDR_LEVEL_MASK << MPIDR_LEVEL_SHIFT(level))
+
#define read_cpuid(reg) ({ \
u64 __val; \
asm("mrs %0, " #reg : "=r" (__val)); \
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 0172e6d76bf3..7ebcd31ce51c 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -20,9 +20,6 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-#define mc_capable() (cpu_topology[0].cluster_id != -1)
-#define smt_capable() (cpu_topology[0].thread_id != -1)
-
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 3e06b0be4ec8..f7f3478eaab5 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -17,11 +17,338 @@
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
+#include <linux/of.h>
#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
#include <asm/topology.h>
/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ of_node_put(cpu_node);
+ return cpu;
+ }
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+ of_node_put(cpu_node);
+ return -1;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int cluster_id __initdata;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
+struct cpu_efficiency {
+ const char *compatible;
+ unsigned long efficiency;
+};
+
+/*
+ * Table of the relative efficiency of each processor type.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not defined in the table use the
+ * default SCHED_POWER_SCALE value for cpu_scale.
+ */
+static const struct cpu_efficiency table_efficiency[] = {
+ { "arm,cortex-a57", 3891 },
+ { "arm,cortex-a53", 2048 },
+ { NULL, },
+};
+
+static unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu) __cpu_capacity[cpu]
+
+static unsigned long middle_capacity = 1;
+
+/*
+ * Iterate over all CPUs' descriptors in the DT and compute the efficiency
+ * (as per table_efficiency). Also calculate a middle efficiency, as close
+ * as possible to (max{eff_i} + min{eff_i}) / 2.
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu) {
+ if (cpu_topology[cpu].cluster_id == -1) {
+ pr_err("CPU%d: No topology information specified\n",
+ cpu);
+ ret = -EINVAL;
+ }
+ }
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+
+static void __init parse_dt_cpu_power(void)
+{
+ const struct cpu_efficiency *cpu_eff;
+ struct device_node *cn;
+ unsigned long min_capacity = ULONG_MAX;
+ unsigned long max_capacity = 0;
+ unsigned long capacity = 0;
+ int cpu;
+
+ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+ GFP_NOWAIT);
+
+ for_each_possible_cpu(cpu) {
+ const u32 *rate;
+ int len;
+
+ /* Too early to use cpu->of_node */
+ cn = of_get_cpu_node(cpu, NULL);
+ if (!cn) {
+ pr_err("Missing device node for CPU %d\n", cpu);
+ continue;
+ }
+
+ for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+ if (of_device_is_compatible(cn, cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL) {
+ pr_warn("%s: Unknown CPU type\n", cn->full_name);
+ continue;
+ }
+
+ rate = of_get_property(cn, "clock-frequency", &len);
+ if (!rate || len != 4) {
+ pr_err("%s: Missing clock-frequency property\n",
+ cn->full_name);
+ continue;
+ }
+
+ capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+
+ /* Save min capacity of the system */
+ if (capacity < min_capacity)
+ min_capacity = capacity;
+
+ /* Save max capacity of the system */
+ if (capacity > max_capacity)
+ max_capacity = capacity;
+
+ cpu_capacity(cpu) = capacity;
+ }
+
+ /* If min and max capacities are equal we bypass the update of the
+ * cpu_scale because all CPUs have the same capacity. Otherwise, we
+ * compute a middle_capacity factor that will ensure that the capacity
+ * of an 'average' CPU of the system will be as close as possible to
+ * SCHED_POWER_SCALE, which is the default value, but with the
+ * constraint explained near table_efficiency[].
+ */
+ if (min_capacity == max_capacity)
+ return;
+ else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
+ middle_capacity = (min_capacity + max_capacity)
+ >> (SCHED_POWER_SHIFT+1);
+ else
+ middle_capacity = ((max_capacity / 3)
+ >> (SCHED_POWER_SHIFT-1)) + 1;
+}
+
+/*
+ * Look up the custom capacity of a CPU in the __cpu_capacity table during
+ * boot. The update of all CPUs is O(n^2) on a heterogeneous system but the
+ * function returns immediately on a homogeneous (SMP) system.
+ */
+static void update_cpu_power(unsigned int cpu)
+{
+ if (!cpu_capacity(cpu))
+ return;
+
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+ pr_info("CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+/*
* cpu topology table
*/
struct cpu_topology cpu_topology[NR_CPUS];
@@ -38,14 +365,8 @@ static void update_siblings_masks(unsigned int cpuid)
int cpu;
if (cpuid_topo->cluster_id == -1) {
- /*
- * DT does not contain topology information for this cpu
- * reset it to default behaviour
- */
- pr_debug("CPU%u: No topology information configured\n", cpuid);
- cpuid_topo->core_id = 0;
- cpumask_set_cpu(cpuid, &cpuid_topo->core_sibling);
- cpumask_set_cpu(cpuid, &cpuid_topo->thread_sibling);
+ /* No topology information for this cpu ?! */
+ pr_err("CPU%u: No topology information configured\n", cpuid);
return;
}
@@ -71,25 +392,85 @@ static void update_siblings_masks(unsigned int cpuid)
void store_cpu_topology(unsigned int cpuid)
{
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+ u64 mpidr;
+
+ if (cpuid_topo->cluster_id != -1)
+ goto topology_populated;
+
+ mpidr = read_cpuid_mpidr();
+
+ /* Create cpu topology mapping based on MPIDR. */
+ if (mpidr & MPIDR_UP_BITMASK) {
+ /* Uniprocessor system */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cpuid_topo->cluster_id = 0;
+ } else if (mpidr & MPIDR_MT_BITMASK) {
+ /* Multiprocessor system : Multi-threads per core */
+ cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpuid_topo->cluster_id =
+ ((mpidr & MPIDR_AFF_MASK(2)) >> mpidr_hash.shift_aff[2] |
+ (mpidr & MPIDR_AFF_MASK(3)) >> mpidr_hash.shift_aff[3])
+ >> mpidr_hash.shift_aff[1] >> mpidr_hash.shift_aff[0];
+ } else {
+ /* Multiprocessor system : Single-thread per core */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cpuid_topo->cluster_id =
+ ((mpidr & MPIDR_AFF_MASK(1)) >> mpidr_hash.shift_aff[1] |
+ (mpidr & MPIDR_AFF_MASK(2)) >> mpidr_hash.shift_aff[2] |
+ (mpidr & MPIDR_AFF_MASK(3)) >> mpidr_hash.shift_aff[3])
+ >> mpidr_hash.shift_aff[0];
+ }
+
+ pr_debug("CPU%u: cluster %d core %d thread %d mpidr %llx\n",
+ cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
+ cpuid_topo->thread_id, mpidr);
+
+topology_populated:
update_siblings_masks(cpuid);
+ update_cpu_power(cpuid);
}
-/*
- * init_cpu_topology is called at boot when only one cpu is running
- * which prevent simultaneous write access to cpu_topology array
- */
-void __init init_cpu_topology(void)
+static void __init reset_cpu_topology(void)
{
unsigned int cpu;
- /* init core mask and power*/
for_each_possible_cpu(cpu) {
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
cpu_topo->thread_id = -1;
- cpu_topo->core_id = -1;
+ cpu_topo->core_id = 0;
cpu_topo->cluster_id = -1;
+
cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}
}
+
+static void __init reset_cpu_power(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ set_power_scale(cpu, SCHED_POWER_SCALE);
+}
+
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ if (parse_dt_topology())
+ reset_cpu_topology();
+
+ reset_cpu_power();
+ parse_dt_cpu_power();
+}
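
For reference (this example is not part of the patch), the cpu-map structure
that parse_cluster() and parse_core() walk looks roughly like this
hypothetical two-cluster big.LITTLE fragment:

	cpus {
		cpu-map {
			cluster0 {
				core0 { cpu = <&A57_0>; };
				core1 { cpu = <&A57_1>; };
			};
			cluster1 {
				core0 { cpu = <&A53_0>; };
				core1 { cpu = <&A53_1>; };
			};
		};

		A57_0: cpu@0 {
			device_type = "cpu";
			compatible = "arm,cortex-a57";
			reg = <0x0 0x0>;
			clock-frequency = <1100000000>;
		};
		/* A57_1, A53_0, A53_1 nodes follow the same pattern */
	};

With the table_efficiency values above, parse_dt_cpu_power() would score such
an A57 at (1100000000 >> 20) * 3891 = 1048 * 3891, roughly 4.08M capacity
units, before normalising through middle_capacity.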
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4fdfd6c70bd3..50f6c8767e30 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -32,6 +32,16 @@ config COMMON_CLK_WM831X
source "drivers/clk/versatile/Kconfig"
+config COMMON_CLK_SCPI
+ bool "Clock driver controlled via SCPI interface"
+ depends on ARM_SCPI_PROTOCOL
+ ---help---
+ This driver provides support for clocks that are controlled
+ by firmware that implements the SCPI interface.
+
+ This driver uses the SCPI Message Protocol to interact with the
+ firmware providing all the clock controls.
+
config COMMON_CLK_MAX77686
tristate "Clock driver for Maxim 77686 MFD"
depends on MFD_MAX77686
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 5f8a28735c96..edec51b90830 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
+obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o
obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
new file mode 100644
index 000000000000..2d707663542f
--- /dev/null
+++ b/drivers/clk/clk-scpi.c
@@ -0,0 +1,309 @@
+/*
+ * System Control and Power Interface (SCPI) Protocol based clock driver
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scpi_protocol.h>
+
+struct scpi_clk {
+ u32 id;
+ const char *name;
+ struct clk_hw hw;
+ struct scpi_opp *opps;
+ unsigned long rate_min;
+ unsigned long rate_max;
+};
+
+#define to_scpi_clk(clk) container_of(clk, struct scpi_clk, hw)
+
+static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct scpi_clk *clk = to_scpi_clk(hw);
+ return scpi_clk_get_val(clk->id);
+}
+
+static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct scpi_clk *clk = to_scpi_clk(hw);
+ if (clk->rate_min && rate < clk->rate_min)
+ rate = clk->rate_min;
+ if (clk->rate_max && rate > clk->rate_max)
+ rate = clk->rate_max;
+
+ return rate;
+}
+
+static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct scpi_clk *clk = to_scpi_clk(hw);
+ return scpi_clk_set_val(clk->id, rate);
+}
+
+static struct clk_ops scpi_clk_ops = {
+ .recalc_rate = scpi_clk_recalc_rate,
+ .round_rate = scpi_clk_round_rate,
+ .set_rate = scpi_clk_set_rate,
+};
+
+/* find closest match to given frequency in OPP table */
+static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
+{
+ int idx, max_opp = clk->opps->count;
+ u32 *freqs = clk->opps->freqs;
+ u32 fmin = 0, fmax = ~0, ftmp;
+
+ for (idx = 0; idx < max_opp; idx++, freqs++) {
+ ftmp = *freqs;
+ if (ftmp >= (u32)rate) {
+ if (ftmp <= fmax)
+ fmax = ftmp;
+ } else {
+ if (ftmp >= fmin)
+ fmin = ftmp;
+ }
+ }
+ if (fmax != ~0)
+ return fmax;
+ else
+ return fmin;
+}
+
+static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct scpi_clk *clk = to_scpi_clk(hw);
+ int idx = scpi_dvfs_get_idx(clk->id);
+ u32 *freqs = clk->opps->freqs;
+
+ if (idx < 0)
+ return 0;
+ else
+ return *(freqs + idx);
+}
+
+static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct scpi_clk *clk = to_scpi_clk(hw);
+ return __scpi_dvfs_round_rate(clk, rate);
+}
+
+static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate)
+{
+ int idx, max_opp = clk->opps->count;
+ u32 *freqs = clk->opps->freqs;
+
+ for (idx = 0; idx < max_opp; idx++, freqs++)
+ if (*freqs == (u32)rate)
+ break;
+ return (idx == max_opp) ? -EINVAL : idx;
+}
+
+static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct scpi_clk *clk = to_scpi_clk(hw);
+ int ret = __scpi_find_dvfs_index(clk, rate);
+
+ if (ret < 0)
+ return ret;
+ else
+ return scpi_dvfs_set_idx(clk->id, (u8)ret);
+}
+
+static struct clk_ops scpi_dvfs_ops = {
+ .recalc_rate = scpi_dvfs_recalc_rate,
+ .round_rate = scpi_dvfs_round_rate,
+ .set_rate = scpi_dvfs_set_rate,
+};
+
+static struct clk *
+scpi_dvfs_ops_init(struct device *dev, struct device_node *np,
+ struct scpi_clk *sclk)
+{
+ struct clk_init_data init;
+ struct scpi_opp *opp;
+
+ init.name = sclk->name;
+ init.flags = CLK_IS_ROOT;
+ init.num_parents = 0;
+ init.ops = &scpi_dvfs_ops;
+ sclk->hw.init = &init;
+
+ opp = scpi_dvfs_get_opps(sclk->id);
+ if (IS_ERR(opp))
+ return (struct clk *)opp;
+
+ sclk->opps = opp;
+
+ return devm_clk_register(dev, &sclk->hw);
+}
+
+static struct clk *
+scpi_clk_ops_init(struct device *dev, struct device_node *np,
+ struct scpi_clk *sclk)
+{
+ struct clk_init_data init;
+ u32 range[2];
+ int ret;
+
+ init.name = sclk->name;
+ init.flags = CLK_IS_ROOT;
+ init.num_parents = 0;
+ init.ops = &scpi_clk_ops;
+ sclk->hw.init = &init;
+
+ ret = of_property_read_u32_array(np, "frequency-range", range,
+ ARRAY_SIZE(range));
+ if (ret)
+ return ERR_PTR(ret);
+ sclk->rate_min = range[0];
+ sclk->rate_max = range[1];
+
+ return devm_clk_register(dev, &sclk->hw);
+}
+
+static int scpi_clk_setup(struct device *dev, struct device_node *np,
+ const void *data)
+{
+ struct clk *(*setup_ops)(struct device *, struct device_node *,
+ struct scpi_clk *) = data;
+ struct clk_onecell_data *clk_data;
+ struct clk **clks;
+ int count; /* must be signed: of_property_count_strings() can return -errno */
+ int idx;
+
+ count = of_property_count_strings(np, "clock-output-names");
+ if (count < 0) {
+ dev_err(dev, "%s: invalid clock output count\n", np->name);
+ return -EINVAL;
+ }
+
+ clk_data = devm_kmalloc(dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data) {
+ dev_err(dev, "failed to allocate clock provider data\n");
+ return -ENOMEM;
+ }
+
+ clks = devm_kmalloc(dev, count * sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ dev_err(dev, "failed to allocate clock providers\n");
+ return -ENOMEM;
+ }
+
+ for (idx = 0; idx < count; idx++) {
+ struct scpi_clk *sclk;
+ u32 val;
+
+ sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
+ if (!sclk) {
+ dev_err(dev, "failed to allocate scpi clocks\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_string_index(np, "clock-output-names",
+ idx, &sclk->name)) {
+ dev_err(dev, "invalid clock name @ %s\n", np->name);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_index(np, "clock-indices",
+ idx, &val)) {
+ dev_err(dev, "invalid clock index @ %s\n", np->name);
+ return -EINVAL;
+ }
+
+ sclk->id = val;
+
+ clks[idx] = setup_ops(dev, np, sclk);
+ if (IS_ERR(clks[idx])) {
+ dev_err(dev, "failed to register clock '%s'\n",
+ sclk->name);
+ return PTR_ERR(clks[idx]);
+ }
+
+ dev_dbg(dev, "Registered clock '%s'\n", sclk->name);
+ }
+
+ clk_data->clks = clks;
+ clk_data->clk_num = count;
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+
+ return 0;
+}
+
+static const struct of_device_id clk_match[] = {
+ { .compatible = "arm,scpi-clk-indexed", .data = scpi_dvfs_ops_init, },
+ { .compatible = "arm,scpi-clk-range", .data = &scpi_clk_ops_init, },
+ {}
+};
+
+static int scpi_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node, *child;
+ const struct of_device_id *match;
+ int ret;
+
+ for_each_child_of_node(np, child) {
+ match = of_match_node(clk_match, child);
+ if (!match)
+ continue;
+ ret = scpi_clk_setup(dev, child, match->data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static struct of_device_id scpi_clk_ids[] = {
+ { .compatible = "arm,scpi-clks", },
+ {}
+};
+
+static struct platform_driver scpi_clk_driver = {
+ .driver = {
+ .name = "scpi_clocks",
+ .of_match_table = scpi_clk_ids,
+ },
+ .probe = scpi_clk_probe,
+};
+
+static int __init scpi_clk_init(void)
+{
+ return platform_driver_register(&scpi_clk_driver);
+}
+postcore_initcall(scpi_clk_init);
+
+static void __exit scpi_clk_exit(void)
+{
+ platform_driver_unregister(&scpi_clk_driver);
+}
+module_exit(scpi_clk_exit);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 580503513f0f..6b2da11b1e1f 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,8 @@
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+ depends on ARM_CPU_TOPOLOGY || (ARM64 && SMP)
+ depends on HAVE_CLK
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -24,6 +25,15 @@ config ARM_VEXPRESS_SPC_CPUFREQ
This add the CPUfreq driver support for Versatile Express
big.LITTLE platforms using SPC for power management.
+config ARM_SCPI_CPUFREQ
+ tristate "SCPI based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL
+ help
+ This adds the CPUfreq driver support for ARM big.LITTLE platforms
+ using the SCPI interface for CPU power management.
+
+ This driver works only if the firmware supporting CPU DVFS adheres
+ to the SCPI protocol.
config ARM_EXYNOS_CPUFREQ
bool
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 54a05c93b578..a418165cd9c0 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
+obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index bad2ed317ba2..bf78258a2199 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -340,7 +340,9 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
}
name[12] = cluster + '0';
- clk[cluster] = clk_get(cpu_dev, name);
+ clk[cluster] = clk_get_sys(name, NULL);
+ if (IS_ERR(clk[cluster]))
+ clk[cluster] = clk_get(cpu_dev, NULL);
if (!IS_ERR(clk[cluster])) {
dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
__func__, clk[cluster], freq_table[cluster],
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index bf8902a0866d..4846734874a8 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -19,7 +19,7 @@
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/mailbox.h>
+#include <linux/mailbox_client.h>
#include <linux/platform_device.h>
#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
@@ -29,8 +29,28 @@
static int hb_voltage_change(unsigned int freq)
{
u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
+ struct mbox_client cl;
+ int ret = -ETIMEDOUT;
+ struct mbox_chan *chan;
- return pl320_ipc_transmit(msg);
+ cl.rx_callback = NULL;
+ cl.tx_done = NULL;
+ cl.tx_block = true;
+ cl.tx_tout = 1000; /* 1 sec */
+ cl.link_data = NULL;
+ cl.knows_txdone = false;
+ cl.chan_name = "pl320:A9_to_M3";
+
+ chan = mbox_request_channel(&cl);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ if (mbox_send_message(chan, (void *)msg))
+ ret = msg[1]; /* PL320 updates buffer with FIFO after ACK */
+
+ mbox_free_channel(chan);
+
+ return ret;
}
static int hb_cpufreq_clk_notify(struct notifier_block *nb,
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
new file mode 100644
index 000000000000..60725199b9aa
--- /dev/null
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -0,0 +1,99 @@
+/*
+ * SCPI CPUFreq Interface driver
+ *
+ * It provides the necessary ops to the arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/scpi_protocol.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int scpi_init_opp_table(struct device *cpu_dev)
+{
+ u8 domain = topology_physical_package_id(cpu_dev->id);
+ struct scpi_opp *opp;
+ int idx, ret = 0, max_opp;
+ u32 *freqs;
+
+ opp = scpi_dvfs_get_opps(domain);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ freqs = opp->freqs;
+ max_opp = opp->count;
+ for (idx = 0; idx < max_opp; idx++, freqs++) {
+ ret = dev_pm_opp_add(cpu_dev, *freqs, 900000000 /* TODO */);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to add opp %u\n", *freqs);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int scpi_get_transition_latency(struct device *cpu_dev)
+{
+ u8 domain = topology_physical_package_id(cpu_dev->id);
+ struct scpi_opp *opp;
+
+ opp = scpi_dvfs_get_opps(domain);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ return opp->latency * 1000; /* SCPI firmware reports the latency in us */
+}
+
+static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
+ .name = "scpi",
+ .get_transition_latency = scpi_get_transition_latency,
+ .init_opp_table = scpi_init_opp_table,
+};
+
+static int scpi_cpufreq_probe(struct platform_device *pdev)
+{
+ return bL_cpufreq_register(&scpi_cpufreq_ops);
+}
+
+static int scpi_cpufreq_remove(struct platform_device *pdev)
+{
+ bL_cpufreq_unregister(&scpi_cpufreq_ops);
+ return 0;
+}
+
+static struct of_device_id scpi_cpufreq_of_match[] = {
+ { .compatible = "arm,scpi-cpufreq" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, scpi_cpufreq_of_match);
+
+static struct platform_driver scpi_cpufreq_platdrv = {
+ .driver = {
+ .name = "scpi-cpufreq",
+ .owner = THIS_MODULE,
+ .of_match_table = scpi_cpufreq_of_match,
+ },
+ .probe = scpi_cpufreq_probe,
+ .remove = scpi_cpufreq_remove,
+};
+module_platform_driver(scpi_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c8b5c13bcd05..c146c9e797cf 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -6,6 +6,33 @@ menuconfig MAILBOX
signals. Say Y if your platform supports hardware mailboxes.
if MAILBOX
+config ARM_MHU_MBOX
+ bool "ARM Message Handling Unit (MHU) Mailbox"
+ help
+ This driver provides support for inter-processor communication
+ between System Control Processor (SCP) with Cortex-M3 processor
+ and Application Processors (AP) on some ARM based systems with
+ MHU peripheral.
+
+ SCP controls most of the power management on the Application
+ Processors. It offers control and management of: the core/cluster
+ power states, various power domain DVFS including the core/cluster,
+ certain system clocks configuration, thermal sensors and many
+ others.
+
+config ARM_SCPI_PROTOCOL
+ bool "ARM System Control and Power Interface (SCPI) Message Protocol"
+ select ARM_MHU_MBOX
+ help
+ System Control and Power Interface (SCPI) Message Protocol is
+ defined for the purpose of communication between the Application
+ Cores (AP) and the System Control Processor (SCP). The MHU peripheral
+ provides a mechanism for inter-processor communication between the
+ SCP and the AP.
+
+ This protocol library provides an interface for all the client
+ drivers making use of the features offered by the SCP.
+
config PL320_MBOX
bool "ARM PL320 Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index e0facb34084a..7d064351bdb7 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1,4 +1,10 @@
-obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
+# Generic MAILBOX API
+
+obj-$(CONFIG_MAILBOX) += mailbox.o
+
+obj-$(CONFIG_ARM_MHU_MBOX) += arm_mhu.o
+obj-$(CONFIG_ARM_SCPI_PROTOCOL) += scpi_protocol.o
+obj-$(CONFIG_PL320_MBOX) += pl320.o
obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o
obj-$(CONFIG_OMAP1_MBOX) += mailbox_omap1.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
new file mode 100644
index 000000000000..841b0cb1b710
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.c
@@ -0,0 +1,336 @@
+/*
+ * Driver for the Message Handling Unit (MHU) which is the peripheral in
+ * the Compute SubSystem (CSS) providing a mechanism for inter-processor
+ * communication between System Control Processor (SCP) with Cortex-M3
+ * processor and Application Processors (AP).
+ *
+ * The MHU peripheral provides a mechanism to assert interrupt signals to
+ * facilitate inter-processor message passing between the SCP and the AP.
+ * The message payload can be deposited into main memory or on-chip memories.
+ * The MHU supports three bi-directional channels - low priority, high
+ * priority and secure (which can't be used in non-secure execution modes).
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "arm_mhu.h"
+
+#define DRIVER_NAME CONTROLLER_NAME "_drv"
+
+/*
+ * +--------------------+-------+---------------+
+ * | Hardware Register | Offset| Driver View |
+ * +--------------------+-------+---------------+
+ * | SCP_INTR_L_STAT | 0x000 | RX_STATUS(L) |
+ * | SCP_INTR_L_SET | 0x008 | RX_SET(L) |
+ * | SCP_INTR_L_CLEAR | 0x010 | RX_CLEAR(L) |
+ * +--------------------+-------+---------------+
+ * | SCP_INTR_H_STAT | 0x020 | RX_STATUS(H) |
+ * | SCP_INTR_H_SET | 0x028 | RX_SET(H) |
+ * | SCP_INTR_H_CLEAR | 0x030 | RX_CLEAR(H) |
+ * +--------------------+-------+---------------+
+ * | CPU_INTR_L_STAT | 0x100 | TX_STATUS(L) |
+ * | CPU_INTR_L_SET | 0x108 | TX_SET(L) |
+ * | CPU_INTR_L_CLEAR | 0x110 | TX_CLEAR(L) |
+ * +--------------------+-------+---------------+
+ * | CPU_INTR_H_STAT | 0x120 | TX_STATUS(H) |
+ * | CPU_INTR_H_SET | 0x128 | TX_SET(H) |
+ * | CPU_INTR_H_CLEAR | 0x130 | TX_CLEAR(H) |
+ * +--------------------+-------+---------------+
+*/
+#define RX_OFFSET(chan) ((chan) * 0x20)
+#define RX_STATUS(chan) RX_OFFSET(chan)
+#define RX_SET(chan) (RX_OFFSET(chan) + 0x8)
+#define RX_CLEAR(chan) (RX_OFFSET(chan) + 0x10)
+
+#define TX_OFFSET(chan) (0x100 + (chan) * 0x20)
+#define TX_STATUS(chan) TX_OFFSET(chan)
+#define TX_SET(chan) (TX_OFFSET(chan) + 0x8)
+#define TX_CLEAR(chan) (TX_OFFSET(chan) + 0x10)
+
+/*
+ * +---------------+-------+----------------+
+ * | Payload | Offset| Driver View |
+ * +---------------+-------+----------------+
+ * | SCP->AP Low | 0x000 | RX_PAYLOAD(L) |
+ * | SCP->AP High | 0x400 | RX_PAYLOAD(H) |
+ * +---------------+-------+----------------+
+ * | AP->SCP Low | 0x200 | TX_PAYLOAD(L) |
+ * | AP->SCP High | 0x600 | TX_PAYLOAD(H) |
+ * +---------------+-------+----------------+
+*/
+#define PAYLOAD_MAX_SIZE 0x200
+#define PAYLOAD_OFFSET 0x400
+#define RX_PAYLOAD(chan) ((chan) * PAYLOAD_OFFSET)
+#define TX_PAYLOAD(chan) ((chan) * PAYLOAD_OFFSET + PAYLOAD_MAX_SIZE)
+
+struct mhu_chan {
+ int index;
+ int rx_irq;
+ struct mbox_link link;
+ struct mhu_ctlr *ctlr;
+ struct mhu_data_buf *data;
+};
+
+struct mhu_ctlr {
+ struct device *dev;
+ void __iomem *mbox_base;
+ void __iomem *payload_base;
+ struct mbox_controller mbox_con;
+ struct mhu_chan channels[CHANNEL_MAX];
+};
+
+static inline struct mhu_chan *to_mhu_chan(struct mbox_link *lnk)
+{
+ if (!lnk)
+ return NULL;
+
+ return container_of(lnk, struct mhu_chan, link);
+}
+
+static irqreturn_t mbox_handler(int irq, void *p)
+{
+ struct mbox_link *link = (struct mbox_link *)p;
+ struct mhu_chan *chan = to_mhu_chan(link);
+ struct mhu_ctlr *ctlr = chan->ctlr;
+ void __iomem *mbox_base = ctlr->mbox_base;
+ void __iomem *payload = ctlr->payload_base;
+ int idx = chan->index;
+ u32 status = readl(mbox_base + RX_STATUS(idx));
+
+ if (status && irq == chan->rx_irq) {
+ struct mhu_data_buf *data = chan->data;
+ if (!data)
+ return IRQ_NONE; /* spurious */
+ if (data->rx_buf)
+ memcpy(data->rx_buf, payload + RX_PAYLOAD(idx),
+ data->rx_size);
+ chan->data = NULL;
+ writel(~0, mbox_base + RX_CLEAR(idx));
+ mbox_link_received_data(link, data);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mhu_send_data(struct mbox_link *link, void *msg)
+{
+ struct mhu_chan *chan = to_mhu_chan(link);
+ struct mhu_ctlr *ctlr = chan->ctlr;
+ void __iomem *mbox_base = ctlr->mbox_base;
+ void __iomem *payload = ctlr->payload_base;
+ struct mhu_data_buf *data = (struct mhu_data_buf *)msg;
+ int idx = chan->index;
+
+ if (!data)
+ return -EINVAL;
+
+ chan->data = data;
+ if (data->tx_buf)
+ memcpy(payload + TX_PAYLOAD(idx), data->tx_buf, data->tx_size);
+ writel(data->cmd, mbox_base + TX_SET(idx));
+
+ return 0;
+}
+
+static int mhu_startup(struct mbox_link *link, void *ignored)
+{
+ struct mhu_chan *chan = to_mhu_chan(link);
+ int err, mbox_irq = chan->rx_irq;
+
+ err = request_threaded_irq(mbox_irq, NULL, mbox_handler, IRQF_ONESHOT,
+ link->link_name, link);
+ if (err)
+ return err;
+
+ chan->data = NULL;
+ return 0;
+}
+
+static void mhu_shutdown(struct mbox_link *link)
+{
+ struct mhu_chan *chan = to_mhu_chan(link);
+
+ chan->data = NULL;
+ free_irq(chan->rx_irq, link);
+}
+
+static bool mhu_last_tx_done(struct mbox_link *link)
+{
+ struct mhu_chan *chan = to_mhu_chan(link);
+ struct mhu_ctlr *ctlr = chan->ctlr;
+ void __iomem *mbox_base = ctlr->mbox_base;
+ int idx = chan->index;
+
+ return !readl(mbox_base + TX_STATUS(idx));
+}
+
+static struct mbox_link_ops mhu_ops = {
+ .send_data = mhu_send_data,
+ .startup = mhu_startup,
+ .shutdown = mhu_shutdown,
+ .last_tx_done = mhu_last_tx_done,
+};
+
+static int mhu_probe(struct platform_device *pdev)
+{
+ struct mhu_ctlr *ctlr;
+ struct mhu_chan *chan;
+ struct device *dev = &pdev->dev;
+ struct mbox_link **l;
+ struct resource *res;
+ int idx;
+ static const char * const channel_names[] = {
+ CHANNEL_LOW_PRIORITY,
+ CHANNEL_HIGH_PRIORITY
+ };
+
+ ctlr = devm_kzalloc(dev, sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get mailbox memory resource\n");
+ return -ENXIO;
+ }
+
+ ctlr->mbox_base = devm_request_and_ioremap(dev, res);
+ if (!ctlr->mbox_base) {
+ dev_err(dev, "failed to request or ioremap mailbox control\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "failed to get payload memory resource\n");
+ return -ENXIO;
+ }
+
+ ctlr->payload_base = devm_request_and_ioremap(dev, res);
+ if (!ctlr->payload_base) {
+ dev_err(dev, "failed to request or ioremap mailbox payload\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ctlr->dev = dev;
+ platform_set_drvdata(pdev, ctlr);
+
+ l = devm_kzalloc(dev, sizeof(*l) * (CHANNEL_MAX + 1), GFP_KERNEL);
+ if (!l) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ ctlr->mbox_con.links = l;
+ ctlr->mbox_con.txdone_poll = true;
+ ctlr->mbox_con.txpoll_period = 10;
+ ctlr->mbox_con.ops = &mhu_ops;
+ snprintf(ctlr->mbox_con.controller_name, 16, CONTROLLER_NAME);
+ ctlr->mbox_con.dev = dev;
+
+ for (idx = 0; idx < CHANNEL_MAX; idx++) {
+ chan = &ctlr->channels[idx];
+ chan->index = idx;
+ chan->ctlr = ctlr;
+ chan->rx_irq = platform_get_irq(pdev, idx);
+ if (chan->rx_irq < 0) {
+ dev_err(dev, "failed to get interrupt for %s\n",
+ channel_names[idx]);
+ return -ENXIO;
+ }
+ l[idx] = &chan->link;
+ snprintf(l[idx]->link_name, 16, "%s", channel_names[idx]);
+ }
+ l[idx] = NULL;
+
+ if (mbox_controller_register(&ctlr->mbox_con)) {
+ dev_err(dev, "failed to register mailbox controller\n");
+ return -ENOMEM;
+ }
+ _dev_info(dev, "registered mailbox controller %s\n",
+ ctlr->mbox_con.controller_name);
+ return 0;
+}
+
+static int mhu_remove(struct platform_device *pdev)
+{
+ struct mhu_ctlr *ctlr = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ mbox_controller_unregister(&ctlr->mbox_con);
+ _dev_info(dev, "unregistered mailbox controller %s\n",
+ ctlr->mbox_con.controller_name);
+ devm_kfree(dev, ctlr->mbox_con.links);
+
+ devm_iounmap(dev, ctlr->payload_base);
+ devm_iounmap(dev, ctlr->mbox_base);
+
+ platform_set_drvdata(pdev, NULL);
+ devm_kfree(dev, ctlr);
+ return 0;
+}
+
+static struct of_device_id mhu_of_match[] = {
+ { .compatible = "arm,mhu" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mhu_of_match);
+
+static struct platform_driver mhu_driver = {
+ .probe = mhu_probe,
+ .remove = mhu_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mhu_of_match,
+ },
+};
+
+static int __init mhu_init(void)
+{
+ return platform_driver_register(&mhu_driver);
+}
+core_initcall(mhu_init);
+
+static void __exit mhu_exit(void)
+{
+ platform_driver_unregister(&mhu_driver);
+}
+module_exit(mhu_exit);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM MHU mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/arm_mhu.h b/drivers/mailbox/arm_mhu.h
new file mode 100644
index 000000000000..3b5343375c43
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.h
@@ -0,0 +1,31 @@
+/*
+ * ARM Message Handling Unit (MHU) driver header
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define CONTROLLER_NAME "mhu_ctlr"
+
+#define CHANNEL_MAX 2
+#define CHANNEL_LOW_PRIORITY "cpu_to_scp_low"
+#define CHANNEL_HIGH_PRIORITY "cpu_to_scp_high"
+
+struct mhu_data_buf {
+ u32 cmd;
+ int tx_size;
+ void *tx_buf;
+ int rx_size;
+ void *rx_buf;
+ void *cl_data;
+};
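
To make the client side of this interface concrete, here is a minimal,
hypothetical sketch (not part of the patch) of a driver sending one command
to the SCP over the low-priority MHU channel, using the mbox_client API added
by this series; the helper name, command value, timeout and payload handling
are illustrative only:

	#include <linux/err.h>
	#include <linux/mailbox_client.h>
	#include "arm_mhu.h"

	static int send_scp_command(u32 cmd, void *tx, int tx_size)
	{
		struct mbox_client cl = {
			/* "<controller_name>:<link_name>", as parsed by mbox_request_channel() */
			.chan_name = "mhu_ctlr:cpu_to_scp_low",
			.tx_block = true,	/* block until the SCP has taken the message */
			.tx_tout = 500,		/* ms */
		};
		struct mhu_data_buf buf = {
			.cmd = cmd,		/* value written to the TX_SET register */
			.tx_buf = tx,		/* copied into the shared payload area */
			.tx_size = tx_size,
		};
		struct mbox_chan *chan;
		int ret;

		chan = mbox_request_channel(&cl);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, &buf);
		mbox_free_channel(chan);

		return ret < 0 ? ret : 0;
	}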
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
new file mode 100644
index 000000000000..d080fc31145a
--- /dev/null
+++ b/drivers/mailbox/mailbox.c
@@ -0,0 +1,592 @@
+/*
+ * Mailbox: Common code for Mailbox controllers and users
+ *
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+
+/*
+ * The length of circular buffer for queuing messages from a client.
+ * 'msg_count' tracks the number of buffered messages while 'msg_free'
+ * is the index where the next message would be buffered.
+ * We shouldn't need it too big because every transfer is interrupt
+ * triggered and if we have lots of data to transfer, the interrupt
+ * latencies are going to be the bottleneck, not the buffer length.
+ * Besides, mbox_send_message could be called from atomic context and
+ * the client could also queue another message from the notifier 'tx_done'
+ * of the last transfer done.
+ * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
+ * print, it needs to be taken from a config option or some such.
+ */
+#define MBOX_TX_QUEUE_LEN 20
+
+#define TXDONE_BY_IRQ (1 << 0) /* controller has remote RTR irq */
+#define TXDONE_BY_POLL (1 << 1) /* controller can read status of last TX */
+#define TXDONE_BY_ACK (1 << 2) /* S/W ACK received by Client ticks the TX */
+
+struct mbox_chan {
+ char name[16]; /* Physical link's name */
+ struct mbox_con *con; /* Parent Controller */
+ unsigned txdone_method;
+
+ /* Physical links */
+ struct mbox_link *link;
+ struct mbox_link_ops *link_ops;
+
+ /* client */
+ struct mbox_client *cl;
+ struct completion tx_complete;
+
+ void *active_req;
+ unsigned msg_count, msg_free;
+ void *msg_data[MBOX_TX_QUEUE_LEN];
+ /* Access to the channel */
+ spinlock_t lock;
+ /* Hook to add to the controller's list of channels */
+ struct list_head node;
+ /* Notifier to all clients waiting on acquiring this channel */
+ struct blocking_notifier_head avail;
+} __aligned(32);
+
+/* Internal representation of a controller */
+struct mbox_con {
+ struct device *dev;
+ char name[16]; /* controller_name */
+ struct list_head channels;
+ /*
+ * If the controller supports only TXDONE_BY_POLL,
+ * this timer polls all the links for txdone.
+ */
+ struct timer_list poll;
+ unsigned period;
+ /* Hook to add to the global controller list */
+ struct list_head node;
+} __aligned(32);
+
+static LIST_HEAD(mbox_cons);
+static DEFINE_MUTEX(con_mutex);
+
+static int _add_to_rbuf(struct mbox_chan *chan, void *mssg)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* See if there is any space left */
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -ENOMEM;
+ }
+
+ idx = chan->msg_free;
+ chan->msg_data[idx] = mssg;
+ chan->msg_count++;
+
+ if (idx == MBOX_TX_QUEUE_LEN - 1)
+ chan->msg_free = 0;
+ else
+ chan->msg_free++;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return idx;
+}
+
+static void _msg_submit(struct mbox_chan *chan)
+{
+ struct mbox_link *link = chan->link;
+ unsigned count, idx;
+ unsigned long flags;
+ void *data;
+ int err;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->msg_count || chan->active_req) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return;
+ }
+
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
+
+ data = chan->msg_data[idx];
+
+ /* Try to submit a message to the MBOX controller */
+ err = chan->link_ops->send_data(link, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void tx_tick(struct mbox_chan *chan, enum mbox_result r)
+{
+ unsigned long flags;
+ void *mssg;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Submit next message */
+ _msg_submit(chan);
+
+ /* Notify the client */
+ if (chan->cl->tx_block)
+ complete(&chan->tx_complete);
+ else if (mssg && chan->cl->tx_done)
+ chan->cl->tx_done(chan->cl, mssg, r);
+}
+
+static void poll_txdone(unsigned long data)
+{
+ struct mbox_con *con = (struct mbox_con *)data;
+ bool txdone, resched = false;
+ struct mbox_chan *chan;
+
+ list_for_each_entry(chan, &con->channels, node) {
+ if (chan->active_req && chan->cl) {
+ resched = true;
+ txdone = chan->link_ops->last_tx_done(chan->link);
+ if (txdone)
+ tx_tick(chan, MBOX_OK);
+ }
+ }
+
+ if (resched)
+ mod_timer(&con->poll,
+ jiffies + msecs_to_jiffies(con->period));
+}
+
+/**
+ * mbox_link_received_data - A way for controller driver to push data
+ * received from remote to the upper layer.
+ * @link: Pointer to the mailbox link on which RX happened.
+ * @mssg: Client specific message typecast to void *
+ *
+ * After startup and before shutdown any data received on the link
+ * is passed on to the API via atomic mbox_link_received_data().
+ * The controller should ACK the RX only after this call returns.
+ */
+void mbox_link_received_data(struct mbox_link *link, void *mssg)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)link->api_priv;
+
+ /* No buffering of the received data */
+ if (chan->cl->rx_callback)
+ chan->cl->rx_callback(chan->cl, mssg);
+}
+EXPORT_SYMBOL_GPL(mbox_link_received_data);
+
+/**
+ * mbox_link_txdone - A way for controller driver to notify the
+ * framework that the last TX has completed.
+ * @link: Pointer to the mailbox link on which TX happened.
+ * @r: Status of last TX - OK or ERROR
+ *
+ * The controller that has IRQ for TX ACK calls this atomic API
+ * to tick the TX state machine. It works only if txdone_irq
+ * is set by the controller.
+ */
+void mbox_link_txdone(struct mbox_link *link, enum mbox_result r)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)link->api_priv;
+
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
+ pr_err("Controller can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_link_txdone);
+
+/**
+ * mbox_client_txdone - The way for a client to run the TX state machine.
+ * @chan: Mailbox channel assigned to this client.
+ * @r: Success status of last transmission.
+ *
+ * The client/protocol had received some 'ACK' packet and it notifies
+ * the API that the last packet was sent successfully. This only works
+ * if the controller can't sense TX-Done.
+ */
+void mbox_client_txdone(struct mbox_chan *chan, enum mbox_result r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
+ pr_err("Client can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_client_txdone);
+
+/**
+ * mbox_send_message - For client to submit a message to be
+ * sent to the remote.
+ * @chan: Mailbox channel assigned to this client.
+ * @mssg: Client specific message, typecast to void *.
+ *
+ * For client to submit data to the controller destined for a remote
+ * processor. If the client had set 'tx_block', the call will return
+ * either when the remote receives the data or when 'tx_tout' millisecs
+ * run out.
+ * In non-blocking mode, the requests are buffered by the API and a
+ * non-negative token is returned for each queued request. If the request
+ * is not queued, a negative token is returned. Upon failure or successful
+ * TX, the API calls 'tx_done' from atomic context, from which the client
+ * could submit yet another request.
+ * In blocking mode, 'tx_done' is not called, effectively making the
+ * queue length 1.
+ * The message pointer must be preserved until it has been sent over
+ * the link, i.e., until tx_done() is called.
+ * This function could be called from atomic context as it simply
+ * queues the data and returns a token against the request.
+ *
+ * Return: Non-negative integer for successful submission (non-blocking mode)
+ * or transmission over link (blocking mode).
+ * Negative value denotes failure.
+ */
+int mbox_send_message(struct mbox_chan *chan, void *mssg)
+{
+ int t;
+
+ if (!chan || !chan->cl)
+ return -EINVAL;
+
+ if (chan->cl->tx_block)
+ init_completion(&chan->tx_complete);
+
+ t = _add_to_rbuf(chan, mssg);
+ if (t < 0) {
+ pr_err("Try increasing MBOX_TX_QUEUE_LEN\n");
+ return t;
+ }
+
+ _msg_submit(chan);
+
+ if (chan->txdone_method == TXDONE_BY_POLL)
+ poll_txdone((unsigned long)chan->con);
+
+ if (chan->cl->tx_block && chan->active_req) {
+ int ret;
+ ret = wait_for_completion_timeout(&chan->tx_complete,
+ chan->cl->tx_tout);
+ if (ret == 0) {
+ t = -EIO;
+ tx_tick(chan, MBOX_ERR);
+ }
+ }
+
+ return t;
+}
+EXPORT_SYMBOL_GPL(mbox_send_message);
+
+/**
+ * mbox_request_channel - Request a mailbox channel.
+ * @cl: Identity of the client requesting the channel.
+ *
+ * The Client specifies its requirements and capabilities while asking for
+ * a mailbox channel by name. It can't be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel.
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ *
+ * Return: Pointer to the channel assigned to the client if successful.
+ * ERR_PTR for request failure.
+ */
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl)
+{
+ struct mbox_chan *chan;
+ struct mbox_con *con;
+ unsigned long flags;
+ char *con_name;
+ int len, ret;
+
+ con_name = cl->chan_name;
+ len = strcspn(cl->chan_name, ":");
+
+ ret = 0;
+ mutex_lock(&con_mutex);
+ list_for_each_entry(con, &mbox_cons, node)
+ if (!strncmp(con->name, con_name, len)) {
+ ret = 1;
+ break;
+ }
+ mutex_unlock(&con_mutex);
+
+ if (!ret) {
+ pr_info("Channel(%s) not found!\n", cl->chan_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = 0;
+ list_for_each_entry(chan, &con->channels, node) {
+ if (!chan->cl &&
+ !strcmp(con_name + len + 1, chan->name) &&
+ try_module_get(con->dev->driver->owner)) {
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+			if (!cl->tx_tout) /* wait forever */
+ cl->tx_tout = msecs_to_jiffies(3600000);
+ else
+ cl->tx_tout = msecs_to_jiffies(cl->tx_tout);
+ if (chan->txdone_method == TXDONE_BY_POLL
+ && cl->knows_txdone)
+ chan->txdone_method |= TXDONE_BY_ACK;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ ret = 1;
+ break;
+ }
+ }
+
+ if (!ret) {
+ pr_err("Unable to assign mailbox(%s)\n", cl->chan_name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ ret = chan->link_ops->startup(chan->link, cl->link_data);
+ if (ret) {
+ pr_err("Unable to startup the link\n");
+ mbox_free_channel(chan);
+ return ERR_PTR(ret);
+ }
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(mbox_request_channel);
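A minimal request lifecycle sketch. The "pl320:A9_to_M3" name matches the "controller:channel" pair this patch registers in pl320.c below; demo_rx_callback and demo_chan are from the earlier sketch, and demo_open() is illustrative:

static struct mbox_client demo_cl = {
	.chan_name	= "pl320:A9_to_M3",
	.rx_callback	= demo_rx_callback,
	.tx_done	= NULL,
	.tx_block	= true,
	.tx_tout	= 500,		/* ms */
	.knows_txdone	= true,		/* we see protocol-level ACKs */
	.link_data	= NULL,
};

static int demo_open(void)
{
	struct mbox_chan *chan = mbox_request_channel(&demo_cl);

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* -ENODEV or -EBUSY */
	demo_chan = chan;
	return 0;
}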
+
+/**
+ * mbox_free_channel - Relinquish a client's control of a mailbox channel.
+ * @chan: The mailbox channel to be freed.
+ */
+void mbox_free_channel(struct mbox_chan *chan)
+{
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ chan->link_ops->shutdown(chan->link);
+
+ /* The queued TX requests are simply aborted, no callbacks are made */
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ module_put(chan->con->dev->driver->owner);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ blocking_notifier_call_chain(&chan->avail, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+static struct mbox_chan *name_to_chan(const char *name)
+{
+ struct mbox_chan *chan = NULL;
+ struct mbox_con *con;
+ int len, found = 0;
+
+ len = strcspn(name, ":");
+
+ mutex_lock(&con_mutex);
+
+ list_for_each_entry(con, &mbox_cons, node) {
+ if (!strncmp(con->name, name, len)) {
+ list_for_each_entry(chan, &con->channels, node) {
+ if (!strcmp(name + len + 1, chan->name)) {
+ found = 1;
+ goto done;
+ }
+ }
+ }
+ }
+done:
+ mutex_unlock(&con_mutex);
+
+ if (!found)
+ return NULL;
+
+ return chan;
+}
+
+/**
+ * mbox_notify_chan_register - Ask the framework to notify the client
+ *			when a particular channel becomes available
+ *			to be acquired again.
+ * @name: Name of the mailbox channel the client is interested in.
+ * @nb: Pointer to the notifier.
+ */
+int mbox_notify_chan_register(const char *name, struct notifier_block *nb)
+{
+ struct mbox_chan *chan = name_to_chan(name);
+
+ if (chan && nb)
+ return blocking_notifier_chain_register(&chan->avail, nb);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mbox_notify_chan_register);
+
+/**
+ * mbox_notify_chan_unregister - The client is no longer interested in
+ *			the channel.
+ * @name: Name of the mailbox channel the client was interested in.
+ * @nb: Pointer to the notifier.
+ */
+void mbox_notify_chan_unregister(const char *name, struct notifier_block *nb)
+{
+ struct mbox_chan *chan = name_to_chan(name);
+
+ if (chan && nb)
+ blocking_notifier_chain_unregister(&chan->avail, nb);
+}
+EXPORT_SYMBOL_GPL(mbox_notify_chan_unregister);
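A sketch of waiting for a busy channel: the framework fires the 'avail' chain from mbox_free_channel(), so a client can sleep on a completion and then retry the request. The names are illustrative:

static DECLARE_COMPLETION(demo_chan_free);

static int demo_chan_avail(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	complete(&demo_chan_free);	/* previous owner released it */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_chan_avail };

	/* in process context: */
	if (!mbox_notify_chan_register("pl320:A9_to_M3", &demo_nb)) {
		wait_for_completion(&demo_chan_free);
		mbox_notify_chan_unregister("pl320:A9_to_M3", &demo_nb);
		/* retry mbox_request_channel() here */
	}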
+
+/**
+ * mbox_controller_register - Register the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ *
+ * The controller driver registers its communication links to the
+ * global pool managed by the common framework.
+ */
+int mbox_controller_register(struct mbox_controller *mbox)
+{
+ int i, num_links, txdone;
+ struct mbox_chan *chan;
+ struct mbox_con *con;
+
+ /* Sanity check */
+ if (!mbox || !mbox->ops)
+ return -EINVAL;
+
+ for (i = 0; mbox->links[i]; i++)
+ ;
+ if (!i)
+ return -EINVAL;
+ num_links = i;
+
+ mutex_lock(&con_mutex);
+ /* Check if already populated */
+ list_for_each_entry(con, &mbox_cons, node)
+ if (!strcmp(mbox->controller_name, con->name)) {
+ mutex_unlock(&con_mutex);
+ return -EINVAL;
+ }
+
+	con = kzalloc(sizeof(struct mbox_con), GFP_KERNEL);
+	if (!con) {
+		mutex_unlock(&con_mutex);
+		return -ENOMEM;
+	}
+
+	chan = kzalloc(sizeof(struct mbox_chan) * num_links, GFP_KERNEL);
+	if (!chan) {
+		kfree(con);
+		mutex_unlock(&con_mutex);
+		return -ENOMEM;
+	}
+
+ con->dev = mbox->dev;
+ INIT_LIST_HEAD(&con->channels);
+ snprintf(con->name, 16, "%s", mbox->controller_name);
+
+ if (mbox->txdone_irq)
+ txdone = TXDONE_BY_IRQ;
+ else if (mbox->txdone_poll)
+ txdone = TXDONE_BY_POLL;
+ else /* It has to be ACK then */
+ txdone = TXDONE_BY_ACK;
+
+ if (txdone == TXDONE_BY_POLL) {
+ con->period = mbox->txpoll_period;
+ con->poll.function = &poll_txdone;
+ con->poll.data = (unsigned long)con;
+ init_timer(&con->poll);
+ }
+
+ for (i = 0; i < num_links; i++) {
+ chan[i].con = con;
+ chan[i].cl = NULL;
+ chan[i].link_ops = mbox->ops;
+ chan[i].link = mbox->links[i];
+ chan[i].txdone_method = txdone;
+ chan[i].link->api_priv = &chan[i];
+ spin_lock_init(&chan[i].lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&chan[i].avail);
+ list_add_tail(&chan[i].node, &con->channels);
+ snprintf(chan[i].name, 16, "%s", mbox->links[i]->link_name);
+ }
+
+ list_add_tail(&con->node, &mbox_cons);
+ mutex_unlock(&con_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mbox_controller_register);
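Controller-side registration in outline, mirroring what pl320.c below does; the "demo" names and the demo_* link callbacks are hypothetical:

static struct mbox_link demo_link = { .link_name = "ch0" };
static struct mbox_link *demo_links[] = { &demo_link, NULL };

static struct mbox_link_ops demo_ops = {
	.send_data	= demo_send_data,	/* hypothetical callbacks */
	.startup	= demo_startup,
	.shutdown	= demo_shutdown,
};

static struct mbox_controller demo_mbox = {
	.controller_name = "demo",
	.ops		= &demo_ops,
	.links		= demo_links,
	.txdone_irq	= true,		/* we get a TX ACK interrupt */
};

	/* in probe(), after demo_mbox.dev = &pdev->dev: */
	ret = mbox_controller_register(&demo_mbox);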
+
+/**
+ * mbox_controller_unregister - Unregister the mailbox controller.
+ * @mbox: Pointer to the mailbox controller.
+ *
+ * Purge the mailbox links from the global pool maintained by the framework.
+ */
+void mbox_controller_unregister(struct mbox_controller *mbox)
+{
+ struct mbox_con *t, *con = NULL;
+ struct mbox_chan *chan;
+
+ mutex_lock(&con_mutex);
+
+ list_for_each_entry(t, &mbox_cons, node)
+ if (!strcmp(mbox->controller_name, t->name)) {
+ con = t;
+ break;
+ }
+
+ if (con)
+ list_del(&con->node);
+
+ mutex_unlock(&con_mutex);
+
+ if (!con)
+ return;
+
+ list_for_each_entry(chan, &con->channels, node)
+ mbox_free_channel(chan);
+
+ if (mbox->txdone_poll)
+ del_timer_sync(&con->poll);
+
+ kfree(con);
+}
+EXPORT_SYMBOL_GPL(mbox_controller_unregister);
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
deleted file mode 100644
index d873cbae2fbb..000000000000
--- a/drivers/mailbox/pl320-ipc.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright 2012 Calxeda, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/spinlock.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-
-#include <linux/mailbox.h>
-
-#define IPCMxSOURCE(m) ((m) * 0x40)
-#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
-#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
-#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
-#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
-#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
-#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
-#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
-#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
-#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
-
-#define IPCMMIS(irq) (((irq) * 8) + 0x800)
-#define IPCMRIS(irq) (((irq) * 8) + 0x804)
-
-#define MBOX_MASK(n) (1 << (n))
-#define IPC_TX_MBOX 1
-#define IPC_RX_MBOX 2
-
-#define CHAN_MASK(n) (1 << (n))
-#define A9_SOURCE 1
-#define M3_SOURCE 0
-
-static void __iomem *ipc_base;
-static int ipc_irq;
-static DEFINE_MUTEX(ipc_m1_lock);
-static DECLARE_COMPLETION(ipc_completion);
-static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
-
-static inline void set_destination(int source, int mbox)
-{
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
-}
-
-static inline void clear_destination(int source, int mbox)
-{
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
-}
-
-static void __ipc_send(int mbox, u32 *data)
-{
- int i;
- for (i = 0; i < 7; i++)
- __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
- __raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
-}
-
-static u32 __ipc_rcv(int mbox, u32 *data)
-{
- int i;
- for (i = 0; i < 7; i++)
- data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
- return data[1];
-}
-
-/* blocking implmentation from the A9 side, not usuable in interrupts! */
-int pl320_ipc_transmit(u32 *data)
-{
- int ret;
-
- mutex_lock(&ipc_m1_lock);
-
- init_completion(&ipc_completion);
- __ipc_send(IPC_TX_MBOX, data);
- ret = wait_for_completion_timeout(&ipc_completion,
- msecs_to_jiffies(1000));
- if (ret == 0) {
- ret = -ETIMEDOUT;
- goto out;
- }
-
- ret = __ipc_rcv(IPC_TX_MBOX, data);
-out:
- mutex_unlock(&ipc_m1_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
-
-static irqreturn_t ipc_handler(int irq, void *dev)
-{
- u32 irq_stat;
- u32 data[7];
-
- irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
- if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
- __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
- complete(&ipc_completion);
- }
- if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
- __ipc_rcv(IPC_RX_MBOX, data);
- atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
- __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
- }
-
- return IRQ_HANDLED;
-}
-
-int pl320_ipc_register_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&ipc_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
-
-int pl320_ipc_unregister_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&ipc_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
-
-static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
-{
- int ret;
-
- ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
- if (ipc_base == NULL)
- return -ENOMEM;
-
- __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
-
- ipc_irq = adev->irq[0];
- ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
- if (ret < 0)
- goto err;
-
- /* Init slow mailbox */
- __raw_writel(CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE),
- ipc_base + IPCMxDSET(IPC_TX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxMSET(IPC_TX_MBOX));
-
- /* Init receive mailbox */
- __raw_writel(CHAN_MASK(M3_SOURCE),
- ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
- __raw_writel(CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxDSET(IPC_RX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxMSET(IPC_RX_MBOX));
-
- return 0;
-err:
- iounmap(ipc_base);
- return ret;
-}
-
-static struct amba_id pl320_ids[] = {
- {
- .id = 0x00041320,
- .mask = 0x000fffff,
- },
- { 0, 0 },
-};
-
-static struct amba_driver pl320_driver = {
- .drv = {
- .name = "pl320",
- },
- .id_table = pl320_ids,
- .probe = pl320_probe,
-};
-
-static int __init ipc_init(void)
-{
- return amba_driver_register(&pl320_driver);
-}
-module_init(ipc_init);
diff --git a/drivers/mailbox/pl320.c b/drivers/mailbox/pl320.c
new file mode 100644
index 000000000000..1121ca8f7f93
--- /dev/null
+++ b/drivers/mailbox/pl320.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+
+#define IPCMSOURCE(m) ((m) * 0x40)
+#define IPCMDSET(m) (((m) * 0x40) + 0x004)
+#define IPCMDCLEAR(m) (((m) * 0x40) + 0x008)
+#define IPCMDSTATUS(m) (((m) * 0x40) + 0x00C)
+#define IPCMMODE(m) (((m) * 0x40) + 0x010)
+#define IPCMMSET(m) (((m) * 0x40) + 0x014)
+#define IPCMMCLEAR(m) (((m) * 0x40) + 0x018)
+#define IPCMMSTATUS(m) (((m) * 0x40) + 0x01C)
+#define IPCMSEND(m) (((m) * 0x40) + 0x020)
+#define IPCMDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
+
+#define IPCMMIS(irq) (((irq) * 8) + 0x800)
+#define IPCMRIS(irq) (((irq) * 8) + 0x804)
+
+#define MBOX_MASK(n) (1 << (n))
+#define IPC_TX_MBOX 1
+
+#define CHAN_MASK(n) (1 << (n))
+#define A9_SOURCE 1
+#define M3_SOURCE 0
+
+struct pl320_con {
+ u32 *data;
+ int mbox_irq;
+ struct device *dev;
+ struct mbox_link link;
+ void __iomem *mbox_base;
+ struct mbox_controller mbox_con;
+};
+
+static inline struct pl320_con *to_pl320(struct mbox_link *l)
+{
+ if (!l)
+ return NULL;
+
+ return container_of(l, struct pl320_con, link);
+}
+
+static irqreturn_t mbox_handler(int irq, void *p)
+{
+ struct mbox_link *link = (struct mbox_link *)p;
+ struct pl320_con *pl320 = to_pl320(link);
+ void __iomem *mbox_base = pl320->mbox_base;
+ u32 irq_stat;
+
+ irq_stat = __raw_readl(mbox_base + IPCMMIS(1));
+ if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
+ u32 *data = pl320->data;
+ int i;
+
+ __raw_writel(0, mbox_base + IPCMSEND(IPC_TX_MBOX));
+
+		/*
+		 * The PL320 specifies that the send buffer is
+		 * overwritten by the same FIFO upon TX ACK, so read
+		 * the reply data back before reporting TX-done.
+		 */
+ for (i = 0; i < 7; i++)
+ data[i] = __raw_readl(mbox_base
+ + IPCMDR(IPC_TX_MBOX, i));
+
+ mbox_link_txdone(link, MBOX_OK);
+
+ pl320->data = NULL;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pl320_send_data(struct mbox_link *link, void *msg)
+{
+ struct pl320_con *pl320 = to_pl320(link);
+ void __iomem *mbox_base = pl320->mbox_base;
+ u32 *data = (u32 *)msg;
+ int i;
+
+ pl320->data = data;
+
+ for (i = 0; i < 7; i++)
+ __raw_writel(data[i], mbox_base + IPCMDR(IPC_TX_MBOX, i));
+
+ __raw_writel(0x1, mbox_base + IPCMSEND(IPC_TX_MBOX));
+
+ return 0;
+}
+
+static int pl320_startup(struct mbox_link *link, void *ignored)
+{
+ struct pl320_con *pl320 = to_pl320(link);
+ void __iomem *mbox_base = pl320->mbox_base;
+ int err, mbox_irq = pl320->mbox_irq;
+
+ __raw_writel(0, mbox_base + IPCMSEND(IPC_TX_MBOX));
+
+ err = request_irq(mbox_irq, mbox_handler,
+ 0, dev_name(pl320->dev), link);
+ if (err)
+ return err;
+
+ /* Init slow mailbox */
+ __raw_writel(CHAN_MASK(A9_SOURCE),
+ mbox_base + IPCMSOURCE(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE),
+ mbox_base + IPCMDSET(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ mbox_base + IPCMMSET(IPC_TX_MBOX));
+
+ pl320->data = NULL;
+ return 0;
+}
+
+static void pl320_shutdown(struct mbox_link *link)
+{
+ struct pl320_con *pl320 = to_pl320(link);
+
+ pl320->data = NULL;
+ free_irq(pl320->mbox_irq, link);
+}
+
+static struct mbox_link_ops pl320_ops = {
+ .send_data = pl320_send_data,
+ .startup = pl320_startup,
+ .shutdown = pl320_shutdown,
+};
+
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct pl320_con *pl320;
+ struct mbox_link *l[2];
+ int ret;
+
+ pl320 = kzalloc(sizeof(struct pl320_con), GFP_KERNEL);
+ if (!pl320)
+ return -ENOMEM;
+
+ pl320->mbox_base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (pl320->mbox_base == NULL) {
+ kfree(pl320);
+ return -ENOMEM;
+ }
+
+ pl320->dev = &adev->dev;
+ pl320->mbox_irq = adev->irq[0];
+ amba_set_drvdata(adev, pl320);
+
+ l[0] = &pl320->link;
+ l[1] = NULL;
+ pl320->mbox_con.links = l;
+ pl320->mbox_con.txdone_irq = true;
+ pl320->mbox_con.ops = &pl320_ops;
+ snprintf(pl320->link.link_name, 16, "A9_to_M3");
+ snprintf(pl320->mbox_con.controller_name, 16, "pl320");
+ pl320->mbox_con.dev = &adev->dev;
+
+ ret = mbox_controller_register(&pl320->mbox_con);
+ if (ret) {
+ iounmap(pl320->mbox_base);
+ kfree(pl320);
+ }
+
+ return ret;
+}
+
+static struct amba_id pl320_ids[] = {
+ {
+ .id = 0x00041320,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl320_driver = {
+ .drv = {
+ .name = "pl320",
+ },
+ .id_table = pl320_ids,
+ .probe = pl320_probe,
+};
+
+static int __init mbox_init(void)
+{
+ return amba_driver_register(&pl320_driver);
+}
+module_init(mbox_init);
diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c
new file mode 100644
index 000000000000..7a1ff2dd2687
--- /dev/null
+++ b/drivers/mailbox/scpi_protocol.c
@@ -0,0 +1,354 @@
+/*
+ * System Control and Power Interface (SCPI) Message Protocol driver
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/mailbox_client.h>
+#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
+
+#include "arm_mhu.h"
+
+#define CMD_ID_SHIFT 0
+#define CMD_ID_MASK 0xff
+#define CMD_SENDER_ID_SHIFT 8
+#define CMD_SENDER_ID_MASK 0xff
+#define CMD_DATA_SIZE_SHIFT 20
+#define CMD_DATA_SIZE_MASK 0x1ff
+#define PACK_SCPI_CMD(cmd, sender, txsz) \
+ ((((cmd) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
+ (((sender) & CMD_SENDER_ID_MASK) << CMD_SENDER_ID_SHIFT) | \
+ (((txsz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
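As a worked example of the packing, the GET_CLOCK_VALUE command (0x16) sent for the clocks client (SCPI_CL_CLOCKS == 1, defined below) with a 2-byte payload yields:

	u32 cmd = PACK_SCPI_CMD(0x16, 1, sizeof(u16));
	/* = (0x16 << 0) | (0x01 << 8) | (0x02 << 20) = 0x00200116 */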
+
+#define MAX_DVFS_DOMAINS 3
+#define MAX_DVFS_OPPS 4
+#define DVFS_LATENCY(hdr) ((hdr) >> 16)
+#define DVFS_OPP_COUNT(hdr) (((hdr) >> 8) & 0xff)
+
+enum scpi_error_codes {
+ SCPI_SUCCESS = 0, /* Success */
+ SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
+ SCPI_ERR_ALIGN = 2, /* Invalid alignment */
+ SCPI_ERR_SIZE = 3, /* Invalid size */
+ SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
+ SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
+ SCPI_ERR_RANGE = 6, /* Value out of range */
+ SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
+ SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
+ SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
+ SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
+ SCPI_ERR_DEVICE = 11, /* Device error */
+ SCPI_ERR_MAX
+};
+
+enum scpi_client_id {
+ SCPI_CL_NONE,
+ SCPI_CL_CLOCKS,
+ SCPI_CL_DVFS,
+ SCPI_CL_POWER,
+ SCPI_MAX,
+};
+
+enum scpi_std_cmd {
+ SCPI_CMD_INVALID = 0x00,
+ SCPI_CMD_SCPI_READY = 0x01,
+ SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ SCPI_CMD_EVENT = 0x03,
+ SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
+ SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
+ SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
+ SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
+ SCPI_CMD_SYS_PWR_STATE = 0x08,
+ SCPI_CMD_L2_READY = 0x09,
+ SCPI_CMD_SET_AP_TIMER = 0x0a,
+ SCPI_CMD_CANCEL_AP_TIME = 0x0b,
+ SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
+ SCPI_CMD_GET_DVFS_INFO = 0x0d,
+ SCPI_CMD_SET_DVFS = 0x0e,
+ SCPI_CMD_GET_DVFS = 0x0f,
+ SCPI_CMD_GET_DVFS_STAT = 0x10,
+ SCPI_CMD_SET_RTC = 0x11,
+ SCPI_CMD_GET_RTC = 0x12,
+ SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
+ SCPI_CMD_SET_CLOCK_INDEX = 0x14,
+ SCPI_CMD_SET_CLOCK_VALUE = 0x15,
+ SCPI_CMD_GET_CLOCK_VALUE = 0x16,
+ SCPI_CMD_PSU_CAPABILITIES = 0x17,
+ SCPI_CMD_SET_PSU = 0x18,
+ SCPI_CMD_GET_PSU = 0x19,
+ SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
+ SCPI_CMD_SENSOR_INFO = 0x1b,
+ SCPI_CMD_SENSOR_VALUE = 0x1c,
+ SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
+ SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
+ SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
+ SCPI_CMD_COUNT
+};
+
+struct scpi_data_buf {
+ int client_id;
+ struct mhu_data_buf *data;
+ struct completion complete;
+};
+
+static int high_priority_cmds[] = {
+ SCPI_CMD_GET_CSS_PWR_STATE,
+ SCPI_CMD_CFG_PWR_STATE_STAT,
+ SCPI_CMD_GET_PWR_STATE_STAT,
+ SCPI_CMD_SET_DVFS,
+ SCPI_CMD_GET_DVFS,
+ SCPI_CMD_SET_RTC,
+ SCPI_CMD_GET_RTC,
+ SCPI_CMD_SET_CLOCK_INDEX,
+ SCPI_CMD_SET_CLOCK_VALUE,
+ SCPI_CMD_GET_CLOCK_VALUE,
+ SCPI_CMD_SET_PSU,
+ SCPI_CMD_GET_PSU,
+ SCPI_CMD_SENSOR_VALUE,
+ SCPI_CMD_SENSOR_CFG_PERIODIC,
+ SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+static struct scpi_opp *scpi_opps[MAX_DVFS_DOMAINS];
+
+static int scpi_linux_errmap[SCPI_ERR_MAX] = {
+ 0, -EINVAL, -ENOEXEC, -EMSGSIZE,
+ -EINVAL, -EACCES, -ERANGE, -ETIMEDOUT,
+ -ENOMEM, -EINVAL, -EOPNOTSUPP, -EIO,
+};
+
+static inline int scpi_to_linux_errno(int errno)
+{
+ if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
+ return scpi_linux_errmap[errno];
+ return -EIO;
+}
+
+static bool high_priority_chan_supported(int cmd)
+{
+ int idx;
+ for (idx = 0; idx < ARRAY_SIZE(high_priority_cmds); idx++)
+ if (cmd == high_priority_cmds[idx])
+ return true;
+ return false;
+}
+
+static void scpi_rx_callback(struct mbox_client *cl, void *msg)
+{
+ struct mhu_data_buf *data = (struct mhu_data_buf *)msg;
+ struct scpi_data_buf *scpi_buf = data->cl_data;
+ complete(&scpi_buf->complete);
+}
+
+static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority)
+{
+ struct mbox_chan *chan;
+ struct mbox_client cl;
+ struct mhu_data_buf *data = scpi_buf->data;
+ u32 status;
+
+ cl.rx_callback = scpi_rx_callback;
+ cl.tx_done = NULL;
+ cl.tx_block = true;
+ cl.tx_tout = 50; /* 50 msec */
+ cl.link_data = NULL;
+ cl.knows_txdone = false;
+ cl.chan_name = high_priority ?
+ CONTROLLER_NAME":"CHANNEL_HIGH_PRIORITY :
+ CONTROLLER_NAME":"CHANNEL_LOW_PRIORITY;
+
+ chan = mbox_request_channel(&cl);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ init_completion(&scpi_buf->complete);
+	if (mbox_send_message(chan, (void *)data) < 0) {
+		mbox_free_channel(chan);
+		return -EIO;
+	}
+
+ if (!wait_for_completion_timeout(&scpi_buf->complete,
+ msecs_to_jiffies(50)))
+ status = SCPI_ERR_TIMEOUT;
+ else
+ status = *(u32 *)(data->rx_buf); /* read first word */
+
+ mbox_free_channel(chan);
+
+ return scpi_to_linux_errno(status);
+}
+
+#define SCPI_SETUP_DBUF(scpi_buf, mhu_buf, _client_id,\
+ _cmd, _tx_buf, _rx_buf) \
+do { \
+ struct mhu_data_buf *pdata = &mhu_buf; \
+ pdata->cmd = _cmd; \
+ pdata->tx_buf = &_tx_buf; \
+ pdata->tx_size = sizeof(_tx_buf); \
+ pdata->rx_buf = &_rx_buf; \
+ pdata->rx_size = sizeof(_rx_buf); \
+ scpi_buf.client_id = _client_id; \
+ scpi_buf.data = pdata; \
+} while (0)
+
+static int scpi_execute_cmd(struct scpi_data_buf *scpi_buf)
+{
+ struct mhu_data_buf *data;
+ bool high_priority;
+
+ if (!scpi_buf || !scpi_buf->data)
+ return -EINVAL;
+
+ data = scpi_buf->data;
+ high_priority = high_priority_chan_supported(data->cmd);
+ data->cmd = PACK_SCPI_CMD(data->cmd, scpi_buf->client_id,
+ data->tx_size);
+ data->cl_data = scpi_buf;
+
+ return send_scpi_cmd(scpi_buf, high_priority);
+}
+
+unsigned long scpi_clk_get_val(u16 clk_id)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct {
+ u32 status;
+ u32 clk_rate;
+ } buf;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_CLOCKS,
+ SCPI_CMD_GET_CLOCK_VALUE, clk_id, buf);
+ if (scpi_execute_cmd(&sdata))
+ return 0;
+
+ return buf.clk_rate;
+}
+EXPORT_SYMBOL_GPL(scpi_clk_get_val);
+
+int scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ int stat;
+ struct {
+ u32 clk_rate;
+ u16 clk_id;
+ } buf;
+
+ buf.clk_rate = (u32)rate;
+ buf.clk_id = clk_id;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_CLOCKS,
+ SCPI_CMD_SET_CLOCK_VALUE, buf, stat);
+ return scpi_execute_cmd(&sdata);
+}
+EXPORT_SYMBOL_GPL(scpi_clk_set_val);
+
+struct scpi_opp *scpi_dvfs_get_opps(u8 domain)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct {
+ u32 status;
+ u32 header;
+ u32 freqs[MAX_DVFS_OPPS];
+ } buf;
+ struct scpi_opp *opp;
+ size_t freqs_sz;
+ int count, ret;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return ERR_PTR(-EINVAL);
+
+ if (scpi_opps[domain]) /* data already populated */
+ return scpi_opps[domain];
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS,
+ SCPI_CMD_GET_DVFS_INFO, domain, buf);
+ ret = scpi_execute_cmd(&sdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ opp = kmalloc(sizeof(*opp), GFP_KERNEL);
+ if (!opp)
+ return ERR_PTR(-ENOMEM);
+
+	count = DVFS_OPP_COUNT(buf.header);
+	if (count > MAX_DVFS_OPPS) {
+		kfree(opp);
+		return ERR_PTR(-EIO);
+	}
+	freqs_sz = count * sizeof(*(opp->freqs));
+
+ opp->count = count;
+ opp->latency = DVFS_LATENCY(buf.header);
+ opp->freqs = kmalloc(freqs_sz, GFP_KERNEL);
+ if (!opp->freqs) {
+ kfree(opp);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(opp->freqs, &buf.freqs[0], freqs_sz);
+ scpi_opps[domain] = opp;
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(scpi_dvfs_get_opps);
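A sketch of a consumer (such as a cpufreq driver) walking the returned table; domain 0 is illustrative, and the frequency values are whatever unit the firmware reports:

	struct scpi_opp *opp = scpi_dvfs_get_opps(0);
	int i;

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	for (i = 0; i < opp->count; i++)
		pr_debug("OPP %d: freq %u, latency %u us\n",
			 i, opp->freqs[i], opp->latency);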
+
+int scpi_dvfs_get_idx(u8 domain)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct {
+ u32 status;
+ u8 dvfs_idx;
+ } buf;
+ int ret;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return -EINVAL;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS,
+ SCPI_CMD_GET_DVFS, domain, buf);
+ ret = scpi_execute_cmd(&sdata);
+
+ if (!ret)
+ ret = buf.dvfs_idx;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scpi_dvfs_get_idx);
+
+int scpi_dvfs_set_idx(u8 domain, u8 idx)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct {
+ u8 dvfs_domain;
+ u8 dvfs_idx;
+ } buf;
+ int stat;
+
+ buf.dvfs_idx = idx;
+ buf.dvfs_domain = domain;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return -EINVAL;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS,
+ SCPI_CMD_SET_DVFS, buf, stat);
+ return scpi_execute_cmd(&sdata);
+}
+EXPORT_SYMBOL_GPL(scpi_dvfs_set_idx);
diff --git a/include/linux/mailbox.h b/include/linux/mailbox.h
index 5161f63ec1c8..44bcb2be0563 100644
--- a/include/linux/mailbox.h
+++ b/include/linux/mailbox.h
@@ -1,17 +1,18 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
*
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
-int pl320_ipc_transmit(u32 *data);
-int pl320_ipc_register_notifier(struct notifier_block *nb);
-int pl320_ipc_unregister_notifier(struct notifier_block *nb);
+#ifndef __MAILBOX_H
+#define __MAILBOX_H
+
+enum mbox_result {
+ MBOX_OK = 0,
+ MBOX_ERR,
+};
+
+#endif /* __MAILBOX_H */
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
new file mode 100644
index 000000000000..4954378be378
--- /dev/null
+++ b/include/linux/mailbox_client.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CLIENT_H
+#define __MAILBOX_CLIENT_H
+
+#include <linux/mailbox.h>
+
+struct mbox_chan;
+struct notifier_block;
+
+/**
+ * struct mbox_client - User of a mailbox
+ * @chan_name: The "controller:channel" this client wants
+ * @rx_callback: Atomic callback to provide client the data received
+ * @tx_done: Atomic callback to tell client of data transmission
+ * @tx_block: If the mbox_send_message should block until data is
+ * transmitted.
+ * @tx_tout: Max block period in ms before TX is assumed failure
+ * @knows_txdone:	If the client can run the TX state machine, usually
+ *			because it receives some ACK packet for every
+ *			transmission. Unused if the controller already has
+ *			a TX_Done/RTR IRQ.
+ * @link_data: Optional controller specific parameters during channel
+ * request.
+ */
+struct mbox_client {
+ char *chan_name;
+ void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ void (*tx_done)(struct mbox_client *cl, void *mssg, enum mbox_result r);
+ bool tx_block;
+ unsigned long tx_tout;
+ bool knows_txdone;
+ void *link_data;
+};
+
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl);
+int mbox_send_message(struct mbox_chan *chan, void *mssg);
+void mbox_client_txdone(struct mbox_chan *chan, enum mbox_result r);
+void mbox_free_channel(struct mbox_chan *chan);
+int mbox_notify_chan_register(const char *name, struct notifier_block *nb);
+void mbox_notify_chan_unregister(const char *name, struct notifier_block *nb);
+
+#endif /* __MAILBOX_CLIENT_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
new file mode 100644
index 000000000000..5ff22c6c8014
--- /dev/null
+++ b/include/linux/mailbox_controller.h
@@ -0,0 +1,85 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CONTROLLER_H
+#define __MAILBOX_CONTROLLER_H
+
+#include <linux/mailbox.h>
+
+/**
+ * struct mbox_link - s/w representation of a communication link
+ * @link_name: Literal name assigned to the link. Physically
+ * identical channels may have the same name.
+ * @api_priv: Hook for the API to map its private data on the link
+ * Controller driver must not touch it.
+ */
+struct mbox_link {
+ char link_name[16];
+ void *api_priv;
+};
+
+/**
+ * struct mbox_link_ops - operations on a communication link
+ * @send_data:	The API asks the MBOX controller driver, in atomic
+ *		context, to try to transmit a message on the bus. Returns 0 if
+ *		data is accepted for transmission, -EBUSY while rejecting
+ *		if the remote hasn't yet read the last data sent. Actual
+ *		transmission of data is reported by the controller via
+ *		mbox_link_txdone (if it has some TX ACK irq). It must not
+ *		block.
+ * @startup:	Called when a client requests the link. The controller
+ *		could ask clients for additional parameters of communication
+ *		to be provided via the client's link_data. This call may
+ *		block. After this call the controller must forward any
+ *		data received on the link by calling mbox_link_received_data.
+ * @shutdown:	Called when a client relinquishes control of a link.
+ *		This call may block too. The controller must not forward
+ *		any received data once this returns.
+ * @last_tx_done: If the controller sets 'txdone_poll', the API calls
+ *		this to poll the status of the last TX. The controller must
+ *		give priority to the IRQ method over polling and never
+ *		set both txdone_poll and txdone_irq. Only in polling
+ *		mode is 'send_data' expected to return -EBUSY.
+ *		Used only if txdone_poll:=true && txdone_irq:=false
+ */
+struct mbox_link_ops {
+ int (*send_data)(struct mbox_link *link, void *data);
+ int (*startup)(struct mbox_link *link, void *params);
+ void (*shutdown)(struct mbox_link *link);
+ bool (*last_tx_done)(struct mbox_link *link);
+};
+
+/**
+ * struct mbox_controller - Controller of a class of communication links
+ * @dev: Device backing this controller
+ * @controller_name: Literal name of the controller.
+ * @ops: Operators that work on each communication link
+ * @links: Null terminated array of links.
+ * @txdone_irq: Indicates if the controller can report to API when
+ * the last transmitted data was read by the remote.
+ * Eg, if it has some TX ACK irq.
+ * @txdone_poll:	If the controller can read, but not report, TX
+ *			done. E.g., some register shows the TX status but
+ *			no interrupt is raised. Ignored if 'txdone_irq' is set.
+ * @txpoll_period:	If 'txdone_poll' is in effect, the API polls the
+ *			last TX's status every 'txpoll_period' milliseconds.
+ */
+struct mbox_controller {
+ struct device *dev;
+ char controller_name[16];
+ struct mbox_link_ops *ops;
+ struct mbox_link **links;
+ bool txdone_irq;
+ bool txdone_poll;
+ unsigned txpoll_period;
+};
+
+int mbox_controller_register(struct mbox_controller *mbox);
+void mbox_link_received_data(struct mbox_link *link, void *data);
+void mbox_link_txdone(struct mbox_link *link, enum mbox_result r);
+void mbox_controller_unregister(struct mbox_controller *mbox);
+
+#endif /* __MAILBOX_CONTROLLER_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
new file mode 100644
index 000000000000..66e5eb3710ab
--- /dev/null
+++ b/include/linux/scpi_protocol.h
@@ -0,0 +1,30 @@
+/*
+ * SCPI Message Protocol driver header
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _LINUX_SCPI_PROTOCOL_H
+#define _LINUX_SCPI_PROTOCOL_H
+
+#include <linux/types.h>
+
+struct scpi_opp {
+ u32 *freqs;
+ u32 latency; /* in usecs */
+ int count;
+};
+
+unsigned long scpi_clk_get_val(u16 clk_id);
+int scpi_clk_set_val(u16 clk_id, unsigned long rate);
+int scpi_dvfs_get_idx(u8 domain);
+int scpi_dvfs_set_idx(u8 domain, u8 idx);
+struct scpi_opp *scpi_dvfs_get_opps(u8 domain);
+
+#endif /* _LINUX_SCPI_PROTOCOL_H */