author    Srinivas Kandagatla <srinivas.kandagatla@linaro.org>  2015-06-25 17:50:45 +0100
committer Srinivas Kandagatla <srinivas.kandagatla@linaro.org>  2015-06-25 17:50:45 +0100
commit    24bddce99f8b937e2c073582b25606743fc95851 (patch)
tree      4111837b9596f1488f971fcf6bc7564c6344cdbe
parent    4ba9a86faf4a5a908da77e145c08f5185da364d8 (diff)
parent    40c2812f3558781c789d99e41aa91219cbfa891f (diff)
Merge branch 'tracking-qcomlt-mainline-rpm-smd' into integration-linux-qcomlt (tracking-integration-linux-qcomlt-ll-20150629.0, qcomlt-v4.1)
* tracking-qcomlt-mainline-rpm-smd: (23 commits)
  mfd: devicetree: qcom-rpm-smd: Add MSM8916
  soc: qcom: smd: Fix fifo copies
  regulator: rpm-smd: Fix PM8841 table
  regulator: qcom: smd: Add PM8916 support
  mfd: qcom-smd-rpm: Add MSM8916 support
  mfd: qcom-smd-rpm: Driver for the Qualcomm RPM over SMD
  regulator: qcom: smd: Regulator driver for the Qualcomm RPM
  mfd: devicetree: bindings: Add Qualcomm SMD based RPM DT binding
  mfd: qcom_rpm: Add MSM8916 support
  soc: qcom: smd: Remove compiler warnings
  soc: qcom: Add device tree binding for Shared Memory Device
  soc: qcom: Add Shared Memory Driver
  soc: qcom: smem: Add debugfs
  soc: qcom: Add Shared Memory Manager driver
  soc: qcom: Add device tree binding for SMEM
  mm: Don't offset memmap for flatmem]
  WIP: add memblock_overlaps_memory
  Documentation: dt: add the omap hwspinlock bindings document
  hwspinlock/omap: add support for dt nodes
  hwspinlock: qcom: Add support for Qualcomm HW Mutex block
  ...

Conflicts:
	drivers/soc/qcom/Kconfig
	drivers/soc/qcom/Makefile
-rw-r--r--  Documentation/devicetree/bindings/hwlock/hwlock.txt | 59
-rw-r--r--  Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt | 39
-rw-r--r--  Documentation/devicetree/bindings/mfd/qcom-rpm-smd.txt | 156
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt | 78
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt | 49
-rw-r--r--  Documentation/hwspinlock.txt | 10
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  arch/arm/mach-omap2/Makefile | 3
-rw-r--r--  arch/arm/mach-omap2/hwspinlock.c | 60
-rw-r--r--  drivers/hwspinlock/Kconfig | 11
-rw-r--r--  drivers/hwspinlock/Makefile | 1
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c | 79
-rw-r--r--  drivers/hwspinlock/omap_hwspinlock.c | 17
-rw-r--r--  drivers/hwspinlock/qcom_hwspinlock.c | 181
-rw-r--r--  drivers/mfd/Kconfig | 14
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/qcom-smd-rpm.c | 235
-rw-r--r--  drivers/mfd/qcom_rpm.c | 4
-rw-r--r--  drivers/regulator/Kconfig | 12
-rw-r--r--  drivers/regulator/Makefile | 1
-rw-r--r--  drivers/regulator/qcom_smd-regulator.c | 415
-rw-r--r--  drivers/soc/qcom/Kconfig | 16
-rw-r--r--  drivers/soc/qcom/Makefile | 2
-rw-r--r--  drivers/soc/qcom/smd.c | 1289
-rw-r--r--  drivers/soc/qcom/smem.c | 883
-rw-r--r--  include/linux/hwspinlock.h | 7
-rw-r--r--  include/linux/mfd/qcom-smd-rpm.h | 35
-rw-r--r--  include/linux/soc/qcom/smd.h | 47
-rw-r--r--  include/linux/soc/qcom/smem.h | 14
30 files changed, 3677 insertions, 68 deletions
diff --git a/Documentation/devicetree/bindings/hwlock/hwlock.txt b/Documentation/devicetree/bindings/hwlock/hwlock.txt
new file mode 100644
index 000000000000..085d1f5c916a
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/hwlock.txt
@@ -0,0 +1,59 @@
+Generic hwlock bindings
+=======================
+
+Generic bindings that are common to all the hwlock platform specific driver
+implementations.
+
+Please also look through the individual platform-specific hwlock binding
+documentation to identify any additional properties specific to that
+platform.
+
+hwlock providers:
+=================
+
+Required properties:
+- #hwlock-cells: Specifies the number of cells needed to represent a
+ specific lock.
+
+hwlock users:
+=============
+
+Consumers that require specific hwlock(s) should specify them using the
+property "hwlocks", and an optional "hwlock-names" property.
+
+Required properties:
+- hwlocks: List of phandle to a hwlock provider node and an
+ associated hwlock args specifier as indicated by
+ #hwlock-cells. The list can have just a single hwlock
+ or multiple hwlocks, with each hwlock represented by
+ a phandle and a corresponding args specifier.
+
+Optional properties:
+- hwlock-names: List of hwlock name strings defined in the same order
+ as the hwlocks, with one name per hwlock. Consumers can
+ use the hwlock-names to match and get a specific hwlock.
+
+
+1. Example of a node using a single specific hwlock:
+
+The following example has a node requesting a hwlock in the bank defined by
+the node hwlock1. hwlock1 is a hwlock provider with an argument specifier
+of length 1.
+
+ node {
+ ...
+ hwlocks = <&hwlock1 2>;
+ ...
+ };
+
+2. Example of a node using multiple specific hwlocks:
+
+The following example has a node requesting two hwlocks, a hwlock within
+the hwlock device node 'hwlock1' with #hwlock-cells value of 1, and another
+hwlock within the hwlock device node 'hwlock2' with #hwlock-cells value of 2.
+
+ node {
+ ...
+ hwlocks = <&hwlock1 2>, <&hwlock2 0 3>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt
new file mode 100644
index 000000000000..2c9804f4f4ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt
@@ -0,0 +1,26 @@
+OMAP4+ HwSpinlock Driver
+========================
+
+Required properties:
+- compatible: Should be "ti,omap4-hwspinlock" for
+ OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs
+- reg: Contains the hwspinlock module register address space
+ (base address and length)
+- ti,hwmods: Name of the hwmod associated with the hwspinlock device
+- #hwlock-cells: Should be 1. The OMAP hwspinlock users will use a
+ 0-indexed relative hwlock number as the argument
+ specifier value for requesting a specific hwspinlock
+ within a hwspinlock bank.
+
+Please look at the generic hwlock binding for usage information for consumers,
+"Documentation/devicetree/bindings/hwlock/hwlock.txt"
+
+Example:
+
+/* OMAP4 */
+hwspinlock: spinlock@4a0f6000 {
+ compatible = "ti,omap4-hwspinlock";
+ reg = <0x4a0f6000 0x1000>;
+ ti,hwmods = "spinlock";
+ #hwlock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt
new file mode 100644
index 000000000000..28ade7dea37e
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt
@@ -0,0 +1,39 @@
+Qualcomm Hardware Mutex Block:
+
+The hardware block provides mutexes utilized between different processors on
+the SoC as part of the communication protocol used by these processors.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,sfpb-mutex",
+ "qcom,tcsr-mutex"
+
+- syscon:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: three cells containing:
+ syscon phandle
+ offset of the hwmutex block within the syscon
+ stride of the hwmutex registers
+
+- #hwlock-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 1, the specified cell represents the lock id
+ (hwlock standard property, see hwlock.txt)
+
+Example:
+
+ tcsr: syscon@fd484000 {
+ compatible = "qcom,tcsr-msm8974", "syscon";
+ reg = <0xfd484000 0x2000>;
+ };
+
+ hwlock@fd484000 {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr 0 0x80>;
+
+ #hwlock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm-smd.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm-smd.txt
new file mode 100644
index 000000000000..2e28f350c548
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/qcom-rpm-smd.txt
@@ -0,0 +1,156 @@
+Qualcomm Resource Power Manager (RPM) over SMD
+
+This driver is used to interface with the Resource Power Manager (RPM) found in
+various Qualcomm platforms. The RPM allows each component in the system to vote
+for the state of system resources, such as clocks, regulators and bus
+frequencies.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,rpm-msm8974"
+ "qcom,rpm-msm8916"
+
+- qcom,smd-channels:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Shared Memory channel used for communication with the RPM
+
+= SUBDEVICES
+
+The RPM exposes resources to its subnodes. The bindings below specify the set
+of valid subnodes that can operate on these resources.
+
+== Regulators
+
+Regulator nodes are identified by their compatible:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,rpm-pm8841-regulators"
+ "qcom,rpm-pm8941-regulators"
+ "qcom,rpm-pm8916-regulators"
+
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_s7-supply:
+- vdd_s8-supply:
+ Usage: optional (pm8841 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_l1_l3-supply:
+- vdd_l2_lvs1_2_3-supply:
+- vdd_l4_l11-supply:
+- vdd_l5_l7-supply:
+- vdd_l6_l12_l14_l15-supply:
+- vdd_l8_l16_l18_l19-supply:
+- vdd_l9_l10_l17_l22-supply:
+- vdd_l13_l20_l23_l24-supply:
+- vdd_l21-supply:
+- vin_5vs-supply:
+ Usage: optional (pm8941 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_l1_l2_l3-supply:
+- vdd_l4_l5_l6-supply:
+- vdd_l7-supply:
+- vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18-supply:
+ Usage: optional (pm8916 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
+The regulator node houses sub-nodes for each regulator within the device. Each
+sub-node is identified using the node's name, with valid values listed for each
+of the PMICs below.
+
+pm8841:
+ s1, s2, s3, s4, s5, s6, s7, s8
+
+pm8941:
+ s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
+ l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2,
+ lvs3, 5vs1, 5vs2
+
+pm8916:
+ s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
+ l14, l15, l16, l17, l18
+
+The content of each sub-node is defined by the standard binding for regulators -
+see regulator.txt.
+
+= EXAMPLE
+
+ smd {
+ compatible = "qcom,smd";
+
+ rpm {
+ interrupts = <0 168 1>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8974";
+ qcom,smd-channels = "rpm_requests";
+
+ pm8941-regulators {
+ compatible = "qcom,rpm-pm8941-regulators";
+ vdd_l13_l20_l23_l24-supply = <&pm8941_boost>;
+
+ pm8941_s3: s3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8941_boost: s4 {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ pm8941_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ };
+ };
+ };
+ };
+
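For reference, a driver bound to one of the regulator compatibles above reaches the RPM
through its parent device, which the qcom-smd-rpm driver populates via of_platform_populate().
The sketch below is modeled on the qcom_smd-regulator driver included in this merge; the
resource type, id and request key are illustrative placeholders, not values mandated by the
binding.

	/* Minimal sketch of an RPM subdevice issuing a request (illustrative). */
	#include <linux/platform_device.h>
	#include <linux/mfd/qcom-smd-rpm.h>

	struct example_rpm_req {
		u32 key;	/* request key, e.g. "swen" - software enable */
		u32 nbytes;
		u32 value;
	};

	static int example_rpm_child_probe(struct platform_device *pdev)
	{
		/* the parent is the qcom-smd-rpm device; its drvdata is the rpm handle */
		struct qcom_smd_rpm *rpm = dev_get_drvdata(pdev->dev.parent);
		struct example_rpm_req req = {
			.key = 0x6e657773,	/* "swen" */
			.nbytes = sizeof(u32),
			.value = 1,
		};

		if (!rpm)
			return -ENODEV;

		/* vote in the active set; LDOA/1 is an arbitrary example resource */
		return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
					  QCOM_SMD_RPM_LDOA, 1, &req, sizeof(req));
	}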
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
new file mode 100644
index 000000000000..0fdb7f9d69ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
@@ -0,0 +1,78 @@
+Qualcomm Shared Memory Driver (SMD) binding
+
+This binding describes the Qualcomm Shared Memory Driver, a FIFO-based
+communication channel for sending data between the various subsystems in
+Qualcomm platforms.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,smd"
+
+= EDGES
+
+Each subnode of the SMD node represents a remote subsystem or a remote
+processor of some sort - or in SMD language an "edge". The names of the
+edges are not important.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the IRQ used by the remote processor to
+ signal this processor about communication related updates
+
+- qcom,ipc:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: three entries specifying the outgoing ipc bit used for
+ signaling the remote processor:
+ - phandle to a syscon node representing the apcs registers
+ - u32 representing offset to the register within the syscon
+ - u32 representing the ipc bit within the register
+
+- qcom,smd-edge:
+ Usage: required
+ Value type: <u32>
+ Definition: the identifier representing the remote processor in the
+ smd channel allocation table
+
+= SMD DEVICES
+
+In turn, subnodes of the "edges" represent devices tied to SMD channels on that
+"edge". The names of the devices are not important. The properties of these
+nodes are defined by the individual bindings for the SMD devices - but must
+contain the following property:
+
+- qcom,smd-channels:
+ Usage: required
+ Value type: <stringlist>
+ Definition: a list of channels tied to this device, used for matching
+ the device to channels
+
+= EXAMPLE
+
+The following example represents an smd node, with one edge representing the
+"rpm" subsystem. For the "rpm" subsystem we have a device tied to the
+"rpm_requests" channel.
+
+ apcs: syscon@f9011000 {
+ compatible = "syscon";
+ reg = <0xf9011000 0x1000>;
+ };
+
+ smd {
+ compatible = "qcom,smd";
+
+ rpm {
+ interrupts = <0 168 1>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8974";
+ qcom,smd-channels = "rpm_requests";
+
+ ...
+ };
+ };
+ };
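For context, the sketch below shows the shape of a driver that binds to such an SMD device,
modeled on the qcom-smd-rpm driver included in this merge; the compatible string, payload and
channel handling are illustrative placeholders only.

	/* Minimal sketch of an SMD client driver (illustrative). */
	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/soc/qcom/smd.h>

	static int example_smd_probe(struct qcom_smd_device *sdev)
	{
		/* the channel named in qcom,smd-channels is opened before probe */
		u32 hello = 1;

		return qcom_smd_send(sdev->channel, &hello, sizeof(hello));
	}

	/* called for incoming data on the channel */
	static int example_smd_callback(struct qcom_smd_device *sdev,
					const void *data, size_t count)
	{
		dev_info(&sdev->dev, "received %zu bytes\n", count);
		return 0;
	}

	static const struct of_device_id example_smd_of_match[] = {
		{ .compatible = "example,smd-client" },	/* placeholder */
		{}
	};

	static struct qcom_smd_driver example_smd_driver = {
		.probe = example_smd_probe,
		.callback = example_smd_callback,
		.driver = {
			.name = "example_smd_client",
			.of_match_table = example_smd_of_match,
		},
	};
	module_qcom_smd_driver(example_smd_driver);

	MODULE_LICENSE("GPL v2");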
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt
new file mode 100644
index 000000000000..d90f839964e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt
@@ -0,0 +1,49 @@
+Qualcomm Shared Memory binding
+
+This binding describes the Qualcomm Shared Memory, used to share data between
+various subsystems and OSes in Qualcomm platforms.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be:
+ "qcom,smem"
+
+- memory-region:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: handle to memory reservation for main smem memory region.
+
+- reg:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: base address and size pair for any additional memory areas
+ of the shared memory.
+
+- hwlocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to a hwspinlock used to protect allocations from
+ the shared memory
+
+= EXAMPLE
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ smem_region: smem@fa00000 {
+ reg = <0xfa00000 0x200000>;
+ no-map;
+ };
+ };
+
+ smem@fa00000 {
+ compatible = "qcom,smem";
+
+ memory-region = <&smem_region>;
+ reg = <0xfc428000 0x4000>;
+
+ hwlocks = <&tcsr_mutex 3>;
+ };
diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt
index 62f7d4ea6e26..61c1ee98e59f 100644
--- a/Documentation/hwspinlock.txt
+++ b/Documentation/hwspinlock.txt
@@ -48,6 +48,16 @@ independent, drivers.
ids for predefined purposes.
Should be called from a process context (might sleep).
+ int of_hwspin_lock_get_id(struct device_node *np, int index);
+ - retrieve the global lock id for an OF phandle-based specific lock.
+ This function provides a means for DT users of a hwspinlock module
+ to get the global lock id of a specific hwspinlock, so that it can
+ be requested using the normal hwspin_lock_request_specific() API.
+ The function returns a lock id number on success, -EPROBE_DEFER if
+ the hwspinlock device is not yet registered with the core, or other
+ error values.
+ Should be called from a process context (might sleep).
+
int hwspin_lock_free(struct hwspinlock *hwlock);
- free a previously-assigned hwspinlock; returns 0 on success, or an
appropriate error code on failure (e.g. -EINVAL if the hwspinlock
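As an illustration of how the new of_hwspin_lock_get_id() call fits into the existing request
flow, a DT consumer could do something like the sketch below (error handling abbreviated; the
index and timeout values are arbitrary):

	#include <linux/hwspinlock.h>
	#include <linux/of.h>

	static int example_take_hwlock(struct device_node *np)
	{
		struct hwspinlock *hwlock;
		unsigned long flags;
		int id, ret;

		/* resolve the first entry of the node's "hwlocks" property */
		id = of_hwspin_lock_get_id(np, 0);	/* may be -EPROBE_DEFER */
		if (id < 0)
			return id;

		hwlock = hwspin_lock_request_specific(id);
		if (!hwlock)
			return -EBUSY;

		/* take the lock, time out after 10 msecs if it stays contended */
		ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
		if (!ret) {
			/* ... section shared with the remote processors ... */
			hwspin_unlock_irqrestore(hwlock, &flags);
		}

		hwspin_lock_free(hwlock);
		return ret;
	}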
diff --git a/MAINTAINERS b/MAINTAINERS
index ac50c4cee4cc..104958b5b9ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7139,7 +7139,6 @@ M: Ohad Ben-Cohen <ohad@wizery.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/hwspinlock/omap_hwspinlock.c
-F: arch/arm/mach-omap2/hwspinlock.c
OMAP MMC SUPPORT
M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index ec002bd4af77..9a2c88e56bae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -277,8 +277,5 @@ obj-y += $(nand-m) $(nand-y)
smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
obj-y += $(smsc911x-m) $(smsc911x-y)
-ifneq ($(CONFIG_HWSPINLOCK_OMAP),)
-obj-y += hwspinlock.o
-endif
obj-y += common-board-devices.o twl-common.o dss-common.o
diff --git a/arch/arm/mach-omap2/hwspinlock.c b/arch/arm/mach-omap2/hwspinlock.c
deleted file mode 100644
index ef175acaeaa2..000000000000
--- a/arch/arm/mach-omap2/hwspinlock.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * OMAP hardware spinlock device initialization
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
- *
- * Contact: Simon Que <sque@ti.com>
- * Hari Kanigeri <h-kanigeri2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/hwspinlock.h>
-
-#include "soc.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-
-static struct hwspinlock_pdata omap_hwspinlock_pdata __initdata = {
- .base_id = 0,
-};
-
-static int __init hwspinlocks_init(void)
-{
- int retval = 0;
- struct omap_hwmod *oh;
- struct platform_device *pdev;
- const char *oh_name = "spinlock";
- const char *dev_name = "omap_hwspinlock";
-
- /*
- * Hwmod lookup will fail in case our platform doesn't support the
- * hardware spinlock module, so it is safe to run this initcall
- * on all omaps
- */
- oh = omap_hwmod_lookup(oh_name);
- if (oh == NULL)
- return -EINVAL;
-
- pdev = omap_device_build(dev_name, 0, oh, &omap_hwspinlock_pdata,
- sizeof(struct hwspinlock_pdata));
- if (IS_ERR(pdev)) {
- pr_err("Can't build omap_device for %s:%s\n", dev_name,
- oh_name);
- retval = PTR_ERR(pdev);
- }
-
- return retval;
-}
-/* early board code might need to reserve specific hwspinlock instances */
-omap_postcore_initcall(hwspinlocks_init);
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 3612cb5b30b2..762216d8e12f 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -18,6 +18,17 @@ config HWSPINLOCK_OMAP
If unsure, say N.
+config HWSPINLOCK_QCOM
+ tristate "Qualcomm Hardware Spinlock device"
+ depends on ARCH_QCOM
+ select HWSPINLOCK
+ help
+ Say y here to support the Qualcomm Hardware Mutex functionality, which
+ provides a synchronisation mechanism for the various processors on
+ the SoC.
+
+ If unsure, say N.
+
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
depends on ARCH_U8500
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 93eb64b66486..68f95d9a4855 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -4,4 +4,5 @@
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 461a0d739d75..f8539f624e3d 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -27,6 +27,7 @@
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include "hwspinlock_internal.h"
@@ -257,6 +258,84 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
+/**
+ * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
+ * @bank: the hwspinlock device bank
+ * @hwlock_spec: hwlock specifier as found in the device tree
+ *
+ * This is a simple translation function, suitable for hwspinlock platform
+ * drivers that only have a lock specifier length of 1.
+ *
+ * Returns a relative index of the lock within a specified bank on success,
+ * or -EINVAL on invalid specifier cell count.
+ */
+static inline int
+of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
+{
+ if (WARN_ON(hwlock_spec->args_count != 1))
+ return -EINVAL;
+
+ return hwlock_spec->args[0];
+}
+
+/**
+ * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
+ * @np: device node from which to request the specific hwlock
+ * @index: index of the hwlock in the list of values
+ *
+ * This function provides a means for DT users of the hwspinlock module to
+ * get the global lock id of a specific hwspinlock using the phandle of the
+ * hwspinlock device, so that it can be requested using the normal
+ * hwspin_lock_request_specific() API.
+ *
+ * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
+ * device is not yet registered, -EINVAL on invalid args specifier value or an
+ * appropriate error as returned from the OF parsing of the DT client node.
+ */
+int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+ struct of_phandle_args args;
+ struct hwspinlock *hwlock;
+ struct radix_tree_iter iter;
+ void **slot;
+ int id;
+ int ret;
+
+ ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
+ &args);
+ if (ret)
+ return ret;
+
+ /* perform a sanity check of the hwspinlock device */
+ ret = -EPROBE_DEFER;
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
+ hwlock = radix_tree_deref_slot(slot);
+ if (unlikely(!hwlock))
+ continue;
+
+ if (hwlock->bank->dev->of_node == args.np) {
+ ret = 0;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (ret < 0)
+ goto out;
+
+ id = of_hwspin_lock_simple_xlate(&args);
+ if (id < 0 || id >= hwlock->bank->num_locks) {
+ ret = -EINVAL;
+ goto out;
+ }
+ id += hwlock->bank->base_id;
+
+out:
+ of_node_put(args.np);
+ return ret ? ret : id;
+}
+EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
+
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
struct hwspinlock *tmp;
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 47a275c6ece1..e49e5eb784a6 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -1,7 +1,7 @@
/*
* OMAP hardware spinlock driver
*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include "hwspinlock_internal.h"
@@ -80,14 +81,15 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
- struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
+ struct device_node *node = pdev->dev.of_node;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
struct resource *res;
void __iomem *io_base;
int num_locks, i, ret;
+ int base_id = 0;
- if (!pdata)
+ if (!node)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -141,7 +143,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
- pdata->base_id, num_locks);
+ base_id, num_locks);
if (ret)
goto reg_fail;
@@ -174,11 +176,18 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id omap_hwspinlock_of_match[] = {
+ { .compatible = "ti,omap4-hwspinlock", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
+
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
.remove = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
+ .of_match_table = of_match_ptr(omap_hwspinlock_of_match),
},
};
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
new file mode 100644
index 000000000000..93b62e055ff6
--- /dev/null
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, Sony Mobile Communications AB
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "hwspinlock_internal.h"
+
+#define QCOM_MUTEX_APPS_PROC_ID 1
+#define QCOM_MUTEX_NUM_LOCKS 32
+
+static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ struct regmap_field *field = lock->priv;
+ u32 lock_owner;
+ int ret;
+
+ ret = regmap_field_write(field, QCOM_MUTEX_APPS_PROC_ID);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_read(field, &lock_owner);
+ if (ret)
+ return ret;
+
+ return lock_owner == QCOM_MUTEX_APPS_PROC_ID;
+}
+
+static void qcom_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ struct regmap_field *field = lock->priv;
+ u32 lock_owner;
+ int ret;
+
+ ret = regmap_field_read(field, &lock_owner);
+ if (ret) {
+ pr_err("%s: unable to query spinlock owner\n", __func__);
+ return;
+ }
+
+ if (lock_owner != QCOM_MUTEX_APPS_PROC_ID) {
+ pr_err("%s: spinlock not owned by us (actual owner is %d)\n",
+ __func__, lock_owner);
+ }
+
+ ret = regmap_field_write(field, 0);
+ if (ret)
+ pr_err("%s: failed to unlock spinlock\n", __func__);
+}
+
+static const struct hwspinlock_ops qcom_hwspinlock_ops = {
+ .trylock = qcom_hwspinlock_trylock,
+ .unlock = qcom_hwspinlock_unlock,
+};
+
+static const struct of_device_id qcom_hwspinlock_of_match[] = {
+ { .compatible = "qcom,sfpb-mutex" },
+ { .compatible = "qcom,tcsr-mutex" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
+
+static int qcom_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct hwspinlock_device *bank;
+ struct device_node *syscon;
+ struct reg_field field;
+ struct regmap *regmap;
+ size_t array_size;
+ u32 stride;
+ u32 base;
+ int ret;
+ int i;
+
+ syscon = of_parse_phandle(pdev->dev.of_node, "syscon", 0);
+ if (!syscon) {
+ dev_err(&pdev->dev, "no syscon property\n");
+ return -ENODEV;
+ }
+
+ regmap = syscon_node_to_regmap(syscon);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, &base);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no offset in syscon\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, &stride);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no stride syscon\n");
+ return -EINVAL;
+ }
+
+ array_size = QCOM_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
+ bank = devm_kzalloc(&pdev->dev, sizeof(*bank) + array_size, GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bank);
+
+ for (i = 0; i < QCOM_MUTEX_NUM_LOCKS; i++) {
+ field.reg = base + i * stride;
+ field.lsb = 0;
+ field.msb = 31;
+
+ bank->lock[i].priv = devm_regmap_field_alloc(&pdev->dev,
+ regmap, field);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = hwspin_lock_register(bank, &pdev->dev, &qcom_hwspinlock_ops,
+ 0, QCOM_MUTEX_NUM_LOCKS);
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int qcom_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct hwspinlock_device *bank = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = hwspin_lock_unregister(bank);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver qcom_hwspinlock_driver = {
+ .probe = qcom_hwspinlock_probe,
+ .remove = qcom_hwspinlock_remove,
+ .driver = {
+ .name = "qcom_hwspinlock",
+ .of_match_table = qcom_hwspinlock_of_match,
+ },
+};
+
+static int __init qcom_hwspinlock_init(void)
+{
+ return platform_driver_register(&qcom_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(qcom_hwspinlock_init);
+
+static void __exit qcom_hwspinlock_exit(void)
+{
+ platform_driver_unregister(&qcom_hwspinlock_driver);
+}
+module_exit(qcom_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for Qualcomm SoCs");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index d5ad04dad081..ef237af4f64a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -653,6 +653,20 @@ config MFD_QCOM_RPM
Say M here if you want to include support for the Qualcomm RPM as a
module. This will build a module called "qcom_rpm".
+config MFD_QCOM_SMD_RPM
+ tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+ depends on QCOM_SMD && OF
+ help
+ If you say yes to this option, support will be included for the
+ Resource Power Manager system found in the Qualcomm 8974 based
+ devices.
+
+ This is required to access many regulators, clocks and bus
+ frequencies controlled by the RPM on these devices.
+
+ Say M here if you want to include support for the Qualcomm RPM as a
+ module. This will build a module called "qcom-smd-rpm".
+
config MFD_SPMI_PMIC
tristate "Qualcomm SPMI PMICs"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0e5cfeba107c..ff6fb39d1e89 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -156,6 +156,7 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o
obj-$(CONFIG_MFD_QCOM_RPM) += qcom_rpm.o
+obj-$(CONFIG_MFD_QCOM_SMD_RPM) += qcom-smd-rpm.o
obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_TPS65090) += tps65090.o
diff --git a/drivers/mfd/qcom-smd-rpm.c b/drivers/mfd/qcom-smd-rpm.c
new file mode 100644
index 000000000000..10c8a84b7223
--- /dev/null
+++ b/drivers/mfd/qcom-smd-rpm.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <linux/soc/qcom/smd.h>
+#include <linux/mfd/qcom-smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel: reference to the smd channel
+ * @ack: completion for acks
+ * @lock: mutual exclusion around the send/complete pair
+ * @ack_status: result of the rpm request
+ */
+struct qcom_smd_rpm {
+ struct qcom_smd_channel *rpm_channel;
+
+ struct completion ack;
+ struct mutex lock;
+ int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type: identifier of the service
+ * @length: length of the payload
+ */
+struct qcom_rpm_header {
+ u32 service_type;
+ u32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id: identifier of the outgoing message
+ * @flags: active/sleep state flags
+ * @type: resource type
+ * @id: resource id
+ * @data_len: length of the payload following this header
+ */
+struct qcom_rpm_request {
+ u32 msg_id;
+ u32 flags;
+ u32 type;
+ u32 id;
+ u32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type: indicator of the type of message
+ * @length: the size of this message, including the message header
+ * @msg_id: message id
+ * @message: textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+ u32 msg_type;
+ u32 length;
+ union {
+ u32 msg_id;
+ u8 message[0];
+ };
+};
+
+#define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm: rpm handle
+ * @type: resource type
+ * @id: resource identifier
+ * @buf: the data to be written
+ * @count: number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 type, u32 id,
+ void *buf,
+ size_t count)
+{
+ static unsigned msg_id = 1;
+ int left;
+ int ret;
+
+ struct {
+ struct qcom_rpm_header hdr;
+ struct qcom_rpm_request req;
+ u8 payload[count];
+ } pkt;
+
+ /* SMD packets to the RPM may not exceed 256 bytes */
+ if (WARN_ON(sizeof(pkt) >= 256))
+ return -EINVAL;
+
+ mutex_lock(&rpm->lock);
+
+ pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST;
+ pkt.hdr.length = sizeof(struct qcom_rpm_request) + count;
+
+ pkt.req.msg_id = msg_id++;
+ pkt.req.flags = BIT(state);
+ pkt.req.type = type;
+ pkt.req.id = id;
+ pkt.req.data_len = count;
+ memcpy(pkt.payload, buf, count);
+
+ ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt));
+ if (ret)
+ goto out;
+
+ left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+ if (!left)
+ ret = -ETIMEDOUT;
+ else
+ ret = rpm->ack_status;
+
+out:
+ mutex_unlock(&rpm->lock);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
+
+#define RPM_ERR_INVALID_RESOURCE "resource does not exist"
+
+static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ const struct qcom_rpm_header *hdr = data;
+ const struct qcom_rpm_message *msg;
+ const size_t inv_res_len = sizeof(RPM_ERR_INVALID_RESOURCE) - 1;
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
+ const u8 *buf = data + sizeof(struct qcom_rpm_header);
+ const u8 *end = buf + hdr->length;
+ int status = 0;
+
+ if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST ||
+ hdr->length < sizeof(struct qcom_rpm_message)) {
+ dev_err(&qsdev->dev, "invalid request\n");
+ return 0;
+ }
+
+ while (buf < end) {
+ msg = (struct qcom_rpm_message *)buf;
+ switch (msg->msg_type) {
+ case RPM_MSG_TYPE_MSG_ID:
+ break;
+ case RPM_MSG_TYPE_ERR:
+ if (msg->length == inv_res_len &&
+ !memcmp(msg->message,
+ RPM_ERR_INVALID_RESOURCE,
+ inv_res_len))
+ status = -ENXIO;
+ else
+ status = -EIO;
+ break;
+ }
+
+ buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4);
+ }
+
+ rpm->ack_status = status;
+ complete(&rpm->ack);
+ return 0;
+}
+
+static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev)
+{
+ struct qcom_smd_rpm *rpm;
+
+ rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL);
+ if (!rpm)
+ return -ENOMEM;
+
+ mutex_init(&rpm->lock);
+ init_completion(&rpm->ack);
+
+ rpm->rpm_channel = sdev->channel;
+
+ dev_set_drvdata(&sdev->dev, rpm);
+
+ return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
+{
+ of_platform_depopulate(&sdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+ { .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8916" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct qcom_smd_driver qcom_smd_rpm_driver = {
+ .probe = qcom_smd_rpm_probe,
+ .remove = qcom_smd_rpm_remove,
+ .callback = qcom_smd_rpm_callback,
+ .driver = {
+ .name = "qcom_smd_rpm",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smd_rpm_of_match,
+ },
+};
+
+module_qcom_smd_driver(qcom_smd_rpm_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 7b87bde5d736..660fe319b5bf 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -72,6 +72,10 @@ struct qcom_rpm {
#define RPM_SIGNAL BIT(2)
+static const struct qcom_rpm_resource msm8916_rpm_resource_table[] = {
+
+};
+
static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = {
[QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
[QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index a6f116aa5235..7707b989a246 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -512,6 +512,18 @@ config REGULATOR_QCOM_RPM
Qualcomm RPM as a module. The module will be named
"qcom_rpm-regulator".
+config REGULATOR_QCOM_SMD_RPM
+ tristate "Qualcomm SMD based RPM regulator driver"
+ depends on MFD_QCOM_SMD_RPM
+ help
+ If you say yes to this option, support will be included for the
+ regulators exposed by the Resource Power Manager found in Qualcomm
+ 8974 based devices.
+
+ Say M here if you want to include support for the regulators on the
+ Qualcomm RPM as a module. The module will be named
+ "qcom_smd-regulator".
+
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
depends on MFD_RC5T583
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2c4da15e1545..da9be8851e01 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
new file mode 100644
index 000000000000..52ba85ddbb79
--- /dev/null
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include <linux/mfd/qcom-smd-rpm.h>
+
+struct qcom_rpm_reg {
+ struct device *dev;
+
+ struct qcom_smd_rpm *rpm;
+
+ u32 type;
+ u32 id;
+
+ struct regulator_desc desc;
+
+ int is_enabled;
+ int uV;
+};
+
+struct rpm_regulator_req {
+ u32 key;
+ u32 nbytes;
+ u32 value;
+};
+
+#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
+#define RPM_KEY_UV 0x00007675 /* "uv" */
+#define RPM_KEY_MA 0x0000616d /* "ma" */
+
+static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
+ struct rpm_regulator_req *req,
+ size_t size)
+{
+ return qcom_rpm_smd_write(vreg->rpm,
+ QCOM_SMD_RPM_ACTIVE_STATE,
+ vreg->type,
+ vreg->id,
+ req, size);
+}
+
+static int rpm_reg_enable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ req.key = RPM_KEY_SWEN;
+ req.nbytes = sizeof(u32);
+ req.value = 1;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (!ret)
+ vreg->is_enabled = 1;
+
+ return ret;
+}
+
+static int rpm_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->is_enabled;
+}
+
+static int rpm_reg_disable(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ req.key = RPM_KEY_SWEN;
+ req.nbytes = sizeof(u32);
+ req.value = 0;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (!ret)
+ vreg->is_enabled = 0;
+
+ return ret;
+}
+
+static int rpm_reg_get_voltage(struct regulator_dev *rdev)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->uV;
+}
+
+static int rpm_reg_set_voltage(struct regulator_dev *rdev,
+ int min_uV,
+ int max_uV,
+ unsigned *selector)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret = 0;
+
+ req.key = RPM_KEY_UV;
+ req.nbytes = sizeof(u32);
+ req.value = min_uV;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (!ret)
+ vreg->uV = min_uV;
+
+ return ret;
+}
+
+static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
+{
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+
+ req.key = RPM_KEY_MA;
+ req.nbytes = sizeof(u32);
+ req.value = load_uA;
+
+ return rpm_reg_write_active(vreg, &req, sizeof(req));
+}
+
+static const struct regulator_ops rpm_smps_ldo_ops = {
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
+
+ .get_voltage = rpm_reg_get_voltage,
+ .set_voltage = rpm_reg_set_voltage,
+
+ .set_load = rpm_reg_set_load,
+};
+
+static const struct regulator_ops rpm_switch_ops = {
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
+};
+
+static const struct regulator_desc pm8941_hfsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 159,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8841_ftsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
+ REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 340,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_boost = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 16,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000),
+ REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 100,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 64,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_lnldo = {
+ .fixed_uV = 1740000,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_switch = {
+ .ops = &rpm_switch_ops,
+};
+
+static const struct regulator_desc pm8916_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 209,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 93, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 94,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_buck_lvo_smps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(750000, 96, 127, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_buck_hvo_smps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1550000, 0, 31, 25000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 32,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+struct rpm_regulator_data {
+ const char *name;
+ u32 type;
+ u32 id;
+ const struct regulator_desc *desc;
+ const char *supply;
+};
+
+static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pm8941_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPB, 3, &pm8941_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPB, 4, &pm8841_ftsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPB, 5, &pm8841_ftsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPB, 6, &pm8841_ftsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPB, 7, &pm8841_ftsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPB, 8, &pm8841_ftsmps, "vdd_s8" },
+ {}
+};
+
+static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8941_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8941_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8941_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_BOOST, 1, &pm8941_boost },
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8941_nldo, "vdd_l1_l3" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8941_nldo, "vdd_l2_lvs1_2_3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8941_nldo, "vdd_l1_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8941_nldo, "vdd_l4_l11" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8941_lnldo, "vdd_l5_l7" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8941_lnldo, "vdd_l5_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8941_nldo, "vdd_l4_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8941_pldo, "vdd_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+ { "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+ { "lvs3", QCOM_SMD_RPM_VSA, 3, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+
+ { "5vs1", QCOM_SMD_RPM_VSA, 4, &pm8941_switch, "vin_5vs" },
+ { "5vs2", QCOM_SMD_RPM_VSA, 5, &pm8941_switch, "vin_5vs" },
+
+ {}
+};
+
+static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_lvo_smps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8916_buck_lvo_smps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8916_buck_hvo_smps, "vdd_s4" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8916_pldo, "vdd_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ {}
+};
+
+static const struct of_device_id rpm_of_match[] = {
+ { .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
+ { .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ { .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rpm_of_match);
+
+static int rpm_reg_probe(struct platform_device *pdev)
+{
+ const struct rpm_regulator_data *reg;
+ const struct of_device_id *match;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct qcom_rpm_reg *vreg;
+ struct qcom_smd_rpm *rpm;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(rpm_of_match, &pdev->dev);
+ for (reg = match->data; reg->name; reg++) {
+ vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->dev = &pdev->dev;
+ vreg->type = reg->type;
+ vreg->id = reg->id;
+ vreg->rpm = rpm;
+
+ memcpy(&vreg->desc, reg->desc, sizeof(vreg->desc));
+
+ vreg->desc.id = -1;
+ vreg->desc.owner = THIS_MODULE;
+ vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.name = reg->name;
+ vreg->desc.supply_name = reg->supply;
+ vreg->desc.of_match = reg->name;
+
+ config.dev = &pdev->dev;
+ config.driver_data = vreg;
+ rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n", reg->name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver rpm_reg_driver = {
+ .probe = rpm_reg_probe,
+ .driver = {
+ .name = "qcom_rpm_smd_regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_of_match,
+ },
+};
+
+static int __init rpm_reg_init(void)
+{
+ return platform_driver_register(&rpm_reg_driver);
+}
+subsys_initcall(rpm_reg_init);
+
+static void __exit rpm_reg_exit(void)
+{
+ platform_driver_unregister(&rpm_reg_driver);
+}
+module_exit(rpm_reg_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM regulator driver");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index e67af05b8a92..36452ee2851d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -17,3 +17,19 @@ config QCOM_PM
QCOM Platform specific power driver to manage cores and L2 low power
modes. It interface with various system drivers to put the cores in
low power modes.
+
+config QCOM_SMD
+ tristate "Qualcomm Shared Memory Driver (SMD)"
+ depends on QCOM_SMEM
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Driver
+ providing communication channels to remote processors in Qualcomm
+ platforms.
+
+config QCOM_SMEM
+ tristate "Qualcomm Shared Memory Manager (SMEM)"
+ depends on ARCH_QCOM
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Manager.
+ The driver provides an interface to items in a heap shared among all
+ processors in a Qualcomm platform.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index eda7677b2c36..9036d570395e 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_ARM64) += cpu_ops.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_SMD) += smd.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
new file mode 100644
index 000000000000..41573d5edc1a
--- /dev/null
+++ b/drivers/soc/qcom/smd.c
@@ -0,0 +1,1289 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/wait.h>
+
+/*
+ * The Qualcomm Shared Memory communication solution provides point-to-point
+ * channels for clients to send and receive streaming or packet based data.
+ *
+ * Each channel consists of a control item (channel info) and a ring buffer
+ * pair. The channel info carries information related to channel state, flow
+ * control and the offsets within the ring buffer.
+ *
+ * All allocated channels are listed in an allocation table, identifying the
+ * pair of items by name, type and remote processor.
+ *
+ * Upon creating a new channel the remote processor allocates channel info and
+ * ring buffer items from the smem heap and populates the allocation table. An
+ * interrupt is sent to the other end of the channel and a scan for new
+ * channels should be done. A channel never goes away; it will only change
+ * state.
+ *
+ * The remote processor signals its intent to bring up the communication
+ * channel by setting the state of its end of the channel to "opening" and
+ * sends out an interrupt. We detect this change and register an smd device to
+ * consume the channel. Upon finding a consumer we finish the handshake and the
+ * channel is up.
+ *
+ * Upon closing a channel, the remote processor will update the state of its
+ * end of the channel and signal us; we will then unregister any attached
+ * device and close our end of the channel.
+ *
+ * Devices attached to a channel can use the qcom_smd_send function to push
+ * data to the channel; this is done by copying the data into the tx ring
+ * buffer, updating the pointers in the channel info and signaling the remote
+ * processor.
+ *
+ * The remote processor does the equivalent when it transfers data and, upon
+ * receiving the interrupt, we check the channel info for new data and deliver
+ * this to the attached device. If the device is not ready to receive the data
+ * we leave it in the ring buffer for now.
+ */
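
To make the ring-buffer accounting above concrete, here is a small user-space sketch (not part of the patch) of the head/tail arithmetic this driver relies on; it assumes, as the driver does, that the fifo size is a power of two.

/* Editorial sketch: ring accounting with a power-of-two fifo, mirroring the
 * head/tail arithmetic used by qcom_smd_channel_get_rx_avail() and
 * qcom_smd_get_tx_avail() below. Not part of the patch. */
#include <stdio.h>

static unsigned ring_used(unsigned head, unsigned tail, unsigned fifo_size)
{
	/* bytes written by the producer but not yet consumed */
	return (head - tail) & (fifo_size - 1);
}

static unsigned ring_free(unsigned head, unsigned tail, unsigned fifo_size)
{
	/* one byte is kept unused so that head == tail means "empty" */
	return (fifo_size - 1) - ring_used(head, tail, fifo_size);
}

int main(void)
{
	unsigned fifo_size = 1024;		/* must be a power of two */
	unsigned head = 20, tail = 1010;	/* head has wrapped past tail */

	printf("used=%u free=%u\n",
	       ring_used(head, tail, fifo_size),
	       ring_free(head, tail, fifo_size));	/* used=34 free=989 */
	return 0;
}
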
+
+struct smd_channel_info;
+struct smd_channel_info_word;
+
+#define SMD_ALLOC_TBL_COUNT 2
+#define SMD_ALLOC_TBL_SIZE 64
+
+/*
+ * This lists the various smem heap items relevant for the allocation table and
+ * smd channel entries.
+ */
+static const struct {
+ unsigned alloc_tbl_id;
+ unsigned info_base_id;
+ unsigned fifo_base_id;
+} smem_items[SMD_ALLOC_TBL_COUNT] = {
+ {
+ .alloc_tbl_id = 13,
+ .info_base_id = 14,
+ .fifo_base_id = 338
+ },
+ {
+ .alloc_tbl_id = 14,
+ .info_base_id = 266,
+ .fifo_base_id = 202,
+ },
+};
+
+/**
+ * struct qcom_smd_edge - representing a remote processor
+ * @smd: handle to qcom_smd
+ * @of_node: of_node handle for information related to this edge
+ * @edge_id: identifier of this edge
+ * @irq: interrupt for signals on this edge
+ * @ipc_regmap: regmap handle holding the outgoing ipc register
+ * @ipc_offset: offset within @ipc_regmap of the register for ipc
+ * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
+ * @channels: list of all channels detected on this edge
+ * @channels_lock: guard for modifications of @channels
+ * @need_rescan: flag indicating that @work should scan smem for new channels
+ * @smem_available: last available amount of smem triggering a channel scan
+ * @work: work item for edge housekeeping
+ */
+struct qcom_smd_edge {
+ struct qcom_smd *smd;
+ struct device_node *of_node;
+ unsigned edge_id;
+
+ int irq;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct list_head channels;
+ spinlock_t channels_lock;
+
+ bool need_rescan;
+ unsigned smem_available;
+
+ struct work_struct work;
+};
+
+/*
+ * SMD channel states.
+ */
+enum smd_channel_state {
+ SMD_CHANNEL_CLOSED,
+ SMD_CHANNEL_OPENING,
+ SMD_CHANNEL_OPENED,
+ SMD_CHANNEL_FLUSHING,
+ SMD_CHANNEL_CLOSING,
+ SMD_CHANNEL_RESET,
+ SMD_CHANNEL_RESET_OPENING
+};
+
+/**
+ * struct qcom_smd_channel - smd channel struct
+ * @edge: qcom_smd_edge this channel is living on
+ * @qsdev: reference to an associated smd client device
+ * @name: name of the channel
+ * @state: local state of the channel
+ * @remote_state: remote state of the channel
+ * @tx_info: byte aligned outgoing channel info
+ * @rx_info: byte aligned incoming channel info
+ * @tx_info_word: word aligned outgoing channel info
+ * @rx_info_word: word aligned incoming channel info
+ * @tx_lock: lock to make writes to the channel mutually exclusive
+ * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
+ * @tx_fifo: pointer to the outgoing ring buffer
+ * @rx_fifo: pointer to the incoming ring buffer
+ * @fifo_size: size of each ring buffer
+ * @bounce_buffer: bounce buffer for reading wrapped packets
+ * @cb: callback function registered for this channel
+ * @recv_lock: guard for rx info modifications and cb pointer
+ * @pkt_size: size of the currently handled packet
+ * @list: list entry for @channels in qcom_smd_edge
+ */
+struct qcom_smd_channel {
+ struct qcom_smd_edge *edge;
+
+ struct qcom_smd_device *qsdev;
+
+ char *name;
+ enum smd_channel_state state;
+ enum smd_channel_state remote_state;
+
+ struct smd_channel_info *tx_info;
+ struct smd_channel_info *rx_info;
+
+ struct smd_channel_info_word *tx_info_word;
+ struct smd_channel_info_word *rx_info_word;
+
+ struct mutex tx_lock;
+ wait_queue_head_t fblockread_event;
+
+ void *tx_fifo;
+ void *rx_fifo;
+ int fifo_size;
+
+ void *bounce_buffer;
+ int (*cb)(struct qcom_smd_device *, const void *, size_t);
+
+ spinlock_t recv_lock;
+
+ int pkt_size;
+
+ struct list_head list;
+};
+
+/**
+ * struct qcom_smd - smd struct
+ * @dev: device struct
+ * @allocated: bitmap representing already allocated channels
+ * @alloc_tbl: pointer(s) to the allocation table(s)
+ * @alloc_tbl_entries: number of actual allocation entries found
+ * @num_edges: number of entries in @edges
+ * @edges: array of edges to be handled
+ */
+struct qcom_smd {
+ struct device *dev;
+
+ DECLARE_BITMAP(allocated, SMD_ALLOC_TBL_COUNT * SMD_ALLOC_TBL_SIZE);
+ struct qcom_smd_alloc_entry *alloc_tbl[SMD_ALLOC_TBL_COUNT];
+ unsigned alloc_tbl_entries;
+
+ unsigned num_edges;
+ struct qcom_smd_edge edges[0];
+};
+
+/*
+ * Format of the smd_info smem items, for byte aligned channels.
+ */
+struct smd_channel_info {
+ u32 state;
+ u8 fDSR;
+ u8 fCTS;
+ u8 fCD;
+ u8 fRI;
+ u8 fHEAD;
+ u8 fTAIL;
+ u8 fSTATE;
+ u8 fBLOCKREADINTR;
+ u32 tail;
+ u32 head;
+};
+
+/*
+ * Format of the smd_info smem items, for word aligned channels.
+ */
+struct smd_channel_info_word {
+ u32 state;
+ u32 fDSR;
+ u32 fCTS;
+ u32 fCD;
+ u32 fRI;
+ u32 fHEAD;
+ u32 fTAIL;
+ u32 fSTATE;
+ u32 fBLOCKREADINTR;
+ u32 tail;
+ u32 head;
+};
+
+#define GET_RX_CHANNEL_INFO(channel, param) \
+ (channel->rx_info_word ? \
+ channel->rx_info_word->param : \
+ channel->rx_info->param)
+
+#define SET_RX_CHANNEL_INFO(channel, param, value) \
+ (channel->rx_info_word ? \
+ (channel->rx_info_word->param = value) : \
+ (channel->rx_info->param = value))
+
+#define GET_TX_CHANNEL_INFO(channel, param) \
+ (channel->rx_info_word ? \
+ channel->tx_info_word->param : \
+ channel->tx_info->param)
+
+#define SET_TX_CHANNEL_INFO(channel, param, value) \
+ (channel->rx_info_word ? \
+ (channel->tx_info_word->param = value) : \
+ (channel->tx_info->param = value))
+
+/**
+ * struct qcom_smd_alloc_entry - channel allocation entry
+ * @name: channel name
+ * @cid: channel index
+ * @flags: channel flags and edge id
+ * @ref_count: reference count of the channel
+ */
+struct qcom_smd_alloc_entry {
+ u8 name[20];
+ u32 cid;
+ u32 flags;
+ u32 ref_count;
+} __packed;
+
+#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff
+#define SMD_CHANNEL_FLAGS_STREAM BIT(8)
+#define SMD_CHANNEL_FLAGS_PACKET BIT(9)
+
+/*
+ * Each smd packet contains a 20 byte header, with the first 4 bytes holding
+ * the length of the payload.
+ */
+#define SMD_PACKET_HEADER_LEN 20
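
As an illustration of the framing implied by this define and by qcom_smd_send() further down: the header is five 32-bit words, of which only the first, the payload length, is filled in by this driver. The sketch below is editorial (names are hypothetical) and not part of the patch.

/* Editorial sketch of the packet framing used by qcom_smd_send(); only the
 * first header word carries information in this driver. Not part of the patch. */
#include <stdint.h>
#include <string.h>

struct smd_pkt_hdr {
	uint32_t len;		/* payload length in bytes */
	uint32_t unused[4];	/* written as zero by this driver */
};

static size_t smd_frame(void *dst, const void *payload, uint32_t len)
{
	struct smd_pkt_hdr hdr = { .len = len };	/* remaining words zeroed */

	memcpy(dst, &hdr, sizeof(hdr));
	memcpy((char *)dst + sizeof(hdr), payload, len);
	return sizeof(hdr) + len;	/* total bytes pushed into the tx fifo */
}
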
+
+/*
+ * Signal the remote processor associated with 'channel'.
+ */
+static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+
+ regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
+}
+
+/*
+ * Initialize the tx channel info
+ */
+static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
+{
+ SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_TX_CHANNEL_INFO(channel, fDSR, 0);
+ SET_TX_CHANNEL_INFO(channel, fCTS, 0);
+ SET_TX_CHANNEL_INFO(channel, fCD, 0);
+ SET_TX_CHANNEL_INFO(channel, fRI, 0);
+ SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
+ SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+ SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+ SET_TX_CHANNEL_INFO(channel, head, 0);
+ SET_TX_CHANNEL_INFO(channel, tail, 0);
+
+ qcom_smd_signal_channel(channel);
+
+ channel->state = SMD_CHANNEL_CLOSED;
+ channel->pkt_size = 0;
+}
+
+/*
+ * Calculate the amount of data available in the rx fifo
+ */
+static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+
+ head = GET_RX_CHANNEL_INFO(channel, head);
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ return (head - tail) & (channel->fifo_size - 1);
+}
+
+/*
+ * Set tx channel state and inform the remote processor
+ */
+static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
+ int state)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+ bool is_open = state == SMD_CHANNEL_OPENED;
+
+ if (channel->state == state)
+ return;
+
+ dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
+
+ SET_TX_CHANNEL_INFO(channel, fDSR, is_open);
+ SET_TX_CHANNEL_INFO(channel, fCTS, is_open);
+ SET_TX_CHANNEL_INFO(channel, fCD, is_open);
+
+ SET_TX_CHANNEL_INFO(channel, state, state);
+ SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
+
+ channel->state = state;
+ qcom_smd_signal_channel(channel);
+}
+
+/*
+ * Copy count bytes of data from memory to device memory using 32bit accesses
+ */
+static void smd_copy_to_fifo(void __iomem *dst, const void *src, size_t count)
+{
+ while (count) {
+ __raw_writel(*(u32 *)src, dst);
+ dst += sizeof(u32);
+ src += sizeof(u32);
+ count -= sizeof(u32);
+ }
+}
+
+/*
+ * Copy count bytes of data from device memory to memory using 32bit accesses
+ */
+static void smd_copy_from_fifo(void *dst, const void __iomem *src, size_t count)
+{
+ while (count) {
+ *(u32 *)dst = __raw_readl(src);
+ src += sizeof(u32);
+ dst += sizeof(u32);
+ count -= sizeof(u32);
+ }
+}
+
+/*
+ * Read count bytes of data from the rx fifo into buf, but don't advance the
+ * tail.
+ */
+static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
+ void *buf, size_t count)
+{
+ unsigned tail;
+ size_t len;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ len = min_t(size_t, count, channel->fifo_size - tail);
+ if (len)
+ smd_copy_from_fifo(buf, channel->rx_fifo + tail, len);
+
+ if (len != count)
+ smd_copy_from_fifo(buf + len, channel->rx_fifo, count - len);
+
+ return count;
+}
+
+/*
+ * Advance the rx tail by count bytes.
+ */
+static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
+ size_t count)
+{
+ unsigned tail;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+ tail += count;
+ tail &= (channel->fifo_size - 1);
+ SET_RX_CHANNEL_INFO(channel, tail, tail);
+}
+
+/*
+ * Read out a single packet from the rx fifo and deliver it to the device
+ */
+static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_device *qsdev = channel->qsdev;
+ unsigned tail;
+ size_t len;
+ void *ptr;
+ int ret;
+
+ if (!channel->cb)
+ return 0;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ /* Use bounce buffer if the data wraps */
+ if (tail + channel->pkt_size >= channel->fifo_size) {
+ ptr = channel->bounce_buffer;
+ len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
+ } else {
+ ptr = channel->rx_fifo + tail;
+ len = channel->pkt_size;
+ }
+
+ ret = channel->cb(qsdev, ptr, len);
+ if (ret < 0)
+ return ret;
+
+ /* Only forward the tail if the client consumed the data */
+ qcom_smd_channel_advance(channel, len);
+
+ channel->pkt_size = 0;
+
+ return 0;
+}
+
+/*
+ * Per channel interrupt handling
+ */
+static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
+{
+ bool need_state_scan = false;
+ int remote_state;
+ u32 pktlen;
+ int avail;
+ int ret;
+
+ /* Handle state changes */
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != channel->remote_state) {
+ channel->remote_state = remote_state;
+ need_state_scan = true;
+ }
+ /* Indicate that we have seen any state change */
+ SET_RX_CHANNEL_INFO(channel, fSTATE, 0);
+
+ /* Signal waiting qcom_smd_send() about the interrupt */
+ if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR))
+ wake_up_interruptible(&channel->fblockread_event);
+
+ /* Don't consume any data until we've opened the channel */
+ if (channel->state != SMD_CHANNEL_OPENED)
+ goto out;
+
+ /* Indicate that we've seen the new data */
+ SET_RX_CHANNEL_INFO(channel, fHEAD, 0);
+
+ /* Consume data */
+ for (;;) {
+ avail = qcom_smd_channel_get_rx_avail(channel);
+
+ if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
+ qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
+ qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
+ channel->pkt_size = pktlen;
+ } else if (channel->pkt_size && avail >= channel->pkt_size) {
+ ret = qcom_smd_channel_recv_single(channel);
+ if (ret)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ /* Indicate that we have seen and updated tail */
+ SET_RX_CHANNEL_INFO(channel, fTAIL, 1);
+
+ /* Signal the remote that we've consumed the data (if requested) */
+ if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) {
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+ }
+
+out:
+ return need_state_scan;
+}
+
+/*
+ * The edge interrupts are triggered by the remote processor on state changes,
+ * channel info updates or when new channels are created.
+ */
+static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
+{
+ struct qcom_smd_edge *edge = data;
+ struct qcom_smd_channel *channel;
+ unsigned available;
+ bool kick_worker = false;
+
+ /*
+ * Handle state changes or data on each of the channels on this edge
+ */
+ spin_lock(&edge->channels_lock);
+ list_for_each_entry(channel, &edge->channels, list) {
+ spin_lock(&channel->recv_lock);
+ kick_worker |= qcom_smd_channel_intr(channel);
+ spin_unlock(&channel->recv_lock);
+ }
+ spin_unlock(&edge->channels_lock);
+
+ /*
+ * Creating a new channel requires allocating an smem entry, so we only
+ * have to scan if the amount of available space in smem has changed
+ * since the last scan.
+ */
+ available = qcom_smem_get_free_space(edge->edge_id);
+ if (available != edge->smem_available) {
+ edge->smem_available = available;
+ edge->need_rescan = true;
+ kick_worker = true;
+ }
+
+ if (kick_worker)
+ schedule_work(&edge->work);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Delivers any outstanding packets in the rx fifo; can be used after probing
+ * the client to deliver any packets that weren't delivered before the client
+ * was set up.
+ */
+static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ qcom_smd_channel_intr(channel);
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+}
+
+/*
+ * Calculate how much space is available in the tx fifo.
+ */
+static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+ unsigned mask = channel->fifo_size - 1;
+
+ head = GET_TX_CHANNEL_INFO(channel, head);
+ tail = GET_TX_CHANNEL_INFO(channel, tail);
+
+ return mask - ((head - tail) & mask);
+}
+
+/*
+ * Write count bytes of data into channel, possibly wrapping in the ring buffer
+ */
+static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
+ const void *data,
+ size_t count)
+{
+ unsigned head;
+ size_t len;
+
+ head = GET_TX_CHANNEL_INFO(channel, head);
+
+ len = min_t(size_t, count, channel->fifo_size - head);
+ if (len)
+ smd_copy_to_fifo(channel->tx_fifo + head, data, len);
+
+ if (len != count)
+ smd_copy_to_fifo(channel->tx_fifo, data + len, count - len);
+
+ head += count;
+ head &= (channel->fifo_size - 1);
+ SET_TX_CHANNEL_INFO(channel, head, head);
+
+ return count;
+}
+
+/**
+ * qcom_smd_send - write data to smd channel
+ * @channel: channel handle
+ * @data: buffer of data to write
+ * @len: number of bytes to write
+ *
+ * This is a blocking write of len bytes into the channel's tx ring buffer,
+ * followed by a signal to the remote end. It will sleep until there is enough
+ * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
+ * mechanism to avoid polling.
+ */
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
+{
+ u32 hdr[5] = {len,};
+ int tlen = sizeof(hdr) + len;
+ int ret;
+
+ ret = mutex_lock_interruptible(&channel->tx_lock);
+ if (ret)
+ return ret;
+
+ while (qcom_smd_get_tx_avail(channel) < tlen) {
+ if (channel->state != SMD_CHANNEL_OPENED) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+
+ ret = wait_event_interruptible(channel->fblockread_event,
+ qcom_smd_get_tx_avail(channel) >= tlen ||
+ channel->state != SMD_CHANNEL_OPENED);
+ if (ret)
+ goto out;
+
+ SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+ }
+
+ SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+
+ qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
+ qcom_smd_write_fifo(channel, data, len);
+
+ SET_TX_CHANNEL_INFO(channel, fHEAD, 1);
+
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+
+out:
+ mutex_unlock(&channel->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smd_send);
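
A caller of qcom_smd_send() might look like the sketch below; the request layout and helper name are hypothetical, but the blocking behaviour and -EPIPE handling follow from the function above.

/* Editorial sketch of a qcom_smd_send() caller; the request layout and the
 * way the channel handle is obtained are hypothetical. Not part of the patch. */
#include <linux/printk.h>
#include <linux/soc/qcom/smd.h>

struct example_req {
	u32 cmd;
	u32 arg;
};

static int example_send_cmd(struct qcom_smd_channel *channel, u32 cmd, u32 arg)
{
	struct example_req req = { .cmd = cmd, .arg = arg };
	int ret;

	/* Blocks until there is room in the tx fifo or the channel closes */
	ret = qcom_smd_send(channel, &req, sizeof(req));
	if (ret == -EPIPE)
		pr_err("channel closed while sending\n");

	return ret;
}
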
+
+static struct qcom_smd_device *to_smd_device(struct device *dev)
+{
+ return container_of(dev, struct qcom_smd_device, dev);
+}
+
+static struct qcom_smd_driver *to_smd_driver(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+ return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
+}
+
+static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ size_t bb_size;
+ int ret;
+
+ /*
+ * Packets are at most 4k, but reduce the bounce buffer if the fifo is smaller
+ */
+ bb_size = min(channel->fifo_size, 4096);
+ channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
+ if (!channel->bounce_buffer)
+ return -ENOMEM;
+
+ channel->cb = qsdrv->callback;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+
+ ret = qsdrv->probe(qsdev);
+ if (ret)
+ goto err;
+
+ qcom_smd_channel_resume(channel);
+
+ return 0;
+
+err:
+ dev_err(&qsdev->dev, "probe failed\n");
+
+ channel->cb = NULL;
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ return ret;
+}
+
+/*
+ * Remove the smd client.
+ *
+ * The channel is going away, for some reason, so remove the smd client and
+ * reset the channel state.
+ */
+static int qcom_smd_dev_remove(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ unsigned long flags;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
+
+ /*
+ * Make sure we don't race with the code receiving data.
+ */
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ channel->cb = NULL;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+
+ /* Wake up any sleepers in qcom_smd_send() */
+ wake_up_interruptible(&channel->fblockread_event);
+
+ /*
+ * We expect that the client might block in remove() waiting for any
+ * outstanding calls to qcom_smd_send() to wake up and finish.
+ */
+ if (qsdrv->remove)
+ qsdrv->remove(qsdev);
+
+ /*
+ * The client is now gone; clean up and reset the channel state.
+ */
+ channel->qsdev = NULL;
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+
+ qcom_smd_channel_reset(channel);
+
+ return 0;
+}
+
+static struct bus_type qcom_smd_bus = {
+ .name = "qcom_smd",
+ .match = qcom_smd_dev_match,
+ .probe = qcom_smd_dev_probe,
+ .remove = qcom_smd_dev_remove,
+};
+
+/*
+ * Release function for the qcom_smd_device object.
+ */
+static void qcom_smd_release_device(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+ kfree(qsdev);
+}
+
+/*
+ * Finds the device_node for the smd child interested in this channel.
+ */
+static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
+ const char *channel)
+{
+ struct device_node *child;
+ const char *name;
+ const char *key;
+ int ret;
+
+ for_each_available_child_of_node(edge_node, child) {
+ key = "qcom,smd-channels";
+ ret = of_property_read_string(child, key, &name);
+ if (ret)
+ continue;
+
+ if (strcmp(name, channel) == 0)
+ return child;
+ }
+
+ return NULL;
+}
+
+/*
+ * Create an smd client device for a channel that is being opened.
+ */
+static int qcom_smd_create_device(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_device *qsdev;
+ struct qcom_smd_edge *edge = channel->edge;
+ struct device_node *node;
+ struct qcom_smd *smd = edge->smd;
+ int ret;
+
+ if (channel->qsdev)
+ return -EEXIST;
+
+ node = qcom_smd_match_channel(edge->of_node, channel->name);
+ if (!node) {
+ dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
+ return -ENXIO;
+ }
+
+ dev_dbg(smd->dev, "registering '%s'\n", channel->name);
+
+ qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
+ if (!qsdev)
+ return -ENOMEM;
+
+ dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name);
+ qsdev->dev.parent = smd->dev;
+ qsdev->dev.bus = &qcom_smd_bus;
+ qsdev->dev.release = qcom_smd_release_device;
+ qsdev->dev.of_node = node;
+
+ qsdev->channel = channel;
+
+ channel->qsdev = qsdev;
+
+ ret = device_register(&qsdev->dev);
+ if (ret) {
+ dev_err(smd->dev, "device_register failed: %d\n", ret);
+ put_device(&qsdev->dev);
+ }
+
+ return ret;
+}
+
+/*
+ * Destroy an smd client device for a channel that's going away.
+ */
+static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
+{
+ struct device *dev;
+
+ BUG_ON(!channel->qsdev);
+
+ dev = &channel->qsdev->dev;
+
+ device_unregister(dev);
+ of_node_put(dev->of_node);
+ put_device(dev);
+}
+
+/**
+ * qcom_smd_driver_register - register a smd driver
+ * @qsdrv: qcom_smd_driver struct
+ */
+int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
+{
+ qsdrv->driver.bus = &qcom_smd_bus;
+ return driver_register(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_register);
+
+/**
+ * qcom_smd_driver_unregister - unregister a smd driver
+ * @qsdrv: qcom_smd_driver struct
+ */
+void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
+{
+ driver_unregister(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_unregister);
+
+/*
+ * Allocate the qcom_smd_channel object for a newly found smd channel,
+ * retrieving and validating the smem items involved.
+ */
+static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
+ unsigned smem_info_item,
+ unsigned smem_fifo_item,
+ char *name)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd *smd = edge->smd;
+ size_t fifo_size;
+ size_t info_size;
+ void *fifo_base;
+ void *info;
+ int ret;
+
+ channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return ERR_PTR(-ENOMEM);
+
+ channel->edge = edge;
+ channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
+ if (!channel->name)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&channel->tx_lock);
+ spin_lock_init(&channel->recv_lock);
+ init_waitqueue_head(&channel->fblockread_event);
+
+ ret = qcom_smem_get(edge->edge_id, smem_info_item, (void **)&info, &info_size);
+ if (ret)
+ goto free_name_and_channel;
+
+ /*
+ * Use the size of the item to figure out which channel info struct to
+ * use.
+ */
+ if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
+ channel->tx_info_word = info;
+ channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
+ } else if (info_size == 2 * sizeof(struct smd_channel_info)) {
+ channel->tx_info = info;
+ channel->rx_info = info + sizeof(struct smd_channel_info);
+ } else {
+ dev_err(smd->dev,
+ "channel info of size %li not supported\n", info_size);
+ ret = -EINVAL;
+ goto free_name_and_channel;
+ }
+
+ ret = qcom_smem_get(edge->edge_id, smem_fifo_item, &fifo_base, &fifo_size);
+ if (ret)
+ goto free_name_and_channel;
+
+ /* The channel consists of an rx and a tx fifo of equal size */
+ fifo_size /= 2;
+
+ dev_dbg(smd->dev, "new channel '%s' info-size: %li fifo-size: %li\n",
+ name, info_size, fifo_size);
+
+ channel->tx_fifo = fifo_base;
+ channel->rx_fifo = fifo_base + fifo_size;
+ channel->fifo_size = fifo_size;
+
+ qcom_smd_channel_reset(channel);
+
+ return channel;
+
+free_name_and_channel:
+ devm_kfree(smd->dev, channel->name);
+ devm_kfree(smd->dev, channel);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * Scans the allocation table for any newly allocated channels, calls
+ * qcom_smd_create_channel() to create representations of these and add
+ * them to the edge's list of channels.
+ */
+static void qcom_discover_channels(struct qcom_smd_edge *edge)
+{
+ struct qcom_smd_alloc_entry *entry;
+ struct qcom_smd_channel *channel;
+ struct qcom_smd *smd = edge->smd;
+ unsigned long flags;
+ unsigned fifo_id;
+ unsigned info_id;
+ int tbl;
+ int i;
+
+ for (i = 0; i < smd->alloc_tbl_entries; i++) {
+ tbl = i / SMD_ALLOC_TBL_SIZE;
+
+ entry = &smd->alloc_tbl[tbl][i];
+ if (test_bit(i, smd->allocated))
+ continue;
+
+ if (entry->ref_count == 0)
+ continue;
+
+ if (!entry->name[0])
+ continue;
+
+ if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET))
+ continue;
+
+ if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
+ continue;
+
+ info_id = smem_items[tbl].info_base_id + entry->cid;
+ fifo_id = smem_items[tbl].fifo_base_id + entry->cid;
+
+ channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
+ if (IS_ERR(channel))
+ continue;
+
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_add(&channel->list, &edge->channels);
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
+ set_bit(i, smd->allocated);
+ }
+
+ schedule_work(&edge->work);
+}
+
+/*
+ * This per-edge worker scans smem for any new channels and registers them. It
+ * then scans all registered channels for state changes that should be handled
+ * by creating or destroying smd client devices for the registered channels.
+ *
+ * LOCKING: edge->channels_lock does not need to be held during the traversal
+ * of the channels list, as it is done synchronously with the only writer.
+ */
+static void qcom_channel_state_worker(struct work_struct *work)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = container_of(work,
+ struct qcom_smd_edge,
+ work);
+ unsigned remote_state;
+
+ /*
+ * Rescan smem if we have reason to believe that there are new channels.
+ */
+ if (edge->need_rescan) {
+ edge->need_rescan = false;
+ qcom_discover_channels(edge);
+ }
+
+ /*
+ * Register a device for any closed channel where the remote processor
+ * is showing interest in opening the channel.
+ */
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_CLOSED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != SMD_CHANNEL_OPENING &&
+ remote_state != SMD_CHANNEL_OPENED)
+ continue;
+
+ qcom_smd_create_device(channel);
+ }
+
+ /*
+ * Unregister the device for any channel that is opened where the
+ * remote processor is closing the channel.
+ */
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_OPENING &&
+ channel->state != SMD_CHANNEL_OPENED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state == SMD_CHANNEL_OPENING ||
+ remote_state == SMD_CHANNEL_OPENED)
+ continue;
+
+ qcom_smd_destroy_device(channel);
+ }
+}
+
+/*
+ * Parses an of_node describing an edge.
+ */
+static int qcom_smd_parse_edge(struct device *dev,
+ struct device_node *node,
+ struct qcom_smd_edge *edge)
+{
+ struct device_node *syscon_np;
+ const char *key;
+ int irq;
+ int ret;
+
+ INIT_LIST_HEAD(&edge->channels);
+ spin_lock_init(&edge->channels_lock);
+
+ INIT_WORK(&edge->work, qcom_channel_state_worker);
+
+ edge->of_node = of_node_get(node);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ dev_err(dev, "required smd interrupt missing\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq,
+ qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+ return ret;
+ }
+
+ edge->irq = irq;
+
+ key = "qcom,smd-edge";
+ ret = of_property_read_u32(node, key, &edge->edge_id);
+ if (ret) {
+ dev_err(dev, "edge missing %s property\n", key);
+ return -EINVAL;
+ }
+
+ syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ if (!syscon_np) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(edge->ipc_regmap))
+ return PTR_ERR(edge->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+ struct qcom_smd_edge *edge;
+ struct device_node *node;
+ struct qcom_smd *smd;
+ size_t array_size;
+ int num_edges;
+ int ret;
+ int i = 0;
+
+ num_edges = of_get_available_child_count(pdev->dev.of_node);
+ array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
+ smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
+ if (!smd)
+ return -ENOMEM;
+ smd->dev = &pdev->dev;
+
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id,
+ (void **)&smd->alloc_tbl[0], NULL);
+ if (ret < 0) {
+ dev_err(smd->dev, "primary allocation table not available\n");
+ return ret;
+ }
+
+ smd->alloc_tbl_entries = SMD_ALLOC_TBL_SIZE;
+
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[1].alloc_tbl_id,
+ (void **)&smd->alloc_tbl[1], NULL);
+ if (ret >= 0)
+ smd->alloc_tbl_entries += SMD_ALLOC_TBL_SIZE;
+
+ smd->num_edges = num_edges;
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ edge = &smd->edges[i++];
+ edge->smd = smd;
+
+ ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
+ if (ret)
+ continue;
+
+ edge->need_rescan = true;
+ schedule_work(&edge->work);
+ }
+
+ platform_set_drvdata(pdev, smd);
+
+ return 0;
+}
+
+/*
+ * Shut down all smd clients by making sure that each edge stops processing
+ * events and scanning for new channels, then destroy the registered devices.
+ */
+static int qcom_smd_remove(struct platform_device *pdev)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge;
+ struct qcom_smd *smd = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < smd->num_edges; i++) {
+ edge = &smd->edges[i];
+
+ disable_irq(edge->irq);
+ cancel_work_sync(&edge->work);
+
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (!channel->qsdev)
+ continue;
+
+ qcom_smd_destroy_device(channel);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smd_of_match[] = {
+ { .compatible = "qcom,smd" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_of_match);
+
+static struct platform_driver qcom_smd_driver = {
+ .probe = qcom_smd_probe,
+ .remove = qcom_smd_remove,
+ .driver = {
+ .name = "qcom_smd",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smd_of_match,
+ },
+};
+
+static int __init qcom_smd_init(void)
+{
+ int ret;
+
+ ret = bus_register(&qcom_smd_bus);
+ if (ret) {
+ pr_err("failed to register smd bus: %d\n", ret);
+ return ret;
+ }
+
+ return platform_driver_register(&qcom_smd_driver);
+}
+arch_initcall(qcom_smd_init);
+
+static void __exit qcom_smd_exit(void)
+{
+ platform_driver_unregister(&qcom_smd_driver);
+ bus_unregister(&qcom_smd_bus);
+}
+module_exit(qcom_smd_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 000000000000..06b02b5ad183
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,883 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/debugfs.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains metadata for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. They
+ * contain a linked list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets.
+ *
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ *
+ */
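
As a sketch of the client-side pattern this allocator supports, using the qcom_smem_alloc()/qcom_smem_get() API added by this patch; the item number and payload struct below are hypothetical and the block is editorial, not part of the patch.

/* Editorial sketch of a typical smem client: allocate an item once, then
 * resolve its pointer and size. Item number and payload struct are
 * hypothetical. Not part of the patch. */
#include <linux/err.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_SMEM_ITEM	437	/* hypothetical id, above SMEM_ITEM_LAST_FIXED */

struct example_state {
	u32 flags;
	u32 counters[4];
};

static struct example_state *example_get_state(unsigned remote_host)
{
	struct example_state *state;
	size_t size;
	int ret;

	/* Allocate the item if no one has done so yet; -EEXIST is fine */
	ret = qcom_smem_alloc(remote_host, EXAMPLE_SMEM_ITEM, sizeof(*state));
	if (ret < 0 && ret != -EEXIST)
		return ERR_PTR(ret);

	ret = qcom_smem_get(remote_host, EXAMPLE_SMEM_ITEM, (void **)&state, &size);
	if (ret < 0)
		return ERR_PTR(ret);

	if (size < sizeof(*state))
		return ERR_PTR(-EINVAL);

	return state;
}
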
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command: current command to be executed
+ * @status: status of the currently requested command
+ * @params: parameters to the command
+ */
+struct smem_proc_comm {
+ u32 command;
+ u32 status;
+ u32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated: boolean to indicate if this entry is used
+ * @offset: offset to the allocated space
+ * @size: size of the allocated space, 8 byte aligned
+ * @aux_base: base address for the memory region used by this unit, or 0 for
+ * the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+ u32 allocated;
+ u32 offset;
+ u32 size;
+ u32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK 0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm: proc_comm communication interface (legacy)
+ * @version: array of versions for the various subsystems
+ * @initialized: boolean to indicate that smem is initialized
+ * @free_offset: index of the first unallocated byte in smem
+ * @available: number of bytes available for allocation
+ * @reserved: reserved field, must be 0
+ * @toc: array of references to items
+ */
+struct smem_header {
+ struct smem_proc_comm proc_comm[4];
+ u32 version[32];
+ u32 initialized;
+ u32 free_offset;
+ u32 available;
+ u32 reserved;
+ struct smem_global_entry toc[];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset: offset, within the main shared memory region, of the partition
+ * @size: size of the partition
+ * @flags: flags for the partition (currently unused)
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @reserved: reserved entries for later use
+ */
+struct smem_ptable_entry {
+ u32 offset;
+ u32 size;
+ u32 flags;
+ u16 host0;
+ u16 host1;
+ u32 reserved[8];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic: magic number, must be SMEM_PTABLE_MAGIC
+ * @version: version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved: for now reserved entries
+ * @entry: list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+ u32 magic;
+ u32 version;
+ u32 num_entries;
+ u32 reserved[5];
+ struct smem_ptable_entry entry[];
+};
+#define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic: magic number, must be SMEM_PART_MAGIC
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @size: size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ * this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ * partition
+ * @reserved: for now reserved entries
+ */
+struct smem_partition_header {
+ u32 magic;
+ u16 host0;
+ u16 host1;
+ u32 size;
+ u32 offset_free_uncached;
+ u32 offset_free_cached;
+ u32 reserved[3];
+};
+#define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary: magic number, must be SMEM_PRIVATE_CANARY
+ * @item: identifying number of the smem item
+ * @size: size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved: for now reserved entry
+ */
+struct smem_private_entry {
+ u16 canary;
+ u16 item;
+ u32 size; /* includes padding bytes */
+ u16 padding_data;
+ u16 padding_hdr;
+ u32 reserved;
+};
+#define SMEM_PRIVATE_CANARY 0xa5a5
+
+/*
+ * Item 3 of the global heap contains an array of versions for the various
+ * software components in the SoC. We verify that the boot loader version
+ * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ */
+#define SMEM_ITEM_VERSION 3
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_EXPECTED_VERSION 11
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED 8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_LAST 512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS 0
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT 7
+
+/* Item numbers necessary for debugfs */
+#define SMEM_HEAP_INFO 1
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base: identifier of aux_mem base
+ * @virt_base: virtual base address of memory with this aux_mem identifier
+ * @size: size of the memory region
+ */
+struct smem_region {
+ u32 aux_base;
+ void __iomem *virt_base;
+ size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev: device pointer
+ * @hwlock: reference to a hwspinlock
+ * @partitions: list of pointers to partitions affecting the current
+ * processor/host
+ * @num_regions: number of @regions
+ * @regions: list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+ struct device *dev;
+
+ struct hwspinlock *hwlock;
+
+ struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+
+ struct dentry *dent;
+ u32 version;
+
+ unsigned num_regions;
+ struct smem_region regions[0];
+};
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+ unsigned host,
+ unsigned item,
+ size_t size)
+{
+ struct smem_partition_header *phdr;
+ struct smem_private_entry *hdr;
+ size_t alloc_size;
+ void *p;
+
+ /* We're not going to find it if there's no matching partition */
+ if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
+ return -ENOENT;
+
+ phdr = smem->partitions[host];
+
+ p = (void *)phdr + sizeof(*phdr);
+ while (p < (void *)phdr + phdr->offset_free_uncached) {
+ hdr = p;
+
+ if (hdr->canary != SMEM_PRIVATE_CANARY) {
+ dev_err(smem->dev,
+ "Found invalid canary in host %d partition\n",
+ host);
+ return -EINVAL;
+ }
+
+ if (hdr->item == item)
+ return -EEXIST;
+
+ p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+ }
+
+ /* Check that we don't grow into the cached region */
+ alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+ if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
+ dev_err(smem->dev, "Out of memory\n");
+ return -ENOSPC;
+ }
+
+ hdr = p;
+ hdr->canary = SMEM_PRIVATE_CANARY;
+ hdr->item = item;
+ hdr->size = ALIGN(size, 8);
+ hdr->padding_data = hdr->size - size;
+ hdr->padding_hdr = 0;
+
+ /*
+ * Ensure the header is written before we advance the free offset, so
+ * that remote processors that do not take the remote spinlock still
+ * get a consistent view of the linked list.
+ */
+ wmb();
+ phdr->offset_free_uncached += alloc_size;
+
+ return 0;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t size)
+{
+ struct smem_header *header;
+ struct smem_global_entry *entry;
+
+ if (WARN_ON(item >= SMEM_ITEM_LAST))
+ return -EINVAL;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (entry->allocated)
+ return -EEXIST;
+
+ size = ALIGN(size, 8);
+ if (WARN_ON(size > header->available))
+ return -ENOMEM;
+
+ entry->offset = header->free_offset;
+ entry->size = size;
+
+ /*
+ * Ensure the header is consistent before we mark the item allocated,
+ * so that remote processors will get a consistent view of the item
+ * even though they do not take the spinlock on read.
+ */
+ wmb();
+ entry->allocated = 1;
+
+ header->free_offset += size;
+ header->available -= size;
+
+ return 0;
+}
+
+/**
+ * qcom_smem_alloc - allocate space for a smem item
+ * @host: remote processor id, or -1
+ * @item: smem item handle
+ * @size: number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (item < SMEM_ITEM_LAST_FIXED) {
+ dev_err(__smem->dev,
+ "Rejecting allocation of static entry %d\n", item);
+ return -EINVAL;
+ }
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ ret = qcom_smem_alloc_private(__smem, host, item, size);
+ if (ret == -ENOENT)
+ ret = qcom_smem_alloc_global(__smem, item, size);
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
+
+static int qcom_smem_get_global(struct qcom_smem *smem,
+ unsigned item,
+ void **ptr,
+ size_t *size)
+{
+ struct smem_header *header;
+ struct smem_region *area;
+ struct smem_global_entry *entry;
+ u32 aux_base;
+ unsigned i;
+
+ if (WARN_ON(item >= SMEM_ITEM_LAST))
+ return -EINVAL;
+
+ header = smem->regions[0].virt_base;
+
+ entry = &header->toc[item];
+ if (!entry->allocated)
+ return -ENXIO;
+
+ if (ptr != NULL) {
+ aux_base = entry->aux_base & AUX_BASE_MASK;
+
+ for (i = 0; i < smem->num_regions; i++) {
+ area = &smem->regions[i];
+
+ if (area->aux_base == aux_base || !aux_base) {
+ *ptr = area->virt_base + entry->offset;
+ break;
+ }
+ }
+ }
+ if (size != NULL)
+ *size = entry->size;
+
+ return 0;
+}
+
+static int qcom_smem_get_private(struct qcom_smem *smem,
+ unsigned host,
+ unsigned item,
+ void **ptr,
+ size_t *size)
+{
+ struct smem_partition_header *phdr;
+ struct smem_private_entry *hdr;
+ void *p;
+
+ /* We're not going to find it if there's no matching partition */
+ if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
+ return -ENOENT;
+
+ phdr = smem->partitions[host];
+
+ p = (void *)phdr + sizeof(*phdr);
+ while (p < (void *)phdr + phdr->offset_free_uncached) {
+ hdr = p;
+
+ if (hdr->canary != SMEM_PRIVATE_CANARY) {
+ dev_err(smem->dev,
+ "Found invalid canary in host %d partition\n",
+ host);
+ return -EINVAL;
+ }
+
+ if (hdr->item == item) {
+ if (ptr != NULL)
+ *ptr = p + sizeof(*hdr) + hdr->padding_hdr;
+
+ if (size != NULL)
+ *size = hdr->size - hdr->padding_data;
+
+ return 0;
+ }
+
+ p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * qcom_smem_get - resolve pointer and size of a smem item
+ * @host: the remote processor, or -1
+ * @item: smem item handle
+ * @ptr: pointer to be filled out with address of the item
+ * @size: pointer to be filled out with size of the item
+ *
+ * Looks up pointer and size of a smem item.
+ */
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ ret = qcom_smem_get_private(__smem, host, item, ptr, size);
+ if (ret == -ENOENT)
+ ret = qcom_smem_get_global(__smem, item, ptr, size);
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+ return ret;
+
+}
+EXPORT_SYMBOL(qcom_smem_get);
+
+/**
+ * qcom_smem_get_free_space - retrieve amount of free space in a partition
+ * @host: the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+ struct smem_partition_header *phdr;
+ struct smem_header *header;
+ unsigned ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ ret = phdr->offset_free_uncached;
+ } else {
+ header = __smem->regions[0].virt_base;
+ ret = header->available;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+ unsigned *versions;
+ size_t size;
+ int ret;
+
+ ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION,
+ (void **)&versions, &size);
+ if (ret < 0) {
+ dev_err(smem->dev, "Unable to read the version item\n");
+ return -ENOENT;
+ }
+
+ if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
+ dev_err(smem->dev, "Version item is too small\n");
+ return -EINVAL;
+ }
+
+ return versions[SMEM_MASTER_SBL_VERSION_INDEX];
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+ unsigned local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ unsigned remote_host;
+ int i;
+
+ ptable = smem->regions[0].virt_base + smem->regions[0].size - 4 * 1024;
+ if (ptable->magic != SMEM_PTABLE_MAGIC)
+ return 0;
+
+ if (ptable->version != 1) {
+ dev_err(smem->dev,
+ "Unsupported partition header version %d\n",
+ ptable->version);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ptable->num_entries; i++) {
+ entry = &ptable->entry[i];
+
+ if (entry->host0 != local_host && entry->host1 != local_host)
+ continue;
+
+ if (!entry->offset)
+ continue;
+
+ if (!entry->size)
+ continue;
+
+ if (entry->host0 == local_host)
+ remote_host = entry->host1;
+ else
+ remote_host = entry->host0;
+
+ if (smem->partitions[remote_host]) {
+ dev_err(smem->dev,
+ "Already found a partition for host %d\n",
+ remote_host);
+ return -EINVAL;
+ }
+
+ header = smem->regions[0].virt_base + entry->offset;
+
+ if (header->magic != SMEM_PART_MAGIC) {
+ dev_err(smem->dev,
+ "Partition %d has invalid magic\n", i);
+ return -EINVAL;
+ }
+
+ if (header->host0 != local_host && header->host1 != local_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (header->host0 != remote_host && header->host1 != remote_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (header->size != entry->size) {
+ dev_err(smem->dev,
+ "Partition %d has invalid size\n", i);
+ return -EINVAL;
+ }
+
+ if (header->offset_free_uncached > header->size) {
+ dev_err(smem->dev,
+ "Partition %d has invalid free pointer\n", i);
+ return -EINVAL;
+ }
+
+ smem->partitions[remote_host] = header;
+ }
+
+ return 0;
+}
+
+static int qcom_smem_count_mem_regions(struct platform_device *pdev)
+{
+ struct resource *res;
+ int num_regions = 0;
+ int i;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ res = &pdev->resource[i];
+
+ if (resource_type(res) == IORESOURCE_MEM)
+ num_regions++;
+ }
+
+ return num_regions;
+}
+
+static void smem_debug_read_mem(struct seq_file *s)
+{
+ u32 *info;
+ size_t size;
+ int ret, i;
+ unsigned long flags;
+
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HEAP_INFO,
+ (void **)&info, &size);
+
+ if (ret < 0)
+ seq_printf(s, "Can't get global heap information pool\n");
+ else {
+ seq_printf(s, "global heap\n");
+ seq_printf(s, " initialized: %d offset: %08x avail: %08x\n",
+ info[0], info[1], info[2]);
+
+ for (i = 0; i < SMEM_ITEM_LAST; i++) {
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, i,
+ (void **)&info, &size);
+ if (ret < 0)
+ continue;
+
+ seq_printf(s, " [%d]: p: %p s: %li\n", i, info,
+ size);
+ }
+ }
+
+ seq_printf(s, "\nSecure partitions accessible from APPS:\n");
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ struct smem_partition_header *part_hdr = __smem->partitions[i];
+ void *p;
+
+ if (!part_hdr)
+ continue;
+
+ if (part_hdr->magic != SMEM_PART_MAGIC) {
+ seq_printf(s, " part[%d]: incorrect magic\n", i);
+ continue;
+ }
+
+ seq_printf(s, " part[%d]: (%d <-> %d) size: %d off: %08x\n",
+ i, part_hdr->host0, part_hdr->host1, part_hdr->size,
+ part_hdr->offset_free_uncached);
+
+ p = (void *)part_hdr + sizeof(*part_hdr);
+ while (p < (void *)part_hdr + part_hdr->offset_free_uncached) {
+ struct smem_private_entry *entry = p;
+
+ seq_printf(s,
+ " [%d]: %s size: %d pd: %d\n",
+ entry->item,
+ (entry->canary == SMEM_PRIVATE_CANARY) ?
+ "valid" : "invalid",
+ entry->size,
+ entry->padding_data);
+
+ p += sizeof(*entry) + entry->padding_hdr + entry->size;
+ }
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+}
+
+static void smem_debug_read_version(struct seq_file *s)
+{
+ seq_printf(s, "SBL version: %08x\n", __smem->version >> 16);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int smem_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+
+static const struct file_operations smem_debug_ops = {
+ .open = smem_debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+ struct smem_header *header;
+ struct device_node *np;
+ struct qcom_smem *smem;
+ struct resource *res;
+ struct resource r;
+ size_t array_size;
+ int num_regions = 0;
+ int hwlock_id;
+ int ret;
+ int i;
+
+ num_regions = qcom_smem_count_mem_regions(pdev) + 1;
+
+ array_size = num_regions * sizeof(struct smem_region);
+ smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+ if (!smem)
+ return -ENOMEM;
+
+ smem->dev = &pdev->dev;
+ smem->num_regions = num_regions;
+
+ np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "No memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ smem->regions[0].aux_base = (u32)r.start;
+ smem->regions[0].size = resource_size(&r);
+ smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev,
+ r.start,
+ resource_size(&r));
+ if (!smem->regions[0].virt_base)
+ return -ENOMEM;
+
+ for (i = 1; i < num_regions; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);
+
+ smem->regions[i].aux_base = (u32)res->start;
+ smem->regions[i].size = resource_size(res);
+ smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
+ res->start,
+ resource_size(res));
+ if (!smem->regions[i].virt_base)
+ return -ENOMEM;
+ }
+
+ header = smem->regions[0].virt_base;
+ if (header->initialized != 1 || header->reserved) {
+ dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+ return -EINVAL;
+ }
+
+ smem->version = qcom_smem_get_sbl_version(smem);
+ if (smem->version >> 16 != SMEM_EXPECTED_VERSION) {
+ dev_err(&pdev->dev, "Unsupported smem version 0x%x\n", smem->version);
+ return -EINVAL;
+ }
+
+ ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+ if (ret < 0)
+ return ret;
+
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ __smem = smem;
+
+ /* setup debugfs information */
+ __smem->dent = debugfs_create_dir("smem", 0);
+ if (IS_ERR(__smem->dent))
+ dev_info(smem->dev, "unable to create debugfs\n");
+
+ if (!debugfs_create_file("mem", 0444, __smem->dent, smem_debug_read_mem, &smem_debug_ops))
+ dev_err(smem->dev, "couldn't create mem file\n");
+ if (!debugfs_create_file("version", 0444, __smem->dent, smem_debug_read_version,
+ &smem_debug_ops))
+ dev_err(smem->dev, "couldn't create version file\n");
+
+ return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+ hwspin_lock_free(__smem->hwlock);
+ __smem = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+ { .compatible = "qcom,smem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+ .probe = qcom_smem_probe,
+ .remove = qcom_smem_remove,
+ .driver = {
+ .name = "qcom_smem",
+ .of_match_table = qcom_smem_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init qcom_smem_init(void)
+{
+ return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+ platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 3343298e40e8..859d673d98c8 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -26,6 +26,7 @@
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
struct device;
+struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;
@@ -66,6 +67,7 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
+int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
@@ -120,6 +122,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
+static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+ return 0;
+}
+
static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
return 0;
diff --git a/include/linux/mfd/qcom-smd-rpm.h b/include/linux/mfd/qcom-smd-rpm.h
new file mode 100644
index 000000000000..2a53dcaeeeed
--- /dev/null
+++ b/include/linux/mfd/qcom-smd-rpm.h
@@ -0,0 +1,35 @@
+#ifndef __QCOM_SMD_RPM_H__
+#define __QCOM_SMD_RPM_H__
+
+struct qcom_smd_rpm;
+
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+
+/*
+ * Constants used for addressing resources in the RPM.
+ */
+#define QCOM_SMD_RPM_BOOST 0x61747362
+#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
+#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62
+#define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362
+#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
+#define QCOM_SMD_RPM_LDOA 0x616f646c
+#define QCOM_SMD_RPM_LDOB 0x626F646C
+#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
+#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
+#define QCOM_SMD_RPM_NCPA 0x6170636E
+#define QCOM_SMD_RPM_NCPB 0x6270636E
+#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
+#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
+#define QCOM_SMD_RPM_SMPA 0x61706d73
+#define QCOM_SMD_RPM_SMPB 0x62706d73
+#define QCOM_SMD_RPM_SPDM 0x63707362
+#define QCOM_SMD_RPM_VSA 0x00617376
+
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 resource_type, u32 resource_id,
+ void *buf, size_t count);
+
+#endif
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
new file mode 100644
index 000000000000..581ec24da43b
--- /dev/null
+++ b/include/linux/soc/qcom/smd.h
@@ -0,0 +1,47 @@
+#ifndef __LINUX_MSM_SMEM_H__
+#define __LINUX_MSM_SMEM_H__
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct qcom_smd;
+struct qcom_smd_channel;
+struct qcom_smd_lookup;
+
+/**
+ * struct qcom_smd_device - smd device struct
+ * @dev: the device struct
+ * @channel: handle to the smd channel for this device
+ */
+struct qcom_smd_device {
+ struct device dev;
+ struct qcom_smd_channel *channel;
+};
+
+/**
+ * struct qcom_smd_driver - smd driver struct
+ * @driver: underlying device driver
+ * @probe: invoked when the smd channel is found
+ * @remove: invoked when the smd channel is closed
+ * @callback: invoked when an inbound message is received on the channel,
+ * should return 0 on success or -EBUSY if the data cannot be
+ * consumed at this time
+ */
+struct qcom_smd_driver {
+ struct device_driver driver;
+ int (*probe)(struct qcom_smd_device *dev);
+ void (*remove)(struct qcom_smd_device *dev);
+ int (*callback)(struct qcom_smd_device *, const void *, size_t);
+};
+
+int qcom_smd_driver_register(struct qcom_smd_driver *drv);
+void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+
+#define module_qcom_smd_driver(__smd_driver) \
+ module_driver(__smd_driver, qcom_smd_driver_register, \
+ qcom_smd_driver_unregister)
+
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+#endif
+
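
A minimal client of this interface might look like the sketch below; the compatible string, names and behaviour are hypothetical and only illustrate how the struct qcom_smd_driver hooks and module_qcom_smd_driver() fit together. Not part of the patch.

/* Editorial sketch of a minimal smd client driver using the interface above.
 * The compatible string and names are hypothetical. Not part of the patch. */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/soc/qcom/smd.h>

static int example_smd_probe(struct qcom_smd_device *sdev)
{
	dev_info(&sdev->dev, "channel opened\n");
	return 0;
}

static void example_smd_remove(struct qcom_smd_device *sdev)
{
	dev_info(&sdev->dev, "channel closed\n");
}

static int example_smd_callback(struct qcom_smd_device *sdev,
				const void *data, size_t count)
{
	/* Returning -EBUSY would leave the packet in the rx fifo for later */
	dev_dbg(&sdev->dev, "received %zu bytes\n", count);
	return 0;
}

static const struct of_device_id example_smd_of_match[] = {
	{ .compatible = "example,smd-client" },	/* hypothetical */
	{}
};
MODULE_DEVICE_TABLE(of, example_smd_of_match);

static struct qcom_smd_driver example_smd_driver = {
	.probe = example_smd_probe,
	.remove = example_smd_remove,
	.callback = example_smd_callback,
	.driver = {
		.name = "example_smd_client",
		.of_match_table = example_smd_of_match,
	},
};
module_qcom_smd_driver(example_smd_driver);

MODULE_DESCRIPTION("Example SMD client (illustrative sketch)");
MODULE_LICENSE("GPL v2");
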
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
new file mode 100644
index 000000000000..294070de859f
--- /dev/null
+++ b/include/linux/soc/qcom/smem.h
@@ -0,0 +1,14 @@
+#ifndef __QCOM_SMEM_H__
+#define __QCOM_SMEM_H__
+
+struct device_node;
+struct qcom_smem;
+
+#define QCOM_SMEM_HOST_ANY -1
+
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
+
+int qcom_smem_get_free_space(unsigned host);
+
+#endif