author		Linus Torvalds <torvalds@linux-foundation.org>	2021-04-26 12:11:52 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-26 12:11:52 -0700
commit		37f00ab4a003f371f81e0eae76cf372f06dec780 (patch)
tree		c6217483f22a0fac876f12af53f4b8948200f2fd /drivers
parent		2b90506a8186df5f7c81ad1ebd250103d8469e27 (diff)
parent		5ffa828534036348fa90fb3079ccc0972d202c4a (diff)
Merge tag 'arm-drivers-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "Updates for SoC specific drivers include a few subsystems that have
  their own maintainers but send them through the soc tree:

  TEE/OP-TEE:
   - Add tracepoints around calls to secure world

  Memory controller drivers:
   - Minor fixes for Renesas, Exynos, Mediatek and Tegra platforms
   - Add debug statistics to Tegra20 memory controller
   - Update Tegra bindings and convert to dtschema

  ARM SCMI Firmware:
   - Support for modular SCMI protocols and vendor specific extensions
   - New SCMI IIO driver
   - Per-cpu DVFS

  The other driver changes are all from the platform maintainers
  directly and reflect the drivers that don't fit into any other
  subsystem as well as treewide changes for a particular platform.

  SoCFPGA:
   - Various cleanups contributed by Krzysztof Kozlowski

  Mediatek:
   - add MT8183 support to mutex driver
   - MMSYS: use per SoC array to describe the possible routing
   - add MMSYS support for MT8183 and MT8167
   - add support for PMIC wrapper with integrated arbiter
   - add support for MT8192/MT6873

  Tegra:
   - Bug fixes to PMC and clock drivers

  NXP/i.MX:
   - Update SCU power domain driver to keep console domain power on.
   - Add missing ADC1 power domain to SCU power domain driver.
   - Update comments for single global power domain in SCU power
     domain driver.
   - Add i.MX51/i.MX53 unique id support to i.MX SoC driver.

  NXP/FSL SoC driver updates for v5.13:
   - Add ACPI support for RCPM driver
   - Use generic io{read,write} for QE drivers after performance
     optimized for PowerPC
   - Fix QBMAN probe to cleanup HW states correctly for kexec
   - Various cleanup and style fix for QBMAN/QE/GUTS drivers

  OMAP:
   - Preparation to use devicetree for genpd
   - ti-sysc needs iorange check improved when the interconnect target
     module has no control registers listed
   - ti-sysc needs to probe l4_wkup and l4_cfg interconnects first to
     avoid issues with missing resources and unnecessary deferred probe
   - ti-sysc debug option can now detect more devices
   - ti-sysc now warns if an old incomplete devicetree data is found as
     we now rely on it being complete for am3 and 4
   - soc init code needs to check for prcm and prm nodes for omap4/5
     and dra7
   - omap-prm driver needs to enable autoidle retention support for
     omap4
   - omap5 clocks are missing gpmc and ocmc clock registers
   - pci-dra7xx now needs to use builtin_platform_driver instead of
     using builtin_platform_driver_probe for deferred probe to work

  Raspberry Pi:
   - Fix-up all RPi firmware drivers so as for unbind to happen in an
     orderly fashion
   - Support for RPi's PoE hat PWM bus

  Qualcomm:
   - Improved detection for SCM calling conventions
   - Support for OEM specific wifi firmware path
   - Added drivers for SC7280/SM8350: RPMH, LLCC, AOSS QMP"

* tag 'arm-drivers-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (165 commits)
  soc: aspeed: fix a ternary sign expansion bug
  memory: mtk-smi: Add device-link between smi-larb and smi-common
  memory: samsung: exynos5422-dmc: handle clk_set_parent() failure
  memory: renesas-rpc-if: fix possible NULL pointer dereference of resource
  clk: socfpga: fix iomem pointer cast on 64-bit
  soc: aspeed: Adapt to new LPC device tree layout
  pinctrl: aspeed-g5: Adapt to new LPC device tree layout
  ipmi: kcs: aspeed: Adapt to new LPC DTS layout
  ARM: dts: Remove LPC BMC and Host partitions
  dt-bindings: aspeed-lpc: Remove LPC partitioning
  soc: fsl: enable acpi support in RCPM driver
  soc: qcom: mdt_loader: Detect truncated read of segments
  soc: qcom: mdt_loader: Validate that p_filesz < p_memsz
  soc: qcom: pdr: Fix error return code in pdr_register_listener
  firmware: qcom_scm: Fix kernel-doc function names to match
  firmware: qcom_scm: Suppress sysfs bind attributes
  firmware: qcom_scm: Workaround lack of "is available" call on SC7180
  firmware: qcom_scm: Reduce locking section for __get_convention()
  firmware: qcom_scm: Make __qcom_scm_is_call_available() return bool
  Revert "soc: fsl: qe: introduce qe_io{read,write}* wrappers"
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/bus/qcom-ebi2.c | 4
-rw-r--r--	drivers/bus/ti-sysc.c | 6
-rw-r--r--	drivers/char/ipmi/kcs_bmc_aspeed.c | 27
-rw-r--r--	drivers/clk/Kconfig | 1
-rw-r--r--	drivers/clk/Makefile | 4
-rw-r--r--	drivers/clk/bcm/clk-raspberrypi.c | 2
-rw-r--r--	drivers/clk/clk-scmi.c | 28
-rw-r--r--	drivers/clk/socfpga/Kconfig | 19
-rw-r--r--	drivers/clk/socfpga/Makefile | 11
-rw-r--r--	drivers/clk/tegra/clk-pll.c | 12
-rw-r--r--	drivers/clk/tegra/clk-tegra210.c | 53
-rw-r--r--	drivers/cpufreq/scmi-cpufreq.c | 107
-rw-r--r--	drivers/dma/Kconfig | 2
-rw-r--r--	drivers/edac/Kconfig | 2
-rw-r--r--	drivers/edac/altera_edac.c | 17
-rw-r--r--	drivers/firmware/Kconfig | 2
-rw-r--r--	drivers/firmware/arm_scmi/base.c | 142
-rw-r--r--	drivers/firmware/arm_scmi/bus.c | 100
-rw-r--r--	drivers/firmware/arm_scmi/clock.c | 129
-rw-r--r--	drivers/firmware/arm_scmi/common.h | 133
-rw-r--r--	drivers/firmware/arm_scmi/driver.c | 798
-rw-r--r--	drivers/firmware/arm_scmi/notify.c | 328
-rw-r--r--	drivers/firmware/arm_scmi/notify.h | 40
-rw-r--r--	drivers/firmware/arm_scmi/perf.c | 262
-rw-r--r--	drivers/firmware/arm_scmi/power.c | 134
-rw-r--r--	drivers/firmware/arm_scmi/reset.c | 146
-rw-r--r--	drivers/firmware/arm_scmi/scmi_pm_domain.c | 26
-rw-r--r--	drivers/firmware/arm_scmi/sensors.c | 232
-rw-r--r--	drivers/firmware/arm_scmi/system.c | 63
-rw-r--r--	drivers/firmware/arm_scmi/voltage.c | 126
-rw-r--r--	drivers/firmware/imx/scu-pd.c | 41
-rw-r--r--	drivers/firmware/qcom_scm-legacy.c | 4
-rw-r--r--	drivers/firmware/qcom_scm-smc.c | 12
-rw-r--r--	drivers/firmware/qcom_scm.c | 89
-rw-r--r--	drivers/firmware/qcom_scm.h | 7
-rw-r--r--	drivers/firmware/raspberrypi.c | 69
-rw-r--r--	drivers/firmware/xilinx/zynqmp.c | 5
-rw-r--r--	drivers/fpga/Kconfig | 8
-rw-r--r--	drivers/gpio/gpio-raspberrypi-exp.c | 2
-rw-r--r--	drivers/hwmon/scmi-hwmon.c | 24
-rw-r--r--	drivers/i2c/busses/Kconfig | 2
-rw-r--r--	drivers/iio/common/scmi_sensors/scmi_iio.c | 100
-rw-r--r--	drivers/input/touchscreen/raspberrypi-ts.c | 2
-rw-r--r--	drivers/memory/fsl-corenet-cf.c | 4
-rw-r--r--	drivers/memory/mtk-smi.c | 19
-rw-r--r--	drivers/memory/omap-gpmc.c | 7
-rw-r--r--	drivers/memory/pl353-smc.c | 2
-rw-r--r--	drivers/memory/renesas-rpc-if.c | 2
-rw-r--r--	drivers/memory/samsung/exynos5422-dmc.c | 4
-rw-r--r--	drivers/memory/tegra/mc.c | 9
-rw-r--r--	drivers/memory/tegra/mc.h | 4
-rw-r--r--	drivers/memory/tegra/tegra124-emc.c | 16
-rw-r--r--	drivers/memory/tegra/tegra20-emc.c | 20
-rw-r--r--	drivers/memory/tegra/tegra20.c | 332
-rw-r--r--	drivers/memory/tegra/tegra30-emc.c | 18
-rw-r--r--	drivers/mfd/Kconfig | 4
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/Kconfig | 4
-rw-r--r--	drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 17
-rw-r--r--	drivers/pwm/Kconfig | 9
-rw-r--r--	drivers/pwm/Makefile | 1
-rw-r--r--	drivers/pwm/pwm-raspberrypi-poe.c | 206
-rw-r--r--	drivers/regulator/scmi-regulator.c | 42
-rw-r--r--	drivers/reset/Kconfig | 6
-rw-r--r--	drivers/reset/reset-raspberrypi.c | 2
-rw-r--r--	drivers/reset/reset-scmi.c | 33
-rw-r--r--	drivers/soc/aspeed/aspeed-lpc-ctrl.c | 20
-rw-r--r--	drivers/soc/aspeed/aspeed-lpc-snoop.c | 27
-rw-r--r--	drivers/soc/bcm/bcm63xx/bcm-pmb.c | 30
-rw-r--r--	drivers/soc/bcm/raspberrypi-power.c | 2
-rw-r--r--	drivers/soc/fsl/guts.c | 2
-rw-r--r--	drivers/soc/fsl/qbman/bman.c | 1
-rw-r--r--	drivers/soc/fsl/qbman/bman_portal.c | 3
-rw-r--r--	drivers/soc/fsl/qbman/qman_portal.c | 3
-rw-r--r--	drivers/soc/fsl/qe/gpio.c | 20
-rw-r--r--	drivers/soc/fsl/qe/qe.c | 24
-rw-r--r--	drivers/soc/fsl/qe/qe_common.c | 3
-rw-r--r--	drivers/soc/fsl/qe/qe_ic.c | 4
-rw-r--r--	drivers/soc/fsl/qe/qe_io.c | 36
-rw-r--r--	drivers/soc/fsl/qe/ucc_fast.c | 68
-rw-r--r--	drivers/soc/fsl/qe/ucc_slow.c | 42
-rw-r--r--	drivers/soc/fsl/rcpm.c | 24
-rw-r--r--	drivers/soc/imx/soc-imx.c | 12
-rw-r--r--	drivers/soc/mediatek/mt8167-mmsys.h | 35
-rw-r--r--	drivers/soc/mediatek/mt8167-pm-domains.h | 7
-rw-r--r--	drivers/soc/mediatek/mt8173-pm-domains.h | 10
-rw-r--r--	drivers/soc/mediatek/mt8183-mmsys.h | 54
-rw-r--r--	drivers/soc/mediatek/mt8183-pm-domains.h | 15
-rw-r--r--	drivers/soc/mediatek/mt8192-pm-domains.h | 21
-rw-r--r--	drivers/soc/mediatek/mtk-mmsys.c | 314
-rw-r--r--	drivers/soc/mediatek/mtk-mmsys.h | 215
-rw-r--r--	drivers/soc/mediatek/mtk-mutex.c | 52
-rw-r--r--	drivers/soc/mediatek/mtk-pm-domains.c | 11
-rw-r--r--	drivers/soc/mediatek/mtk-pm-domains.h | 2
-rw-r--r--	drivers/soc/mediatek/mtk-pmic-wrap.c | 97
-rw-r--r--	drivers/soc/qcom/llcc-qcom.c | 19
-rw-r--r--	drivers/soc/qcom/mdt_loader.c | 17
-rw-r--r--	drivers/soc/qcom/pdr_interface.c | 2
-rw-r--r--	drivers/soc/qcom/qcom_aoss.c | 1
-rw-r--r--	drivers/soc/qcom/qmi_encdec.c | 8
-rw-r--r--	drivers/soc/qcom/rpmh-rsc.c | 65
-rw-r--r--	drivers/soc/qcom/rpmhpd.c | 56
-rw-r--r--	drivers/soc/qcom/smem.c | 2
-rw-r--r--	drivers/soc/qcom/wcnss_ctrl.c | 15
-rw-r--r--	drivers/soc/renesas/rmobile-sysc.c | 4
-rw-r--r--	drivers/soc/tegra/pmc.c | 259
-rw-r--r--	drivers/soc/tegra/regulators-tegra30.c | 2
-rw-r--r--	drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 2
-rw-r--r--	drivers/tee/optee/Makefile | 3
-rw-r--r--	drivers/tee/optee/call.c | 4
-rw-r--r--	drivers/tee/optee/core.c | 10
-rw-r--r--	drivers/tee/optee/optee_trace.h | 67
-rw-r--r--	drivers/tty/serial/ucc_uart.c | 124
112 files changed, 4274 insertions, 1622 deletions
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
index 03ddcf426887..0b8f53a688b8 100644
--- a/drivers/bus/qcom-ebi2.c
+++ b/drivers/bus/qcom-ebi2.c
@@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
/* Figure out the chipselect */
ret = of_property_read_u32(child, "reg", &csindex);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
if (csindex > 5) {
dev_err(dev,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index da568a310052..5fae60f8c135 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -288,7 +288,7 @@ static int sysc_add_named_clock_from_child(struct sysc *ddata,
* limit for clk_get(). If cl ever needs to be freed, it should be done
* with clkdev_drop().
*/
- cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return -ENOMEM;
@@ -1648,7 +1648,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
case SOC_UNKNOWN:
default:
return 0;
- };
+ }
/* Remap the whole module range to be able to reset dispc outputs */
devm_iounmap(ddata->dev, ddata->module_va);
@@ -2905,7 +2905,7 @@ static int sysc_init_soc(struct sysc *ddata)
break;
default:
break;
- };
+ }
}
match = soc_device_match(sysc_soc_feat_match);
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index a140203c079b..eefe362f65f0 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -27,7 +27,6 @@
#define KCS_CHANNEL_MAX 4
-/* mapped to lpc-bmc@0 IO space */
#define LPC_HICR0 0x000
#define LPC_HICR0_LPC3E BIT(7)
#define LPC_HICR0_LPC2E BIT(6)
@@ -52,15 +51,13 @@
#define LPC_STR1 0x03C
#define LPC_STR2 0x040
#define LPC_STR3 0x044
-
-/* mapped to lpc-host@80 IO space */
-#define LPC_HICRB 0x080
+#define LPC_HICRB 0x100
#define LPC_HICRB_IBFIF4 BIT(1)
#define LPC_HICRB_LPC4E BIT(0)
-#define LPC_LADR4 0x090
-#define LPC_IDR4 0x094
-#define LPC_ODR4 0x098
-#define LPC_STR4 0x09C
+#define LPC_LADR4 0x110
+#define LPC_IDR4 0x114
+#define LPC_ODR4 0x118
+#define LPC_STR4 0x11C
struct aspeed_kcs_bmc {
struct regmap *map;
@@ -348,12 +345,20 @@ static int aspeed_kcs_probe(struct platform_device *pdev)
struct device_node *np;
int rc;
- np = pdev->dev.of_node;
+ np = dev->of_node->parent;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+ dev_err(dev, "unsupported LPC device binding\n");
+ return -ENODEV;
+ }
+
+ np = dev->of_node;
if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc") ||
- of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc"))
+ of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc"))
kcs_bmc = aspeed_kcs_probe_of_v1(pdev);
else if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc-v2") ||
- of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc-v2"))
+ of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc-v2"))
kcs_bmc = aspeed_kcs_probe_of_v2(pdev);
else
return -EINVAL;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index a588d56502d4..1d1891b9cad2 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -394,6 +394,7 @@ source "drivers/clk/renesas/Kconfig"
source "drivers/clk/rockchip/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
+source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/sunxi/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b22ae4f81e0b..9b582b3fca34 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -104,9 +104,7 @@ obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
-obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
-obj-$(CONFIG_ARCH_AGILEX) += socfpga/
-obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
+obj-y += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index f89b9cfc4309..dd3b71eafabf 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -314,7 +314,7 @@ static int raspberrypi_clk_probe(struct platform_device *pdev)
return -ENOENT;
}
- firmware = rpi_firmware_get(firmware_node);
+ firmware = devm_rpi_firmware_get(&pdev->dev, firmware_node);
of_node_put(firmware_node);
if (!firmware)
return -EPROBE_DEFER;
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index c754dfbb73fd..1e357d364ca2 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -2,7 +2,7 @@
/*
* System Control and Power Interface (SCMI) Protocol based clock driver
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#include <linux/clk-provider.h>
@@ -13,11 +13,13 @@
#include <linux/scmi_protocol.h>
#include <asm/div64.h>
+static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
+
struct scmi_clk {
u32 id;
struct clk_hw hw;
const struct scmi_clock_info *info;
- const struct scmi_handle *handle;
+ const struct scmi_protocol_handle *ph;
};
#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
@@ -29,7 +31,7 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
u64 rate;
struct scmi_clk *clk = to_scmi_clk(hw);
- ret = clk->handle->clk_ops->rate_get(clk->handle, clk->id, &rate);
+ ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
if (ret)
return 0;
return rate;
@@ -69,21 +71,21 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct scmi_clk *clk = to_scmi_clk(hw);
- return clk->handle->clk_ops->rate_set(clk->handle, clk->id, rate);
+ return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
}
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
- return clk->handle->clk_ops->enable(clk->handle, clk->id);
+ return scmi_proto_clk_ops->enable(clk->ph, clk->id);
}
static void scmi_clk_disable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
- clk->handle->clk_ops->disable(clk->handle, clk->id);
+ scmi_proto_clk_ops->disable(clk->ph, clk->id);
}
static const struct clk_ops scmi_clk_ops = {
@@ -142,11 +144,17 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
- if (!handle || !handle->clk_ops)
+ if (!handle)
return -ENODEV;
- count = handle->clk_ops->count_get(handle);
+ scmi_proto_clk_ops =
+ handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
+ if (IS_ERR(scmi_proto_clk_ops))
+ return PTR_ERR(scmi_proto_clk_ops);
+
+ count = scmi_proto_clk_ops->count_get(ph);
if (count < 0) {
dev_err(dev, "%pOFn: invalid clock output count\n", np);
return -EINVAL;
@@ -167,14 +175,14 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
if (!sclk)
return -ENOMEM;
- sclk->info = handle->clk_ops->info_get(handle, idx);
+ sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
continue;
}
sclk->id = idx;
- sclk->handle = handle;
+ sclk->ph = ph;
err = scmi_clk_ops_init(dev, sclk);
if (err) {
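The clk-scmi conversion above illustrates the new per-protocol handle model in this series: instead of dereferencing handle->clk_ops, an SCMI driver requests the protocol it needs through devm_protocol_get() and passes the returned scmi_protocol_handle back on every call. A minimal sketch of that pattern follows; the probe function name is hypothetical, while the devm_protocol_get() call and the clock ops are the ones shown in the diff:

	#include <linux/err.h>
	#include <linux/scmi_protocol.h>

	static const struct scmi_clk_proto_ops *clk_ops;
	static struct scmi_protocol_handle *ph;

	/* Hypothetical probe of an SCMI driver binding to the clock protocol. */
	static int example_scmi_probe(struct scmi_device *sdev)
	{
		const struct scmi_handle *handle = sdev->handle;
		int count;

		if (!handle)
			return -ENODEV;

		/* Acquire the clock protocol; it is released when the device unbinds. */
		clk_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
		if (IS_ERR(clk_ops))
			return PTR_ERR(clk_ops);

		/* Every operation now takes the protocol handle, not the scmi_handle. */
		count = clk_ops->count_get(ph);
		return count < 0 ? -EINVAL : 0;
	}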
diff --git a/drivers/clk/socfpga/Kconfig b/drivers/clk/socfpga/Kconfig
new file mode 100644
index 000000000000..0cf16b894efb
--- /dev/null
+++ b/drivers/clk/socfpga/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config CLK_INTEL_SOCFPGA
+ bool "Intel SoCFPGA family clock support" if COMPILE_TEST && !ARCH_INTEL_SOCFPGA
+ default ARCH_INTEL_SOCFPGA
+ help
+ Support for the clock controllers present on Intel SoCFPGA and eASIC
+ devices like Aria, Cyclone, Stratix 10, Agilex and N5X eASIC.
+
+if CLK_INTEL_SOCFPGA
+
+config CLK_INTEL_SOCFPGA32
+ bool "Intel Aria / Cyclone clock controller support" if COMPILE_TEST && (!ARM || !ARCH_INTEL_SOCFPGA)
+ default ARM && ARCH_INTEL_SOCFPGA
+
+config CLK_INTEL_SOCFPGA64
+ bool "Intel Stratix / Agilex / N5X clock controller support" if COMPILE_TEST && (!ARM64 || !ARCH_INTEL_SOCFPGA)
+ default ARM64 && ARCH_INTEL_SOCFPGA
+
+endif # CLK_INTEL_SOCFPGA
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index bf736f8d201a..e8dfce339c91 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_ARCH_SOCFPGA) += clk.o clk-gate.o clk-pll.o clk-periph.o
-obj-$(CONFIG_ARCH_SOCFPGA) += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
-obj-$(CONFIG_ARCH_STRATIX10) += clk-s10.o
-obj-$(CONFIG_ARCH_STRATIX10) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
-obj-$(CONFIG_ARCH_AGILEX) += clk-agilex.o
-obj-$(CONFIG_ARCH_AGILEX) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
+obj-$(CONFIG_CLK_INTEL_SOCFPGA32) += clk.o clk-gate.o clk-pll.o clk-periph.o \
+ clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
+obj-$(CONFIG_CLK_INTEL_SOCFPGA64) += clk-s10.o \
+ clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o \
+ clk-agilex.o
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index c5cc0a2dac6f..0193cebe8c5a 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -2515,18 +2515,6 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
pll_writel(val, PLLE_SS_CTRL, pll);
udelay(1);
- val = pll_readl_misc(pll);
- val &= ~PLLE_MISC_IDDQ_SW_CTRL;
- pll_writel_misc(val, pll);
-
- val = pll_readl(pll->params->aux_reg, pll);
- val |= (PLLE_AUX_USE_LOCKDET | PLLE_AUX_SS_SEQ_INCLUDE);
- val &= ~(PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL);
- pll_writel(val, pll->params->aux_reg, pll);
- udelay(1);
- val |= PLLE_AUX_SEQ_ENABLE;
- pll_writel(val, pll->params->aux_reg, pll);
-
out:
if (pll->lock)
spin_unlock_irqrestore(pll->lock, flags);
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 68cbb98af567..b9099012dc7b 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2014 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2020 NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/io.h>
@@ -403,6 +403,14 @@ static unsigned long tegra210_input_freq[] = {
#define PLLRE_BASE_DEFAULT_MASK 0x1c000000
#define PLLRE_MISC0_WRITE_MASK 0x67ffffff
+/* PLLE */
+#define PLLE_MISC_IDDQ_SW_CTRL (1 << 14)
+#define PLLE_AUX_USE_LOCKDET (1 << 3)
+#define PLLE_AUX_SS_SEQ_INCLUDE (1 << 31)
+#define PLLE_AUX_ENABLE_SWCTL (1 << 4)
+#define PLLE_AUX_SS_SWCTL (1 << 6)
+#define PLLE_AUX_SEQ_ENABLE (1 << 24)
+
/* PLLX */
#define PLLX_USE_DYN_RAMP 1
#define PLLX_BASE_LOCK (1 << 27)
@@ -489,6 +497,49 @@ static unsigned long tegra210_input_freq[] = {
#define PLLU_MISC0_WRITE_MASK 0xbfffffff
#define PLLU_MISC1_WRITE_MASK 0x00000007
+bool tegra210_plle_hw_sequence_is_enabled(void)
+{
+ u32 value;
+
+ value = readl_relaxed(clk_base + PLLE_AUX);
+ if (value & PLLE_AUX_SEQ_ENABLE)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(tegra210_plle_hw_sequence_is_enabled);
+
+int tegra210_plle_hw_sequence_start(void)
+{
+ u32 value;
+
+ if (tegra210_plle_hw_sequence_is_enabled())
+ return 0;
+
+ /* skip if PLLE is not enabled yet */
+ value = readl_relaxed(clk_base + PLLE_MISC0);
+ if (!(value & PLLE_MISC_LOCK))
+ return -EIO;
+
+ value &= ~PLLE_MISC_IDDQ_SW_CTRL;
+ writel_relaxed(value, clk_base + PLLE_MISC0);
+
+ value = readl_relaxed(clk_base + PLLE_AUX);
+ value |= (PLLE_AUX_USE_LOCKDET | PLLE_AUX_SS_SEQ_INCLUDE);
+ value &= ~(PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL);
+ writel_relaxed(value, clk_base + PLLE_AUX);
+
+ fence_udelay(1, clk_base);
+
+ value |= PLLE_AUX_SEQ_ENABLE;
+ writel_relaxed(value, clk_base + PLLE_AUX);
+
+ fence_udelay(1, clk_base);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra210_plle_hw_sequence_start);
+
void tegra210_xusb_pll_hw_control_enable(void)
{
u32 val;
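The Tegra210 change above moves the PLLE hardware power-sequencer enable out of clk_plle_tegra210_enable() and exports it, so a consumer can hand PLLE over to hardware control only after the PLL has locked. A hedged sketch of a caller; where it would be called from is an assumption, only the two exported helpers are taken from the diff:

	/* Hand PLLE over to the hardware power sequencer once it has locked. */
	if (!tegra210_plle_hw_sequence_is_enabled()) {
		int err = tegra210_plle_hw_sequence_start();

		if (err)	/* -EIO means PLLE has not locked yet */
			return err;
	}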
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 5bd03b59887f..c8a4364ad3c2 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -2,7 +2,7 @@
/*
* System Control and Power Interface (SCMI) based CPUFreq Interface driver
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
* Sudeep Holla <sudeep.holla@arm.com>
*/
@@ -25,17 +25,17 @@ struct scmi_data {
struct device *cpu_dev;
};
-static const struct scmi_handle *handle;
+static struct scmi_protocol_handle *ph;
+static const struct scmi_perf_proto_ops *perf_ops;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- const struct scmi_perf_ops *perf_ops = handle->perf_ops;
struct scmi_data *priv = policy->driver_data;
unsigned long rate;
int ret;
- ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
+ ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;
return rate / 1000;
@@ -50,19 +50,17 @@ static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct scmi_data *priv = policy->driver_data;
- const struct scmi_perf_ops *perf_ops = handle->perf_ops;
u64 freq = policy->freq_table[index].frequency;
- return perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
+ return perf_ops->freq_set(ph, priv->domain_id, freq * 1000, false);
}
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct scmi_data *priv = policy->driver_data;
- const struct scmi_perf_ops *perf_ops = handle->perf_ops;
- if (!perf_ops->freq_set(handle, priv->domain_id,
+ if (!perf_ops->freq_set(ph, priv->domain_id,
target_freq * 1000, true))
return target_freq;
@@ -75,7 +73,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
int cpu, domain, tdomain;
struct device *tcpu_dev;
- domain = handle->perf_ops->device_domain_id(cpu_dev);
+ domain = perf_ops->device_domain_id(cpu_dev);
if (domain < 0)
return domain;
@@ -87,7 +85,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (!tcpu_dev)
continue;
- tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
+ tdomain = perf_ops->device_domain_id(tcpu_dev);
if (tdomain == domain)
cpumask_set_cpu(cpu, cpumask);
}
@@ -102,13 +100,13 @@ scmi_get_cpu_power(unsigned long *power, unsigned long *KHz,
unsigned long Hz;
int ret, domain;
- domain = handle->perf_ops->device_domain_id(cpu_dev);
+ domain = perf_ops->device_domain_id(cpu_dev);
if (domain < 0)
return domain;
/* Get the power cost of the performance domain. */
Hz = *KHz * 1000;
- ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
+ ret = perf_ops->est_power_get(ph, domain, &Hz, power);
if (ret)
return ret;
@@ -126,6 +124,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+ cpumask_var_t opp_shared_cpus;
bool power_scale_mw;
cpu_dev = get_cpu_device(policy->cpu);
@@ -134,30 +133,64 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
- if (ret) {
- dev_warn(cpu_dev, "failed to add opps to the device\n");
- return ret;
- }
+ if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+ ret = -ENOMEM;
+ /* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
dev_warn(cpu_dev, "failed to get sharing cpumask\n");
- return ret;
+ goto out_free_cpumask;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
- if (ret) {
- dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
- __func__, ret);
- return ret;
+ /*
+ * Obtain CPUs that share performance levels.
+ * The OPP 'sharing cpus' info may come from DT through an empty opp
+ * table and opp-shared.
+ */
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
+ if (ret || !cpumask_weight(opp_shared_cpus)) {
+ /*
+ * Either opp-table is not set or no opp-shared was found.
+ * Use the CPU mask from SCMI to designate CPUs sharing an OPP
+ * table.
+ */
+ cpumask_copy(opp_shared_cpus, policy->cpus);
}
+ /*
+ * A previous CPU may have marked OPPs as shared for a few CPUs, based on
+ * what OPP core provided. If the current CPU is part of those few, then
+ * there is no need to add OPPs again.
+ */
nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
if (nr_opp <= 0) {
- dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
- ret = -EPROBE_DEFER;
- goto out_free_opp;
+ ret = perf_ops->device_opps_add(ph, cpu_dev);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to add opps to the device\n");
+ goto out_free_cpumask;
+ }
+
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0) {
+ dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
+ __func__, ret);
+
+ ret = -ENODEV;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+
+ goto out_free_opp;
+ }
+
+ power_scale_mw = perf_ops->power_scale_mw_get(ph);
+ em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
+ opp_shared_cpus, power_scale_mw);
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -173,7 +206,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
priv->cpu_dev = cpu_dev;
- priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);
+ priv->domain_id = perf_ops->device_domain_id(cpu_dev);
policy->driver_data = priv;
policy->freq_table = freq_table;
@@ -181,26 +214,27 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
/* SCMI allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
- latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
+ latency = perf_ops->transition_latency_get(ph, cpu_dev);
if (!latency)
latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible =
- handle->perf_ops->fast_switch_possible(handle, cpu_dev);
-
- power_scale_mw = handle->perf_ops->power_scale_mw_get(handle);
- em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
- power_scale_mw);
+ perf_ops->fast_switch_possible(ph, cpu_dev);
+ free_cpumask_var(opp_shared_cpus);
return 0;
out_free_priv:
kfree(priv);
+
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
+out_free_cpumask:
+ free_cpumask_var(opp_shared_cpus);
+
return ret;
}
@@ -233,12 +267,17 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
struct device *dev = &sdev->dev;
+ const struct scmi_handle *handle;
handle = sdev->handle;
- if (!handle || !handle->perf_ops)
+ if (!handle)
return -ENODEV;
+ perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
+ if (IS_ERR(perf_ops))
+ return PTR_ERR(perf_ops);
+
#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
if (of_find_property(dev->of_node, "#clock-cells", NULL))
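The scmi-cpufreq rework above reorders OPP setup so that the OPP table is populated and the energy model registered only once per performance domain, with the shared-CPU mask taken from a DT 'opp-shared' table when present and from the SCMI perf domain otherwise. A condensed sketch of that decision order, using only calls that appear in the diff (error handling and the surrounding cpufreq plumbing are omitted):

	/* 1) Prefer the DT-provided sharing mask, fall back to the SCMI domain mask. */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
	if (ret || !cpumask_weight(opp_shared_cpus))
		cpumask_copy(opp_shared_cpus, policy->cpus);

	/* 2) Only the first CPU of a domain adds OPPs and registers the energy model. */
	if (dev_pm_opp_get_opp_count(cpu_dev) <= 0) {
		perf_ops->device_opps_add(ph, cpu_dev);
		nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
		dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
		em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, opp_shared_cpus,
					    perf_ops->power_scale_mw_get(ph));
	}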
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 0c2827fd8c19..a0836ffc22e0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -100,7 +100,7 @@ config AT_XDMAC
config AXI_DMAC
tristate "Analog Devices AXI-DMAC DMA support"
- depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
+ depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select REGMAP_MMIO
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 27d0c4cdc58d..1e836e320edd 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -396,7 +396,7 @@ config EDAC_THUNDERX
config EDAC_ALTERA
bool "Altera SOCFPGA ECC"
- depends on EDAC=y && (ARCH_SOCFPGA || ARCH_STRATIX10)
+ depends on EDAC=y && ARCH_INTEL_SOCFPGA
help
Support for error detection and correction on the
Altera SOCs. This is the global enable for the
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index e91cf1147a4e..5f7fd79ec82f 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1501,8 +1501,13 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
dci->mod_name = ecc_name;
dci->dev_name = ecc_name;
- /* Update the PortB IRQs - A10 has 4, S10 has 2, Index accordingly */
-#ifdef CONFIG_ARCH_STRATIX10
+ /*
+ * Update the PortB IRQs - A10 has 4, S10 has 2, Index accordingly
+ *
+ * FIXME: Instead of ifdefs with different architectures the driver
+ * should properly use compatibles.
+ */
+#ifdef CONFIG_64BIT
altdev->sb_irq = irq_of_parse_and_map(np, 1);
#else
altdev->sb_irq = irq_of_parse_and_map(np, 2);
@@ -1521,7 +1526,7 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
goto err_release_group_1;
}
-#ifdef CONFIG_ARCH_STRATIX10
+#ifdef CONFIG_64BIT
/* Use IRQ to determine SError origin instead of assigning IRQ */
rc = of_property_read_u32_index(np, "interrupts", 1, &altdev->db_irq);
if (rc) {
@@ -1931,7 +1936,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
goto err_release_group1;
}
-#ifdef CONFIG_ARCH_STRATIX10
+#ifdef CONFIG_64BIT
/* Use IRQ to determine SError origin instead of assigning IRQ */
rc = of_property_read_u32_index(np, "interrupts", 0, &altdev->db_irq);
if (rc) {
@@ -2016,7 +2021,7 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = {
/************** Stratix 10 EDAC Double Bit Error Handler ************/
#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
-#ifdef CONFIG_ARCH_STRATIX10
+#ifdef CONFIG_64BIT
/* panic routine issues reboot on non-zero panic_timeout */
extern int panic_timeout;
@@ -2109,7 +2114,7 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_irq_handler,
edac);
-#ifdef CONFIG_ARCH_STRATIX10
+#ifdef CONFIG_64BIT
{
int dberror, err_addr;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 5dd19dbd67a3..db0ea2d2d75a 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -206,7 +206,7 @@ config FW_CFG_SYSFS_CMDLINE
config INTEL_STRATIX10_SERVICE
tristate "Intel Stratix10 Service Layer"
- depends on (ARCH_STRATIX10 || ARCH_AGILEX) && HAVE_ARM_SMCCC
+ depends on ARCH_INTEL_SOCFPGA && ARM64 && HAVE_ARM_SMCCC
default n
help
Intel Stratix10 service layer runs at privileged exception level,
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 017e5d8bd869..de416f9e7921 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) Base Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -50,30 +51,30 @@ struct scmi_base_error_notify_payld {
* scmi_base_attributes_get() - gets the implementation details
* that are associated with the base protocol.
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
-static int scmi_base_attributes_get(const struct scmi_handle *handle)
+static int scmi_base_attributes_get(const struct scmi_protocol_handle *ph)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_base_attributes *attr_info;
- struct scmi_revision_info *rev = handle->version;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr_info), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
attr_info = t->rx.buf;
rev->num_protocols = attr_info->num_protocols;
rev->num_agents = attr_info->num_agents;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -81,19 +82,20 @@ static int scmi_base_attributes_get(const struct scmi_handle *handle)
/**
* scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
* @sub_vendor: specify true if sub-vendor ID is needed
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
-scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
+scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
{
u8 cmd;
int ret, size;
char *vendor_id;
struct scmi_xfer *t;
- struct scmi_revision_info *rev = handle->version;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
if (sub_vendor) {
cmd = BASE_DISCOVER_SUB_VENDOR;
@@ -105,15 +107,15 @@ scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
size = ARRAY_SIZE(rev->vendor_id);
}
- ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
+ ret = ph->xops->xfer_get_init(ph, cmd, 0, size, &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
memcpy(vendor_id, t->rx.buf, size);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -123,30 +125,30 @@ scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
* implementation 32-bit version. The format of the version number is
* vendor-specific
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
-scmi_base_implementation_version_get(const struct scmi_handle *handle)
+scmi_base_implementation_version_get(const struct scmi_protocol_handle *ph)
{
int ret;
__le32 *impl_ver;
struct scmi_xfer *t;
- struct scmi_revision_info *rev = handle->version;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
- ret = scmi_xfer_get_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
- SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_IMPLEMENT_VERSION,
+ 0, sizeof(*impl_ver), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
impl_ver = t->rx.buf;
rev->impl_ver = le32_to_cpu(*impl_ver);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -155,23 +157,24 @@ scmi_base_implementation_version_get(const struct scmi_handle *handle)
* scmi_base_implementation_list_get() - gets the list of protocols it is
* OSPM is allowed to access
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
* @protocols_imp: pointer to hold the list of protocol identifiers
*
* Return: 0 on success, else appropriate SCMI error.
*/
-static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
- u8 *protocols_imp)
+static int
+scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
+ u8 *protocols_imp)
{
u8 *list;
int ret, loop;
struct scmi_xfer *t;
__le32 *num_skip, *num_ret;
u32 tot_num_ret = 0, loop_num_ret;
- struct device *dev = handle->dev;
+ struct device *dev = ph->dev;
- ret = scmi_xfer_get_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
- SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
+ sizeof(*num_skip), 0, &t);
if (ret)
return ret;
@@ -183,7 +186,7 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
/* Set the number of protocols to be skipped/already read */
*num_skip = cpu_to_le32(tot_num_ret);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
@@ -198,10 +201,10 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
tot_num_ret += loop_num_ret;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
} while (loop_num_ret);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -209,7 +212,7 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
/**
* scmi_base_discover_agent_get() - discover the name of an agent
*
- * @handle: SCMI entity handle
+ * @ph: SCMI protocol handle
* @id: Agent identifier
* @name: Agent identifier ASCII string
*
@@ -218,63 +221,63 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
*
* Return: 0 on success, else appropriate SCMI error.
*/
-static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
+static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
int id, char *name)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, BASE_DISCOVER_AGENT,
- SCMI_PROTOCOL_BASE, sizeof(__le32),
- SCMI_MAX_STR_SIZE, &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
+ sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
if (ret)
return ret;
put_unaligned_le32(id, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_base_error_notify(const struct scmi_handle *handle, bool enable)
+static int scmi_base_error_notify(const struct scmi_protocol_handle *ph,
+ bool enable)
{
int ret;
u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_base_error_notify *cfg;
- ret = scmi_xfer_get_init(handle, BASE_NOTIFY_ERRORS,
- SCMI_PROTOCOL_BASE, sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, BASE_NOTIFY_ERRORS,
+ sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->event_control = cpu_to_le32(evt_cntl);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_base_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_base_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_base_error_notify(handle, enable);
+ ret = scmi_base_error_notify(ph, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
return ret;
}
-static void *scmi_base_fill_custom_report(const struct scmi_handle *handle,
+static void *scmi_base_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
@@ -318,17 +321,24 @@ static const struct scmi_event_ops base_event_ops = {
.fill_custom_report = scmi_base_fill_custom_report,
};
-int scmi_base_protocol_init(struct scmi_handle *h)
+static const struct scmi_protocol_events base_protocol_events = {
+ .queue_sz = 4 * SCMI_PROTO_QUEUE_SZ,
+ .ops = &base_event_ops,
+ .evts = base_events,
+ .num_events = ARRAY_SIZE(base_events),
+ .num_sources = SCMI_BASE_NUM_SOURCES,
+};
+
+static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
{
int id, ret;
u8 *prot_imp;
u32 version;
char name[SCMI_MAX_STR_SIZE];
- const struct scmi_handle *handle = h;
- struct device *dev = handle->dev;
- struct scmi_revision_info *rev = handle->version;
+ struct device *dev = ph->dev;
+ struct scmi_revision_info *rev = scmi_revision_area_get(ph);
- ret = scmi_version_get(handle, SCMI_PROTOCOL_BASE, &version);
+ ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
@@ -338,13 +348,15 @@ int scmi_base_protocol_init(struct scmi_handle *h)
rev->major_ver = PROTOCOL_REV_MAJOR(version),
rev->minor_ver = PROTOCOL_REV_MINOR(version);
+ ph->set_priv(ph, rev);
+
+ scmi_base_attributes_get(ph);
+ scmi_base_vendor_id_get(ph, false);
+ scmi_base_vendor_id_get(ph, true);
+ scmi_base_implementation_version_get(ph);
+ scmi_base_implementation_list_get(ph, prot_imp);
- scmi_base_attributes_get(handle);
- scmi_base_vendor_id_get(handle, false);
- scmi_base_vendor_id_get(handle, true);
- scmi_base_implementation_version_get(handle);
- scmi_base_implementation_list_get(handle, prot_imp);
- scmi_setup_protocol_implemented(handle, prot_imp);
+ scmi_setup_protocol_implemented(ph, prot_imp);
dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
rev->major_ver, rev->minor_ver, rev->vendor_id,
@@ -352,16 +364,20 @@ int scmi_base_protocol_init(struct scmi_handle *h)
dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
rev->num_agents);
- scmi_register_protocol_events(handle, SCMI_PROTOCOL_BASE,
- (4 * SCMI_PROTO_QUEUE_SZ),
- &base_event_ops, base_events,
- ARRAY_SIZE(base_events),
- SCMI_BASE_NUM_SOURCES);
-
for (id = 0; id < rev->num_agents; id++) {
- scmi_base_discover_agent_get(handle, id, name);
+ scmi_base_discover_agent_get(ph, id, name);
dev_dbg(dev, "Agent %d: %s\n", id, name);
}
return 0;
}
+
+static const struct scmi_protocol scmi_base = {
+ .id = SCMI_PROTOCOL_BASE,
+ .owner = NULL,
+ .instance_init = &scmi_base_protocol_init,
+ .ops = NULL,
+ .events = &base_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(base, scmi_base)
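With this series each SCMI protocol is described by a struct scmi_protocol and hooked into the core through DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(), which is what lets the standard protocols stay built-in while vendor extensions can be loadable modules. A hedged sketch of a hypothetical vendor protocol using only the fields visible in this diff; the protocol id 0x80 and all names are assumptions:

	/* Hypothetical vendor protocol; initialization queries the protocol version. */
	static int scmi_example_protocol_init(const struct scmi_protocol_handle *ph)
	{
		u32 version;

		/* The core xfer ops are already bound to this protocol instance. */
		return ph->xops->version_get(ph, &version);
	}

	static const struct scmi_protocol scmi_example = {
		.id = 0x80,			/* vendor-specific protocol number (assumption) */
		.owner = THIS_MODULE,
		.instance_init = &scmi_example_protocol_init,
		.ops = NULL,			/* no user-facing operations in this sketch */
	};

	DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(example, scmi_example)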
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 1377ec76a45d..784cf0027da3 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Message Protocol bus layer
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -51,18 +51,53 @@ static int scmi_dev_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
+static int scmi_match_by_id_table(struct device *dev, void *data)
{
- scmi_prot_init_fn_t fn = idr_find(&scmi_protocols, protocol_id);
+ struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device_id *id_table = data;
- if (unlikely(!fn))
- return -EINVAL;
- return fn(handle);
+ return sdev->protocol_id == id_table->protocol_id &&
+ !strcmp(sdev->name, id_table->name);
}
-static int scmi_protocol_dummy_init(struct scmi_handle *handle)
+struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name)
{
- return 0;
+ struct scmi_device_id id_table;
+ struct device *dev;
+
+ id_table.protocol_id = prot_id;
+ id_table.name = name;
+
+ dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
+ if (!dev)
+ return NULL;
+
+ return to_scmi_dev(dev);
+}
+
+const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto || !try_module_get(proto->owner)) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+ return proto;
+}
+
+void scmi_protocol_put(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (proto)
+ module_put(proto->owner);
}
static int scmi_dev_probe(struct device *dev)
@@ -70,7 +105,6 @@ static int scmi_dev_probe(struct device *dev)
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id;
- int ret;
id = scmi_dev_match_id(scmi_dev, scmi_drv);
if (!id)
@@ -79,14 +113,6 @@ static int scmi_dev_probe(struct device *dev)
if (!scmi_dev->handle)
return -EPROBE_DEFER;
- ret = scmi_protocol_init(scmi_dev->protocol_id, scmi_dev->handle);
- if (ret)
- return ret;
-
- /* Skip protocol initialisation for additional devices */
- idr_replace(&scmi_protocols, &scmi_protocol_dummy_init,
- scmi_dev->protocol_id);
-
return scmi_drv->probe(scmi_dev);
}
@@ -113,6 +139,10 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
{
int retval;
+ retval = scmi_protocol_device_request(driver->id_table);
+ if (retval)
+ return retval;
+
driver->driver.bus = &scmi_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
@@ -129,6 +159,7 @@ EXPORT_SYMBOL_GPL(scmi_driver_register);
void scmi_driver_unregister(struct scmi_driver *driver)
{
driver_unregister(&driver->driver);
+ scmi_protocol_device_unrequest(driver->id_table);
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
@@ -194,26 +225,45 @@ void scmi_set_handle(struct scmi_device *scmi_dev)
scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
}
-int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn)
+int scmi_protocol_register(const struct scmi_protocol *proto)
{
int ret;
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
spin_lock(&protocol_lock);
- ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1,
- GFP_ATOMIC);
+ ret = idr_alloc(&scmi_protocols, (void *)proto,
+ proto->id, proto->id + 1, GFP_ATOMIC);
spin_unlock(&protocol_lock);
- if (ret != protocol_id)
- pr_err("unable to allocate SCMI idr slot, err %d\n", ret);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
- return ret;
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);
-void scmi_protocol_unregister(int protocol_id)
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
spin_lock(&protocol_lock);
- idr_remove(&scmi_protocols, protocol_id);
+ idr_remove(&scmi_protocols, proto->id);
spin_unlock(&protocol_lock);
+
+ pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+
+ return;
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 4645677d86f1..35b56c8ba0c0 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -2,9 +2,10 @@
/*
* System Control and Management Interface (SCMI) Clock Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
+#include <linux/module.h>
#include <linux/sort.h>
#include "common.h"
@@ -74,52 +75,53 @@ struct clock_info {
struct scmi_clock_info *clk;
};
-static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
- struct clock_info *ci)
+static int
+scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct clock_info *ci)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_protocol_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
ci->num_clocks = le16_to_cpu(attr->num_clocks);
ci->max_async_req = attr->max_async_req;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_clock_attributes_get(const struct scmi_handle *handle,
+static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
u32 clk_id, struct scmi_clock_info *clk)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
- ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
- sizeof(clk_id), sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
+ sizeof(clk_id), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(clk_id, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
else
clk->name[0] = '\0';
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -136,7 +138,7 @@ static int rate_cmp_func(const void *_r1, const void *_r2)
}
static int
-scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
struct scmi_clock_info *clk)
{
u64 *rate = NULL;
@@ -148,8 +150,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
struct scmi_msg_clock_describe_rates *clk_desc;
struct scmi_msg_resp_clock_describe_rates *rlist;
- ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
- SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
+ sizeof(*clk_desc), 0, &t);
if (ret)
return ret;
@@ -161,7 +163,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
/* Set the number of rates to be skipped/already read */
clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
goto err;
@@ -171,7 +173,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
num_returned = NUM_RETURNED(rates_flag);
if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
- dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
+ dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
break;
}
@@ -179,7 +181,7 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
- dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
+ dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
clk->range.min_rate, clk->range.max_rate,
clk->range.step_size);
break;
@@ -188,12 +190,12 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
rate = &clk->list.rates[tot_rate_cnt];
for (cnt = 0; cnt < num_returned; cnt++, rate++) {
*rate = RATE_TO_U64(rlist->rate[cnt]);
- dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
+ dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
}
tot_rate_cnt += num_returned;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -208,42 +210,42 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
clk->rate_discrete = rate_discrete;
err:
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
+scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
- sizeof(__le32), sizeof(u64), &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
+ sizeof(__le32), sizeof(u64), &t);
if (ret)
return ret;
put_unaligned_le32(clk_id, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
- u64 rate)
+static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 rate)
{
int ret;
u32 flags = 0;
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
- struct clock_info *ci = handle->clk_priv;
+ struct clock_info *ci = ph->get_priv(ph);
- ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
- sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -258,26 +260,27 @@ static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
cfg->value_high = cpu_to_le32(rate >> 32);
if (flags & CLOCK_SET_ASYNC)
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
else
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ci->max_async_req)
atomic_dec(&ci->cur_async_req);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
+scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u32 config)
{
int ret;
struct scmi_xfer *t;
struct scmi_clock_set_config *cfg;
- ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
- sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
+ sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -285,33 +288,33 @@ scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
+static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
}
-static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
+static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- return scmi_clock_config_set(handle, clk_id, 0);
+ return scmi_clock_config_set(ph, clk_id, 0);
}
-static int scmi_clock_count_get(const struct scmi_handle *handle)
+static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
- struct clock_info *ci = handle->clk_priv;
+ struct clock_info *ci = ph->get_priv(ph);
return ci->num_clocks;
}
static const struct scmi_clock_info *
-scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
+scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
- struct clock_info *ci = handle->clk_priv;
+ struct clock_info *ci = ph->get_priv(ph);
struct scmi_clock_info *clk = ci->clk + clk_id;
if (!clk->name[0])
@@ -320,7 +323,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
return clk;
}
-static const struct scmi_clk_ops clk_ops = {
+static const struct scmi_clk_proto_ops clk_proto_ops = {
.count_get = scmi_clock_count_get,
.info_get = scmi_clock_info_get,
.rate_get = scmi_clock_rate_get,
@@ -329,24 +332,24 @@ static const struct scmi_clk_ops clk_ops = {
.disable = scmi_clock_disable,
};
-static int scmi_clock_protocol_init(struct scmi_handle *handle)
+static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
int clkid, ret;
struct clock_info *cinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Clock Version %d.%d\n",
+ dev_dbg(ph->dev, "Clock Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
+ cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
- scmi_clock_protocol_attributes_get(handle, cinfo);
+ scmi_clock_protocol_attributes_get(ph, cinfo);
- cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
+ cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
sizeof(*cinfo->clk), GFP_KERNEL);
if (!cinfo->clk)
return -ENOMEM;
@@ -354,16 +357,20 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle)
for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
struct scmi_clock_info *clk = cinfo->clk + clkid;
- ret = scmi_clock_attributes_get(handle, clkid, clk);
+ ret = scmi_clock_attributes_get(ph, clkid, clk);
if (!ret)
- scmi_clock_describe_rates_get(handle, clkid, clk);
+ scmi_clock_describe_rates_get(ph, clkid, clk);
}
cinfo->version = version;
- handle->clk_ops = &clk_ops;
- handle->clk_priv = cinfo;
-
- return 0;
+ return ph->set_priv(ph, cinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)
+static const struct scmi_protocol scmi_clock = {
+ .id = SCMI_PROTOCOL_CLOCK,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_clock_protocol_init,
+ .ops = &clk_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c0fb45e7c3e8..228bf4a71d23 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -4,7 +4,7 @@
* driver common header file containing some definitions, structures
* and function prototypes used in all the different SCMI protocols.
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H
@@ -14,11 +14,14 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#include <asm/unaligned.h>
+#include "notify.h"
+
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
@@ -141,22 +144,92 @@ struct scmi_xfer {
struct completion *async_done;
};
-void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
-int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
-int scmi_do_xfer_with_response(const struct scmi_handle *h,
- struct scmi_xfer *xfer);
-int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
- size_t tx_size, size_t rx_size, struct scmi_xfer **p);
-void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
- struct scmi_xfer *xfer);
+struct scmi_xfer_ops;
+
+/**
+ * struct scmi_protocol_handle - Reference to an initialized protocol instance
+ *
+ * @dev: A reference to the associated SCMI instance device (handle->dev).
+ * @xops: A reference to a struct holding refs to the core xfer operations that
+ * can be used by the protocol implementation to generate SCMI messages.
+ * @set_priv: A method to set protocol private data for this instance.
+ * @get_priv: A method to get protocol private data previously set.
+ *
+ * This structure represents a protocol initialized against a specific SCMI
+ * instance and it will be used as follows:
+ * - as a parameter fed from the core to the protocol initialization code so
+ * that it can access the core xfer operations to build and generate SCMI
+ * messages exclusively for the specific underlying protocol instance.
+ * - as an opaque handle fed by an SCMI driver user when it tries to access
+ * this protocol through its own protocol operations.
+ * In this case this handle will be returned as an opaque object together
+ * with the related protocol operations when the SCMI driver tries to access
+ * the protocol.
+ */
+struct scmi_protocol_handle {
+ struct device *dev;
+ const struct scmi_xfer_ops *xops;
+ int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+ void *(*get_priv)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_xfer_ops - References to the core SCMI xfer operations.
+ * @version_get: Get the version of this protocol.
+ * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
+ * @reset_rx_to_maxsz: Reset rx size to max transport size.
+ * @do_xfer: Do the SCMI transfer.
+ * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
+ * @xfer_put: Free the xfer slot.
+ *
+ * Note that all these operations expect a protocol handle as first parameter;
+ * they then internally use it to infer the underlying protocol number: this
+ * way it is not possible for a protocol implementation to forge messages for
+ * another protocol.
+ */
+struct scmi_xfer_ops {
+ int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
+ int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
+ size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p);
+ void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ void (*xfer_put)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+};
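/*
 * Illustrative sketch (not part of this diff) of the command pattern a
 * protocol implementation builds on top of these ops; it mirrors
 * scmi_clock_config_set() above. EXAMPLE_CMD_ID and the one-word payload
 * are hypothetical placeholders.
 */
static int scmi_example_cmd_set(const struct scmi_protocol_handle *ph,
				u32 resource_id)
{
	int ret;
	struct scmi_xfer *t;
	__le32 *msg;

	/* Reserve a free xfer slot sized for the outgoing payload */
	ret = ph->xops->xfer_get_init(ph, EXAMPLE_CMD_ID, sizeof(*msg), 0, &t);
	if (ret)
		return ret;

	msg = t->tx.buf;
	*msg = cpu_to_le32(resource_id);

	/* Send the message and wait for the synchronous response */
	ret = ph->xops->do_xfer(ph, t);

	/* Always release the slot, whatever the outcome */
	ph->xops->xfer_put(ph, t);
	return ret;
}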
+
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);
-int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version);
-void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-int scmi_base_protocol_init(struct scmi_handle *h);
+typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
+
+/**
+ * struct scmi_protocol - Protocol descriptor
+ * @id: Protocol ID.
+ * @owner: Module reference if any.
+ * @instance_init: Mandatory protocol initialization function.
+ * @instance_deinit: Optional protocol de-initialization function.
+ * @ops: Optional reference to the operations provided by the protocol and
+ * exposed in scmi_protocol.h.
+ * @events: An optional reference to the events supported by this protocol.
+ */
+struct scmi_protocol {
+ const u8 id;
+ struct module *owner;
+ const scmi_prot_init_ph_fn_t instance_init;
+ const scmi_prot_init_ph_fn_t instance_deinit;
+ const void *ops;
+ const struct scmi_protocol_events *events;
+};
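/*
 * Illustrative sketch (not part of this diff): a protocol advertises itself
 * to the core by filling one of these descriptors and feeding it to the
 * DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER() macro below, exactly as the
 * clock protocol does in clock.c. All "example" identifiers are hypothetical.
 */
static const struct scmi_protocol scmi_example = {
	.id = SCMI_PROTOCOL_EXAMPLE,		/* hypothetical protocol ID */
	.owner = THIS_MODULE,
	.instance_init = &scmi_example_protocol_init,
	.instance_deinit = &scmi_example_protocol_deinit,	/* optional */
	.ops = &example_proto_ops,
	.events = &example_protocol_events,	/* optional */
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(example, scmi_example)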
int __init scmi_bus_init(void);
void __exit scmi_bus_exit(void);
@@ -164,6 +237,7 @@ void __exit scmi_bus_exit(void);
#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
int __init scmi_##func##_register(void); \
void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(base);
DECLARE_SCMI_REGISTER_UNREGISTER(clock);
DECLARE_SCMI_REGISTER_UNREGISTER(perf);
DECLARE_SCMI_REGISTER_UNREGISTER(power);
@@ -172,17 +246,25 @@ DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
-#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(id, name) \
-int __init scmi_##name##_register(void) \
-{ \
- return scmi_protocol_register((id), &scmi_##name##_protocol_init); \
-} \
-\
-void __exit scmi_##name##_unregister(void) \
-{ \
- scmi_protocol_unregister((id)); \
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
+static const struct scmi_protocol *__this_proto = &(proto); \
+ \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register(__this_proto); \
+} \
+ \
+void __exit scmi_##name##_unregister(void) \
+{ \
+ scmi_protocol_unregister(__this_proto); \
}
+const struct scmi_protocol *scmi_protocol_get(int protocol_id);
+void scmi_protocol_put(int protocol_id);
+
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
+
/* SCMI Transport */
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
@@ -227,6 +309,11 @@ struct scmi_transport_ops {
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
+int scmi_protocol_device_request(const struct scmi_device_id *id_table);
+void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
+struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name);
+
/**
* struct scmi_desc - Description of SoC integration
*
@@ -265,4 +352,8 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+void scmi_notification_instance_data_set(const struct scmi_handle *handle,
+ void *priv);
+void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
+
#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 3e748e57deab..66eb3f0e5daf 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,18 +11,22 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#include <linux/bitmap.h>
+#include <linux/device.h>
#include <linux/export.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
#include "common.h"
@@ -53,6 +57,14 @@ static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
+static DEFINE_IDR(scmi_requested_devices);
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+ const struct scmi_device_id *id_table;
+ struct list_head node;
+};
+
/**
* struct scmi_xfers_info - Structure to manage transfer information
*
@@ -69,6 +81,30 @@ struct scmi_xfers_info {
};
/**
+ * struct scmi_protocol_instance - Describe an initialized protocol instance.
+ * @handle: Reference to the SCMI handle associated to this protocol instance.
+ * @proto: A reference to the protocol descriptor.
+ * @gid: A reference for per-protocol devres management.
+ * @users: A refcount to track effective users of this protocol.
+ * @priv: Reference for optional protocol private data.
+ * @ph: An embedded protocol handle that will be passed down to protocol
+ * initialization code to identify this instance.
+ *
+ * Each protocol is initialized independently once for each SCMI platform in
+ * which it is defined by DT and implemented by the SCMI server fw.
+ */
+struct scmi_protocol_instance {
+ const struct scmi_handle *handle;
+ const struct scmi_protocol *proto;
+ void *gid;
+ refcount_t users;
+ void *priv;
+ struct scmi_protocol_handle ph;
+};
+
+#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
+
+/**
* struct scmi_info - Structure representing a SCMI instance
*
* @dev: Device pointer
@@ -80,8 +116,15 @@ struct scmi_xfers_info {
* @rx_minfo: Universal Receive Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
+ * @protocols: IDR for protocols' instance descriptors initialized for
+ * this SCMI instance: populated on protocol's first attempted
+ * usage.
+ * @protocols_mtx: A mutex to protect protocols instances initialization.
* @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * @active_protocols: IDR storing device_nodes for protocols actually defined
+ * in the DT and confirmed as implemented by fw.
+ * @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
*/
@@ -94,7 +137,12 @@ struct scmi_info {
struct scmi_xfers_info rx_minfo;
struct idr tx_idr;
struct idr rx_idr;
+ struct idr protocols;
+ /* Ensure mutually exclusive access to the protocols instance array */
+ struct mutex protocols_mtx;
u8 *protocols_imp;
+ struct idr active_protocols;
+ void *notify_priv;
struct list_head node;
int users;
};
@@ -136,6 +184,25 @@ static inline void scmi_dump_header_dbg(struct device *dev,
hdr->id, hdr->seq, hdr->protocol_id);
}
+void scmi_notification_instance_data_set(const struct scmi_handle *handle,
+ void *priv)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ info->notify_priv = priv;
+ /* Ensure updated protocol private data are visible */
+ smp_wmb();
+}
+
+void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ /* Ensure protocols_private_data has been updated */
+ smp_rmb();
+ return info->notify_priv;
+}
+
/**
* scmi_xfer_get() - Allocate one message
*
@@ -316,14 +383,16 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
}
/**
- * scmi_xfer_put() - Release a transmit message
+ * xfer_put() - Release a transmit message
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @xfer: message that was reserved by scmi_xfer_get
*/
-void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static void xfer_put(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
__scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -340,23 +409,32 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
}
/**
- * scmi_do_xfer() - Do one transfer
+ * do_xfer() - Do one transfer
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
* Return: -ETIMEDOUT in case of no response, if transmit error,
* return corresponding error, else if all goes well,
* return 0.
*/
-int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static int do_xfer(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
int ret;
int timeout;
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
+ /*
+ * Re-instate protocol id here from protocol handle so that it cannot be
+ * overridden by mistake (or malice) by protocol code tampering with
+ * the scmi_xfer structure.
+ */
+ xfer->hdr.protocol_id = pi->proto->id;
+
cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
if (unlikely(!cinfo))
return -EINVAL;
@@ -402,10 +480,11 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
-void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
- struct scmi_xfer *xfer)
+static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
xfer->rx.len = info->desc->max_msg_size;
}
@@ -413,24 +492,27 @@ void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
/**
- * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ * do_xfer_with_response() - Do one transfer and wait until the delayed
* response is received
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
* Return: -ETIMEDOUT in case of no delayed response, if transmit error,
* return corresponding error, else if all goes well, return 0.
*/
-int scmi_do_xfer_with_response(const struct scmi_handle *handle,
- struct scmi_xfer *xfer)
+static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
{
int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
DECLARE_COMPLETION_ONSTACK(async_response);
+ xfer->hdr.protocol_id = pi->proto->id;
+
xfer->async_done = &async_response;
- ret = scmi_do_xfer(handle, xfer);
+ ret = do_xfer(ph, xfer);
if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
ret = -ETIMEDOUT;
@@ -439,11 +521,10 @@ int scmi_do_xfer_with_response(const struct scmi_handle *handle,
}
/**
- * scmi_xfer_get_init() - Allocate and initialise one message for transmit
+ * xfer_get_init() - Allocate and initialise one message for transmit
*
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
* @msg_id: Message identifier
- * @prot_id: Protocol identifier for the message
* @tx_size: transmit message size
* @rx_size: receive message size
* @p: pointer to the allocated and initialised message
@@ -454,12 +535,14 @@ int scmi_do_xfer_with_response(const struct scmi_handle *handle,
* Return: 0 if all went fine with @p pointing to message, else
* corresponding error.
*/
-int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
- size_t tx_size, size_t rx_size, struct scmi_xfer **p)
+static int xfer_get_init(const struct scmi_protocol_handle *ph,
+ u8 msg_id, size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p)
{
int ret;
struct scmi_xfer *xfer;
- struct scmi_info *info = handle_to_scmi_info(handle);
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
struct device *dev = info->dev;
@@ -468,7 +551,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(handle, minfo);
+ xfer = scmi_xfer_get(pi->handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -478,7 +561,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.id = msg_id;
- xfer->hdr.protocol_id = prot_id;
+ xfer->hdr.protocol_id = pi->proto->id;
xfer->hdr.poll_completion = false;
*p = xfer;
@@ -487,43 +570,276 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
}
/**
- * scmi_version_get() - command to get the revision of the SCMI entity
+ * version_get() - command to get the revision of the SCMI entity
*
- * @handle: Pointer to SCMI entity handle
- * @protocol: Protocol identifier for the message
+ * @ph: Pointer to SCMI protocol handle
* @version: Holds returned version of protocol.
*
* Updates the SCMI information in the internal data structure.
*
* Return: 0 if all went fine, else return appropriate error.
*/
-int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
- u32 *version)
+static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
int ret;
__le32 *rev_info;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
- sizeof(*version), &t);
+ ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = do_xfer(ph, t);
if (!ret) {
rev_info = t->rx.buf;
*version = le32_to_cpu(*rev_info);
}
- scmi_xfer_put(handle, t);
+ xfer_put(ph, t);
return ret;
}
-void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
- u8 *prot_imp)
+/**
+ * scmi_set_protocol_priv - Set protocol specific data at init time
+ *
+ * @ph: A reference to the protocol handle.
+ * @priv: The private data to set.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
+ void *priv)
{
+ struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ pi->priv = priv;
+
+ return 0;
+}
+
+/**
+ * scmi_get_protocol_priv - Get protocol specific data set at init time
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * Return: Protocol private data if any was set.
+ */
+static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->priv;
+}
+
+static const struct scmi_xfer_ops xfer_ops = {
+ .version_get = version_get,
+ .xfer_get_init = xfer_get_init,
+ .reset_rx_to_maxsz = reset_rx_to_maxsz,
+ .do_xfer = do_xfer,
+ .do_xfer_with_response = do_xfer_with_response,
+ .xfer_put = xfer_put,
+};
+
+/**
+ * scmi_revision_area_get - Retrieve version memory area.
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * A helper to grab the version memory area reference during SCMI Base protocol
+ * initialization.
+ *
+ * Return: A reference to the version memory area associated to the SCMI
+ * instance underlying this protocol handle.
+ */
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->handle->version;
+}
+
+/**
+ * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
+ * instance descriptor.
+ * @info: The reference to the related SCMI instance.
+ * @proto: The protocol descriptor.
+ *
+ * Allocate a new protocol instance descriptor, using the provided @proto
+ * description, against the specified SCMI instance @info, and initialize it;
+ * all resources management is handled via a dedicated per-protocol devres
+ * group.
+ *
+ * Context: Assumes to be called with @protocols_mtx already acquired.
+ * Return: A reference to a freshly allocated and initialized protocol instance
+ * or ERR_PTR on failure. On failure the @proto reference is at first
+ * put using @scmi_protocol_put() before releasing the devres group.
+ */
+static struct scmi_protocol_instance *
+scmi_alloc_init_protocol_instance(struct scmi_info *info,
+ const struct scmi_protocol *proto)
+{
+ int ret = -ENOMEM;
+ void *gid;
+ struct scmi_protocol_instance *pi;
+ const struct scmi_handle *handle = &info->handle;
+
+ /* Protocol specific devres group */
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+ if (!gid) {
+ scmi_protocol_put(proto->id);
+ goto out;
+ }
+
+ pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ goto clean;
+
+ pi->gid = gid;
+ pi->proto = proto;
+ pi->handle = handle;
+ pi->ph.dev = handle->dev;
+ pi->ph.xops = &xfer_ops;
+ pi->ph.set_priv = scmi_set_protocol_priv;
+ pi->ph.get_priv = scmi_get_protocol_priv;
+ refcount_set(&pi->users, 1);
+ /* proto->instance_init is assured NON NULL by scmi_protocol_register */
+ ret = pi->proto->instance_init(&pi->ph);
+ if (ret)
+ goto clean;
+
+ ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
+ GFP_KERNEL);
+ if (ret != proto->id)
+ goto clean;
+
+ /*
+ * Warn but ignore events registration errors since we do not want
+ * to skip whole protocols if their notifications are messed up.
+ */
+ if (pi->proto->events) {
+ ret = scmi_register_protocol_events(handle, pi->proto->id,
+ &pi->ph,
+ pi->proto->events);
+ if (ret)
+ dev_warn(handle->dev,
+ "Protocol:%X - Events Registration Failed - err:%d\n",
+ pi->proto->id, ret);
+ }
+
+ devres_close_group(handle->dev, pi->gid);
+ dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
+
+ return pi;
+
+clean:
+ /* Take care to put the protocol module's owner before releasing all */
+ scmi_protocol_put(proto->id);
+ devres_release_group(handle->dev, gid);
+out:
+ return ERR_PTR(ret);
+}
+
+/**
+ * scmi_get_protocol_instance - Protocol initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * In case the required protocol has never been requested before for this
+ * instance, allocate and initialize all the needed structures while handling
+ * resource allocation with a dedicated per-protocol devres subgroup.
+ *
+ * Return: A reference to an initialized protocol instance or error on failure:
+ * in particular returns -EPROBE_DEFER when the desired protocol could
+ * NOT be found.
+ */
+static struct scmi_protocol_instance * __must_check
+scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
struct scmi_info *info = handle_to_scmi_info(handle);
+ mutex_lock(&info->protocols_mtx);
+ pi = idr_find(&info->protocols, protocol_id);
+
+ if (pi) {
+ refcount_inc(&pi->users);
+ } else {
+ const struct scmi_protocol *proto;
+
+ /* Fails if protocol not registered on bus */
+ proto = scmi_protocol_get(protocol_id);
+ if (proto)
+ pi = scmi_alloc_init_protocol_instance(info, proto);
+ else
+ pi = ERR_PTR(-EPROBE_DEFER);
+ }
+ mutex_unlock(&info->protocols_mtx);
+
+ return pi;
+}
+
+/**
+ * scmi_protocol_acquire - Protocol acquire
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Register a new user for the requested protocol on the specified SCMI
+ * platform instance, possibly triggering its initialization on first user.
+ *
+ * Return: 0 if protocol was acquired successfully.
+ */
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
+{
+ return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
+}
+
+/**
+ * scmi_protocol_release - Protocol de-initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Remove one user for the specified protocol and trigger de-initialization
+ * and resources de-allocation once the last user has gone.
+ */
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_protocol_instance *pi;
+
+ mutex_lock(&info->protocols_mtx);
+ pi = idr_find(&info->protocols, protocol_id);
+ if (WARN_ON(!pi))
+ goto out;
+
+ if (refcount_dec_and_test(&pi->users)) {
+ void *gid = pi->gid;
+
+ if (pi->proto->events)
+ scmi_deregister_protocol_events(handle, protocol_id);
+
+ if (pi->proto->instance_deinit)
+ pi->proto->instance_deinit(&pi->ph);
+
+ idr_remove(&info->protocols, protocol_id);
+
+ scmi_protocol_put(protocol_id);
+
+ devres_release_group(handle->dev, gid);
+ dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
+ protocol_id);
+ }
+
+out:
+ mutex_unlock(&info->protocols_mtx);
+}
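/*
 * Illustrative sketch (not part of this diff) of the bare acquire/release
 * pairing used by core-internal callers such as the notification layer
 * (see scmi_bind_event_handler() in notify.c); the wrapper name is
 * hypothetical.
 */
static int example_pin_clock_protocol(const struct scmi_handle *handle)
{
	int ret;

	/* First user triggers scmi_alloc_init_protocol_instance() */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_CLOCK);
	if (ret)
		return ret;

	/* ... use the initialized protocol instance ... */

	/* Last user triggers de-init and devres group release */
	scmi_protocol_release(handle, SCMI_PROTOCOL_CLOCK);

	return 0;
}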
+
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
+ u8 *prot_imp)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
info->protocols_imp = prot_imp;
}
@@ -542,6 +858,102 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
return false;
}
+struct scmi_protocol_devres {
+ const struct scmi_handle *handle;
+ u8 protocol_id;
+};
+
+static void scmi_devm_release_protocol(struct device *dev, void *res)
+{
+ struct scmi_protocol_devres *dres = res;
+
+ scmi_protocol_release(dres->handle, dres->protocol_id);
+}
+
+/**
+ * scmi_devm_protocol_get - Devres managed get protocol operations and handle
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ * @ph: A pointer reference used to pass back the associated protocol handle.
+ *
+ * Get hold of a protocol, accounting for its usage and triggering its
+ * initialization if needed, and return the protocol specific operations and
+ * the related protocol handle which will be used as first argument in most of
+ * the protocol operations methods.
+ * Being a devres based managed method, the protocol hold will be automatically
+ * released, and possibly de-initialized on last user, once the SCMI driver
+ * owning the scmi_device is unbound from it.
+ *
+ * Return: A reference to the requested protocol operations or error.
+ * Must be checked for errors by caller.
+ */
+static const void __must_check *
+scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
+ struct scmi_protocol_handle **ph)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_protocol_devres *dres;
+ struct scmi_handle *handle = sdev->handle;
+
+ if (!ph)
+ return ERR_PTR(-EINVAL);
+
+ dres = devres_alloc(scmi_devm_release_protocol,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return ERR_PTR(-ENOMEM);
+
+ pi = scmi_get_protocol_instance(handle, protocol_id);
+ if (IS_ERR(pi)) {
+ devres_free(dres);
+ return pi;
+ }
+
+ dres->handle = handle;
+ dres->protocol_id = protocol_id;
+ devres_add(&sdev->dev, dres);
+
+ *ph = &pi->ph;
+
+ return pi->proto->ops;
+}
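/*
 * Illustrative sketch (not part of this diff, hypothetical driver code) of
 * how an SCMI driver consumes the devres managed accessor above through
 * handle->devm_protocol_get, here against the clock protocol.
 */
static int example_scmi_driver_probe(struct scmi_device *sdev)
{
	const struct scmi_clk_proto_ops *clk_ops;
	struct scmi_protocol_handle *ph;

	/* Acquire CLOCK protocol ops plus the opaque protocol handle */
	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* Every protocol operation takes the protocol handle first */
	dev_info(&sdev->dev, "SCMI exposes %d clocks\n",
		 clk_ops->count_get(ph));

	/* No explicit put needed: released when this scmi_device unbinds */
	return 0;
}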
+
+static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
+{
+ struct scmi_protocol_devres *dres = res;
+
+ if (WARN_ON(!dres || !data))
+ return 0;
+
+ return dres->protocol_id == *((u8 *)data);
+}
+
+/**
+ * scmi_devm_protocol_put - Devres managed put protocol operations and handle
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ *
+ * Explicitly release a protocol hold previously obtained by calling the above
+ * @scmi_devm_protocol_get.
+ */
+static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
+{
+ int ret;
+
+ ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
+ scmi_devm_protocol_match, &protocol_id);
+ WARN_ON(ret);
+}
+
+static inline
+struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
+{
+ info->users++;
+ return &info->handle;
+}
+
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
@@ -563,8 +975,7 @@ struct scmi_handle *scmi_handle_get(struct device *dev)
list_for_each(p, &scmi_list) {
info = list_entry(p, struct scmi_info, node);
if (dev->parent == info->dev) {
- handle = &info->handle;
- info->users++;
+ handle = scmi_handle_get_from_info_unlocked(info);
break;
}
}
@@ -707,63 +1118,268 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
return ret;
}
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id, const char *name)
+/**
+ * scmi_get_protocol_device - Helper to get/create an SCMI device.
+ *
+ * @np: A device node representing a valid active protocol for the referred
+ * SCMI instance.
+ * @info: The referred SCMI instance for which we are getting/creating this
+ * device.
+ * @prot_id: The protocol ID.
+ * @name: The device name.
+ *
+ * Referring to the specific SCMI instance identified by @info, this helper
+ * takes care to return a properly initialized device matching the requested
+ * @prot_id and @name: if the device does not exist yet, it is created as a
+ * child of the specified SCMI instance @info and its transport is properly
+ * initialized as usual.
+ */
+static inline struct scmi_device *
+scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
+ int prot_id, const char *name)
{
struct scmi_device *sdev;
+ /* Already created for this parent SCMI instance ? */
+ sdev = scmi_child_dev_find(info->dev, prot_id, name);
+ if (sdev)
+ return sdev;
+
+ pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+
sdev = scmi_device_create(np, info->dev, prot_id, name);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
- return;
+ return NULL;
}
if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
- return;
+ return NULL;
}
+ return sdev;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = scmi_get_protocol_device(np, info, prot_id, name);
+ if (!sdev)
+ return;
+
/* setup handle now as the transport is ready */
scmi_set_handle(sdev);
}
-#define MAX_SCMI_DEV_PER_PROTOCOL 2
-struct scmi_prot_devnames {
- int protocol_id;
- char *names[MAX_SCMI_DEV_PER_PROTOCOL];
-};
+/**
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ *
+ * All devices previously requested for this instance (if any) are found and
+ * created by scanning the proper @&scmi_requested_devices entry.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info, int prot_id)
+{
+ struct list_head *phead;
-static struct scmi_prot_devnames devnames[] = {
- { SCMI_PROTOCOL_POWER, { "genpd" },},
- { SCMI_PROTOCOL_SYSTEM, { "syspower" },},
- { SCMI_PROTOCOL_PERF, { "cpufreq" },},
- { SCMI_PROTOCOL_CLOCK, { "clocks" },},
- { SCMI_PROTOCOL_SENSOR, { "hwmon", "iiodev" },},
- { SCMI_PROTOCOL_RESET, { "reset" },},
- { SCMI_PROTOCOL_VOLTAGE, { "regulator" },},
-};
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, prot_id);
+ if (phead) {
+ struct scmi_requested_dev *rdev;
-static inline void
-scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
- int prot_id)
+ list_for_each_entry(rdev, phead, node)
+ scmi_create_protocol_device(np, info, prot_id,
+ rdev->id_table->name);
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
+}
+
+/**
+ * scmi_protocol_device_request - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper lets an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * at first the freshly requested @id_table is annotated in the IDR table
+ * @scmi_requested_devices, then a matching device is created for each already
+ * active SCMI instance (if any).
+ *
+ * This way the requested device is created straight-away for all the already
+ * initialized (probed) SCMI instances (handles) and it also remains annotated
+ * as pending creation if the requesting SCMI driver was loaded before some
+ * SCMI instance and related transports were available: when such a late
+ * instance is probed, its probe will take care to scan the list of pending
+ * requested devices and create those on its own (see
+ * @scmi_create_protocol_devices and its enclosing loop).
+ *
+ * Return: 0 on Success
+ */
+int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
- int loop, cnt;
+ int ret = 0;
+ unsigned int id = 0;
+ struct list_head *head, *phead = NULL;
+ struct scmi_requested_dev *rdev;
+ struct scmi_info *info;
- for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
- if (devnames[loop].protocol_id != prot_id)
- continue;
+ pr_debug("Requesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
- for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
- const char *name = devnames[loop].names[cnt];
+ /*
+ * Search for the matching protocol rdev list and then search
+ * it for any existing device with the same name; fail if a duplicate is found.
+ */
+ mutex_lock(&scmi_requested_devices_mtx);
+ idr_for_each_entry(&scmi_requested_devices, head, id) {
+ if (!phead) {
+ /* A list found registered in the IDR is never empty */
+ rdev = list_first_entry(head, struct scmi_requested_dev,
+ node);
+ if (rdev->id_table->protocol_id ==
+ id_table->protocol_id)
+ phead = head;
+ }
+ list_for_each_entry(rdev, head, node) {
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
+ pr_err("Ignoring duplicate request [%d] %s\n",
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * No duplicate found for requested id_table, so let's create a new
+ * requested device entry for this new valid request.
+ */
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rdev->id_table = id_table;
- if (name)
- scmi_create_protocol_device(np, info, prot_id,
- name);
+ /*
+ * Append the new requested device table descriptor to the head of the
+ * related protocol list, creating such a head if not already
+ * there.
+ */
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead) {
+ kfree(rdev);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(phead);
+
+ ret = idr_alloc(&scmi_requested_devices, (void *)phead,
+ id_table->protocol_id,
+ id_table->protocol_id + 1, GFP_KERNEL);
+ if (ret != id_table->protocol_id) {
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
+ kfree(rdev);
+ kfree(phead);
+ ret = -EINVAL;
+ goto out;
}
+ ret = 0;
}
+ list_add(&rdev->node, phead);
+
+ /*
+ * Now effectively create and initialize the requested device for every
+ * already initialized SCMI instance which has registered the requested
+ * protocol as a valid active one: i.e. defined in DT and supported by
+ * current platform FW.
+ */
+ mutex_lock(&scmi_list_mutex);
+ list_for_each_entry(info, &scmi_list, node) {
+ struct device_node *child;
+
+ child = idr_find(&info->active_protocols,
+ id_table->protocol_id);
+ if (child) {
+ struct scmi_device *sdev;
+
+ sdev = scmi_get_protocol_device(child, info,
+ id_table->protocol_id,
+ id_table->name);
+ /* Set handle if not already set: device existed */
+ if (sdev && !sdev->handle)
+ sdev->handle =
+ scmi_handle_get_from_info_unlocked(info);
+ } else {
+ dev_err(info->dev,
+ "Failed. SCMI protocol %d not active.\n",
+ id_table->protocol_id);
+ }
+ }
+ mutex_unlock(&scmi_list_mutex);
+
+out:
+ mutex_unlock(&scmi_requested_devices_mtx);
+
+ return ret;
+}
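/*
 * Illustrative sketch (not part of this diff): the protocol/name pair
 * descriptor that a requester would pass in; the direct call below is
 * hypothetical, as such requests are expected to be issued on behalf of a
 * registering SCMI driver rather than open-coded.
 */
static const struct scmi_device_id example_clock_dev = {
	.protocol_id = SCMI_PROTOCOL_CLOCK,
	.name = "clocks",
};

static int example_request_clock_device(void)
{
	/* Creates a "clocks" device now and for late-probed SCMI instances */
	return scmi_protocol_device_request(&example_clock_dev);
}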
+
+/**
+ * scmi_protocol_device_unrequest - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * A helper to let an SCMI driver release its request for devices; note that
+ * devices are created and initialized once the first SCMI driver requests them
+ * but they are destroyed only on SCMI core unloading/unbinding.
+ *
+ * The current SCMI transport layer uses such devices as internal references and,
+ * as such, they could be shared as the same transport between multiple drivers,
+ * so they cannot be safely destroyed till the whole SCMI stack is removed
+ * (unless the further burden of refcounting is added).
+ */
+void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+ struct list_head *phead;
+
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ struct scmi_requested_dev *victim, *tmp;
+
+ list_for_each_entry_safe(victim, tmp, phead, node) {
+ if (!strcmp(victim->id_table->name, id_table->name)) {
+ list_del(&victim->node);
+ kfree(victim);
+ break;
+ }
+ }
+
+ if (list_empty(phead)) {
+ idr_remove(&scmi_requested_devices,
+ id_table->protocol_id);
+ kfree(phead);
+ }
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
}
static int scmi_probe(struct platform_device *pdev)
@@ -786,6 +1402,9 @@ static int scmi_probe(struct platform_device *pdev)
info->dev = dev;
info->desc = desc;
INIT_LIST_HEAD(&info->node);
+ idr_init(&info->protocols);
+ mutex_init(&info->protocols_mtx);
+ idr_init(&info->active_protocols);
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
@@ -794,6 +1413,8 @@ static int scmi_probe(struct platform_device *pdev)
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
+ handle->devm_protocol_get = scmi_devm_protocol_get;
+ handle->devm_protocol_put = scmi_devm_protocol_put;
ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
@@ -806,9 +1427,14 @@ static int scmi_probe(struct platform_device *pdev)
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
- ret = scmi_base_protocol_init(handle);
+ /*
+ * Trigger SCMI Base protocol initialization.
+ * It's mandatory and won't ever be released/deinitialized until the
+ * SCMI stack is shutdown/unloaded as a whole.
+ */
+ ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
- dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
+ dev_err(dev, "unable to communicate with SCMI\n");
return ret;
}
@@ -831,6 +1457,19 @@ static int scmi_probe(struct platform_device *pdev)
continue;
}
+ /*
+ * Save this valid DT protocol descriptor amongst
+ * @active_protocols for this SCMI instance.
+ */
+ ret = idr_alloc(&info->active_protocols, child,
+ prot_id, prot_id + 1, GFP_KERNEL);
+ if (ret != prot_id) {
+ dev_err(dev, "SCMI protocol %d already activated. Skip\n",
+ prot_id);
+ continue;
+ }
+
+ of_node_get(child);
scmi_create_protocol_devices(child, info, prot_id);
}
@@ -844,9 +1483,10 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
static int scmi_remove(struct platform_device *pdev)
{
- int ret = 0;
+ int ret = 0, id;
struct scmi_info *info = platform_get_drvdata(pdev);
struct idr *idr = &info->tx_idr;
+ struct device_node *child;
mutex_lock(&scmi_list_mutex);
if (info->users)
@@ -860,6 +1500,14 @@ static int scmi_remove(struct platform_device *pdev)
scmi_notification_exit(&info->handle);
+ mutex_lock(&info->protocols_mtx);
+ idr_destroy(&info->protocols);
+ mutex_unlock(&info->protocols_mtx);
+
+ idr_for_each_entry(&info->active_protocols, child, id)
+ of_node_put(child);
+ idr_destroy(&info->active_protocols);
+
/* Safe to free channels since no more users */
ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->tx_idr);
@@ -942,6 +1590,8 @@ static int __init scmi_driver_init(void)
{
scmi_bus_init();
+ scmi_base_register();
+
scmi_clock_register();
scmi_perf_register();
scmi_power_register();
@@ -956,7 +1606,7 @@ subsys_initcall(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
- scmi_bus_exit();
+ scmi_base_unregister();
scmi_clock_unregister();
scmi_perf_unregister();
@@ -966,6 +1616,8 @@ static void __exit scmi_driver_exit(void)
scmi_voltage_unregister();
scmi_system_unregister();
+ scmi_bus_exit();
+
platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 66196b293b6c..d860bebd984a 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Notification support
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
/**
* DOC: Theory of operation
@@ -91,6 +91,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include "common.h"
#include "notify.h"
#define SCMI_MAX_PROTO 256
@@ -177,7 +178,7 @@
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \
({ \
typeof(revt) r = revt; \
- r->proto->ops->set_notify_enabled(r->proto->ni->handle, \
+ r->proto->ops->set_notify_enabled(r->proto->ph, \
(eid), (sid), (state)); \
})
@@ -190,7 +191,7 @@
#define REVT_FILL_REPORT(revt, ...) \
({ \
typeof(revt) r = revt; \
- r->proto->ops->fill_custom_report(r->proto->ni->handle, \
+ r->proto->ops->fill_custom_report(r->proto->ph, \
__VA_ARGS__); \
})
@@ -278,6 +279,7 @@ struct scmi_registered_event;
* events' descriptors, whose fixed-size is determined at
* compile time.
* @registered_mtx: A mutex to protect @registered_events_handlers
+ * @ph: SCMI protocol handle reference
* @registered_events_handlers: An hashtable containing all events' handlers
* descriptors registered for this protocol
*
@@ -302,6 +304,7 @@ struct scmi_registered_events_desc {
struct scmi_registered_event **registered_events;
/* mutex to protect registered_events_handlers */
struct mutex registered_mtx;
+ const struct scmi_protocol_handle *ph;
DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};
@@ -368,7 +371,7 @@ static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl);
-static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl);
/**
@@ -579,11 +582,9 @@ int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
struct scmi_event_header eh;
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return 0;
- ni = handle->notify_priv;
r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
if (!r_evt)
@@ -732,14 +733,10 @@ scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
/**
* scmi_register_protocol_events() - Register Protocol Events with the core
* @handle: The handle identifying the platform instance against which the
- * the protocol's events are registered
+ * protocol's events are registered
* @proto_id: Protocol ID
- * @queue_sz: Size in bytes of the associated queue to be allocated
- * @ops: Protocol specific event-related operations
- * @evt: Event descriptor array
- * @num_events: Number of events in @evt array
- * @num_sources: Number of possible sources for this protocol on this
- * platform.
+ * @ph: SCMI protocol handle.
+ * @ee: A structure describing the events supported by this protocol.
*
* Used by SCMI Protocols initialization code to register with the notification
* core the list of supported events and their descriptors: takes care to
@@ -748,60 +745,69 @@ scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
*
* Return: 0 on Success
*/
-int scmi_register_protocol_events(const struct scmi_handle *handle,
- u8 proto_id, size_t queue_sz,
- const struct scmi_event_ops *ops,
- const struct scmi_event *evt, int num_events,
- int num_sources)
+int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
+ const struct scmi_protocol_handle *ph,
+ const struct scmi_protocol_events *ee)
{
int i;
+ unsigned int num_sources;
size_t payld_sz = 0;
struct scmi_registered_events_desc *pd;
struct scmi_notify_instance *ni;
+ const struct scmi_event *evt;
- if (!ops || !evt)
+ if (!ee || !ee->ops || !ee->evts || !ph ||
+ (!ee->num_sources && !ee->ops->get_num_sources))
return -EINVAL;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return -ENOMEM;
- ni = handle->notify_priv;
- /* Attach to the notification main devres group */
- if (!devres_open_group(ni->handle->dev, ni->gid, GFP_KERNEL))
- return -ENOMEM;
+ /* num_sources cannot be <= 0 */
+ if (ee->num_sources) {
+ num_sources = ee->num_sources;
+ } else {
+ int nsrc = ee->ops->get_num_sources(ph);
- for (i = 0; i < num_events; i++)
+ if (nsrc <= 0)
+ return -EINVAL;
+ num_sources = nsrc;
+ }
+
+ evt = ee->evts;
+ for (i = 0; i < ee->num_events; i++)
payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
payld_sz += sizeof(struct scmi_event_header);
- pd = scmi_allocate_registered_events_desc(ni, proto_id, queue_sz,
- payld_sz, num_events, ops);
+ pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
+ payld_sz, ee->num_events,
+ ee->ops);
if (IS_ERR(pd))
- goto err;
+ return PTR_ERR(pd);
- for (i = 0; i < num_events; i++, evt++) {
+ pd->ph = ph;
+ for (i = 0; i < ee->num_events; i++, evt++) {
struct scmi_registered_event *r_evt;
r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
GFP_KERNEL);
if (!r_evt)
- goto err;
+ return -ENOMEM;
r_evt->proto = pd;
r_evt->evt = evt;
r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
sizeof(refcount_t), GFP_KERNEL);
if (!r_evt->sources)
- goto err;
+ return -ENOMEM;
r_evt->num_sources = num_sources;
mutex_init(&r_evt->sources_mtx);
r_evt->report = devm_kzalloc(ni->handle->dev,
evt->max_report_sz, GFP_KERNEL);
if (!r_evt->report)
- goto err;
+ return -ENOMEM;
pd->registered_events[i] = r_evt;
/* Ensure events are updated */
@@ -815,8 +821,6 @@ int scmi_register_protocol_events(const struct scmi_handle *handle,
/* Ensure protocols are updated */
smp_wmb();
- devres_close_group(ni->handle->dev, ni->gid);
-
/*
* Finalize any pending events' handler which could have been waiting
* for this protocol's events registration.
@@ -824,13 +828,33 @@ int scmi_register_protocol_events(const struct scmi_handle *handle,
schedule_work(&ni->init_work);
return 0;
+}
-err:
- dev_warn(handle->dev, "Proto:%X - Registration Failed !\n", proto_id);
- /* A failing protocol registration does not trigger full failure */
- devres_close_group(ni->handle->dev, ni->gid);
+/**
+ * scmi_deregister_protocol_events - Deregister protocol events with the core
+ * @handle: The handle identifying the platform instance against which the
+ * protocol's events are registered
+ * @proto_id: Protocol ID
+ */
+void scmi_deregister_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id)
+{
+ struct scmi_notify_instance *ni;
+ struct scmi_registered_events_desc *pd;
- return -ENOMEM;
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return;
+
+ pd = ni->registered_protocols[proto_id];
+ if (!pd)
+ return;
+
+ ni->registered_protocols[proto_id] = NULL;
+ /* Ensure protocols are updated */
+ smp_wmb();
+
+ cancel_work_sync(&pd->equeue.notify_work);
}
/**
@@ -900,9 +924,21 @@ static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
if (!r_evt)
return -EINVAL;
- /* Remove from pending and insert into registered */
+ /*
+ * Remove from pending and insert into registered while getting hold
+ * of protocol instance.
+ */
hash_del(&hndl->hash);
+ /*
+ * Acquire protocols only for NON pending handlers, so as NOT to trigger
+ * protocol initialization when a notifier is registered against a still
+ * unregistered protocol, since it would make little sense to force init
+ * of protocols for which no SCMI driver user exists yet: they wouldn't
+ * emit any event anyway till some SCMI driver starts using them.
+ */
+ scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
hndl->r_evt = r_evt;
+
mutex_lock(&r_evt->proto->registered_mtx);
hash_add(r_evt->proto->registered_events_handlers,
&hndl->hash, hndl->key);
@@ -1193,41 +1229,65 @@ static int scmi_disable_events(struct scmi_event_handler *hndl)
* * unregister and free the handler itself
*
* Context: Assumes all the proper locking has been managed by the caller.
+ *
+ * Return: True if handler was freed (users dropped to zero)
*/
-static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
+ bool freed = false;
+
if (refcount_dec_and_test(&hndl->users)) {
if (!IS_HNDL_PENDING(hndl))
scmi_disable_events(hndl);
scmi_free_event_handler(hndl);
+ freed = true;
}
+
+ return freed;
}
static void scmi_put_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
+ bool freed;
+ u8 protocol_id;
struct scmi_registered_event *r_evt = hndl->r_evt;
mutex_lock(&ni->pending_mtx);
- if (r_evt)
+ if (r_evt) {
+ protocol_id = r_evt->proto->id;
mutex_lock(&r_evt->proto->registered_mtx);
+ }
- scmi_put_handler_unlocked(ni, hndl);
+ freed = scmi_put_handler_unlocked(ni, hndl);
- if (r_evt)
+ if (r_evt) {
mutex_unlock(&r_evt->proto->registered_mtx);
+ /*
+ * Only registered handlers have acquired a protocol; it must be
+ * released here only AFTER unlocking registered_mtx, since
+ * releasing a protocol can trigger its de-initialization
+ * (i.e. including r_evt and registered_mtx)
+ */
+ if (freed)
+ scmi_protocol_release(ni->handle, protocol_id);
+ }
mutex_unlock(&ni->pending_mtx);
}
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
+ bool freed;
struct scmi_registered_event *r_evt = hndl->r_evt;
+ u8 protocol_id = r_evt->proto->id;
mutex_lock(&r_evt->proto->registered_mtx);
- scmi_put_handler_unlocked(ni, hndl);
+ freed = scmi_put_handler_unlocked(ni, hndl);
mutex_unlock(&r_evt->proto->registered_mtx);
+ if (freed)
+ scmi_protocol_release(ni->handle, protocol_id);
}
/**
@@ -1247,7 +1307,7 @@ static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
}
/**
- * scmi_register_notifier() - Register a notifier_block for an event
+ * scmi_notifier_register() - Register a notifier_block for an event
* @handle: The handle identifying the platform instance against which the
* callback is registered
* @proto_id: Protocol ID
@@ -1279,8 +1339,8 @@ static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
*
* Return: 0 on Success
*/
-static int scmi_register_notifier(const struct scmi_handle *handle,
- u8 proto_id, u8 evt_id, u32 *src_id,
+static int scmi_notifier_register(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, const u32 *src_id,
struct notifier_block *nb)
{
int ret = 0;
@@ -1288,11 +1348,9 @@ static int scmi_register_notifier(const struct scmi_handle *handle,
struct scmi_event_handler *hndl;
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return -ENODEV;
- ni = handle->notify_priv;
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
@@ -1313,7 +1371,7 @@ static int scmi_register_notifier(const struct scmi_handle *handle,
}
/**
- * scmi_unregister_notifier() - Unregister a notifier_block for an event
+ * scmi_notifier_unregister() - Unregister a notifier_block for an event
* @handle: The handle identifying the platform instance against which the
* callback is unregistered
* @proto_id: Protocol ID
@@ -1328,19 +1386,17 @@ static int scmi_register_notifier(const struct scmi_handle *handle,
*
* Return: 0 on Success
*/
-static int scmi_unregister_notifier(const struct scmi_handle *handle,
- u8 proto_id, u8 evt_id, u32 *src_id,
+static int scmi_notifier_unregister(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, const u32 *src_id,
struct notifier_block *nb)
{
u32 evt_key;
struct scmi_event_handler *hndl;
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return -ENODEV;
- ni = handle->notify_priv;
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
@@ -1356,7 +1412,7 @@ static int scmi_unregister_notifier(const struct scmi_handle *handle,
scmi_put_handler(ni, hndl);
/*
- * This balances the initial get issued in @scmi_register_notifier.
+ * This balances the initial get issued in @scmi_notifier_register.
* If this notifier_block happened to be the last known user callback
* for this event, the handler is here freed and the event's generation
* stopped.
@@ -1371,6 +1427,129 @@ static int scmi_unregister_notifier(const struct scmi_handle *handle,
return 0;
}
+struct scmi_notifier_devres {
+ const struct scmi_handle *handle;
+ u8 proto_id;
+ u8 evt_id;
+ u32 __src_id;
+ u32 *src_id;
+ struct notifier_block *nb;
+};
+
+static void scmi_devm_release_notifier(struct device *dev, void *res)
+{
+ struct scmi_notifier_devres *dres = res;
+
+ scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
+ dres->src_id, dres->nb);
+}
+
+/**
+ * scmi_devm_notifier_register() - Managed registration of a notifier_block
+ * for an event
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic devres managed helper to register a notifier_block against a
+ * protocol event.
+ */
+static int scmi_devm_notifier_register(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret;
+ struct scmi_notifier_devres *dres;
+
+ dres = devres_alloc(scmi_devm_release_notifier,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return -ENOMEM;
+
+ ret = scmi_notifier_register(sdev->handle, proto_id,
+ evt_id, src_id, nb);
+ if (ret) {
+ devres_free(dres);
+ return ret;
+ }
+
+ dres->handle = sdev->handle;
+ dres->proto_id = proto_id;
+ dres->evt_id = evt_id;
+ dres->nb = nb;
+ if (src_id) {
+ dres->__src_id = *src_id;
+ dres->src_id = &dres->__src_id;
+ } else {
+ dres->src_id = NULL;
+ }
+ devres_add(&sdev->dev, dres);
+
+ return ret;
+}
+
+static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
+{
+ struct scmi_notifier_devres *dres = res;
+ struct scmi_notifier_devres *xres = data;
+
+ if (WARN_ON(!dres || !xres))
+ return 0;
+
+ return dres->proto_id == xres->proto_id &&
+ dres->evt_id == xres->evt_id &&
+ dres->nb == xres->nb &&
+ ((!dres->src_id && !xres->src_id) ||
+ (dres->src_id && xres->src_id &&
+ dres->__src_id == xres->__src_id));
+}
+
+/**
+ * scmi_devm_notifier_unregister() - Managed un-registration of a
+ * notifier_block for an event
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic devres managed helper to explicitly un-register a notifier_block
+ * against a protocol event, which was previously registered using the above
+ * @scmi_devm_notifier_register.
+ */
+static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret;
+ struct scmi_notifier_devres dres;
+
+ dres.handle = sdev->handle;
+ dres.proto_id = proto_id;
+ dres.evt_id = evt_id;
+ if (src_id) {
+ dres.__src_id = *src_id;
+ dres.src_id = &dres.__src_id;
+ } else {
+ dres.src_id = NULL;
+ }
+
+ ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
+ scmi_devm_notifier_match, &dres);
+
+ WARN_ON(ret);
+
+ return ret;
+}
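Continuing the hypothetical example above, explicit removal is only needed when a driver wants to stop notifications before it unbinds; the devres entry is matched on the same protocol/event/source/notifier tuple:

	/* e.g. on a reconfiguration path, before ->remove() (hypothetical) */
	ret = handle->notify_ops->devm_event_notifier_unregister(sdev,
			SCMI_PROTOCOL_POWER,
			SCMI_EVENT_POWER_STATE_CHANGED,
			NULL, &my_power_nb);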
+
/**
* scmi_protocols_late_init() - Worker for late initialization
* @work: The work item to use associated to the proper SCMI instance
@@ -1428,8 +1607,10 @@ static void scmi_protocols_late_init(struct work_struct *work)
* directly from an scmi_driver to register its own notifiers.
*/
static const struct scmi_notify_ops notify_ops = {
- .register_event_notifier = scmi_register_notifier,
- .unregister_event_notifier = scmi_unregister_notifier,
+ .devm_event_notifier_register = scmi_devm_notifier_register,
+ .devm_event_notifier_unregister = scmi_devm_notifier_unregister,
+ .event_notifier_register = scmi_notifier_register,
+ .event_notifier_unregister = scmi_notifier_unregister,
};
/**
@@ -1490,8 +1671,8 @@ int scmi_notification_init(struct scmi_handle *handle)
INIT_WORK(&ni->init_work, scmi_protocols_late_init);
+ scmi_notification_instance_data_set(handle, ni);
handle->notify_ops = &notify_ops;
- handle->notify_priv = ni;
/* Ensure handle is up to date */
smp_wmb();
@@ -1503,7 +1684,7 @@ int scmi_notification_init(struct scmi_handle *handle)
err:
dev_warn(handle->dev, "Initialization Failed.\n");
- devres_release_group(handle->dev, NULL);
+ devres_release_group(handle->dev, gid);
return -ENOMEM;
}
@@ -1515,15 +1696,10 @@ void scmi_notification_exit(struct scmi_handle *handle)
{
struct scmi_notify_instance *ni;
- /* Ensure notify_priv is updated */
- smp_rmb();
- if (!handle->notify_priv)
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
return;
- ni = handle->notify_priv;
-
- handle->notify_priv = NULL;
- /* Ensure handle is up to date */
- smp_wmb();
+ scmi_notification_instance_data_set(handle, NULL);
/* Destroy while letting pending work complete */
destroy_workqueue(ni->notify_wq);
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
index 3485f20fa70e..ce0324be6c71 100644
--- a/drivers/firmware/arm_scmi/notify.h
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -4,7 +4,7 @@
* notification header file containing some definitions, structures
* and function prototypes related to SCMI Notification handling.
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
#ifndef _SCMI_NOTIFY_H
#define _SCMI_NOTIFY_H
@@ -31,8 +31,12 @@ struct scmi_event {
size_t max_report_sz;
};
+struct scmi_protocol_handle;
+
/**
* struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @get_num_sources: Returns the number of possible event sources for this
+ * protocol
* @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
* using the proper custom protocol commands.
* Return 0 on Success
@@ -46,22 +50,42 @@ struct scmi_event {
* process context.
*/
struct scmi_event_ops {
- int (*set_notify_enabled)(const struct scmi_handle *handle,
+ int (*get_num_sources)(const struct scmi_protocol_handle *ph);
+ int (*set_notify_enabled)(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enabled);
- void *(*fill_custom_report)(const struct scmi_handle *handle,
+ void *(*fill_custom_report)(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id);
};
+/**
+ * struct scmi_protocol_events - Per-protocol description of available events
+ * @queue_sz: Size in bytes of the per-protocol queue to use.
+ * @ops: Protocol-specific event operations.
+ * @evts: Array of supported protocol's events.
+ * @num_events: Number of supported protocol's events described in @evts.
+ * @num_sources: Number of protocol's sources, should be greater than 0; if not
+ * available at compile time, it will be provided at run-time via
+ * @get_num_sources.
+ */
+struct scmi_protocol_events {
+ size_t queue_sz;
+ const struct scmi_event_ops *ops;
+ const struct scmi_event *evts;
+ unsigned int num_events;
+ unsigned int num_sources;
+};
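A sketch of how a protocol could fill this descriptor when its source count is only known at run-time (the xyz_* handler names are hypothetical; the pattern mirrors the perf instance added later in this patch):

static const struct scmi_event xyz_events[] = {
	{
		.id = 0x0,			/* protocol-defined event id */
		.max_payld_sz = sizeof(u32),	/* raw notification payload */
		.max_report_sz = sizeof(u64),	/* cooked report size */
	},
};

static const struct scmi_event_ops xyz_event_ops = {
	.get_num_sources = xyz_get_num_sources,
	.set_notify_enabled = xyz_set_notify_enabled,
	.fill_custom_report = xyz_fill_custom_report,
};

static const struct scmi_protocol_events xyz_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &xyz_event_ops,
	.evts = xyz_events,
	.num_events = ARRAY_SIZE(xyz_events),
	/* .num_sources left unset: queried via ops->get_num_sources() */
};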
+
int scmi_notification_init(struct scmi_handle *handle);
void scmi_notification_exit(struct scmi_handle *handle);
-int scmi_register_protocol_events(const struct scmi_handle *handle,
- u8 proto_id, size_t queue_sz,
- const struct scmi_event_ops *ops,
- const struct scmi_event *evt, int num_events,
- int num_sources);
+struct scmi_protocol_handle;
+int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
+ const struct scmi_protocol_handle *ph,
+ const struct scmi_protocol_events *ee);
+void scmi_deregister_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id);
int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
const void *buf, size_t len, ktime_t ts);
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index e374b1125fca..f4cd5193b961 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Performance Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
@@ -175,21 +176,21 @@ static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
PERF_NOTIFY_LEVEL,
};
-static int scmi_perf_attributes_get(const struct scmi_handle *handle,
+static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u16 flags = le16_to_cpu(attr->flags);
@@ -200,28 +201,27 @@ static int scmi_perf_attributes_get(const struct scmi_handle *handle,
pi->stats_size = le32_to_cpu(attr->stats_size);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
- struct perf_dom_info *dom_info)
+scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct perf_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
- ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_PERF, sizeof(domain),
- sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
@@ -245,7 +245,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -257,7 +257,7 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
}
static int
-scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
+scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
struct perf_dom_info *perf_dom)
{
int ret, cnt;
@@ -268,8 +268,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
struct scmi_msg_perf_describe_levels *dom_info;
struct scmi_msg_resp_perf_describe_levels *level_info;
- ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
- SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
+ sizeof(*dom_info), 0, &t);
if (ret)
return ret;
@@ -281,14 +281,14 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
/* Set the number of OPPs to be skipped/already read */
dom_info->level_index = cpu_to_le32(tot_opp_cnt);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
num_returned = le16_to_cpu(level_info->num_returned);
num_remaining = le16_to_cpu(level_info->num_remaining);
if (tot_opp_cnt + num_returned > MAX_OPPS) {
- dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
+ dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
break;
}
@@ -299,13 +299,13 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
opp->trans_latency_us = le16_to_cpu
(level_info->opp[cnt].transition_latency_us);
- dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
+ dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
opp->perf, opp->power, opp->trans_latency_us);
}
tot_opp_cnt += num_returned;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -313,7 +313,7 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
} while (num_returned && num_remaining);
perf_dom->opp_count = tot_opp_cnt;
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
return ret;
@@ -353,15 +353,15 @@ static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
#endif
}
-static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
- u32 max_perf, u32 min_perf)
+static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_limits *limits;
- ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
- sizeof(*limits), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
+ sizeof(*limits), 0, &t);
if (ret)
return ret;
@@ -370,16 +370,16 @@ static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
limits->max_level = cpu_to_le32(max_perf);
limits->min_level = cpu_to_le32(min_perf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
- u32 max_perf, u32 min_perf)
+static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->limit_set_addr) {
@@ -389,24 +389,24 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
+ return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf);
}
-static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
- u32 *max_perf, u32 *min_perf)
+static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_get_limits *limits;
- ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
- sizeof(__le32), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
+ sizeof(__le32), 0, &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
limits = t->rx.buf;
@@ -414,14 +414,14 @@ static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
*min_perf = le32_to_cpu(limits->min_level);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
- u32 *max_perf, u32 *min_perf)
+static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->limit_get_addr) {
@@ -430,18 +430,17 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
+ return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf);
}
-static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
- u32 level, bool poll)
+static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_level *lvl;
- ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
- sizeof(*lvl), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
if (ret)
return ret;
@@ -450,16 +449,16 @@ static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
lvl->domain = cpu_to_le32(domain);
lvl->level = cpu_to_le32(level);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
- u32 level, bool poll)
+static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->level_set_addr) {
@@ -468,35 +467,35 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_level_set(handle, domain, level, poll);
+ return scmi_perf_mb_level_set(ph, domain, level, poll);
}
-static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
- u32 *level, bool poll)
+static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
- sizeof(u32), sizeof(u32), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
+ sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
t->hdr.poll_completion = poll;
put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*level = get_unaligned_le32(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
- u32 *level, bool poll)
+static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->level_get_addr) {
@@ -504,10 +503,10 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
return 0;
}
- return scmi_perf_mb_level_get(handle, domain, level, poll);
+ return scmi_perf_mb_level_get(ph, domain, level, poll);
}
-static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
+static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
u32 domain, int message_id,
bool enable)
{
@@ -515,8 +514,7 @@ static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_perf_notify_level_or_limits *notify;
- ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF,
- sizeof(*notify), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
if (ret)
return ret;
@@ -524,9 +522,9 @@ static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
notify->domain = cpu_to_le32(domain);
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -540,7 +538,7 @@ static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
}
static void
-scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
+scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
u32 message_id, void __iomem **p_addr,
struct scmi_fc_db_info **p_db)
{
@@ -557,9 +555,8 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
if (!p_addr)
return;
- ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
- SCMI_PROTOCOL_PERF,
- sizeof(*info), sizeof(*resp), &t);
+ ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ sizeof(*info), sizeof(*resp), &t);
if (ret)
return;
@@ -567,7 +564,7 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
info->domain = cpu_to_le32(domain);
info->message_id = cpu_to_le32(message_id);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
goto err_xfer;
@@ -579,20 +576,20 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
phys_addr = le32_to_cpu(resp->chan_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
- addr = devm_ioremap(handle->dev, phys_addr, size);
+ addr = devm_ioremap(ph->dev, phys_addr, size);
if (!addr)
goto err_xfer;
*p_addr = addr;
if (p_db && SUPPORTS_DOORBELL(flags)) {
- db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
+ db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
if (!db)
goto err_xfer;
size = 1 << DOORBELL_REG_WIDTH(flags);
phys_addr = le32_to_cpu(resp->db_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
- addr = devm_ioremap(handle->dev, phys_addr, size);
+ addr = devm_ioremap(ph->dev, phys_addr, size);
if (!addr)
goto err_xfer;
@@ -605,25 +602,25 @@ scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
*p_db = db;
}
err_xfer:
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
}
-static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
+static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
- fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
+ fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
- scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
&fc->level_set_addr, &fc->level_set_db);
- scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
&fc->level_get_addr, NULL);
- scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
&fc->limit_set_addr, &fc->limit_set_db);
- scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
+ scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
&fc->limit_get_addr, NULL);
*p_fc = fc;
}
@@ -640,14 +637,14 @@ static int scmi_dev_domain_id(struct device *dev)
return clkspec.args[0];
}
-static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
+static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
struct device *dev)
{
int idx, ret, domain;
unsigned long freq;
struct scmi_opp *opp;
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
domain = scmi_dev_domain_id(dev);
if (domain < 0)
@@ -672,11 +669,12 @@ static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
return 0;
}
-static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
- struct device *dev)
+static int
+scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ struct device *dev)
{
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
int domain = scmi_dev_domain_id(dev);
if (domain < 0)
@@ -687,35 +685,35 @@ static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
-static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
+static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long freq, bool poll)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
- poll);
+ return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
}
-static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
+static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *freq, bool poll)
{
int ret;
u32 level;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
- ret = scmi_perf_level_get(handle, domain, &level, poll);
+ ret = scmi_perf_level_get(ph, domain, &level, poll);
if (!ret)
*freq = level * dom->mult_factor;
return ret;
}
-static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
- unsigned long *freq, unsigned long *power)
+static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ u32 domain, unsigned long *freq,
+ unsigned long *power)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom;
unsigned long opp_freq;
int idx, ret = -EINVAL;
@@ -739,25 +737,25 @@ static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
+static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
struct device *dev)
{
struct perf_dom_info *dom;
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
dom = pi->dom_info + scmi_dev_domain_id(dev);
return dom->fc_info && dom->fc_info->level_set_addr;
}
-static bool scmi_power_scale_mw_get(const struct scmi_handle *handle)
+static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph)
{
- struct scmi_perf_info *pi = handle->perf_priv;
+ struct scmi_perf_info *pi = ph->get_priv(ph);
return pi->power_scale_mw;
}
-static const struct scmi_perf_ops perf_ops = {
+static const struct scmi_perf_proto_ops perf_proto_ops = {
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
.level_set = scmi_perf_level_set,
@@ -772,7 +770,7 @@ static const struct scmi_perf_ops perf_ops = {
.power_scale_mw_get = scmi_power_scale_mw_get,
};
-static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret, cmd_id;
@@ -781,7 +779,7 @@ static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
return -EINVAL;
cmd_id = evt_2_cmd[evt_id];
- ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable);
+ ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -789,7 +787,7 @@ static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
+static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
@@ -837,6 +835,16 @@ static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
return rep;
}
+static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
static const struct scmi_event perf_events[] = {
{
.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
@@ -851,28 +859,36 @@ static const struct scmi_event perf_events[] = {
};
static const struct scmi_event_ops perf_event_ops = {
+ .get_num_sources = scmi_perf_get_num_sources,
.set_notify_enabled = scmi_perf_set_notify_enabled,
.fill_custom_report = scmi_perf_fill_custom_report,
};
-static int scmi_perf_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events perf_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &perf_event_ops,
+ .evts = perf_events,
+ .num_events = ARRAY_SIZE(perf_events),
+};
+
+static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
u32 version;
struct scmi_perf_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Performance Version %d.%d\n",
+ dev_dbg(ph->dev, "Performance Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_perf_attributes_get(handle, pinfo);
+ scmi_perf_attributes_get(ph, pinfo);
- pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
@@ -880,24 +896,24 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct perf_dom_info *dom = pinfo->dom_info + domain;
- scmi_perf_domain_attributes_get(handle, domain, dom);
- scmi_perf_describe_levels_get(handle, domain, dom);
+ scmi_perf_domain_attributes_get(ph, domain, dom);
+ scmi_perf_describe_levels_get(ph, domain, dom);
if (dom->perf_fastchannels)
- scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
+ scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
}
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ,
- &perf_event_ops, perf_events,
- ARRAY_SIZE(perf_events),
- pinfo->num_domains);
-
pinfo->version = version;
- handle->perf_ops = &perf_ops;
- handle->perf_priv = pinfo;
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_PERF, perf)
+static const struct scmi_protocol scmi_perf = {
+ .id = SCMI_PROTOCOL_PERF,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_perf_protocol_init,
+ .ops = &perf_proto_ops,
+ .events = &perf_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 1f37258e9bee..ad2ab080f344 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) Power Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -68,21 +69,21 @@ struct scmi_power_info {
struct power_dom_info *dom_info;
};
-static int scmi_power_attributes_get(const struct scmi_handle *handle,
+static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_power_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_power_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
pi->num_domains = le16_to_cpu(attr->num_domains);
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
@@ -90,28 +91,27 @@ static int scmi_power_attributes_get(const struct scmi_handle *handle,
pi->stats_size = le32_to_cpu(attr->stats_size);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
- struct power_dom_info *dom_info)
+scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct power_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_power_domain_attributes *attr;
- ret = scmi_xfer_get_init(handle, POWER_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_POWER, sizeof(domain),
- sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
@@ -121,19 +121,18 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int
-scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
+static int scmi_power_state_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_power_set_state *st;
- ret = scmi_xfer_get_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
- sizeof(*st), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_SET, sizeof(*st), 0, &t);
if (ret)
return ret;
@@ -142,64 +141,64 @@ scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
st->domain = cpu_to_le32(domain);
st->state = cpu_to_le32(state);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int
-scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
+static int scmi_power_state_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *state)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
- sizeof(u32), sizeof(u32), &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_GET, sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*state = get_unaligned_le32(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_power_num_domains_get(const struct scmi_handle *handle)
+static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph)
{
- struct scmi_power_info *pi = handle->power_priv;
+ struct scmi_power_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
-static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain)
+static char *scmi_power_name_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- struct scmi_power_info *pi = handle->power_priv;
+ struct scmi_power_info *pi = ph->get_priv(ph);
struct power_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
-static const struct scmi_power_ops power_ops = {
+static const struct scmi_power_proto_ops power_proto_ops = {
.num_domains_get = scmi_power_num_domains_get,
.name_get = scmi_power_name_get,
.state_set = scmi_power_state_set,
.state_get = scmi_power_state_get,
};
-static int scmi_power_request_notify(const struct scmi_handle *handle,
+static int scmi_power_request_notify(const struct scmi_protocol_handle *ph,
u32 domain, bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_power_state_notify *notify;
- ret = scmi_xfer_get_init(handle, POWER_STATE_NOTIFY,
- SCMI_PROTOCOL_POWER, sizeof(*notify), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_NOTIFY,
+ sizeof(*notify), 0, &t);
if (ret)
return ret;
@@ -207,18 +206,18 @@ static int scmi_power_request_notify(const struct scmi_handle *handle,
notify->domain = cpu_to_le32(domain);
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_power_request_notify(handle, src_id, enable);
+ ret = scmi_power_request_notify(ph, src_id, enable);
if (ret)
pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -226,10 +225,11 @@ static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_power_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
const struct scmi_power_state_notify_payld *p = payld;
struct scmi_power_state_changed_report *r = report;
@@ -246,6 +246,16 @@ static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
return r;
}
+static int scmi_power_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_power_info *pinfo = ph->get_priv(ph);
+
+ if (!pinfo)
+ return -EINVAL;
+
+ return pinfo->num_domains;
+}
+
static const struct scmi_event power_events[] = {
{
.id = SCMI_EVENT_POWER_STATE_CHANGED,
@@ -256,28 +266,36 @@ static const struct scmi_event power_events[] = {
};
static const struct scmi_event_ops power_event_ops = {
+ .get_num_sources = scmi_power_get_num_sources,
.set_notify_enabled = scmi_power_set_notify_enabled,
.fill_custom_report = scmi_power_fill_custom_report,
};
-static int scmi_power_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events power_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &power_event_ops,
+ .evts = power_events,
+ .num_events = ARRAY_SIZE(power_events),
+};
+
+static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
u32 version;
struct scmi_power_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_POWER, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Power Version %d.%d\n",
+ dev_dbg(ph->dev, "Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_power_attributes_get(handle, pinfo);
+ scmi_power_attributes_get(ph, pinfo);
- pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
@@ -285,20 +303,20 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
- scmi_power_domain_attributes_get(handle, domain, dom);
+ scmi_power_domain_attributes_get(ph, domain, dom);
}
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_POWER, SCMI_PROTO_QUEUE_SZ,
- &power_event_ops, power_events,
- ARRAY_SIZE(power_events),
- pinfo->num_domains);
-
pinfo->version = version;
- handle->power_ops = &power_ops;
- handle->power_priv = pinfo;
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_POWER, power)
+static const struct scmi_protocol scmi_power = {
+ .id = SCMI_PROTOCOL_POWER,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_power_protocol_init,
+ .ops = &power_proto_ops,
+ .events = &power_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(power, scmi_power)
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index a981a22cfe89..9bf2478ec6d1 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) Reset Protocol
*
- * Copyright (C) 2019 ARM Ltd.
+ * Copyright (C) 2019-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -64,46 +65,45 @@ struct scmi_reset_info {
struct reset_dom_info *dom_info;
};
-static int scmi_reset_attributes_get(const struct scmi_handle *handle,
+static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_reset_info *pi)
{
int ret;
struct scmi_xfer *t;
u32 attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(attr), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
attr = get_unaligned_le32(t->rx.buf);
pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static int
-scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
- struct reset_dom_info *dom_info)
+scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct reset_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
- ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_RESET, sizeof(domain),
- sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 attributes = le32_to_cpu(attr->attributes);
@@ -115,47 +115,49 @@ scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
+static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
{
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
-static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
+static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
-static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
+static int scmi_reset_latency_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->latency_us;
}
-static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
u32 flags, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
- struct scmi_reset_info *pi = handle->reset_priv;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *rdom = pi->dom_info + domain;
if (rdom->async_reset)
flags |= ASYNCHRONOUS_RESET;
- ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
- sizeof(*dom), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
if (ret)
return ret;
@@ -165,34 +167,35 @@ static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
dom->reset_state = cpu_to_le32(state);
if (rdom->async_reset)
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
else
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
+static int scmi_reset_domain_reset(const struct scmi_protocol_handle *ph,
+ u32 domain)
{
- return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
+ return scmi_domain_reset(ph, domain, AUTONOMOUS_RESET,
ARCH_COLD_RESET);
}
static int
-scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
+scmi_reset_domain_assert(const struct scmi_protocol_handle *ph, u32 domain)
{
- return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
+ return scmi_domain_reset(ph, domain, EXPLICIT_RESET_ASSERT,
ARCH_COLD_RESET);
}
static int
-scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
+scmi_reset_domain_deassert(const struct scmi_protocol_handle *ph, u32 domain)
{
- return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
+ return scmi_domain_reset(ph, domain, 0, ARCH_COLD_RESET);
}
-static const struct scmi_reset_ops reset_ops = {
+static const struct scmi_reset_proto_ops reset_proto_ops = {
.num_domains_get = scmi_reset_num_domains_get,
.name_get = scmi_reset_name_get,
.latency_get = scmi_reset_latency_get,
@@ -201,16 +204,15 @@ static const struct scmi_reset_ops reset_ops = {
.deassert = scmi_reset_domain_deassert,
};
-static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
- bool enable)
+static int scmi_reset_notify(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable)
{
int ret;
u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_reset_notify *cfg;
- ret = scmi_xfer_get_init(handle, RESET_NOTIFY,
- SCMI_PROTOCOL_RESET, sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, RESET_NOTIFY, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -218,18 +220,18 @@ static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
cfg->id = cpu_to_le32(domain_id);
cfg->event_control = cpu_to_le32(evt_cntl);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_reset_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_reset_notify(handle, src_id, enable);
+ ret = scmi_reset_notify(ph, src_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -237,10 +239,11 @@ static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_reset_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
const struct scmi_reset_issued_notify_payld *p = payld;
struct scmi_reset_issued_report *r = report;
@@ -257,6 +260,16 @@ static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
return r;
}
+static int scmi_reset_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_reset_info *pinfo = ph->get_priv(ph);
+
+ if (!pinfo)
+ return -EINVAL;
+
+ return pinfo->num_domains;
+}
+
static const struct scmi_event reset_events[] = {
{
.id = SCMI_EVENT_RESET_ISSUED,
@@ -266,28 +279,36 @@ static const struct scmi_event reset_events[] = {
};
static const struct scmi_event_ops reset_event_ops = {
+ .get_num_sources = scmi_reset_get_num_sources,
.set_notify_enabled = scmi_reset_set_notify_enabled,
.fill_custom_report = scmi_reset_fill_custom_report,
};
-static int scmi_reset_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events reset_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &reset_event_ops,
+ .evts = reset_events,
+ .num_events = ARRAY_SIZE(reset_events),
+};
+
+static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
u32 version;
struct scmi_reset_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Reset Version %d.%d\n",
+ dev_dbg(ph->dev, "Reset Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_reset_attributes_get(handle, pinfo);
+ scmi_reset_attributes_get(ph, pinfo);
- pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
@@ -295,20 +316,19 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct reset_dom_info *dom = pinfo->dom_info + domain;
- scmi_reset_domain_attributes_get(handle, domain, dom);
+ scmi_reset_domain_attributes_get(ph, domain, dom);
}
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_RESET, SCMI_PROTO_QUEUE_SZ,
- &reset_event_ops, reset_events,
- ARRAY_SIZE(reset_events),
- pinfo->num_domains);
-
pinfo->version = version;
- handle->reset_ops = &reset_ops;
- handle->reset_priv = pinfo;
-
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_RESET, reset)
+static const struct scmi_protocol scmi_reset = {
+ .id = SCMI_PROTOCOL_RESET,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_reset_protocol_init,
+ .ops = &reset_proto_ops,
+ .events = &reset_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(reset, scmi_reset)
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 9e44479f0284..9d36d5c0622d 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -2,7 +2,7 @@
/*
* SCMI Generic power domain support.
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#include <linux/err.h>
@@ -11,9 +11,11 @@
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
+static const struct scmi_power_proto_ops *power_ops;
+
struct scmi_pm_domain {
struct generic_pm_domain genpd;
- const struct scmi_handle *handle;
+ const struct scmi_protocol_handle *ph;
const char *name;
u32 domain;
};
@@ -25,16 +27,15 @@ static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
int ret;
u32 state, ret_state;
struct scmi_pm_domain *pd = to_scmi_pd(domain);
- const struct scmi_power_ops *ops = pd->handle->power_ops;
if (power_on)
state = SCMI_POWER_STATE_GENERIC_ON;
else
state = SCMI_POWER_STATE_GENERIC_OFF;
- ret = ops->state_set(pd->handle, pd->domain, state);
+ ret = power_ops->state_set(pd->ph, pd->domain, state);
if (!ret)
- ret = ops->state_get(pd->handle, pd->domain, &ret_state);
+ ret = power_ops->state_get(pd->ph, pd->domain, &ret_state);
if (!ret && state != ret_state)
return -EIO;
@@ -60,11 +61,16 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
struct genpd_onecell_data *scmi_pd_data;
struct generic_pm_domain **domains;
const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
- if (!handle || !handle->power_ops)
+ if (!handle)
return -ENODEV;
- num_domains = handle->power_ops->num_domains_get(handle);
+ power_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph);
+ if (IS_ERR(power_ops))
+ return PTR_ERR(power_ops);
+
+ num_domains = power_ops->num_domains_get(ph);
if (num_domains < 0) {
dev_err(dev, "number of domains not found\n");
return num_domains;
@@ -85,14 +91,14 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
- if (handle->power_ops->state_get(handle, i, &state)) {
+ if (power_ops->state_get(ph, i, &state)) {
dev_warn(dev, "failed to get state for domain %d\n", i);
continue;
}
scmi_pd->domain = i;
- scmi_pd->handle = handle;
- scmi_pd->name = handle->power_ops->name_get(handle, i);
+ scmi_pd->ph = ph;
+ scmi_pd->name = power_ops->name_get(ph, i);
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 4541b891b733..2c88aa221559 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -2,12 +2,13 @@
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
- * Copyright (C) 2018-2020 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
#include <linux/bitfield.h>
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -201,21 +202,21 @@ struct sensors_info {
struct scmi_sensor_info *sensors;
};
-static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
+static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
struct sensors_info *si)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_attributes *attr;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
si->num_sensors = le16_to_cpu(attr->num_sensors);
si->max_requests = attr->max_requests;
@@ -224,7 +225,7 @@ static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
si->reg_size = le32_to_cpu(attr->reg_size);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -235,7 +236,7 @@ static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
out->max_range = get_unaligned_le64((void *)&in->max_range_low);
}
-static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
+static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
int ret, cnt;
@@ -245,8 +246,8 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
struct scmi_msg_resp_sensor_list_update_intervals *buf;
struct scmi_msg_sensor_list_update_intervals *msg;
- ret = scmi_xfer_get_init(handle, SENSOR_LIST_UPDATE_INTERVALS,
- SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &ti);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_LIST_UPDATE_INTERVALS,
+ sizeof(*msg), 0, &ti);
if (ret)
return ret;
@@ -259,7 +260,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
msg->id = cpu_to_le32(s->id);
msg->index = cpu_to_le32(desc_index);
- ret = scmi_do_xfer(handle, ti);
+ ret = ph->xops->do_xfer(ph, ti);
if (ret)
break;
@@ -277,7 +278,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
/* segmented intervals are reported in one triplet */
if (s->intervals.segmented &&
(num_remaining || num_returned != 3)) {
- dev_err(handle->dev,
+ dev_err(ph->dev,
"Sensor ID:%d advertises an invalid segmented interval (%d)\n",
s->id, s->intervals.count);
s->intervals.segmented = false;
@@ -288,7 +289,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
/* Direct allocation when exceeding pre-allocated */
if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
s->intervals.desc =
- devm_kcalloc(handle->dev,
+ devm_kcalloc(ph->dev,
s->intervals.count,
sizeof(*s->intervals.desc),
GFP_KERNEL);
@@ -300,7 +301,7 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
}
}
} else if (desc_index + num_returned > s->intervals.count) {
- dev_err(handle->dev,
+ dev_err(ph->dev,
"No. of update intervals can't exceed %d\n",
s->intervals.count);
ret = -EINVAL;
@@ -313,18 +314,18 @@ static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, ti);
+ ph->xops->reset_rx_to_maxsz(ph, ti);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
- scmi_xfer_put(handle, ti);
+ ph->xops->xfer_put(ph, ti);
return ret;
}
-static int scmi_sensor_axis_description(const struct scmi_handle *handle,
+static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
int ret, cnt;
@@ -334,13 +335,13 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
struct scmi_msg_resp_sensor_axis_description *buf;
struct scmi_msg_sensor_axis_description_get *msg;
- s->axis = devm_kcalloc(handle->dev, s->num_axis,
+ s->axis = devm_kcalloc(ph->dev, s->num_axis,
sizeof(*s->axis), GFP_KERNEL);
if (!s->axis)
return -ENOMEM;
- ret = scmi_xfer_get_init(handle, SENSOR_AXIS_DESCRIPTION_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &te);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_AXIS_DESCRIPTION_GET,
+ sizeof(*msg), 0, &te);
if (ret)
return ret;
@@ -354,7 +355,7 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
msg->id = cpu_to_le32(s->id);
msg->axis_desc_index = cpu_to_le32(desc_index);
- ret = scmi_do_xfer(handle, te);
+ ret = ph->xops->do_xfer(ph, te);
if (ret)
break;
@@ -363,7 +364,7 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
num_remaining = NUM_AXIS_REMAINING(flags);
if (desc_index + num_returned > s->num_axis) {
- dev_err(handle->dev, "No. of axis can't exceed %d\n",
+ dev_err(ph->dev, "No. of axis can't exceed %d\n",
s->num_axis);
break;
}
@@ -405,18 +406,18 @@ static int scmi_sensor_axis_description(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, te);
+ ph->xops->reset_rx_to_maxsz(ph, te);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
- scmi_xfer_put(handle, te);
+ ph->xops->xfer_put(ph, te);
return ret;
}
-static int scmi_sensor_description_get(const struct scmi_handle *handle,
+static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
struct sensors_info *si)
{
int ret, cnt;
@@ -425,8 +426,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_description *buf;
- ret = scmi_xfer_get_init(handle, SENSOR_DESCRIPTION_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_DESCRIPTION_GET,
+ sizeof(__le32), 0, &t);
if (ret)
return ret;
@@ -437,7 +438,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
/* Set the number of sensors to be skipped/already read */
put_unaligned_le32(desc_index, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+
+ ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
@@ -445,7 +447,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
num_remaining = le16_to_cpu(buf->num_remaining);
if (desc_index + num_returned > si->num_sensors) {
- dev_err(handle->dev, "No. of sensors can't exceed %d",
+ dev_err(ph->dev, "No. of sensors can't exceed %d",
si->num_sensors);
break;
}
@@ -500,8 +502,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
* Since the command is optional, on error carry
* on without any update interval.
*/
- if (scmi_sensor_update_intervals(handle, s))
- dev_dbg(handle->dev,
+ if (scmi_sensor_update_intervals(ph, s))
+ dev_dbg(ph->dev,
"Update Intervals not available for sensor ID:%d\n",
s->id);
}
@@ -535,7 +537,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
}
if (s->num_axis > 0) {
- ret = scmi_sensor_axis_description(handle, s);
+ ret = scmi_sensor_axis_description(ph, s);
if (ret)
goto out;
}
@@ -545,7 +547,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, t);
+ ph->xops->reset_rx_to_maxsz(ph, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -553,12 +555,12 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
} while (num_returned && num_remaining);
out:
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static inline int
-scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
+scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id,
u8 message_id, bool enable)
{
int ret;
@@ -566,8 +568,7 @@ scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
struct scmi_xfer *t;
struct scmi_msg_sensor_request_notify *cfg;
- ret = scmi_xfer_get_init(handle, message_id,
- SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -575,40 +576,40 @@ scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
cfg->id = cpu_to_le32(sensor_id);
cfg->event_control = cpu_to_le32(evt_cntl);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+static int scmi_sensor_trip_point_notify(const struct scmi_protocol_handle *ph,
u32 sensor_id, bool enable)
{
- return scmi_sensor_request_notify(handle, sensor_id,
+ return scmi_sensor_request_notify(ph, sensor_id,
SENSOR_TRIP_POINT_NOTIFY,
enable);
}
static int
-scmi_sensor_continuous_update_notify(const struct scmi_handle *handle,
+scmi_sensor_continuous_update_notify(const struct scmi_protocol_handle *ph,
u32 sensor_id, bool enable)
{
- return scmi_sensor_request_notify(handle, sensor_id,
+ return scmi_sensor_request_notify(ph, sensor_id,
SENSOR_CONTINUOUS_UPDATE_NOTIFY,
enable);
}
static int
-scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
- u8 trip_id, u64 trip_value)
+scmi_sensor_trip_point_config(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 trip_id, u64 trip_value)
{
int ret;
u32 evt_cntl = SENSOR_TP_BOTH;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
- ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
- SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_TRIP_POINT_CONFIG,
+ sizeof(*trip), 0, &t);
if (ret)
return ret;
@@ -618,47 +619,46 @@ scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
trip->value_high = cpu_to_le32(trip_value >> 32);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_sensor_config_get(const struct scmi_handle *handle,
+static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
u32 sensor_id, u32 *sensor_config)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(__le32),
- sizeof(__le32), &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
+ sizeof(__le32), sizeof(__le32), &t);
if (ret)
return ret;
put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
*sensor_config = get_unaligned_le64(t->rx.buf);
s->sensor_config = *sensor_config;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_sensor_config_set(const struct scmi_handle *handle,
+static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
u32 sensor_id, u32 sensor_config)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_config_set *msg;
- ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
- SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
+ sizeof(*msg), 0, &t);
if (ret)
return ret;
@@ -666,21 +666,21 @@ static int scmi_sensor_config_set(const struct scmi_handle *handle,
msg->id = cpu_to_le32(sensor_id);
msg->sensor_config = cpu_to_le32(sensor_config);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
s->sensor_config = sensor_config;
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
/**
* scmi_sensor_reading_get - Read scalar sensor value
- * @handle: Platform handle
+ * @ph: Protocol handle
* @sensor_id: Sensor ID
* @value: The 64bit value sensor reading
*
@@ -693,17 +693,17 @@ static int scmi_sensor_config_set(const struct scmi_handle *handle,
*
* Return: 0 on Success
*/
-static int scmi_sensor_reading_get(const struct scmi_handle *handle,
+static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
u32 sensor_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
- ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
+ sizeof(*sensor), 0, &t);
if (ret)
return ret;
@@ -711,7 +711,7 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
sensor->id = cpu_to_le32(sensor_id);
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
struct scmi_resp_sensor_reading_complete *resp;
@@ -723,12 +723,12 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
}
} else {
sensor->flags = cpu_to_le32(0);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -742,7 +742,7 @@ scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
/**
* scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values
- * @handle: Platform handle
+ * @ph: Protocol handle
* @sensor_id: Sensor ID
* @count: The length of the provided @readings array
* @readings: An array of elements each representing a timestamped per-axis
@@ -755,22 +755,22 @@ scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
* Return: 0 on Success
*/
static int
-scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
+scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 count,
struct scmi_sensor_reading *readings)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
if (!count || !readings ||
(!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
return -EINVAL;
- ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
+ sizeof(*sensor), 0, &t);
if (ret)
return ret;
@@ -778,7 +778,7 @@ scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
sensor->id = cpu_to_le32(sensor_id);
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
- ret = scmi_do_xfer_with_response(handle, t);
+ ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
int i;
struct scmi_resp_sensor_reading_complete_v3 *resp;
@@ -794,7 +794,7 @@ scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
}
} else {
sensor->flags = cpu_to_le32(0);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret) {
int i;
struct scmi_sensor_reading_resp *resp_readings;
@@ -806,26 +806,26 @@ scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
}
}
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
static const struct scmi_sensor_info *
-scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
+scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
{
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
return si->sensors + sensor_id;
}
-static int scmi_sensor_count_get(const struct scmi_handle *handle)
+static int scmi_sensor_count_get(const struct scmi_protocol_handle *ph)
{
- struct sensors_info *si = handle->sensor_priv;
+ struct sensors_info *si = ph->get_priv(ph);
return si->num_sensors;
}
-static const struct scmi_sensor_ops sensor_ops = {
+static const struct scmi_sensor_proto_ops sensor_proto_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
.trip_point_config = scmi_sensor_trip_point_config,
@@ -835,18 +835,17 @@ static const struct scmi_sensor_ops sensor_ops = {
.config_set = scmi_sensor_config_set,
};
-static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
switch (evt_id) {
case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
- ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ ret = scmi_sensor_trip_point_notify(ph, src_id, enable);
break;
case SCMI_EVENT_SENSOR_UPDATE:
- ret = scmi_sensor_continuous_update_notify(handle, src_id,
- enable);
+ ret = scmi_sensor_continuous_update_notify(ph, src_id, enable);
break;
default:
ret = -EINVAL;
@@ -860,10 +859,11 @@ static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
return ret;
}
-static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_sensor_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
void *rep = NULL;
@@ -890,7 +890,7 @@ static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
struct scmi_sensor_info *s;
const struct scmi_sensor_update_notify_payld *p = payld;
struct scmi_sensor_update_report *r = report;
- struct sensors_info *sinfo = handle->sensor_priv;
+ struct sensors_info *sinfo = ph->get_priv(ph);
/* payld_sz is variable for this event */
r->sensor_id = le32_to_cpu(p->sensor_id);
@@ -920,6 +920,13 @@ static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
return rep;
}
+static int scmi_sensor_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct sensors_info *si = ph->get_priv(ph);
+
+ return si->num_sensors;
+}
+
static const struct scmi_event sensor_events[] = {
{
.id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
@@ -939,48 +946,55 @@ static const struct scmi_event sensor_events[] = {
};
static const struct scmi_event_ops sensor_event_ops = {
+ .get_num_sources = scmi_sensor_get_num_sources,
.set_notify_enabled = scmi_sensor_set_notify_enabled,
.fill_custom_report = scmi_sensor_fill_custom_report,
};
-static int scmi_sensors_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events sensor_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &sensor_event_ops,
+ .evts = sensor_events,
+ .num_events = ARRAY_SIZE(sensor_events),
+};
+
+static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
int ret;
struct sensors_info *sinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "Sensor Version %d.%d\n",
+ dev_dbg(ph->dev, "Sensor Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
+ sinfo = devm_kzalloc(ph->dev, sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
sinfo->version = version;
- ret = scmi_sensor_attributes_get(handle, sinfo);
+ ret = scmi_sensor_attributes_get(ph, sinfo);
if (ret)
return ret;
- sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
+ sinfo->sensors = devm_kcalloc(ph->dev, sinfo->num_sensors,
sizeof(*sinfo->sensors), GFP_KERNEL);
if (!sinfo->sensors)
return -ENOMEM;
- ret = scmi_sensor_description_get(handle, sinfo);
+ ret = scmi_sensor_description_get(ph, sinfo);
if (ret)
return ret;
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
- &sensor_event_ops, sensor_events,
- ARRAY_SIZE(sensor_events),
- sinfo->num_sensors);
-
- handle->sensor_priv = sinfo;
- handle->sensor_ops = &sensor_ops;
-
- return 0;
+ return ph->set_priv(ph, sinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SENSOR, sensors)
+static const struct scmi_protocol scmi_sensors = {
+ .id = SCMI_PROTOCOL_SENSOR,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_sensors_protocol_init,
+ .ops = &sensor_proto_ops,
+ .events = &sensor_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(sensors, scmi_sensors)
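The hunks above apply one mechanical conversion throughout sensors.c: every command is now built, sent and released through the per-protocol handle's transport ops (ph->xops->xfer_get_init/do_xfer/xfer_put) instead of the old scmi_xfer_* helpers, and protocol state moves behind ph->get_priv()/ph->set_priv(). A condensed sketch of that flow follows; the command ID, payload struct and function name are hypothetical, only the ops calls are taken from the diff.

#include <linux/scmi_protocol.h>
#include <asm/unaligned.h>

#include "common.h"

/* Hypothetical command and payload, for illustration only. */
#define DUMMY_ATTR_GET		0x3

struct scmi_msg_dummy_attr_get {
	__le32 id;
};

static int scmi_dummy_attr_get(const struct scmi_protocol_handle *ph,
			       u32 id, u32 *attr)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_dummy_attr_get *msg;

	/* Allocate and initialise a transfer via the protocol handle. */
	ret = ph->xops->xfer_get_init(ph, DUMMY_ATTR_GET,
				      sizeof(*msg), sizeof(__le32), &t);
	if (ret)
		return ret;

	msg = t->tx.buf;
	msg->id = cpu_to_le32(id);

	/* Send it over the transport bound to this protocol instance. */
	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*attr = get_unaligned_le32(t->rx.buf);

	/* Always hand the transfer back, success or not. */
	ph->xops->xfer_put(ph, t);

	return ret;
}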
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 283e12d5f24b..e5175ef73b40 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -2,11 +2,12 @@
/*
* System Control and Management Interface (SCMI) System Power Protocol
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -32,43 +33,44 @@ struct scmi_system_info {
u32 version;
};
-static int scmi_system_request_notify(const struct scmi_handle *handle,
+static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_system_power_state_notify *notify;
- ret = scmi_xfer_get_init(handle, SYSTEM_POWER_STATE_NOTIFY,
- SCMI_PROTOCOL_SYSTEM, sizeof(*notify), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SYSTEM_POWER_STATE_NOTIFY,
+ sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_system_set_notify_enabled(const struct scmi_handle *handle,
+static int scmi_system_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
- ret = scmi_system_request_notify(handle, enable);
+ ret = scmi_system_request_notify(ph, enable);
if (ret)
pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret);
return ret;
}
-static void *scmi_system_fill_custom_report(const struct scmi_handle *handle,
- u8 evt_id, ktime_t timestamp,
- const void *payld, size_t payld_sz,
- void *report, u32 *src_id)
+static void *
+scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
{
const struct scmi_system_power_state_notifier_payld *p = payld;
struct scmi_system_power_state_notifier_report *r = report;
@@ -101,31 +103,38 @@ static const struct scmi_event_ops system_event_ops = {
.fill_custom_report = scmi_system_fill_custom_report,
};
-static int scmi_system_protocol_init(struct scmi_handle *handle)
+static const struct scmi_protocol_events system_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &system_event_ops,
+ .evts = system_events,
+ .num_events = ARRAY_SIZE(system_events),
+ .num_sources = SCMI_SYSTEM_NUM_SOURCES,
+};
+
+static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
struct scmi_system_info *pinfo;
- scmi_version_get(handle, SCMI_PROTOCOL_SYSTEM, &version);
+ ph->xops->version_get(ph, &version);
- dev_dbg(handle->dev, "System Power Version %d.%d\n",
+ dev_dbg(ph->dev, "System Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
- scmi_register_protocol_events(handle,
- SCMI_PROTOCOL_SYSTEM, SCMI_PROTO_QUEUE_SZ,
- &system_event_ops,
- system_events,
- ARRAY_SIZE(system_events),
- SCMI_SYSTEM_NUM_SOURCES);
-
pinfo->version = version;
- handle->system_priv = pinfo;
-
- return 0;
+ return ph->set_priv(ph, pinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SYSTEM, system)
+static const struct scmi_protocol scmi_system = {
+ .id = SCMI_PROTOCOL_SYSTEM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_system_protocol_init,
+ .ops = NULL,
+ .events = &system_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(system, scmi_system)
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index e794e4349ae6..a5048956a0be 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -2,9 +2,10 @@
/*
* System Control and Management Interface (SCMI) Voltage Protocol
*
- * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020-2021 ARM Ltd.
*/
+#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
@@ -59,23 +60,23 @@ struct voltage_info {
struct scmi_voltage_info *domains;
};
-static int scmi_protocol_attributes_get(const struct scmi_handle *handle,
+static int scmi_protocol_attributes_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret;
struct scmi_xfer *t;
- ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
- SCMI_PROTOCOL_VOLTAGE, 0, sizeof(__le32), &t);
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(__le32), &t);
if (ret)
return ret;
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
vinfo->num_domains =
NUM_VOLTAGE_DOMAINS(get_unaligned_le32(t->rx.buf));
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
@@ -109,24 +110,23 @@ static int scmi_init_voltage_levels(struct device *dev,
return 0;
}
-static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
+static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret, dom;
struct scmi_xfer *td, *tl;
- struct device *dev = handle->dev;
+ struct device *dev = ph->dev;
struct scmi_msg_resp_domain_attributes *resp_dom;
struct scmi_msg_resp_describe_levels *resp_levels;
- ret = scmi_xfer_get_init(handle, VOLTAGE_DOMAIN_ATTRIBUTES,
- SCMI_PROTOCOL_VOLTAGE, sizeof(__le32),
- sizeof(*resp_dom), &td);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
+ sizeof(__le32), sizeof(*resp_dom), &td);
if (ret)
return ret;
resp_dom = td->rx.buf;
- ret = scmi_xfer_get_init(handle, VOLTAGE_DESCRIBE_LEVELS,
- SCMI_PROTOCOL_VOLTAGE, sizeof(__le64), 0, &tl);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS,
+ sizeof(__le64), 0, &tl);
if (ret)
goto outd;
resp_levels = tl->rx.buf;
@@ -139,7 +139,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
/* Retrieve domain attributes at first ... */
put_unaligned_le32(dom, td->tx.buf);
- ret = scmi_do_xfer(handle, td);
+ ret = ph->xops->do_xfer(ph, td);
/* Skip domain on comms error */
if (ret)
continue;
@@ -157,7 +157,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
cmd->domain_id = cpu_to_le32(v->id);
cmd->level_index = desc_index;
- ret = scmi_do_xfer(handle, tl);
+ ret = ph->xops->do_xfer(ph, tl);
if (ret)
break;
@@ -176,7 +176,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
}
if (desc_index + num_returned > v->num_levels) {
- dev_err(handle->dev,
+ dev_err(ph->dev,
"No. of voltage levels can't exceed %d\n",
v->num_levels);
ret = -EINVAL;
@@ -195,7 +195,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
desc_index += num_returned;
- scmi_reset_rx_to_maxsz(handle, tl);
+ ph->xops->reset_rx_to_maxsz(ph, tl);
/* check both to avoid infinite loop due to buggy fw */
} while (num_returned && num_remaining);
@@ -204,55 +204,52 @@ static int scmi_voltage_descriptors_get(const struct scmi_handle *handle,
devm_kfree(dev, v->levels_uv);
}
- scmi_reset_rx_to_maxsz(handle, td);
+ ph->xops->reset_rx_to_maxsz(ph, td);
}
- scmi_xfer_put(handle, tl);
+ ph->xops->xfer_put(ph, tl);
outd:
- scmi_xfer_put(handle, td);
+ ph->xops->xfer_put(ph, td);
return ret;
}
-static int __scmi_voltage_get_u32(const struct scmi_handle *handle,
+static int __scmi_voltage_get_u32(const struct scmi_protocol_handle *ph,
u8 cmd_id, u32 domain_id, u32 *value)
{
int ret;
struct scmi_xfer *t;
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
if (domain_id >= vinfo->num_domains)
return -EINVAL;
- ret = scmi_xfer_get_init(handle, cmd_id,
- SCMI_PROTOCOL_VOLTAGE,
- sizeof(__le32), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(__le32), 0, &t);
if (ret)
return ret;
put_unaligned_le32(domain_id, t->tx.buf);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le32(t->rx.buf);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_voltage_config_set(const struct scmi_handle *handle,
+static int scmi_voltage_config_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 config)
{
int ret;
struct scmi_xfer *t;
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_config_set *cmd;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
- ret = scmi_xfer_get_init(handle, VOLTAGE_CONFIG_SET,
- SCMI_PROTOCOL_VOLTAGE,
- sizeof(*cmd), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_CONFIG_SET,
+ sizeof(*cmd), 0, &t);
if (ret)
return ret;
@@ -260,33 +257,32 @@ static int scmi_voltage_config_set(const struct scmi_handle *handle,
cmd->domain_id = cpu_to_le32(domain_id);
cmd->config = cpu_to_le32(config & GENMASK(3, 0));
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_voltage_config_get(const struct scmi_handle *handle,
+static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *config)
{
- return __scmi_voltage_get_u32(handle, VOLTAGE_CONFIG_GET,
+ return __scmi_voltage_get_u32(ph, VOLTAGE_CONFIG_GET,
domain_id, config);
}
-static int scmi_voltage_level_set(const struct scmi_handle *handle,
+static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 flags, s32 volt_uV)
{
int ret;
struct scmi_xfer *t;
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_level_set *cmd;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
- ret = scmi_xfer_get_init(handle, VOLTAGE_LEVEL_SET,
- SCMI_PROTOCOL_VOLTAGE,
- sizeof(*cmd), 0, &t);
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_LEVEL_SET,
+ sizeof(*cmd), 0, &t);
if (ret)
return ret;
@@ -295,23 +291,23 @@ static int scmi_voltage_level_set(const struct scmi_handle *handle,
cmd->flags = cpu_to_le32(flags);
cmd->voltage_level = cpu_to_le32(volt_uV);
- ret = scmi_do_xfer(handle, t);
+ ret = ph->xops->do_xfer(ph, t);
- scmi_xfer_put(handle, t);
+ ph->xops->xfer_put(ph, t);
return ret;
}
-static int scmi_voltage_level_get(const struct scmi_handle *handle,
+static int scmi_voltage_level_get(const struct scmi_protocol_handle *ph,
u32 domain_id, s32 *volt_uV)
{
- return __scmi_voltage_get_u32(handle, VOLTAGE_LEVEL_GET,
+ return __scmi_voltage_get_u32(ph, VOLTAGE_LEVEL_GET,
domain_id, (u32 *)volt_uV);
}
static const struct scmi_voltage_info * __must_check
-scmi_voltage_info_get(const struct scmi_handle *handle, u32 domain_id)
+scmi_voltage_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
{
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
if (domain_id >= vinfo->num_domains ||
!vinfo->domains[domain_id].num_levels)
@@ -320,14 +316,14 @@ scmi_voltage_info_get(const struct scmi_handle *handle, u32 domain_id)
return vinfo->domains + domain_id;
}
-static int scmi_voltage_domains_num_get(const struct scmi_handle *handle)
+static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
{
- struct voltage_info *vinfo = handle->voltage_priv;
+ struct voltage_info *vinfo = ph->get_priv(ph);
return vinfo->num_domains;
}
-static struct scmi_voltage_ops voltage_ops = {
+static struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
@@ -336,45 +332,49 @@ static struct scmi_voltage_ops voltage_ops = {
.level_get = scmi_voltage_level_get,
};
-static int scmi_voltage_protocol_init(struct scmi_handle *handle)
+static int scmi_voltage_protocol_init(const struct scmi_protocol_handle *ph)
{
int ret;
u32 version;
struct voltage_info *vinfo;
- ret = scmi_version_get(handle, SCMI_PROTOCOL_VOLTAGE, &version);
+ ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
- dev_dbg(handle->dev, "Voltage Version %d.%d\n",
+ dev_dbg(ph->dev, "Voltage Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
- vinfo = devm_kzalloc(handle->dev, sizeof(*vinfo), GFP_KERNEL);
+ vinfo = devm_kzalloc(ph->dev, sizeof(*vinfo), GFP_KERNEL);
if (!vinfo)
return -ENOMEM;
vinfo->version = version;
- ret = scmi_protocol_attributes_get(handle, vinfo);
+ ret = scmi_protocol_attributes_get(ph, vinfo);
if (ret)
return ret;
if (vinfo->num_domains) {
- vinfo->domains = devm_kcalloc(handle->dev, vinfo->num_domains,
+ vinfo->domains = devm_kcalloc(ph->dev, vinfo->num_domains,
sizeof(*vinfo->domains),
GFP_KERNEL);
if (!vinfo->domains)
return -ENOMEM;
- ret = scmi_voltage_descriptors_get(handle, vinfo);
+ ret = scmi_voltage_descriptors_get(ph, vinfo);
if (ret)
return ret;
} else {
- dev_warn(handle->dev, "No Voltage domains found.\n");
+ dev_warn(ph->dev, "No Voltage domains found.\n");
}
- handle->voltage_ops = &voltage_ops;
- handle->voltage_priv = vinfo;
-
- return 0;
+ return ph->set_priv(ph, vinfo);
}
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_VOLTAGE, voltage)
+static const struct scmi_protocol scmi_voltage = {
+ .id = SCMI_PROTOCOL_VOLTAGE,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_voltage_protocol_init,
+ .ops = &voltage_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(voltage, scmi_voltage)
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 08533ee67626..ff6569c4a53b 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -29,6 +29,10 @@
* The framework needs some proper extension to support multi power
* domain cases.
*
+ * Update: Genpd assigns the ->of_node for the virtual device before it
+ * invokes ->attach_dev() callback, hence parsing for device resources via
+ * DT should work fine.
+ *
* 2. It also breaks most of current drivers as the driver probe sequence
* behavior changed if removing ->power_on|off() callback and use
* ->start() and ->stop() instead. genpd_dev_pm_attach will only power
@@ -39,8 +43,11 @@
* domain enabled will trigger a HW access error. That means we need fix
* most drivers probe sequence with proper runtime pm.
*
- * In summary, we need fix above two issue before being able to switch to
- * the "single global power domain" way.
+ * Update: Runtime PM support isn't necessary. Instead, this can easily be
+ * fixed in drivers by adding a call to dev_pm_domain_start() during probe.
+ *
+ * In summary, the second part needs to be addressed via minor updates to the
+ * relevant drivers, before the "single global power domain" model can be used.
*
*/
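As a concrete illustration of the remedy the updated comment points at (a sketch only, not part of this patch): a consumer driver whose device sits in one of these SCU power domains can make sure the domain is powered before its first register access by calling dev_pm_domain_start() from probe, rather than growing full runtime PM support. Driver and device names below are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Power up the attached PM domain before touching registers. */
	dev_pm_domain_start(&pdev->dev);

	/* ... hardware initialisation that accesses the device ... */

	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe = foo_probe,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");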
@@ -86,6 +93,8 @@ struct imx_sc_pd_soc {
u8 num_ranges;
};
+static int imx_con_rsrc;
+
static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* LSIO SS */
{ "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
@@ -134,7 +143,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "can", IMX_SC_R_CAN_0, 3, true, 0 },
{ "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
{ "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
- { "adc", IMX_SC_R_ADC_0, 1, true, 0 },
+ { "adc", IMX_SC_R_ADC_0, 2, true, 0 },
{ "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
{ "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
{ "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
@@ -207,6 +216,23 @@ to_imx_sc_pd(struct generic_pm_domain *genpd)
return container_of(genpd, struct imx_sc_pm_domain, pd);
}
+static void imx_sc_pd_get_console_rsrc(void)
+{
+ struct of_phandle_args specs;
+ int ret;
+
+ if (!of_stdout)
+ return;
+
+ ret = of_parse_phandle_with_args(of_stdout, "power-domains",
+ "#power-domain-cells",
+ 0, &specs);
+ if (ret)
+ return;
+
+ imx_con_rsrc = specs.args[0];
+}
+
static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on)
{
struct imx_sc_msg_req_set_resource_power_mode msg;
@@ -267,6 +293,7 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
const struct imx_sc_pd_range *pd_ranges)
{
struct imx_sc_pm_domain *sc_pd;
+ bool is_off = true;
int ret;
if (!imx_sc_rm_is_resource_owned(pm_ipc_handle, pd_ranges->rsrc + idx))
@@ -288,6 +315,10 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
"%s", pd_ranges->name);
sc_pd->pd.name = sc_pd->name;
+ if (imx_con_rsrc == sc_pd->rsrc) {
+ sc_pd->pd.flags = GENPD_FLAG_RPM_ALWAYS_ON;
+ is_off = false;
+ }
if (sc_pd->rsrc >= IMX_SC_R_LAST) {
dev_warn(dev, "invalid pd %s rsrc id %d found",
@@ -297,7 +328,7 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
return NULL;
}
- ret = pm_genpd_init(&sc_pd->pd, NULL, true);
+ ret = pm_genpd_init(&sc_pd->pd, NULL, is_off);
if (ret) {
dev_warn(dev, "failed to init pd %s rsrc id %d",
sc_pd->name, sc_pd->rsrc);
@@ -363,6 +394,8 @@ static int imx_sc_pd_probe(struct platform_device *pdev)
if (!pd_soc)
return -ENODEV;
+ imx_sc_pd_get_console_rsrc();
+
return imx_scu_init_pm_domains(&pdev->dev, pd_soc);
}
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index eba6b60bfb61..1829ba220576 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -118,7 +118,7 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc,
}
/**
- * qcom_scm_call() - Sends a command to the SCM and waits for the command to
+ * scm_legacy_call() - Sends a command to the SCM and waits for the command to
* finish processing.
*
* A note on cache maintenance:
@@ -209,7 +209,7 @@ out:
(n & 0xf))
/**
- * qcom_scm_call_atomic() - Send an atomic SCM command with up to 5 arguments
+ * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
* and 3 return values
* @desc: SCM call descriptor containing arguments
* @res: SCM call return values
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
index 497c13ba98d6..d111833364ba 100644
--- a/drivers/firmware/qcom_scm-smc.c
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
} while (res->a0 == QCOM_SCM_V2_EBUSY);
}
-int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res, bool atomic)
+
+int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
int i;
@@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
size_t alloc_len;
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
- u32 qcom_smccc_convention =
- (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
- ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
struct arm_smccc_res smc_res;
struct arm_smccc_args smc = {0};
@@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
}
return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
+
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index f57779fc7ee9..ee9cb545e73b 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
clk_disable_unprepare(__scm->bus_clk);
}
-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
- u32 cmd_id);
+enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
+static DEFINE_SPINLOCK(scm_query_lock);
-enum qcom_scm_convention qcom_scm_convention;
-static bool has_queried __read_mostly;
-static DEFINE_SPINLOCK(query_lock);
-
-static void __query_convention(void)
+static enum qcom_scm_convention __get_convention(void)
{
unsigned long flags;
struct qcom_scm_desc desc = {
@@ -133,36 +129,50 @@ static void __query_convention(void)
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
+ enum qcom_scm_convention probed_convention;
int ret;
+ bool forced = false;
- spin_lock_irqsave(&query_lock, flags);
- if (has_queried)
- goto out;
+ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ return qcom_scm_convention;
- qcom_scm_convention = SMC_CONVENTION_ARM_64;
- // Device isn't required as there is only one argument - no device
- // needed to dma_map_single to secure world
- ret = scm_smc_call(NULL, &desc, &res, true);
+ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+ */
+ probed_convention = SMC_CONVENTION_ARM_64;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
if (!ret && res.result[0] == 1)
- goto out;
+ goto found;
+
+ /*
+ * Some SC7180 firmwares didn't implement the
+ * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
+ * calling conventions on these firmwares. Luckily we don't make any
+ * early calls into the firmware on these SoCs so the device pointer
+ * will be valid here to check if the compatible matches.
+ */
+ if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
+ forced = true;
+ goto found;
+ }
- qcom_scm_convention = SMC_CONVENTION_ARM_32;
- ret = scm_smc_call(NULL, &desc, &res, true);
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
if (!ret && res.result[0] == 1)
- goto out;
-
- qcom_scm_convention = SMC_CONVENTION_LEGACY;
-out:
- has_queried = true;
- spin_unlock_irqrestore(&query_lock, flags);
- pr_info("qcom_scm: convention: %s\n",
- qcom_scm_convention_names[qcom_scm_convention]);
-}
+ goto found;
+
+ probed_convention = SMC_CONVENTION_LEGACY;
+found:
+ spin_lock_irqsave(&scm_query_lock, flags);
+ if (probed_convention != qcom_scm_convention) {
+ qcom_scm_convention = probed_convention;
+ pr_info("qcom_scm: convention: %s%s\n",
+ qcom_scm_convention_names[qcom_scm_convention],
+ forced ? " (forced)" : "");
+ }
+ spin_unlock_irqrestore(&scm_query_lock, flags);
-static inline enum qcom_scm_convention __get_convention(void)
-{
- if (unlikely(!has_queried))
- __query_convention();
return qcom_scm_convention;
}
@@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
}
}
-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
- u32 cmd_id)
+static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id)
{
int ret;
struct qcom_scm_desc desc = {
@@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
ret = qcom_scm_call(dev, &desc, &res);
- return ret ? : res.result[0];
+ return ret ? false : !!res.result[0];
}
/**
@@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
};
struct qcom_scm_res res;
- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PIL_PAS_IS_SUPPORTED);
- if (ret <= 0)
+ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PIL_PAS_IS_SUPPORTED))
return false;
ret = qcom_scm_call(__scm->dev, &desc, &res);
@@ -1060,17 +1069,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
*/
bool qcom_scm_hdcp_available(void)
{
+ bool avail;
int ret = qcom_scm_clk_enable();
if (ret)
return ret;
- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
QCOM_SCM_HDCP_INVOKE);
qcom_scm_clk_disable();
- return ret > 0;
+ return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
@@ -1242,7 +1252,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm = scm;
__scm->dev = &pdev->dev;
- __query_convention();
+ __get_convention();
/*
* If requested enable "download mode", from this point on warmboot
@@ -1291,6 +1301,7 @@ static struct platform_driver qcom_scm_driver = {
.driver = {
.name = "qcom_scm",
.of_match_table = qcom_scm_dt_match,
+ .suppress_bind_attrs = true,
},
.probe = qcom_scm_probe,
.shutdown = qcom_scm_shutdown,
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 95cd1ac30ab0..632fe3142462 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -61,8 +61,11 @@ struct qcom_scm_res {
};
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
-extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
- struct qcom_scm_res *res, bool atomic);
+extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic);
+#define scm_smc_call(dev, desc, res, atomic) \
+ __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
extern int scm_legacy_call_atomic(struct device *dev,
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 30259dc9b805..250e01680742 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -7,6 +7,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/kref.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -27,6 +28,8 @@ struct rpi_firmware {
struct mbox_chan *chan; /* The property channel. */
struct completion c;
u32 enabled;
+
+ struct kref consumers;
};
static DEFINE_MUTEX(transaction_lock);
@@ -225,12 +228,38 @@ static void rpi_register_clk_driver(struct device *dev)
-1, NULL, 0);
}
+static void rpi_firmware_delete(struct kref *kref)
+{
+ struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,
+ consumers);
+
+ mbox_free_channel(fw->chan);
+ kfree(fw);
+}
+
+void rpi_firmware_put(struct rpi_firmware *fw)
+{
+ kref_put(&fw->consumers, rpi_firmware_delete);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_put);
+
+static void devm_rpi_firmware_put(void *data)
+{
+ struct rpi_firmware *fw = data;
+
+ rpi_firmware_put(fw);
+}
+
static int rpi_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rpi_firmware *fw;
- fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ /*
+ * Memory will be freed by rpi_firmware_delete() once all users have
+ * released their firmware handles. Don't use devm_kzalloc() here.
+ */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
@@ -247,6 +276,7 @@ static int rpi_firmware_probe(struct platform_device *pdev)
}
init_completion(&fw->c);
+ kref_init(&fw->consumers);
platform_set_drvdata(pdev, fw);
@@ -275,7 +305,8 @@ static int rpi_firmware_remove(struct platform_device *pdev)
rpi_hwmon = NULL;
platform_device_unregister(rpi_clk);
rpi_clk = NULL;
- mbox_free_channel(fw->chan);
+
+ rpi_firmware_put(fw);
return 0;
}
@@ -284,19 +315,51 @@ static int rpi_firmware_remove(struct platform_device *pdev)
* rpi_firmware_get - Get pointer to rpi_firmware structure.
* @firmware_node: Pointer to the firmware Device Tree node.
*
+ * The reference to rpi_firmware has to be released with rpi_firmware_put().
+ *
* Returns NULL if the firmware device is not ready.
*/
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
struct platform_device *pdev = of_find_device_by_node(firmware_node);
+ struct rpi_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+ if (!fw)
+ return NULL;
+
+ if (!kref_get_unless_zero(&fw->consumers))
+ return NULL;
+
+ return fw;
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ struct rpi_firmware *fw;
+
+ fw = rpi_firmware_get(firmware_node);
+ if (!fw)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+ return NULL;
+
+ return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
+
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
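For consumers, the kref changes above imply the following lifecycle: rpi_firmware_get() now takes a reference that must be dropped with rpi_firmware_put(), while devm_rpi_firmware_get() drops it automatically when the consumer driver unbinds. A minimal sketch of the manual pairing (probe function and "firmware" phandle name assumed for illustration):

#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

static int bar_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	struct rpi_firmware *fw;

	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
	if (!fw_node)
		return -ENOENT;

	/* Takes a reference on the firmware interface (NULL if not ready). */
	fw = rpi_firmware_get(fw_node);
	of_node_put(fw_node);
	if (!fw)
		return -EPROBE_DEFER;

	/* ... talk to the firmware with rpi_firmware_property() ... */

	/* Drop the reference once this consumer is done with it. */
	rpi_firmware_put(fw);

	return 0;
}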
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 7eb9958662dd..83082e2f2e44 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2020 Xilinx, Inc.
+ * Copyright (C) 2014-2021 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
static int zynqmp_firmware_remove(struct platform_device *pdev)
{
struct pm_api_feature_data *feature_data;
+ struct hlist_node *tmp;
int i;
mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();
- hash_for_each(pm_api_features_map, i, feature_data, hentry) {
+ hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
hash_del(&feature_data->hentry);
kfree(feature_data);
}
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index d591dd9b7c60..33e15058d0dc 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -14,13 +14,13 @@ if FPGA
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
- depends on ARCH_SOCFPGA || COMPILE_TEST
+ depends on ARCH_INTEL_SOCFPGA || COMPILE_TEST
help
FPGA manager driver support for Altera SOCFPGA.
config FPGA_MGR_SOCFPGA_A10
tristate "Altera SoCFPGA Arria10"
- depends on ARCH_SOCFPGA || COMPILE_TEST
+ depends on ARCH_INTEL_SOCFPGA || COMPILE_TEST
select REGMAP_MMIO
help
FPGA manager driver support for Altera Arria10 SoCFPGA.
@@ -60,7 +60,7 @@ config FPGA_MGR_ZYNQ_FPGA
config FPGA_MGR_STRATIX10_SOC
tristate "Intel Stratix10 SoC FPGA Manager"
- depends on (ARCH_STRATIX10 && INTEL_STRATIX10_SERVICE)
+ depends on (ARCH_INTEL_SOCFPGA && INTEL_STRATIX10_SERVICE)
help
FPGA manager driver support for the Intel Stratix10 SoC.
@@ -99,7 +99,7 @@ config FPGA_BRIDGE
config SOCFPGA_FPGA_BRIDGE
tristate "Altera SoCFPGA FPGA Bridges"
- depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ depends on ARCH_INTEL_SOCFPGA && FPGA_BRIDGE
help
Say Y to enable drivers for FPGA bridges for Altera SOCFPGA
devices.
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index bb100e0124e6..64a552ecc2ad 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -208,7 +208,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
return -ENOENT;
}
- fw = rpi_firmware_get(fw_node);
+ fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
of_node_put(fw_node);
if (!fw)
return -EPROBE_DEFER;
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 17d064e58938..b1329a58ce40 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -2,7 +2,7 @@
/*
* System Control and Management Interface(SCMI) based hwmon sensor driver
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2021 ARM Ltd.
* Sudeep Holla <sudeep.holla@arm.com>
*/
@@ -13,8 +13,10 @@
#include <linux/sysfs.h>
#include <linux/thermal.h>
+static const struct scmi_sensor_proto_ops *sensor_ops;
+
struct scmi_sensors {
- const struct scmi_handle *handle;
+ const struct scmi_protocol_handle *ph;
const struct scmi_sensor_info **info[hwmon_max];
};
@@ -69,10 +71,9 @@ static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u64 value;
const struct scmi_sensor_info *sensor;
struct scmi_sensors *scmi_sensors = dev_get_drvdata(dev);
- const struct scmi_handle *h = scmi_sensors->handle;
sensor = *(scmi_sensors->info[type] + channel);
- ret = h->sensor_ops->reading_get(h, sensor->id, &value);
+ ret = sensor_ops->reading_get(scmi_sensors->ph, sensor->id, &value);
if (ret)
return ret;
@@ -169,11 +170,16 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
struct hwmon_channel_info *scmi_hwmon_chan;
const struct hwmon_channel_info **ptr_scmi_ci;
const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
- if (!handle || !handle->sensor_ops)
+ if (!handle)
return -ENODEV;
- nr_sensors = handle->sensor_ops->count_get(handle);
+ sensor_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_SENSOR, &ph);
+ if (IS_ERR(sensor_ops))
+ return PTR_ERR(sensor_ops);
+
+ nr_sensors = sensor_ops->count_get(ph);
if (!nr_sensors)
return -EIO;
@@ -181,10 +187,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
if (!scmi_sensors)
return -ENOMEM;
- scmi_sensors->handle = handle;
+ scmi_sensors->ph = ph;
for (i = 0; i < nr_sensors; i++) {
- sensor = handle->sensor_ops->info_get(handle, i);
+ sensor = sensor_ops->info_get(ph, i);
if (!sensor)
return -EINVAL;
@@ -236,7 +242,7 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
}
for (i = nr_sensors - 1; i >= 0 ; i--) {
- sensor = handle->sensor_ops->info_get(handle, i);
+ sensor = sensor_ops->info_get(ph, i);
if (!sensor)
continue;
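Stripped to its essentials, the client-side pattern shown in this hwmon conversion (and in the IIO conversion further down) is: check the handle, request the sensor protocol via devm_protocol_get(), then issue all further calls through the returned ops and protocol handle. A condensed sketch, with a hypothetical probe name and only the calls visible in these hunks:

#include <linux/err.h>
#include <linux/scmi_protocol.h>

static int baz_probe(struct scmi_device *sdev)
{
	const struct scmi_sensor_proto_ops *ops;
	struct scmi_protocol_handle *ph;
	const struct scmi_handle *handle = sdev->handle;

	if (!handle)
		return -ENODEV;

	/* Devres-managed acquisition of the sensor protocol operations. */
	ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_SENSOR, &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/*
	 * From here on, every call takes the protocol handle, not the
	 * global scmi_handle.
	 */
	if (!ops->count_get(ph))
		return -EIO;

	return 0;
}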
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 05ebf7546e3f..3eec59f1fed3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -369,7 +369,7 @@ comment "I2C system bus drivers (mostly embedded / system-on-chip)"
config I2C_ALTERA
tristate "Altera Soft IP I2C"
- depends on ARCH_SOCFPGA || NIOS2 || COMPILE_TEST
+ depends on ARCH_INTEL_SOCFPGA || NIOS2 || COMPILE_TEST
depends on OF
help
If you say yes to this option, support will be included for the
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 63e4cec9de5e..141e8aa6911e 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -22,7 +22,8 @@
#define SCMI_IIO_NUM_OF_AXIS 3
struct scmi_iio_priv {
- struct scmi_handle *handle;
+ const struct scmi_sensor_proto_ops *sensor_ops;
+ struct scmi_protocol_handle *ph;
const struct scmi_sensor_info *sensor_info;
struct iio_dev *indio_dev;
/* adding one additional channel for timestamp */
@@ -82,7 +83,6 @@ static int scmi_iio_sensor_update_cb(struct notifier_block *nb,
static int scmi_iio_buffer_preenable(struct iio_dev *iio_dev)
{
struct scmi_iio_priv *sensor = iio_priv(iio_dev);
- u32 sensor_id = sensor->sensor_info->id;
u32 sensor_config = 0;
int err;
@@ -92,27 +92,12 @@ static int scmi_iio_buffer_preenable(struct iio_dev *iio_dev)
sensor_config |= FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
SCMI_SENS_CFG_SENSOR_ENABLE);
-
- err = sensor->handle->notify_ops->register_event_notifier(sensor->handle,
- SCMI_PROTOCOL_SENSOR, SCMI_EVENT_SENSOR_UPDATE,
- &sensor_id, &sensor->sensor_update_nb);
- if (err) {
- dev_err(&iio_dev->dev,
- "Error in registering sensor update notifier for sensor %s err %d",
- sensor->sensor_info->name, err);
- return err;
- }
-
- err = sensor->handle->sensor_ops->config_set(sensor->handle,
- sensor->sensor_info->id, sensor_config);
- if (err) {
- sensor->handle->notify_ops->unregister_event_notifier(sensor->handle,
- SCMI_PROTOCOL_SENSOR,
- SCMI_EVENT_SENSOR_UPDATE, &sensor_id,
- &sensor->sensor_update_nb);
+ err = sensor->sensor_ops->config_set(sensor->ph,
+ sensor->sensor_info->id,
+ sensor_config);
+ if (err)
dev_err(&iio_dev->dev, "Error in enabling sensor %s err %d",
sensor->sensor_info->name, err);
- }
return err;
}
@@ -120,25 +105,14 @@ static int scmi_iio_buffer_preenable(struct iio_dev *iio_dev)
static int scmi_iio_buffer_postdisable(struct iio_dev *iio_dev)
{
struct scmi_iio_priv *sensor = iio_priv(iio_dev);
- u32 sensor_id = sensor->sensor_info->id;
u32 sensor_config = 0;
int err;
sensor_config |= FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
SCMI_SENS_CFG_SENSOR_DISABLE);
-
- err = sensor->handle->notify_ops->unregister_event_notifier(sensor->handle,
- SCMI_PROTOCOL_SENSOR, SCMI_EVENT_SENSOR_UPDATE,
- &sensor_id, &sensor->sensor_update_nb);
- if (err) {
- dev_err(&iio_dev->dev,
- "Error in unregistering sensor update notifier for sensor %s err %d",
- sensor->sensor_info->name, err);
- return err;
- }
-
- err = sensor->handle->sensor_ops->config_set(sensor->handle, sensor_id,
- sensor_config);
+ err = sensor->sensor_ops->config_set(sensor->ph,
+ sensor->sensor_info->id,
+ sensor_config);
if (err) {
dev_err(&iio_dev->dev,
"Error in disabling sensor %s with err %d",
@@ -161,8 +135,9 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
u32 sensor_config;
char buf[32];
- int err = sensor->handle->sensor_ops->config_get(sensor->handle,
- sensor->sensor_info->id, &sensor_config);
+ int err = sensor->sensor_ops->config_get(sensor->ph,
+ sensor->sensor_info->id,
+ &sensor_config);
if (err) {
dev_err(&iio_dev->dev,
"Error in getting sensor config for sensor %s err %d",
@@ -208,8 +183,9 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
sensor_config |=
FIELD_PREP(SCMI_SENS_CFG_ROUND_MASK, SCMI_SENS_CFG_ROUND_AUTO);
- err = sensor->handle->sensor_ops->config_set(sensor->handle,
- sensor->sensor_info->id, sensor_config);
+ err = sensor->sensor_ops->config_set(sensor->ph,
+ sensor->sensor_info->id,
+ sensor_config);
if (err)
dev_err(&iio_dev->dev,
"Error in setting sensor update interval for sensor %s value %u err %d",
@@ -274,8 +250,9 @@ static int scmi_iio_get_odr_val(struct iio_dev *iio_dev, int *val, int *val2)
u32 sensor_config;
int mult;
- int err = sensor->handle->sensor_ops->config_get(sensor->handle,
- sensor->sensor_info->id, &sensor_config);
+ int err = sensor->sensor_ops->config_get(sensor->ph,
+ sensor->sensor_info->id,
+ &sensor_config);
if (err) {
dev_err(&iio_dev->dev,
"Error in getting sensor config for sensor %s err %d",
@@ -528,15 +505,19 @@ static int scmi_iio_set_sampling_freq_avail(struct iio_dev *iio_dev)
return 0;
}
-static struct iio_dev *scmi_alloc_iiodev(struct device *dev,
- struct scmi_handle *handle,
- const struct scmi_sensor_info *sensor_info)
+static struct iio_dev *
+scmi_alloc_iiodev(struct scmi_device *sdev,
+ const struct scmi_sensor_proto_ops *ops,
+ struct scmi_protocol_handle *ph,
+ const struct scmi_sensor_info *sensor_info)
{
struct iio_chan_spec *iio_channels;
struct scmi_iio_priv *sensor;
enum iio_modifier modifier;
enum iio_chan_type type;
struct iio_dev *iiodev;
+ struct device *dev = &sdev->dev;
+ const struct scmi_handle *handle = sdev->handle;
int i, ret;
iiodev = devm_iio_device_alloc(dev, sizeof(*sensor));
@@ -546,7 +527,8 @@ static struct iio_dev *scmi_alloc_iiodev(struct device *dev,
iiodev->modes = INDIO_DIRECT_MODE;
iiodev->dev.parent = dev;
sensor = iio_priv(iiodev);
- sensor->handle = handle;
+ sensor->sensor_ops = ops;
+ sensor->ph = ph;
sensor->sensor_info = sensor_info;
sensor->sensor_update_nb.notifier_call = scmi_iio_sensor_update_cb;
sensor->indio_dev = iiodev;
@@ -581,6 +563,17 @@ static struct iio_dev *scmi_alloc_iiodev(struct device *dev,
sensor_info->axis[i].id);
}
+ ret = handle->notify_ops->devm_event_notifier_register(sdev,
+ SCMI_PROTOCOL_SENSOR, SCMI_EVENT_SENSOR_UPDATE,
+ &sensor->sensor_info->id,
+ &sensor->sensor_update_nb);
+ if (ret) {
+ dev_err(&iiodev->dev,
+ "Error in registering sensor update notifier for sensor %s err %d",
+ sensor->sensor_info->name, ret);
+ return ERR_PTR(ret);
+ }
+
scmi_iio_set_timestamp_channel(&iio_channels[i], i);
iiodev->channels = iio_channels;
return iiodev;
@@ -590,24 +583,30 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
{
const struct scmi_sensor_info *sensor_info;
struct scmi_handle *handle = sdev->handle;
+ const struct scmi_sensor_proto_ops *sensor_ops;
+ struct scmi_protocol_handle *ph;
struct device *dev = &sdev->dev;
struct iio_dev *scmi_iio_dev;
u16 nr_sensors;
int err = -ENODEV, i;
- if (!handle || !handle->sensor_ops) {
+ if (!handle)
+ return -ENODEV;
+
+ sensor_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_SENSOR, &ph);
+ if (IS_ERR(sensor_ops)) {
dev_err(dev, "SCMI device has no sensor interface\n");
- return -EINVAL;
+ return PTR_ERR(sensor_ops);
}
- nr_sensors = handle->sensor_ops->count_get(handle);
+ nr_sensors = sensor_ops->count_get(ph);
if (!nr_sensors) {
dev_dbg(dev, "0 sensors found via SCMI bus\n");
return -ENODEV;
}
for (i = 0; i < nr_sensors; i++) {
- sensor_info = handle->sensor_ops->info_get(handle, i);
+ sensor_info = sensor_ops->info_get(ph, i);
if (!sensor_info) {
dev_err(dev, "SCMI sensor %d has missing info\n", i);
return -EINVAL;
@@ -622,7 +621,8 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
sensor_info->axis[0].type != RADIANS_SEC)
continue;
- scmi_iio_dev = scmi_alloc_iiodev(dev, handle, sensor_info);
+ scmi_iio_dev = scmi_alloc_iiodev(sdev, sensor_ops, ph,
+ sensor_info);
if (IS_ERR(scmi_iio_dev)) {
dev_err(dev,
"failed to allocate IIO device for sensor %s: %ld\n",
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index ef6aaed217cf..5000f5fd9ec3 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -160,7 +160,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
touchbuf = (u32)ts->fw_regs_phys;
error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
&touchbuf, sizeof(touchbuf));
-
+ rpi_firmware_put(fw);
if (error || touchbuf != 0) {
dev_warn(dev, "Failed to set touchbuf, %d\n", error);
return error;
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index 0309bd5a1800..f8ea592c9cb5 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -192,10 +192,8 @@ static int ccf_probe(struct platform_device *pdev)
}
ccf->regs = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ccf->regs)) {
- dev_err(&pdev->dev, "%s: can't map mem resource\n", __func__);
+ if (IS_ERR(ccf->regs))
return PTR_ERR(ccf->regs);
- }
ccf->dev = &pdev->dev;
ccf->info = match->data;
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index b396253fcf4b..c5fb51f73b34 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -319,6 +319,7 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *smi_node;
struct platform_device *smi_pdev;
+ struct device_link *link;
larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
if (!larb)
@@ -358,6 +359,12 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
if (!platform_get_drvdata(smi_pdev))
return -EPROBE_DEFER;
larb->smi_common_dev = &smi_pdev->dev;
+ link = device_link_add(dev, larb->smi_common_dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "Unable to link smi-common dev\n");
+ return -ENODEV;
+ }
} else {
dev_err(dev, "Failed to get the smi_common device\n");
return -EINVAL;
@@ -370,6 +377,9 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
static int mtk_smi_larb_remove(struct platform_device *pdev)
{
+ struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
+
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &mtk_smi_larb_component_ops);
return 0;
@@ -381,17 +391,9 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
int ret;
- /* Power on smi-common. */
- ret = pm_runtime_resume_and_get(larb->smi_common_dev);
- if (ret < 0) {
- dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
- return ret;
- }
-
ret = mtk_smi_clk_enable(&larb->smi);
if (ret < 0) {
dev_err(dev, "Failed to enable clock(%d).\n", ret);
- pm_runtime_put_sync(larb->smi_common_dev);
return ret;
}
@@ -406,7 +408,6 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
mtk_smi_clk_disable(&larb->smi);
- pm_runtime_put_sync(larb->smi_common_dev);
return 0;
}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index cfa730cfd145..f80c2ea39ca4 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
void gpmc_cs_free(int cs)
{
- struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
- struct resource *res = &gpmc->mem;
+ struct gpmc_cs_data *gpmc;
+ struct resource *res;
spin_lock(&gpmc_mem_lock);
if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
@@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
spin_unlock(&gpmc_mem_lock);
return;
}
+ gpmc = &gpmc_cs[cs];
+ res = &gpmc->mem;
+
gpmc_cs_disable_mem(cs);
if (res->flags)
release_resource(res);
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index 3b5b1045edd9..9c0a28416777 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -63,7 +63,7 @@
/* ECC memory config register specific constants */
#define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
#define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2
-#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0xC
+#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0x3
#define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \
(2 << 21)) /* UpdateRegs operation */
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 8d36e221def1..45eed659b0c6 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -192,10 +192,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
- rpc->size = resource_size(res);
rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rpc->dirmap))
rpc->dirmap = NULL;
+ rpc->size = resource_size(res);
rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 1dabb509dec3..dee503640e12 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
dmc->curr_volt = target_volt;
- clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
+ ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
+ if (ret)
+ return ret;
clk_prepare_enable(dmc->fout_bpll);
clk_prepare_enable(dmc->mout_bpll);
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a21163ccadc4..e58c3e5baea0 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -827,6 +827,15 @@ static int tegra_mc_probe(struct platform_device *pdev)
return err;
}
+ mc->debugfs.root = debugfs_create_dir("mc", NULL);
+
+ if (mc->soc->init) {
+ err = mc->soc->init(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to initialize SoC driver: %d\n",
+ err);
+ }
+
err = tegra_mc_reset_setup(mc);
if (err < 0)
dev_err(&pdev->dev, "failed to register reset controller: %d\n",
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 33e40d600592..1ee34f0da4f7 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -92,12 +92,12 @@ icc_provider_to_tegra_mc(struct icc_provider *provider)
return container_of(provider, struct tegra_mc, provider);
}
-static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
+static inline u32 mc_readl(const struct tegra_mc *mc, unsigned long offset)
{
return readl_relaxed(mc->regs + offset);
}
-static inline void mc_writel(struct tegra_mc *mc, u32 value,
+static inline void mc_writel(const struct tegra_mc *mc, u32 value,
unsigned long offset)
{
writel_relaxed(value, mc->regs + offset);
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index bee8d9f79b04..5699d909abc2 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -905,7 +905,7 @@ static int emc_init(struct tegra_emc *emc)
else
emc->dram_bus_width = 32;
- dev_info(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+ dev_info_once(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
emc->dram_type &= EMC_FBIO_CFG5_DRAM_TYPE_MASK;
emc->dram_type >>= EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
@@ -1204,7 +1204,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
tegra_emc_debug_min_rate_get,
tegra_emc_debug_min_rate_set, "%llu\n");
@@ -1234,7 +1234,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
tegra_emc_debug_max_rate_get,
tegra_emc_debug_max_rate_set, "%llu\n");
@@ -1419,8 +1419,8 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
goto put_hw_table;
}
- dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
- hw_version, clk_get_rate(emc->clk) / 1000000);
+ dev_info_once(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
/* first dummy rate-set initializes voltage state */
err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
@@ -1475,9 +1475,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
} else {
- dev_info(&pdev->dev,
- "no memory timings for RAM code %u found in DT\n",
- ram_code);
+ dev_info_once(&pdev->dev,
+ "no memory timings for RAM code %u found in DT\n",
+ ram_code);
}
err = emc_init(emc);
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index d653a6be8d7f..da8a0da8da79 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -411,12 +411,12 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
NULL);
- dev_info(emc->dev,
- "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
- emc->num_timings,
- tegra_read_ram_code(),
- emc->timings[0].rate / 1000000,
- emc->timings[emc->num_timings - 1].rate / 1000000);
+ dev_info_once(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
return 0;
}
@@ -429,7 +429,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
int err;
if (of_get_child_count(dev->of_node) == 0) {
- dev_info(dev, "device-tree doesn't have memory timings\n");
+ dev_info_once(dev, "device-tree doesn't have memory timings\n");
return NULL;
}
@@ -496,7 +496,7 @@ static int emc_setup_hw(struct tegra_emc *emc)
else
emc->dram_bus_width = 32;
- dev_info(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+ dev_info_once(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
return 0;
}
@@ -931,8 +931,8 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
goto put_hw_table;
}
- dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
- hw_version, clk_get_rate(emc->clk) / 1000000);
+ dev_info_once(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
/* first dummy rate-set initializes voltage state */
err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index 29ecf02805a0..2db68a913b7a 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -3,6 +3,9 @@
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -11,6 +14,79 @@
#include "mc.h"
+#define MC_STAT_CONTROL 0x90
+#define MC_STAT_EMC_CLOCK_LIMIT 0xa0
+#define MC_STAT_EMC_CLOCKS 0xa4
+#define MC_STAT_EMC_CONTROL_0 0xa8
+#define MC_STAT_EMC_CONTROL_1 0xac
+#define MC_STAT_EMC_COUNT_0 0xb8
+#define MC_STAT_EMC_COUNT_1 0xbc
+
+#define MC_STAT_CONTROL_CLIENT_ID GENMASK(13, 8)
+#define MC_STAT_CONTROL_EVENT GENMASK(23, 16)
+#define MC_STAT_CONTROL_PRI_EVENT GENMASK(25, 24)
+#define MC_STAT_CONTROL_FILTER_CLIENT_ENABLE GENMASK(26, 26)
+#define MC_STAT_CONTROL_FILTER_PRI GENMASK(29, 28)
+
+#define MC_STAT_CONTROL_PRI_EVENT_HP 0
+#define MC_STAT_CONTROL_PRI_EVENT_TM 1
+#define MC_STAT_CONTROL_PRI_EVENT_BW 2
+
+#define MC_STAT_CONTROL_FILTER_PRI_DISABLE 0
+#define MC_STAT_CONTROL_FILTER_PRI_NO 1
+#define MC_STAT_CONTROL_FILTER_PRI_YES 2
+
+#define MC_STAT_CONTROL_EVENT_QUALIFIED 0
+#define MC_STAT_CONTROL_EVENT_ANY_READ 1
+#define MC_STAT_CONTROL_EVENT_ANY_WRITE 2
+#define MC_STAT_CONTROL_EVENT_RD_WR_CHANGE 3
+#define MC_STAT_CONTROL_EVENT_SUCCESSIVE 4
+#define MC_STAT_CONTROL_EVENT_ARB_BANK_AA 5
+#define MC_STAT_CONTROL_EVENT_ARB_BANK_BB 6
+#define MC_STAT_CONTROL_EVENT_PAGE_MISS 7
+#define MC_STAT_CONTROL_EVENT_AUTO_PRECHARGE 8
+
+#define EMC_GATHER_RST (0 << 8)
+#define EMC_GATHER_CLEAR (1 << 8)
+#define EMC_GATHER_DISABLE (2 << 8)
+#define EMC_GATHER_ENABLE (3 << 8)
+
+#define MC_STAT_SAMPLE_TIME_USEC 16000
+
+/* we store collected statistics as fixed-point values */
+#define MC_FX_FRAC_SCALE 100
+
+static DEFINE_MUTEX(tegra20_mc_stat_lock);
+
+struct tegra20_mc_stat_gather {
+ unsigned int pri_filter;
+ unsigned int pri_event;
+ unsigned int result;
+ unsigned int client;
+ unsigned int event;
+ bool client_enb;
+};
+
+struct tegra20_mc_stat {
+ struct tegra20_mc_stat_gather gather0;
+ struct tegra20_mc_stat_gather gather1;
+ unsigned int sample_time_usec;
+ const struct tegra_mc *mc;
+};
+
+struct tegra20_mc_client_stat {
+ unsigned int events;
+ unsigned int arb_high_prio;
+ unsigned int arb_timeout;
+ unsigned int arb_bandwidth;
+ unsigned int rd_wr_change;
+ unsigned int successive;
+ unsigned int page_miss;
+ unsigned int auto_precharge;
+ unsigned int arb_bank_aa;
+ unsigned int arb_bank_bb;
+};
+
static const struct tegra_mc_client tegra20_mc_clients[] = {
{
.id = 0x00,
@@ -356,6 +432,261 @@ static const struct tegra_mc_icc_ops tegra20_mc_icc_ops = {
.set = tegra20_mc_icc_set,
};
+static u32 tegra20_mc_stat_gather_control(const struct tegra20_mc_stat_gather *g)
+{
+ u32 control;
+
+ control = FIELD_PREP(MC_STAT_CONTROL_EVENT, g->event);
+ control |= FIELD_PREP(MC_STAT_CONTROL_CLIENT_ID, g->client);
+ control |= FIELD_PREP(MC_STAT_CONTROL_PRI_EVENT, g->pri_event);
+ control |= FIELD_PREP(MC_STAT_CONTROL_FILTER_PRI, g->pri_filter);
+ control |= FIELD_PREP(MC_STAT_CONTROL_FILTER_CLIENT_ENABLE, g->client_enb);
+
+ return control;
+}
+
+static void tegra20_mc_stat_gather(struct tegra20_mc_stat *stat)
+{
+ u32 clocks, count0, count1, control_0, control_1;
+ const struct tegra_mc *mc = stat->mc;
+
+ control_0 = tegra20_mc_stat_gather_control(&stat->gather0);
+ control_1 = tegra20_mc_stat_gather_control(&stat->gather1);
+
+ /*
+ * Reset statistic gathers state, select statistics collection mode
+ * and set clocks counter saturation limit to maximum.
+ */
+ mc_writel(mc, 0x00000000, MC_STAT_CONTROL);
+ mc_writel(mc, control_0, MC_STAT_EMC_CONTROL_0);
+ mc_writel(mc, control_1, MC_STAT_EMC_CONTROL_1);
+ mc_writel(mc, 0xffffffff, MC_STAT_EMC_CLOCK_LIMIT);
+
+ mc_writel(mc, EMC_GATHER_ENABLE, MC_STAT_CONTROL);
+ fsleep(stat->sample_time_usec);
+ mc_writel(mc, EMC_GATHER_DISABLE, MC_STAT_CONTROL);
+
+ count0 = mc_readl(mc, MC_STAT_EMC_COUNT_0);
+ count1 = mc_readl(mc, MC_STAT_EMC_COUNT_1);
+ clocks = mc_readl(mc, MC_STAT_EMC_CLOCKS);
+ clocks = max(clocks / 100 / MC_FX_FRAC_SCALE, 1u);
+
+ stat->gather0.result = DIV_ROUND_UP(count0, clocks);
+ stat->gather1.result = DIV_ROUND_UP(count1, clocks);
+}
+
+static void tegra20_mc_stat_events(const struct tegra_mc *mc,
+ const struct tegra_mc_client *client0,
+ const struct tegra_mc_client *client1,
+ unsigned int pri_filter,
+ unsigned int pri_event,
+ unsigned int event,
+ unsigned int *result0,
+ unsigned int *result1)
+{
+ struct tegra20_mc_stat stat = {};
+
+ stat.gather0.client = client0 ? client0->id : 0;
+ stat.gather0.pri_filter = pri_filter;
+ stat.gather0.client_enb = !!client0;
+ stat.gather0.pri_event = pri_event;
+ stat.gather0.event = event;
+
+ stat.gather1.client = client1 ? client1->id : 0;
+ stat.gather1.pri_filter = pri_filter;
+ stat.gather1.client_enb = !!client1;
+ stat.gather1.pri_event = pri_event;
+ stat.gather1.event = event;
+
+ stat.sample_time_usec = MC_STAT_SAMPLE_TIME_USEC;
+ stat.mc = mc;
+
+ tegra20_mc_stat_gather(&stat);
+
+ *result0 = stat.gather0.result;
+ *result1 = stat.gather1.result;
+}
+
+static void tegra20_mc_collect_stats(const struct tegra_mc *mc,
+ struct tegra20_mc_client_stat *stats)
+{
+ const struct tegra_mc_client *client0, *client1;
+ unsigned int i;
+
+ /* collect memory controller utilization percent for each client */
+ for (i = 0; i < mc->soc->num_clients; i += 2) {
+ client0 = &mc->soc->clients[i];
+ client1 = &mc->soc->clients[i + 1];
+
+ if (i + 1 == mc->soc->num_clients)
+ client1 = NULL;
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[i + 0].events,
+ &stats[i + 1].events);
+ }
+
+ /* collect more info from active clients */
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ unsigned int clienta, clientb = mc->soc->num_clients;
+
+ for (client0 = NULL; i < mc->soc->num_clients; i++) {
+ if (stats[i].events) {
+ client0 = &mc->soc->clients[i];
+ clienta = i++;
+ break;
+ }
+ }
+
+ for (client1 = NULL; i < mc->soc->num_clients; i++) {
+ if (stats[i].events) {
+ client1 = &mc->soc->clients[i];
+ clientb = i;
+ break;
+ }
+ }
+
+ if (!client0 && !client1)
+ break;
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_high_prio,
+ &stats[clientb].arb_high_prio);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_TM,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_timeout,
+ &stats[clientb].arb_timeout);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_BW,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_bandwidth,
+ &stats[clientb].arb_bandwidth);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_RD_WR_CHANGE,
+ &stats[clienta].rd_wr_change,
+ &stats[clientb].rd_wr_change);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_SUCCESSIVE,
+ &stats[clienta].successive,
+ &stats[clientb].successive);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_PAGE_MISS,
+ &stats[clienta].page_miss,
+ &stats[clientb].page_miss);
+ }
+}
+
+static void tegra20_mc_printf_percents(struct seq_file *s,
+ const char *fmt,
+ unsigned int percents_fx)
+{
+ char percents_str[8];
+
+ snprintf(percents_str, ARRAY_SIZE(percents_str), "%3u.%02u%%",
+ percents_fx / MC_FX_FRAC_SCALE, percents_fx % MC_FX_FRAC_SCALE);
+
+ seq_printf(s, fmt, percents_str);
+}
+
+static int tegra20_mc_stats_show(struct seq_file *s, void *unused)
+{
+ const struct tegra_mc *mc = dev_get_drvdata(s->private);
+ struct tegra20_mc_client_stat *stats;
+ unsigned int i;
+
+ stats = kcalloc(mc->soc->num_clients + 1, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ mutex_lock(&tegra20_mc_stat_lock);
+
+ tegra20_mc_collect_stats(mc, stats);
+
+ mutex_unlock(&tegra20_mc_stat_lock);
+
+ seq_puts(s, "Memory client Events Timeout High priority Bandwidth ARB RW change Successive Page miss\n");
+ seq_puts(s, "-----------------------------------------------------------------------------------------------------\n");
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ seq_printf(s, "%-14s ", mc->soc->clients[i].name);
+
+ /* An event is generated when a client performs an R/W request. */
+ tegra20_mc_printf_percents(s, "%-9s", stats[i].events);
+
+ /*
+ * An event is generated based on the timeout (TM) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-10s", stats[i].arb_timeout);
+
+ /*
+ * An event is generated based on the high-priority (HP) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-16s", stats[i].arb_high_prio);
+
+ /*
+ * An event is generated based on the bandwidth (BW) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-16s", stats[i].arb_bandwidth);
+
+ /*
+ * An event is generated when the memory controller switches
+ * from making a read request to making a write request.
+ */
+ tegra20_mc_printf_percents(s, "%-12s", stats[i].rd_wr_change);
+
+ /*
+ * An event is generated when the chosen client wins arbitration
+ * when it was also the winner at the previous request. If a
+ * client makes N requests in a row that are honored, SUCCESSIVE
+ * will be counted (N-1) times. Large values for this event
+ * imply that if we were patient enough, all of those requests
+ * could have been coalesced.
+ */
+ tegra20_mc_printf_percents(s, "%-13s", stats[i].successive);
+
+ /*
+ * An event is generated when the memory controller detects a
+ * page miss for the current request.
+ */
+ tegra20_mc_printf_percents(s, "%-12s\n", stats[i].page_miss);
+ }
+
+ kfree(stats);
+
+ return 0;
+}
+
+static int tegra20_mc_init(struct tegra_mc *mc)
+{
+ debugfs_create_devm_seqfile(mc->dev, "stats", mc->debugfs.root,
+ tegra20_mc_stats_show);
+
+ return 0;
+}
+
const struct tegra_mc_soc tegra20_mc_soc = {
.clients = tegra20_mc_clients,
.num_clients = ARRAY_SIZE(tegra20_mc_clients),
@@ -367,4 +698,5 @@ const struct tegra_mc_soc tegra20_mc_soc = {
.resets = tegra20_mc_resets,
.num_resets = ARRAY_SIZE(tegra20_mc_resets),
.icc_ops = &tegra20_mc_icc_ops,
+ .init = tegra20_mc_init,
};
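
A note on the statistics scaling used above: the EMC clock counter is divided by 100 (to get a percentage) and by MC_FX_FRAC_SCALE (another 100, for two fractional digits), so each stored result is the utilization in hundredths of a percent. A minimal, illustrative sketch of the arithmetic (the sample numbers are hypothetical, not from hardware):

	u32 clocks = 1000000;	/* EMC clocks counted in the sample window */
	u32 count = 5000;	/* qualified events counted for one client */
	u32 div = max(clocks / 100 / MC_FX_FRAC_SCALE, 1u);	/* = 100 */
	u32 result = DIV_ROUND_UP(count, div);			/* = 50 */

	/* tegra20_mc_printf_percents() then renders 50 as "0.50%" */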
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 6985da0ffb35..829f6d673c96 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -998,12 +998,12 @@ static int emc_load_timings_from_dt(struct tegra_emc *emc,
if (err)
return err;
- dev_info(emc->dev,
- "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
- emc->num_timings,
- tegra_read_ram_code(),
- emc->timings[0].rate / 1000000,
- emc->timings[emc->num_timings - 1].rate / 1000000);
+ dev_info_once(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
return 0;
}
@@ -1015,7 +1015,7 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
int err;
if (of_get_child_count(dev->of_node) == 0) {
- dev_info(dev, "device-tree doesn't have memory timings\n");
+ dev_info_once(dev, "device-tree doesn't have memory timings\n");
return NULL;
}
@@ -1503,8 +1503,8 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
goto put_hw_table;
}
- dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
- hw_version, clk_get_rate(emc->clk) / 1000000);
+ dev_info_once(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
/* first dummy rate-set initializes voltage state */
err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b74efa469e90..2ce9edb90901 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -21,7 +21,7 @@ config MFD_CS5535
config MFD_ALTERA_A10SR
bool "Altera Arria10 DevKit System Resource chip"
- depends on ARCH_SOCFPGA && SPI_MASTER=y && OF
+ depends on ARCH_INTEL_SOCFPGA && SPI_MASTER=y && OF
select REGMAP_SPI
select MFD_CORE
help
@@ -32,7 +32,7 @@ config MFD_ALTERA_A10SR
config MFD_ALTERA_SYSMGR
bool "Altera SOCFPGA System Manager"
- depends on (ARCH_SOCFPGA || ARCH_STRATIX10) && OF
+ depends on ARCH_INTEL_SOCFPGA && OF
select MFD_SYSCON
help
Select this to get System Manager support for all Altera branded
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index e675ba12fde2..7737e4d0bb9e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -140,8 +140,8 @@ config DWMAC_ROCKCHIP
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
- default (ARCH_SOCFPGA || ARCH_STRATIX10)
- depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
+ default ARCH_INTEL_SOCFPGA
+ depends on OF && (ARCH_INTEL_SOCFPGA || COMPILE_TEST)
select MFD_SYSCON
help
Support for ethernet controller on Altera SOCFPGA
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 0cab4c2576e2..996ebcba4d38 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -60,7 +60,7 @@
#define COND2 { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 0, 0 }
/* LHCR0 is offset from the end of the H8S/2168-compatible registers */
-#define LHCR0 0x20
+#define LHCR0 0xa0
#define GFX064 0x64
#define B14 0
@@ -2648,14 +2648,19 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
}
if (ip == ASPEED_IP_LPC) {
- struct device_node *node;
+ struct device_node *np;
struct regmap *map;
- node = of_parse_phandle(ctx->dev->of_node,
+ np = of_parse_phandle(ctx->dev->of_node,
"aspeed,external-nodes", 1);
- if (node) {
- map = syscon_node_to_regmap(node->parent);
- of_node_put(node);
+ if (np) {
+ if (!of_device_is_compatible(np->parent, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np->parent, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np->parent, "aspeed,ast2600-lpc-v2"))
+ return ERR_PTR(-ENODEV);
+
+ map = syscon_node_to_regmap(np->parent);
+ of_node_put(np);
if (IS_ERR(map))
return map;
} else
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 9a4f66ae8070..43d9f1884cd3 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -423,6 +423,15 @@ config PWM_PXA
To compile this driver as a module, choose M here: the module
will be called pwm-pxa.
+config PWM_RASPBERRYPI_POE
+ tristate "Raspberry Pi Firwmware PoE Hat PWM support"
+ # Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
+ # happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
+ help
+ Enable the Raspberry Pi firmware-controlled PWM bus used to control
+ the official RPi PoE HAT.
+
config PWM_RCAR
tristate "Renesas R-Car PWM support"
depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 6374d3b1d6f3..211db810c439 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o
obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
+obj-$(CONFIG_PWM_RASPBERRYPI_POE) += pwm-raspberrypi-poe.o
obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
new file mode 100644
index 000000000000..043fc32e8be8
--- /dev/null
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2021 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ * For more information on Raspberry Pi's PoE hat see:
+ * https://www.raspberrypi.org/products/poe-hat/
+ *
+ * Limitations:
+ * - No disable bit, so a disabled PWM is simulated by duty_cycle 0
+ * - Only normal polarity
+ * - Fixed 12.5 kHz period
+ *
+ * The current period is completed when HW is reconfigured.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#include <soc/bcm2835/raspberrypi-firmware.h>
+#include <dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h>
+
+#define RPI_PWM_MAX_DUTY 255
+#define RPI_PWM_PERIOD_NS 80000 /* 12.5 kHz */
+
+#define RPI_PWM_CUR_DUTY_REG 0x0
+
+struct raspberrypi_pwm {
+ struct rpi_firmware *firmware;
+ struct pwm_chip chip;
+ unsigned int duty_cycle;
+};
+
+struct raspberrypi_pwm_prop {
+ __le32 reg;
+ __le32 val;
+ __le32 ret;
+} __packed;
+
+static inline
+struct raspberrypi_pwm *raspberrypi_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct raspberrypi_pwm, chip);
+}
+
+static int raspberrypi_pwm_set_property(struct rpi_firmware *firmware,
+ u32 reg, u32 val)
+{
+ struct raspberrypi_pwm_prop msg = {
+ .reg = cpu_to_le32(reg),
+ .val = cpu_to_le32(val),
+ };
+ int ret;
+
+ ret = rpi_firmware_property(firmware, RPI_FIRMWARE_SET_POE_HAT_VAL,
+ &msg, sizeof(msg));
+ if (ret)
+ return ret;
+ if (msg.ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int raspberrypi_pwm_get_property(struct rpi_firmware *firmware,
+ u32 reg, u32 *val)
+{
+ struct raspberrypi_pwm_prop msg = {
+ .reg = reg
+ };
+ int ret;
+
+ ret = rpi_firmware_property(firmware, RPI_FIRMWARE_GET_POE_HAT_VAL,
+ &msg, sizeof(msg));
+ if (ret)
+ return ret;
+ if (msg.ret)
+ return -EIO;
+
+ *val = le32_to_cpu(msg.val);
+
+ return 0;
+}
+
+static void raspberrypi_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct raspberrypi_pwm *rpipwm = raspberrypi_pwm_from_chip(chip);
+
+ state->period = RPI_PWM_PERIOD_NS;
+ state->duty_cycle = DIV_ROUND_UP(rpipwm->duty_cycle * RPI_PWM_PERIOD_NS,
+ RPI_PWM_MAX_DUTY);
+ state->enabled = !!(rpipwm->duty_cycle);
+ state->polarity = PWM_POLARITY_NORMAL;
+}
+
+static int raspberrypi_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct raspberrypi_pwm *rpipwm = raspberrypi_pwm_from_chip(chip);
+ unsigned int duty_cycle;
+ int ret;
+
+ if (state->period < RPI_PWM_PERIOD_NS ||
+ state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled)
+ duty_cycle = 0;
+ else if (state->duty_cycle < RPI_PWM_PERIOD_NS)
+ duty_cycle = DIV_ROUND_DOWN_ULL(state->duty_cycle * RPI_PWM_MAX_DUTY,
+ RPI_PWM_PERIOD_NS);
+ else
+ duty_cycle = RPI_PWM_MAX_DUTY;
+
+ if (duty_cycle == rpipwm->duty_cycle)
+ return 0;
+
+ ret = raspberrypi_pwm_set_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
+ duty_cycle);
+ if (ret) {
+ dev_err(chip->dev, "Failed to set duty cycle: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ rpipwm->duty_cycle = duty_cycle;
+
+ return 0;
+}
+
+static const struct pwm_ops raspberrypi_pwm_ops = {
+ .get_state = raspberrypi_pwm_get_state,
+ .apply = raspberrypi_pwm_apply,
+ .owner = THIS_MODULE,
+};
+
+static int raspberrypi_pwm_probe(struct platform_device *pdev)
+{
+ struct device_node *firmware_node;
+ struct device *dev = &pdev->dev;
+ struct rpi_firmware *firmware;
+ struct raspberrypi_pwm *rpipwm;
+ int ret;
+
+ firmware_node = of_get_parent(dev->of_node);
+ if (!firmware_node) {
+ dev_err(dev, "Missing firmware node\n");
+ return -ENOENT;
+ }
+
+ firmware = devm_rpi_firmware_get(&pdev->dev, firmware_node);
+ of_node_put(firmware_node);
+ if (!firmware)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Failed to get firmware handle\n");
+
+ rpipwm = devm_kzalloc(&pdev->dev, sizeof(*rpipwm), GFP_KERNEL);
+ if (!rpipwm)
+ return -ENOMEM;
+
+ rpipwm->firmware = firmware;
+ rpipwm->chip.dev = dev;
+ rpipwm->chip.ops = &raspberrypi_pwm_ops;
+ rpipwm->chip.base = -1;
+ rpipwm->chip.npwm = RASPBERRYPI_FIRMWARE_PWM_NUM;
+
+ platform_set_drvdata(pdev, rpipwm);
+
+ ret = raspberrypi_pwm_get_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
+ &rpipwm->duty_cycle);
+ if (ret) {
+ dev_err(dev, "Failed to get duty cycle: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ return pwmchip_add(&rpipwm->chip);
+}
+
+static int raspberrypi_pwm_remove(struct platform_device *pdev)
+{
+ struct raspberrypi_pwm *rpipwm = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&rpipwm->chip);
+}
+
+static const struct of_device_id raspberrypi_pwm_of_match[] = {
+ { .compatible = "raspberrypi,firmware-poe-pwm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, raspberrypi_pwm_of_match);
+
+static struct platform_driver raspberrypi_pwm_driver = {
+ .driver = {
+ .name = "raspberrypi-poe-pwm",
+ .of_match_table = raspberrypi_pwm_of_match,
+ },
+ .probe = raspberrypi_pwm_probe,
+ .remove = raspberrypi_pwm_remove,
+};
+module_platform_driver(raspberrypi_pwm_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
+MODULE_DESCRIPTION("Raspberry Pi Firmware Based PWM Bus Driver");
+MODULE_LICENSE("GPL v2");
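
For reference, a hypothetical consumer sketch (not part of the patch) showing how the fixed 80000 ns period and the 0-255 firmware duty range interact through the standard PWM API; the pwm pointer is assumed to have been obtained with devm_pwm_get():

	struct pwm_state state;
	int ret;

	pwm_init_state(pwm, &state);
	state.period = 80000;		/* ns; anything below RPI_PWM_PERIOD_NS is -EINVAL */
	state.duty_cycle = 40000;	/* ns; 50% maps to firmware duty 127 of 255 */
	state.enabled = true;
	state.polarity = PWM_POLARITY_NORMAL;
	ret = pwm_apply_state(pwm, &state);

Because the hardware has no disable bit, setting state.enabled = false simply results in duty 0 being written to the firmware, matching the limitation noted in the driver header.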
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index 0e8b3caa8146..a38343f8c843 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -2,7 +2,7 @@
//
// System Control and Management Interface (SCMI) based regulator driver
//
-// Copyright (C) 2020 ARM Ltd.
+// Copyright (C) 2020-2021 ARM Ltd.
//
// Implements a regulator driver on top of the SCMI Voltage Protocol.
//
@@ -33,9 +33,12 @@
#include <linux/slab.h>
#include <linux/types.h>
+static const struct scmi_voltage_proto_ops *voltage_ops;
+
struct scmi_regulator {
u32 id;
struct scmi_device *sdev;
+ struct scmi_protocol_handle *ph;
struct regulator_dev *rdev;
struct device_node *of_node;
struct regulator_desc desc;
@@ -50,19 +53,17 @@ struct scmi_regulator_info {
static int scmi_reg_enable(struct regulator_dev *rdev)
{
struct scmi_regulator *sreg = rdev_get_drvdata(rdev);
- const struct scmi_handle *handle = sreg->sdev->handle;
- return handle->voltage_ops->config_set(handle, sreg->id,
- SCMI_VOLTAGE_ARCH_STATE_ON);
+ return voltage_ops->config_set(sreg->ph, sreg->id,
+ SCMI_VOLTAGE_ARCH_STATE_ON);
}
static int scmi_reg_disable(struct regulator_dev *rdev)
{
struct scmi_regulator *sreg = rdev_get_drvdata(rdev);
- const struct scmi_handle *handle = sreg->sdev->handle;
- return handle->voltage_ops->config_set(handle, sreg->id,
- SCMI_VOLTAGE_ARCH_STATE_OFF);
+ return voltage_ops->config_set(sreg->ph, sreg->id,
+ SCMI_VOLTAGE_ARCH_STATE_OFF);
}
static int scmi_reg_is_enabled(struct regulator_dev *rdev)
@@ -70,10 +71,8 @@ static int scmi_reg_is_enabled(struct regulator_dev *rdev)
int ret;
u32 config;
struct scmi_regulator *sreg = rdev_get_drvdata(rdev);
- const struct scmi_handle *handle = sreg->sdev->handle;
- ret = handle->voltage_ops->config_get(handle, sreg->id,
- &config);
+ ret = voltage_ops->config_get(sreg->ph, sreg->id, &config);
if (ret) {
dev_err(&sreg->sdev->dev,
"Error %d reading regulator %s status.\n",
@@ -89,9 +88,8 @@ static int scmi_reg_get_voltage_sel(struct regulator_dev *rdev)
int ret;
s32 volt_uV;
struct scmi_regulator *sreg = rdev_get_drvdata(rdev);
- const struct scmi_handle *handle = sreg->sdev->handle;
- ret = handle->voltage_ops->level_get(handle, sreg->id, &volt_uV);
+ ret = voltage_ops->level_get(sreg->ph, sreg->id, &volt_uV);
if (ret)
return ret;
@@ -103,13 +101,12 @@ static int scmi_reg_set_voltage_sel(struct regulator_dev *rdev,
{
s32 volt_uV;
struct scmi_regulator *sreg = rdev_get_drvdata(rdev);
- const struct scmi_handle *handle = sreg->sdev->handle;
volt_uV = sreg->desc.ops->list_voltage(rdev, selector);
if (volt_uV <= 0)
return -EINVAL;
- return handle->voltage_ops->level_set(handle, sreg->id, 0x0, volt_uV);
+ return voltage_ops->level_set(sreg->ph, sreg->id, 0x0, volt_uV);
}
static const struct regulator_ops scmi_reg_fixed_ops = {
@@ -204,11 +201,10 @@ scmi_config_discrete_regulator_mappings(struct scmi_regulator *sreg,
static int scmi_regulator_common_init(struct scmi_regulator *sreg)
{
int ret;
- const struct scmi_handle *handle = sreg->sdev->handle;
struct device *dev = &sreg->sdev->dev;
const struct scmi_voltage_info *vinfo;
- vinfo = handle->voltage_ops->info_get(handle, sreg->id);
+ vinfo = voltage_ops->info_get(sreg->ph, sreg->id);
if (!vinfo) {
dev_warn(dev, "Failure to get voltage domain %d\n",
sreg->id);
@@ -257,6 +253,7 @@ static int scmi_regulator_common_init(struct scmi_regulator *sreg)
}
static int process_scmi_regulator_of_node(struct scmi_device *sdev,
+ struct scmi_protocol_handle *ph,
struct device_node *np,
struct scmi_regulator_info *rinfo)
{
@@ -284,6 +281,7 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev,
rinfo->sregv[dom]->id = dom;
rinfo->sregv[dom]->sdev = sdev;
+ rinfo->sregv[dom]->ph = ph;
/* get hold of good nodes */
of_node_get(np);
@@ -302,11 +300,17 @@ static int scmi_regulator_probe(struct scmi_device *sdev)
struct device_node *np, *child;
const struct scmi_handle *handle = sdev->handle;
struct scmi_regulator_info *rinfo;
+ struct scmi_protocol_handle *ph;
- if (!handle || !handle->voltage_ops)
+ if (!handle)
return -ENODEV;
- num_doms = handle->voltage_ops->num_domains_get(handle);
+ voltage_ops = handle->devm_protocol_get(sdev,
+ SCMI_PROTOCOL_VOLTAGE, &ph);
+ if (IS_ERR(voltage_ops))
+ return PTR_ERR(voltage_ops);
+
+ num_doms = voltage_ops->num_domains_get(ph);
if (num_doms <= 0) {
if (!num_doms) {
dev_err(&sdev->dev,
@@ -341,7 +345,7 @@ static int scmi_regulator_probe(struct scmi_device *sdev)
*/
np = of_find_node_by_name(handle->dev->of_node, "regulators");
for_each_child_of_node(np, child) {
- ret = process_scmi_regulator_of_node(sdev, child, rinfo);
+ ret = process_scmi_regulator_of_node(sdev, ph, child, rinfo);
/* abort on any mem issue */
if (ret == -ENOMEM)
return ret;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 4171c6f76385..7043c7f6dcf0 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -183,7 +183,7 @@ config RESET_SCMI
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
- default ARCH_AGILEX || ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARC
+ default ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
help
This enables a simple reset controller driver for reset lines that
can be asserted and deasserted by toggling bits in a contiguous,
@@ -205,8 +205,8 @@ config RESET_STM32MP157
This enables the RCC reset controller driver for STM32 MPUs.
config RESET_SOCFPGA
- bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA
- default ARCH_SOCFPGA
+ bool "SoCFPGA Reset Driver" if COMPILE_TEST && (!ARM || !ARCH_INTEL_SOCFPGA)
+ default ARM && ARCH_INTEL_SOCFPGA
select RESET_SIMPLE
help
This enables the reset driver for the SoCFPGA ARMv7 platforms. This
diff --git a/drivers/reset/reset-raspberrypi.c b/drivers/reset/reset-raspberrypi.c
index 02f59c06f69b..fa23db554bcf 100644
--- a/drivers/reset/reset-raspberrypi.c
+++ b/drivers/reset/reset-raspberrypi.c
@@ -82,7 +82,7 @@ static int rpi_reset_probe(struct platform_device *pdev)
return -ENOENT;
}
- fw = rpi_firmware_get(np);
+ fw = devm_rpi_firmware_get(&pdev->dev, np);
of_node_put(np);
if (!fw)
return -EPROBE_DEFER;
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c
index 8d3a858e3b19..4335811e0cfa 100644
--- a/drivers/reset/reset-scmi.c
+++ b/drivers/reset/reset-scmi.c
@@ -2,7 +2,7 @@
/*
* ARM System Control and Management Interface (ARM SCMI) reset driver
*
- * Copyright (C) 2019 ARM Ltd.
+ * Copyright (C) 2019-2021 ARM Ltd.
*/
#include <linux/module.h>
@@ -11,18 +11,20 @@
#include <linux/reset-controller.h>
#include <linux/scmi_protocol.h>
+static const struct scmi_reset_proto_ops *reset_ops;
+
/**
* struct scmi_reset_data - reset controller information structure
* @rcdev: reset controller entity
- * @handle: ARM SCMI handle used for communication with system controller
+ * @ph: ARM SCMI protocol handle used for communication with system controller
*/
struct scmi_reset_data {
struct reset_controller_dev rcdev;
- const struct scmi_handle *handle;
+ const struct scmi_protocol_handle *ph;
};
#define to_scmi_reset_data(p) container_of((p), struct scmi_reset_data, rcdev)
-#define to_scmi_handle(p) (to_scmi_reset_data(p)->handle)
+#define to_scmi_handle(p) (to_scmi_reset_data(p)->ph)
/**
* scmi_reset_assert() - assert device reset
@@ -37,9 +39,9 @@ struct scmi_reset_data {
static int
scmi_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
- const struct scmi_handle *handle = to_scmi_handle(rcdev);
+ const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
- return handle->reset_ops->assert(handle, id);
+ return reset_ops->assert(ph, id);
}
/**
@@ -55,9 +57,9 @@ scmi_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
static int
scmi_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
- const struct scmi_handle *handle = to_scmi_handle(rcdev);
+ const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
- return handle->reset_ops->deassert(handle, id);
+ return reset_ops->deassert(ph, id);
}
/**
@@ -73,9 +75,9 @@ scmi_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
static int
scmi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
- const struct scmi_handle *handle = to_scmi_handle(rcdev);
+ const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
- return handle->reset_ops->reset(handle, id);
+ return reset_ops->reset(ph, id);
}
static const struct reset_control_ops scmi_reset_ops = {
@@ -90,10 +92,15 @@ static int scmi_reset_probe(struct scmi_device *sdev)
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
- if (!handle || !handle->reset_ops)
+ if (!handle)
return -ENODEV;
+ reset_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_RESET, &ph);
+ if (IS_ERR(reset_ops))
+ return PTR_ERR(reset_ops);
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -101,8 +108,8 @@ static int scmi_reset_probe(struct scmi_device *sdev)
data->rcdev.ops = &scmi_reset_ops;
data->rcdev.owner = THIS_MODULE;
data->rcdev.of_node = np;
- data->rcdev.nr_resets = handle->reset_ops->num_domains_get(handle);
- data->handle = handle;
+ data->rcdev.nr_resets = reset_ops->num_domains_get(ph);
+ data->ph = ph;
return devm_reset_controller_register(dev, &data->rcdev);
}
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 439bcd6b8c4a..c557ffd0992c 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -18,15 +18,15 @@
#define DEVICE_NAME "aspeed-lpc-ctrl"
-#define HICR5 0x0
+#define HICR5 0x80
#define HICR5_ENL2H BIT(8)
#define HICR5_ENFWH BIT(10)
-#define HICR6 0x4
+#define HICR6 0x84
#define SW_FWH2AHB BIT(17)
-#define HICR7 0x8
-#define HICR8 0xc
+#define HICR7 0x88
+#define HICR8 0x8c
struct aspeed_lpc_ctrl {
struct miscdevice miscdev;
@@ -215,6 +215,7 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
struct device_node *node;
struct resource resm;
struct device *dev;
+ struct device_node *np;
int rc;
dev = &pdev->dev;
@@ -270,8 +271,15 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
}
}
- lpc_ctrl->regmap = syscon_node_to_regmap(
- pdev->dev.parent->of_node);
+ np = pdev->dev.parent->of_node;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+ dev_err(dev, "unsupported LPC device binding\n");
+ return -ENODEV;
+ }
+
+ lpc_ctrl->regmap = syscon_node_to_regmap(np);
if (IS_ERR(lpc_ctrl->regmap)) {
dev_err(dev, "Couldn't get regmap\n");
return -ENODEV;
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 20acac6342ef..eceeaf8dfbeb 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -29,26 +29,25 @@
#define NUM_SNOOP_CHANNELS 2
#define SNOOP_FIFO_SIZE 2048
-#define HICR5 0x0
+#define HICR5 0x80
#define HICR5_EN_SNP0W BIT(0)
#define HICR5_ENINT_SNP0W BIT(1)
#define HICR5_EN_SNP1W BIT(2)
#define HICR5_ENINT_SNP1W BIT(3)
-
-#define HICR6 0x4
+#define HICR6 0x84
#define HICR6_STR_SNP0W BIT(0)
#define HICR6_STR_SNP1W BIT(1)
-#define SNPWADR 0x10
+#define SNPWADR 0x90
#define SNPWADR_CH0_MASK GENMASK(15, 0)
#define SNPWADR_CH0_SHIFT 0
#define SNPWADR_CH1_MASK GENMASK(31, 16)
#define SNPWADR_CH1_SHIFT 16
-#define SNPWDR 0x14
+#define SNPWDR 0x94
#define SNPWDR_CH0_MASK GENMASK(7, 0)
#define SNPWDR_CH0_SHIFT 0
#define SNPWDR_CH1_MASK GENMASK(15, 8)
#define SNPWDR_CH1_SHIFT 8
-#define HICRB 0x80
+#define HICRB 0x100
#define HICRB_ENSNP0D BIT(14)
#define HICRB_ENSNP1D BIT(15)
@@ -95,8 +94,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
return -EINTR;
}
ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
+ if (ret)
+ return ret;
- return ret ? ret : copied;
+ return copied;
}
static __poll_t snoop_file_poll(struct file *file,
@@ -260,6 +261,7 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
{
struct aspeed_lpc_snoop *lpc_snoop;
struct device *dev;
+ struct device_node *np;
u32 port;
int rc;
@@ -269,8 +271,15 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
if (!lpc_snoop)
return -ENOMEM;
- lpc_snoop->regmap = syscon_node_to_regmap(
- pdev->dev.parent->of_node);
+ np = pdev->dev.parent->of_node;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+ dev_err(dev, "unsupported LPC device binding\n");
+ return -ENODEV;
+ }
+
+ lpc_snoop->regmap = syscon_node_to_regmap(np);
if (IS_ERR(lpc_snoop->regmap)) {
dev_err(dev, "Couldn't get regmap\n");
return -ENODEV;
diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
index c223023dc64f..774465c119be 100644
--- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -209,6 +209,28 @@ static int bcm_pmb_power_on_device(struct bcm_pmb *pmb, int bus, u8 device)
return err;
}
+static int bcm_pmb_power_on_sata(struct bcm_pmb *pmb, int bus, u8 device)
+{
+ int err;
+
+ err = bcm_pmb_power_on_zone(pmb, bus, device, 0);
+ if (err)
+ return err;
+
+ /* Does not apply to the BCM963158 */
+ err = bcm_pmb_bpcm_write(pmb, bus, device, BPCM_MISC_CONTROL, 0);
+ if (err)
+ return err;
+
+ err = bcm_pmb_bpcm_write(pmb, bus, device, BPCM_SR_CONTROL, 0xffffffff);
+ if (err)
+ return err;
+
+ err = bcm_pmb_bpcm_write(pmb, bus, device, BPCM_SR_CONTROL, 0);
+
+ return err;
+}
+
static int bcm_pmb_power_on(struct generic_pm_domain *genpd)
{
struct bcm_pmb_pm_domain *pd = container_of(genpd, struct bcm_pmb_pm_domain, genpd);
@@ -222,6 +244,8 @@ static int bcm_pmb_power_on(struct generic_pm_domain *genpd)
return bcm_pmb_power_on_zone(pmb, data->bus, data->device, 0);
case BCM_PMB_HOST_USB:
return bcm_pmb_power_on_device(pmb, data->bus, data->device);
+ case BCM_PMB_SATA:
+ return bcm_pmb_power_on_sata(pmb, data->bus, data->device);
default:
dev_err(pmb->dev, "unsupported device id: %d\n", data->id);
return -EINVAL;
@@ -317,8 +341,14 @@ static const struct bcm_pmb_pd_data bcm_pmb_bcm4908_data[] = {
{ },
};
+static const struct bcm_pmb_pd_data bcm_pmb_bcm63138_data[] = {
+ { .name = "sata", .id = BCM_PMB_SATA, .bus = 0, .device = 3, },
+ { },
+};
+
static const struct of_device_id bcm_pmb_of_match[] = {
{ .compatible = "brcm,bcm4908-pmb", .data = &bcm_pmb_bcm4908_data, },
+ { .compatible = "brcm,bcm63138-pmb", .data = &bcm_pmb_bcm63138_data, },
{ },
};
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
index 5d1aacdd84ef..068715d6e66d 100644
--- a/drivers/soc/bcm/raspberrypi-power.c
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -177,7 +177,7 @@ static int rpi_power_probe(struct platform_device *pdev)
return -ENODEV;
}
- rpi_domains->fw = rpi_firmware_get(fw_np);
+ rpi_domains->fw = devm_rpi_firmware_get(&pdev->dev, fw_np);
of_node_put(fw_np);
if (!rpi_domains->fw)
return -EPROBE_DEFER;
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 34810f9bb2ee..d5e9a5f2c087 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -117,7 +117,7 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match(
if (matches->svr == (svr & matches->mask))
return matches;
matches++;
- };
+ }
return NULL;
}
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index c5dd026fe889..6cc1847e534a 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -709,7 +709,6 @@ struct bman_pool *bman_new_pool(void)
return pool;
err:
bm_release_bpid(bpid);
- kfree(pool);
return NULL;
}
EXPORT_SYMBOL(bman_new_pool);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 923c44063a9a..acda8a5637c5 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -160,7 +160,7 @@ static int bman_portal_probe(struct platform_device *pdev)
__bman_portals_probed = 1;
/* unassigned portal, skip init */
spin_unlock(&bman_lock);
- return 0;
+ goto check_cleanup;
}
cpumask_set_cpu(cpu, &portal_cpus);
@@ -176,6 +176,7 @@ static int bman_portal_probe(struct platform_device *pdev)
if (!cpu_online(cpu))
bman_offline_cpu(cpu);
+check_cleanup:
if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
/*
* BMan wasn't reset prior to boot (Kexec for example)
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 5685b6706893..4274bd1b0f99 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -302,7 +302,7 @@ static int qman_portal_probe(struct platform_device *pdev)
__qman_portals_probed = 1;
/* unassigned portal, skip init */
spin_unlock(&qman_lock);
- return 0;
+ goto check_cleanup;
}
cpumask_set_cpu(cpu, &portal_cpus);
@@ -323,6 +323,7 @@ static int qman_portal_probe(struct platform_device *pdev)
if (!cpu_online(cpu))
qman_offline_cpu(cpu);
+check_cleanup:
if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
/*
* QMan wasn't reset prior to boot (Kexec for example)
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index ed75198ed254..99f7de43c3c6 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
- qe_gc->cpdata = qe_ioread32be(&regs->cpdata);
+ qe_gc->cpdata = ioread32be(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
- qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1);
- qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2);
- qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1);
- qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2);
- qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr);
+ qe_gc->saved_regs.cpdir1 = ioread32be(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = ioread32be(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = ioread32be(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = ioread32be(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = ioread32be(&regs->cpodr);
}
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct qe_pio_regs __iomem *regs = mm_gc->regs;
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
- return !!(qe_ioread32be(&regs->cpdata) & pin_mask);
+ return !!(ioread32be(&regs->cpdata) & pin_mask);
}
static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
else
qe_gc->cpdata &= ~pin_mask;
- qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
@@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc,
}
}
- qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
@@ -269,7 +269,7 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
else
qe_gc->cpdata &= ~mask1;
- qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
spin_unlock_irqrestore(&qe_gc->lock, flags);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 2df20d6f85fa..4d38c80f8be8 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -109,7 +109,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
- qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
+ iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
} else {
if (cmd == QE_ASSIGN_PAGE) {
/* Here device is the SNUM, not sub-block */
@@ -126,13 +126,13 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
}
- qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr);
- qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
&qe_immr->cp.cecr);
}
/* wait for the QE_CR_FLG to clear */
- ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val,
+ ret = readx_poll_timeout_atomic(ioread32be, &qe_immr->cp.cecr, val,
(val & QE_CR_FLG) == 0, 0, 100);
/* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
spin_unlock_irqrestore(&qe_lock, flags);
@@ -231,7 +231,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
QE_BRGC_ENABLE | div16;
- qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
+ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
return 0;
}
@@ -375,9 +375,9 @@ static int qe_sdma_init(void)
return -ENOMEM;
}
- qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
&sdma->sdebcr);
- qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
&sdma->sdmr);
return 0;
@@ -416,14 +416,14 @@ static void qe_upload_microcode(const void *base,
"uploading microcode '%s'\n", ucode->id);
/* Use auto-increment */
- qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
&qe_immr->iram.iadd);
for (i = 0; i < be32_to_cpu(ucode->count); i++)
- qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
+ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
- qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
+ iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
/*
@@ -542,12 +542,12 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
u32 trap = be32_to_cpu(ucode->traps[j]);
if (trap)
- qe_iowrite32be(trap,
+ iowrite32be(trap,
&qe_immr->rsp[i].tibcr[j]);
}
/* Enable traps */
- qe_iowrite32be(be32_to_cpu(ucode->eccr),
+ iowrite32be(be32_to_cpu(ucode->eccr),
&qe_immr->rsp[i].eccr);
}
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 654e9246ce6b..a0cb8e746879 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -26,7 +26,7 @@
#include <soc/fsl/qe/qe.h>
static struct gen_pool *muram_pool;
-static spinlock_t cpm_muram_lock;
+static DEFINE_SPINLOCK(cpm_muram_lock);
static void __iomem *muram_vbase;
static phys_addr_t muram_pbase;
@@ -54,7 +54,6 @@ int cpm_muram_init(void)
if (muram_pbase)
return 0;
- spin_lock_init(&cpm_muram_lock);
np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
if (!np) {
/* try legacy bindings */
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 0390af999900..3f711c1a0996 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -222,13 +222,13 @@ static struct qe_ic_info qe_ic_info[] = {
static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
{
- return qe_ioread32be(base + (reg >> 2));
+ return ioread32be(base + (reg >> 2));
}
static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
u32 value)
{
- qe_iowrite32be(value, base + (reg >> 2));
+ iowrite32be(value, base + (reg >> 2));
}
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index 11ea08e97db7..e277c827bdf3 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -54,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
/* Set open drain, if required */
- tmp_val = qe_ioread32be(&par_io->cpodr);
+ tmp_val = ioread32be(&par_io->cpodr);
if (open_drain)
- qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
+ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
else
- qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
+ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
/* define direction */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- qe_ioread32be(&par_io->cpdir2) :
- qe_ioread32be(&par_io->cpdir1);
+ ioread32be(&par_io->cpdir2) :
+ ioread32be(&par_io->cpdir1);
/* get all bits mask for 2 bit per port */
pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
@@ -75,30 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
tmp_val &= ~pin_mask2bits;
- qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
} else {
- qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
tmp_val &= ~pin_mask2bits;
- qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
}
/* define pin assignment */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- qe_ioread32be(&par_io->cppar2) :
- qe_ioread32be(&par_io->cppar1);
+ ioread32be(&par_io->cppar2) :
+ ioread32be(&par_io->cppar1);
new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
(pin % (QE_PIO_PINS / 2) + 1) * 2));
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
tmp_val &= ~pin_mask2bits;
- qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
} else {
- qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
tmp_val &= ~pin_mask2bits;
- qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
}
}
EXPORT_SYMBOL(__par_io_config_pin);
@@ -126,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
/* calculate pin location */
pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
- tmp_val = qe_ioread32be(&par_io[port].cpdata);
+ tmp_val = ioread32be(&par_io[port].cpdata);
if (val == 0) /* clear */
- qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
+ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
else /* set */
- qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
+ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
return 0;
}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index ad6193ea4597..53d8aafc9317 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -29,42 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
+ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
+ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
+ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
+ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
+ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
+ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
- &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
+ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
+ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
+ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
+ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->urfset,
- qe_ioread16be(&uccf->uf_regs->urfset));
+ ioread16be(&uccf->uf_regs->urfset));
printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
+ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
+ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
+ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
+ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
+ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
+ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
- &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
+ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
}
EXPORT_SYMBOL(ucc_fast_dump_regs);
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
- qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
+ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
@@ -98,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
uf_regs = uccf->uf_regs;
/* Enable reception and/or transmission on this UCC. */
- gumr = qe_ioread32be(&uf_regs->gumr);
+ gumr = ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr |= UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 1;
@@ -107,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
gumr |= UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 1;
}
- qe_iowrite32be(gumr, &uf_regs->gumr);
+ iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_enable);
@@ -119,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
uf_regs = uccf->uf_regs;
/* Disable reception and/or transmission on this UCC. */
- gumr = qe_ioread32be(&uf_regs->gumr);
+ gumr = ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr &= ~UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 0;
@@ -128,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
gumr &= ~UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 0;
}
- qe_iowrite32be(gumr, &uf_regs->gumr);
+ iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_disable);
@@ -262,7 +262,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
gumr |= uf_info->tenc;
gumr |= uf_info->tcrc;
gumr |= uf_info->mode;
- qe_iowrite32be(gumr, &uf_regs->gumr);
+ iowrite32be(gumr, &uf_regs->gumr);
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
@@ -287,16 +287,16 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
}
/* Set Virtual Fifo registers */
- qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
- qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
- qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
- qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
- qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
- qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
/* utfb, urfb are offsets from MURAM base */
- qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
&uf_regs->utfb);
- qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
&uf_regs->urfb);
/* Mux clocking */
@@ -365,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
}
/* Set interrupt mask register at UCC level. */
- qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
- qe_iowrite32be(0xffffffff, &uf_regs->ucce);
+ iowrite32be(0xffffffff, &uf_regs->ucce);
*uccf_ret = uccf;
return 0;
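
The converted ucc_fast helpers keep their existing prototypes, so callers need no changes. A hedged usage sketch, assuming uccf was obtained from a successful ucc_fast_init() call:

	struct ucc_fast_private *uccf;	/* filled in by ucc_fast_init() */

	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);	/* sets GUMR ENR and ENT */
	ucc_fast_transmit_on_demand(uccf);			/* writes UCC_FAST_TOD to UTODR */
	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);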
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
index 7e11be41ab62..d5ac1ac0ed3c 100644
--- a/drivers/soc/fsl/qe/ucc_slow.c
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Enable reception and/or transmission on this UCC. */
- gumr_l = qe_ioread32be(&us_regs->gumr_l);
+ gumr_l = ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l |= UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 1;
@@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
gumr_l |= UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 1;
}
- qe_iowrite32be(gumr_l, &us_regs->gumr_l);
+ iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_enable);
@@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Disable reception and/or transmission on this UCC. */
- gumr_l = qe_ioread32be(&us_regs->gumr_l);
+ gumr_l = ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 0;
@@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 0;
}
- qe_iowrite32be(gumr_l, &us_regs->gumr_l);
+ iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_disable);
@@ -194,7 +194,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return ret;
}
- qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
+ iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
INIT_LIST_HEAD(&uccs->confQ);
@@ -222,27 +222,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
/* clear bd buffer */
- qe_iowrite32be(0, &bd->buf);
+ iowrite32be(0, &bd->buf);
/* set bd status and length */
- qe_iowrite32be(0, (u32 __iomem *)bd);
+ iowrite32be(0, (u32 __iomem *)bd);
bd++;
}
/* for last BD set Wrap bit */
- qe_iowrite32be(0, &bd->buf);
- qe_iowrite32be(T_W, (u32 __iomem *)bd);
+ iowrite32be(0, &bd->buf);
+ iowrite32be(T_W, (u32 __iomem *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
- qe_iowrite32be(0, (u32 __iomem *)bd);
+ iowrite32be(0, (u32 __iomem *)bd);
/* clear bd buffer */
- qe_iowrite32be(0, &bd->buf);
+ iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
- qe_iowrite32be(R_W, (u32 __iomem *)bd);
- qe_iowrite32be(0, &bd->buf);
+ iowrite32be(R_W, (u32 __iomem *)bd);
+ iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
/* gumr_h */
@@ -263,7 +263,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
gumr |= UCC_SLOW_GUMR_H_TXSY;
if (us_info->rtsm)
gumr |= UCC_SLOW_GUMR_H_RTSM;
- qe_iowrite32be(gumr, &us_regs->gumr_h);
+ iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
@@ -276,18 +276,18 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
gumr |= UCC_SLOW_GUMR_L_TINV;
if (us_info->tend)
gumr |= UCC_SLOW_GUMR_L_TEND;
- qe_iowrite32be(gumr, &us_regs->gumr_l);
+ iowrite32be(gumr, &us_regs->gumr_l);
/* Function code registers */
/* if the data is in cachable memory, the 'global' */
/* in the function code should be set. */
- qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
- qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
/* rbase, tbase are offsets from MURAM base */
- qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
- qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
+ iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+ iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
/* Mux clocking */
/* Grant Support */
@@ -317,14 +317,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
}
/* Set interrupt mask register at UCC level. */
- qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
+ iowrite16be(us_info->uccm_mask, &us_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
- qe_iowrite16be(0xffff, &us_regs->ucce);
+ iowrite16be(0xffff, &us_regs->ucce);
/* Issue QE Init command */
if (us_info->init_tx && us_info->init_rx)
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index 4ace28cab314..90d3f4060b0c 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/kernel.h>
+#include <linux/acpi.h>
#define RCPM_WAKEUP_CELL_MAX_SIZE 7
@@ -78,10 +79,20 @@ static int rcpm_pm_prepare(struct device *dev)
"fsl,rcpm-wakeup", value,
rcpm->wakeup_cells + 1);
- /* Wakeup source should refer to current rcpm device */
- if (ret || (np->phandle != value[0]))
+ if (ret)
continue;
+ /*
+ * In DT mode, only handle devices whose "fsl,rcpm-wakeup"
+ * property points at the current RCPM node.
+ *
+ * In ACPI mode, we currently assume there is only one
+ * RCPM controller in the system.
+ */

+ if (is_of_node(dev->fwnode))
+ if (np->phandle != value[0])
+ continue;
+
/* Property "#fsl,rcpm-wakeup-cells" of rcpm node defines the
* number of IPPDEXPCR register cells, and "fsl,rcpm-wakeup"
* of wakeup source IP contains an integer array: <phandle to
@@ -172,10 +183,19 @@ static const struct of_device_id rcpm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rcpm_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rcpm_acpi_ids[] = {
+ {"NXP0015",},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rcpm_acpi_ids);
+#endif
+
static struct platform_driver rcpm_driver = {
.driver = {
.name = "rcpm",
.of_match_table = rcpm_of_match,
+ .acpi_match_table = ACPI_PTR(rcpm_acpi_ids),
.pm = &rcpm_pm_ops,
},
.probe = rcpm_probe,
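
The is_of_node() check above is what confines the phandle comparison to the devicetree path. A minimal sketch of the same firmware-node test, assuming a device that may have been enumerated by either DT or ACPI (the helper name is hypothetical; the driver open-codes the test on dev->fwnode):

	#include <linux/of.h>
	#include <linux/property.h>

	static bool rcpm_dev_is_dt_enumerated(struct device *dev)
	{
		/* true for a devicetree node, false for an ACPI companion */
		return is_of_node(dev_fwnode(dev));
	}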
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
index 01bfea1cb64a..0738c0f36792 100644
--- a/drivers/soc/imx/soc-imx.c
+++ b/drivers/soc/imx/soc-imx.c
@@ -13,6 +13,8 @@
#include <soc/imx/cpu.h>
#include <soc/imx/revision.h>
+#define IIM_UID 0x820
+
#define OCOTP_UID_H 0x420
#define OCOTP_UID_L 0x410
@@ -32,6 +34,7 @@ static int __init imx_soc_device_init(void)
u64 soc_uid = 0;
u32 val;
int ret;
+ int i;
if (of_machine_is_compatible("fsl,ls1021a"))
return 0;
@@ -68,9 +71,11 @@ static int __init imx_soc_device_init(void)
soc_id = "i.MX35";
break;
case MXC_CPU_MX51:
+ ocotp_compat = "fsl,imx51-iim";
soc_id = "i.MX51";
break;
case MXC_CPU_MX53:
+ ocotp_compat = "fsl,imx53-iim";
soc_id = "i.MX53";
break;
case MXC_CPU_IMX6SL:
@@ -153,6 +158,13 @@ static int __init imx_soc_device_init(void)
regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
soc_uid <<= 16;
soc_uid |= val & 0xffff;
+ } else if (__mxc_cpu_type == MXC_CPU_MX51 ||
+ __mxc_cpu_type == MXC_CPU_MX53) {
+ for (i = 0; i < 8; i++) {
+ regmap_read(ocotp, IIM_UID + i * 4, &val);
+ soc_uid <<= 8;
+ soc_uid |= (val & 0xff);
+ }
} else {
regmap_read(ocotp, OCOTP_UID_H, &val);
soc_uid = val;
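
The new i.MX51/i.MX53 branch assembles the 64-bit unique ID from eight consecutive IIM fuse words, one byte per word, most significant byte first. A worked expansion of where each byte lands, derived from the shift/or in the loop above:

	/*
	 * word at IIM_UID + 0x00 -> soc_uid bits 63:56
	 * word at IIM_UID + 0x04 -> soc_uid bits 55:48
	 *   ...
	 * word at IIM_UID + 0x1c -> soc_uid bits  7:0
	 */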
diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h
new file mode 100644
index 000000000000..2772ef5e3934
--- /dev/null
+++ b/drivers/soc/mediatek/mt8167-mmsys.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8167_MMSYS_H
+#define __SOC_MEDIATEK_MT8167_MMSYS_H
+
+#define MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x030
+#define MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN 0x038
+#define MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x058
+#define MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0x064
+#define MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN 0x06c
+
+#define MT8167_DITHER_MOUT_EN_RDMA0 0x1
+#define MT8167_RDMA0_SOUT_DSI0 0x2
+#define MT8167_DSI0_SEL_IN_RDMA0 0x1
+
+static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+ }, {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_RDMA0,
+ MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0
+ },
+};
+
+#endif /* __SOC_MEDIATEK_MT8167_MMSYS_H */
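
Display drivers are not expected to walk these tables themselves; they call the mmsys helpers, which scan the active SoC's table for a matching (from, to) pair and set or clear the listed bits. A hedged usage sketch, with mmsys_dev standing in for the mmsys device pointer a caller would already hold:

	#include <linux/soc/mediatek/mtk-mmsys.h>

	/* route OVL0 output into COLOR0 on the current SoC */
	mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0);
	/* ... and tear the route down again */
	mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0);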
diff --git a/drivers/soc/mediatek/mt8167-pm-domains.h b/drivers/soc/mediatek/mt8167-pm-domains.h
index ad0b8dfa0527..15559ddf26e4 100644
--- a/drivers/soc/mediatek/mt8167-pm-domains.h
+++ b/drivers/soc/mediatek/mt8167-pm-domains.h
@@ -15,6 +15,7 @@
static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
[MT8167_POWER_DOMAIN_MM] = {
+ .name = "mm",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = SPM_DIS_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
@@ -26,6 +27,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT8167_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
.ctl_offs = SPM_VDE_PWR_CON,
.sram_pdn_bits = GENMASK(8, 8),
@@ -33,6 +35,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT8167_POWER_DOMAIN_ISP] = {
+ .name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = SPM_ISP_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
@@ -40,6 +43,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT8167_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
.sta_mask = MT8167_PWR_STATUS_MFG_ASYNC,
.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
.sram_pdn_bits = 0,
@@ -50,18 +54,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
},
},
[MT8167_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
.sta_mask = MT8167_PWR_STATUS_MFG_2D,
.ctl_offs = SPM_MFG_2D_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
[MT8167_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = SPM_MFG_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
[MT8167_POWER_DOMAIN_CONN] = {
+ .name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = SPM_CONN_PWR_CON,
.sram_pdn_bits = GENMASK(8, 8),
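
As a reading aid for the .sram_pdn_bits and .sram_pdn_ack_bits fields used throughout these tables: GENMASK(h, l) builds a contiguous mask covering bits l..h inclusive, for example:

	GENMASK(11, 8)	/* 0x00000f00: four SRAM power-down request bits */
	GENMASK(8, 8)	/* 0x00000100: a single bit, equivalent to BIT(8) */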
diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
index 3e8ee5dabb43..654c717e5467 100644
--- a/drivers/soc/mediatek/mt8173-pm-domains.h
+++ b/drivers/soc/mediatek/mt8173-pm-domains.h
@@ -12,24 +12,28 @@
static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
[MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
.ctl_offs = SPM_VDE_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
.sta_mask = PWR_STATUS_VENC,
.ctl_offs = SPM_VEN_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
[MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = SPM_ISP_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
},
[MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = SPM_DIS_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
@@ -40,18 +44,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
},
},
[MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
.sta_mask = PWR_STATUS_VENC_LT,
.ctl_offs = SPM_VEN2_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
[MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
.sta_mask = PWR_STATUS_AUDIO,
.ctl_offs = SPM_AUDIO_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
[MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
.sta_mask = PWR_STATUS_USB,
.ctl_offs = SPM_USB_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
@@ -59,18 +66,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
.caps = MTK_SCPD_ACTIVE_WAKEUP,
},
[MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
.sta_mask = PWR_STATUS_MFG_ASYNC,
.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = 0,
},
[MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
.sta_mask = PWR_STATUS_MFG_2D,
.ctl_offs = SPM_MFG_2D_PWR_CON,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(13, 12),
},
[MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = SPM_MFG_PWR_CON,
.sram_pdn_bits = GENMASK(13, 8),
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
new file mode 100644
index 000000000000..579dfc8dc8fc
--- /dev/null
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8183_MMSYS_H
+#define __SOC_MEDIATEK_MT8183_MMSYS_H
+
+#define MT8183_DISP_OVL0_MOUT_EN 0xf00
+#define MT8183_DISP_OVL0_2L_MOUT_EN 0xf04
+#define MT8183_DISP_OVL1_2L_MOUT_EN 0xf08
+#define MT8183_DISP_DITHER0_MOUT_EN 0xf0c
+#define MT8183_DISP_PATH0_SEL_IN 0xf24
+#define MT8183_DISP_DSI0_SEL_IN 0xf2c
+#define MT8183_DISP_DPI0_SEL_IN 0xf30
+#define MT8183_DISP_RDMA0_SOUT_SEL_IN 0xf50
+#define MT8183_DISP_RDMA1_SOUT_SEL_IN 0xf54
+
+#define MT8183_OVL0_MOUT_EN_OVL0_2L BIT(4)
+#define MT8183_OVL0_2L_MOUT_EN_DISP_PATH0 BIT(0)
+#define MT8183_OVL1_2L_MOUT_EN_RDMA1 BIT(4)
+#define MT8183_DITHER0_MOUT_IN_DSI0 BIT(0)
+#define MT8183_DISP_PATH0_SEL_IN_OVL0_2L 0x1
+#define MT8183_DSI0_SEL_IN_RDMA0 0x1
+#define MT8183_DSI0_SEL_IN_RDMA1 0x3
+#define MT8183_DPI0_SEL_IN_RDMA0 0x1
+#define MT8183_DPI0_SEL_IN_RDMA1 0x2
+#define MT8183_RDMA0_SOUT_COLOR0 0x1
+#define MT8183_RDMA1_SOUT_DSI0 0x1
+
+static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
+ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
+ }, {
+ DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
+ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1
+ }, {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MT8183_MMSYS_H */
+
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
index aa5230e6c12f..98a9940d05fb 100644
--- a/drivers/soc/mediatek/mt8183-pm-domains.h
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -12,12 +12,14 @@
static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
[MT8183_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
.sta_mask = PWR_STATUS_AUDIO,
.ctl_offs = 0x0314,
.sram_pdn_bits = GENMASK(11, 8),
.sram_pdn_ack_bits = GENMASK(15, 12),
},
[MT8183_POWER_DOMAIN_CONN] = {
+ .name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = 0x032c,
.sram_pdn_bits = 0,
@@ -28,12 +30,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
.sta_mask = PWR_STATUS_MFG_ASYNC,
.ctl_offs = 0x0334,
.sram_pdn_bits = 0,
.sram_pdn_ack_bits = 0,
},
[MT8183_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
.sta_mask = PWR_STATUS_MFG,
.ctl_offs = 0x0338,
.sram_pdn_bits = GENMASK(8, 8),
@@ -41,18 +45,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.caps = MTK_SCPD_DOMAIN_SUPPLY,
},
[MT8183_POWER_DOMAIN_MFG_CORE0] = {
+ .name = "mfg_core0",
.sta_mask = BIT(7),
.ctl_offs = 0x034c,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8183_POWER_DOMAIN_MFG_CORE1] = {
+ .name = "mfg_core1",
.sta_mask = BIT(20),
.ctl_offs = 0x0310,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8183_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
.sta_mask = PWR_STATUS_MFG_2D,
.ctl_offs = 0x0348,
.sram_pdn_bits = GENMASK(8, 8),
@@ -65,6 +72,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_DISP] = {
+ .name = "disp",
.sta_mask = PWR_STATUS_DISP,
.ctl_offs = 0x030c,
.sram_pdn_bits = GENMASK(8, 8),
@@ -83,6 +91,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_CAM] = {
+ .name = "cam",
.sta_mask = BIT(25),
.ctl_offs = 0x0344,
.sram_pdn_bits = GENMASK(9, 8),
@@ -105,6 +114,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_ISP] = {
+ .name = "isp",
.sta_mask = PWR_STATUS_ISP,
.ctl_offs = 0x0308,
.sram_pdn_bits = GENMASK(9, 8),
@@ -127,6 +137,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
.sta_mask = BIT(31),
.ctl_offs = 0x0300,
.sram_pdn_bits = GENMASK(8, 8),
@@ -139,6 +150,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_VENC] = {
+ .name = "venc",
.sta_mask = PWR_STATUS_VENC,
.ctl_offs = 0x0304,
.sram_pdn_bits = GENMASK(11, 8),
@@ -151,6 +163,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_VPU_TOP] = {
+ .name = "vpu_top",
.sta_mask = BIT(26),
.ctl_offs = 0x0324,
.sram_pdn_bits = GENMASK(8, 8),
@@ -177,6 +190,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
},
},
[MT8183_POWER_DOMAIN_VPU_CORE0] = {
+ .name = "vpu_core0",
.sta_mask = BIT(27),
.ctl_offs = 0x33c,
.sram_pdn_bits = GENMASK(11, 8),
@@ -194,6 +208,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
.caps = MTK_SCPD_SRAM_ISO,
},
[MT8183_POWER_DOMAIN_VPU_CORE1] = {
+ .name = "vpu_core1",
.sta_mask = BIT(28),
.ctl_offs = 0x0340,
.sram_pdn_bits = GENMASK(11, 8),
diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
index 0fdf6dc6231f..543dda70de01 100644
--- a/drivers/soc/mediatek/mt8192-pm-domains.h
+++ b/drivers/soc/mediatek/mt8192-pm-domains.h
@@ -12,6 +12,7 @@
static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
[MT8192_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
.sta_mask = BIT(21),
.ctl_offs = 0x0354,
.sram_pdn_bits = GENMASK(8, 8),
@@ -24,6 +25,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_CONN] = {
+ .name = "conn",
.sta_mask = PWR_STATUS_CONN,
.ctl_offs = 0x0304,
.sram_pdn_bits = 0,
@@ -45,12 +47,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
.caps = MTK_SCPD_KEEP_DEFAULT_OFF,
},
[MT8192_POWER_DOMAIN_MFG0] = {
+ .name = "mfg0",
.sta_mask = BIT(2),
.ctl_offs = 0x0308,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_MFG1] = {
+ .name = "mfg1",
.sta_mask = BIT(3),
.ctl_offs = 0x030c,
.sram_pdn_bits = GENMASK(8, 8),
@@ -75,36 +79,42 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_MFG2] = {
+ .name = "mfg2",
.sta_mask = BIT(4),
.ctl_offs = 0x0310,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_MFG3] = {
+ .name = "mfg3",
.sta_mask = BIT(5),
.ctl_offs = 0x0314,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_MFG4] = {
+ .name = "mfg4",
.sta_mask = BIT(6),
.ctl_offs = 0x0318,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_MFG5] = {
+ .name = "mfg5",
.sta_mask = BIT(7),
.ctl_offs = 0x031c,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_MFG6] = {
+ .name = "mfg6",
.sta_mask = BIT(8),
.ctl_offs = 0x0320,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_DISP] = {
+ .name = "disp",
.sta_mask = BIT(20),
.ctl_offs = 0x0350,
.sram_pdn_bits = GENMASK(8, 8),
@@ -133,6 +143,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_IPE] = {
+ .name = "ipe",
.sta_mask = BIT(14),
.ctl_offs = 0x0338,
.sram_pdn_bits = GENMASK(8, 8),
@@ -149,6 +160,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_ISP] = {
+ .name = "isp",
.sta_mask = BIT(12),
.ctl_offs = 0x0330,
.sram_pdn_bits = GENMASK(8, 8),
@@ -165,6 +177,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_ISP2] = {
+ .name = "isp2",
.sta_mask = BIT(13),
.ctl_offs = 0x0334,
.sram_pdn_bits = GENMASK(8, 8),
@@ -181,6 +194,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_MDP] = {
+ .name = "mdp",
.sta_mask = BIT(19),
.ctl_offs = 0x034c,
.sram_pdn_bits = GENMASK(8, 8),
@@ -197,6 +211,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_VENC] = {
+ .name = "venc",
.sta_mask = BIT(17),
.ctl_offs = 0x0344,
.sram_pdn_bits = GENMASK(8, 8),
@@ -213,6 +228,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
.sta_mask = BIT(15),
.ctl_offs = 0x033c,
.sram_pdn_bits = GENMASK(8, 8),
@@ -229,12 +245,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_VDEC2] = {
+ .name = "vdec2",
.sta_mask = BIT(16),
.ctl_offs = 0x0340,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_CAM] = {
+ .name = "cam",
.sta_mask = BIT(23),
.ctl_offs = 0x035c,
.sram_pdn_bits = GENMASK(8, 8),
@@ -263,18 +281,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
},
},
[MT8192_POWER_DOMAIN_CAM_RAWA] = {
+ .name = "cam_rawa",
.sta_mask = BIT(24),
.ctl_offs = 0x0360,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_CAM_RAWB] = {
+ .name = "cam_rawb",
.sta_mask = BIT(25),
.ctl_offs = 0x0364,
.sram_pdn_bits = GENMASK(8, 8),
.sram_pdn_ack_bits = GENMASK(12, 12),
},
[MT8192_POWER_DOMAIN_CAM_RAWC] = {
+ .name = "cam_rawc",
.sta_mask = BIT(26),
.ctl_offs = 0x0368,
.sram_pdn_bits = GENMASK(8, 8),
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index 18f93979e14a..080660ef11bf 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -10,79 +10,20 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
-#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
-#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
-#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
-#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
-#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
-#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
-#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
-#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
-#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
-#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
-#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
-#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
-#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
-
-#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
-#define DISP_REG_CONFIG_OUT_SEL 0x04c
-#define DISP_REG_CONFIG_DSI_SEL 0x050
-#define DISP_REG_CONFIG_DPI_SEL 0x064
-
-#define OVL0_MOUT_EN_COLOR0 0x1
-#define OD_MOUT_EN_RDMA0 0x1
-#define OD1_MOUT_EN_RDMA1 BIT(16)
-#define UFOE_MOUT_EN_DSI0 0x1
-#define COLOR0_SEL_IN_OVL0 0x1
-#define OVL1_MOUT_EN_COLOR1 0x1
-#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA0_SOUT_DPI0 0x2
-#define RDMA0_SOUT_DPI1 0x3
-#define RDMA0_SOUT_DSI1 0x1
-#define RDMA0_SOUT_DSI2 0x4
-#define RDMA0_SOUT_DSI3 0x5
-#define RDMA1_SOUT_DPI0 0x2
-#define RDMA1_SOUT_DPI1 0x3
-#define RDMA1_SOUT_DSI1 0x1
-#define RDMA1_SOUT_DSI2 0x4
-#define RDMA1_SOUT_DSI3 0x5
-#define RDMA2_SOUT_DPI0 0x2
-#define RDMA2_SOUT_DPI1 0x3
-#define RDMA2_SOUT_DSI1 0x1
-#define RDMA2_SOUT_DSI2 0x4
-#define RDMA2_SOUT_DSI3 0x5
-#define DPI0_SEL_IN_RDMA1 0x1
-#define DPI0_SEL_IN_RDMA2 0x3
-#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
-#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
-#define DSI0_SEL_IN_RDMA1 0x1
-#define DSI0_SEL_IN_RDMA2 0x4
-#define DSI1_SEL_IN_RDMA1 0x1
-#define DSI1_SEL_IN_RDMA2 0x4
-#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
-#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
-#define COLOR1_SEL_IN_OVL1 0x1
-
-#define OVL_MOUT_EN_RDMA 0x1
-#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
-#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
-#define DSI_SEL_IN_BLS 0x0
-#define DPI_SEL_IN_BLS 0x0
-#define DSI_SEL_IN_RDMA 0x1
-
-struct mtk_mmsys_driver_data {
- const char *clk_driver;
-};
+#include "mtk-mmsys.h"
+#include "mt8167-mmsys.h"
+#include "mt8183-mmsys.h"
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.clk_driver = "clk-mt2701-mm",
+ .routes = mmsys_default_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_default_routing_table),
};
static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
.clk_driver = "clk-mt2712-mm",
+ .routes = mmsys_default_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_default_routing_table),
};
static const struct mtk_mmsys_driver_data mt6779_mmsys_driver_data = {
@@ -93,188 +34,43 @@ static const struct mtk_mmsys_driver_data mt6797_mmsys_driver_data = {
.clk_driver = "clk-mt6797-mm",
};
+static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = {
+ .clk_driver = "clk-mt8167-mm",
+ .routes = mt8167_mmsys_routing_table,
+ .num_routes = ARRAY_SIZE(mt8167_mmsys_routing_table),
+};
+
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.clk_driver = "clk-mt8173-mm",
+ .routes = mmsys_default_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_default_routing_table),
};
static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.clk_driver = "clk-mt8183-mm",
+ .routes = mmsys_mt8183_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table),
};
-static unsigned int mtk_mmsys_ddp_mout_en(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
- value = OVL0_MOUT_EN_COLOR0;
- } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
- value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD_MOUT_EN_RDMA0;
- } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
- value = UFOE_MOUT_EN_DSI0;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
- value = OVL1_MOUT_EN_COLOR1;
- } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
- value = GAMMA_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD1_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI3;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static unsigned int mtk_mmsys_ddp_sel_in(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
- value = COLOR0_SEL_IN_OVL0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI3_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI3_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
- value = COLOR1_SEL_IN_OVL1;
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSI_SEL;
- value = DSI_SEL_IN_BLS;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static void mtk_mmsys_ddp_sout_sel(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
- writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- writel_relaxed(DSI_SEL_IN_RDMA,
- config_regs + DISP_REG_CONFIG_DSI_SEL);
- writel_relaxed(DPI_SEL_IN_BLS,
- config_regs + DISP_REG_CONFIG_DPI_SEL);
- }
-}
+struct mtk_mmsys {
+ void __iomem *regs;
+ const struct mtk_mmsys_driver_data *data;
+};
void mtk_mmsys_ddp_connect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
{
- void __iomem *config_regs = dev_get_drvdata(dev);
- unsigned int addr, value, reg;
-
- value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- mtk_mmsys_ddp_sout_sel(config_regs, cur, next);
-
- value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
+ struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
+ const struct mtk_mmsys_routes *routes = mmsys->data->routes;
+ u32 reg;
+ int i;
+
+ for (i = 0; i < mmsys->data->num_routes; i++)
+ if (cur == routes[i].from_comp && next == routes[i].to_comp) {
+ reg = readl_relaxed(mmsys->regs + routes[i].addr) | routes[i].val;
+ writel_relaxed(reg, mmsys->regs + routes[i].addr);
+ }
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
@@ -282,44 +78,42 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
{
- void __iomem *config_regs = dev_get_drvdata(dev);
- unsigned int addr, value, reg;
-
- value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
+ struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
+ const struct mtk_mmsys_routes *routes = mmsys->data->routes;
+ u32 reg;
+ int i;
+
+ for (i = 0; i < mmsys->data->num_routes; i++)
+ if (cur == routes[i].from_comp && next == routes[i].to_comp) {
+ reg = readl_relaxed(mmsys->regs + routes[i].addr) & ~routes[i].val;
+ writel_relaxed(reg, mmsys->regs + routes[i].addr);
+ }
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
static int mtk_mmsys_probe(struct platform_device *pdev)
{
- const struct mtk_mmsys_driver_data *data;
struct device *dev = &pdev->dev;
struct platform_device *clks;
struct platform_device *drm;
- void __iomem *config_regs;
+ struct mtk_mmsys *mmsys;
int ret;
- config_regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(config_regs)) {
- ret = PTR_ERR(config_regs);
+ mmsys = devm_kzalloc(dev, sizeof(*mmsys), GFP_KERNEL);
+ if (!mmsys)
+ return -ENOMEM;
+
+ mmsys->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mmsys->regs)) {
+ ret = PTR_ERR(mmsys->regs);
dev_err(dev, "Failed to ioremap mmsys registers: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, config_regs);
-
- data = of_device_get_match_data(&pdev->dev);
+ mmsys->data = of_device_get_match_data(&pdev->dev);
+ platform_set_drvdata(pdev, mmsys);
- clks = platform_device_register_data(&pdev->dev, data->clk_driver,
+ clks = platform_device_register_data(&pdev->dev, mmsys->data->clk_driver,
PLATFORM_DEVID_AUTO, NULL, 0);
if (IS_ERR(clks))
return PTR_ERR(clks);
@@ -352,6 +146,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
.data = &mt6797_mmsys_driver_data,
},
{
+ .compatible = "mediatek,mt8167-mmsys",
+ .data = &mt8167_mmsys_driver_data,
+ },
+ {
.compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data,
},
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
new file mode 100644
index 000000000000..a760a34e6eca
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MTK_MMSYS_H
+#define __SOC_MEDIATEK_MTK_MMSYS_H
+
+#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
+#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
+#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
+#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
+#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
+#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
+#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
+#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
+#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+#define DISP_REG_CONFIG_DPI_SEL 0x064
+
+#define OVL0_MOUT_EN_COLOR0 0x1
+#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
+#define UFOE_MOUT_EN_DSI0 0x1
+#define COLOR0_SEL_IN_OVL0 0x1
+#define OVL1_MOUT_EN_COLOR1 0x1
+#define GAMMA_MOUT_EN_RDMA1 0x1
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
+#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
+#define COLOR1_SEL_IN_OVL1 0x1
+
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
+#define DSI_SEL_IN_BLS 0x0
+#define DPI_SEL_IN_BLS 0x0
+#define DSI_SEL_IN_RDMA 0x1
+
+struct mtk_mmsys_routes {
+ u32 from_comp;
+ u32 to_comp;
+ u32 addr;
+ u32 val;
+};
+
+struct mtk_mmsys_driver_data {
+ const char *clk_driver;
+ const struct mtk_mmsys_routes *routes;
+ const unsigned int num_routes;
+};
+
+/*
+ * The routes on mt8173, mt2701 and mt2712 differ: the same register
+ * address selects different input/output connections on each SoC.
+ * For now they share this default table, since it covers what these
+ * SoCs need and we do not have complete routing information for all
+ * three. Once more information is available, mt2701 and mt2712 can be
+ * split out into their own tables.
+ */
+static const struct mtk_mmsys_routes mmsys_default_routing_table[] = {
+ {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_OUT_SEL, BLS_TO_DSI_RDMA1_TO_DPI1
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_BLS
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_OUT_SEL, BLS_TO_DPI_RDMA1_TO_DSI
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_RDMA
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_BLS
+ }, {
+ DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
+ DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1
+ }, {
+ DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
+ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0
+ }, {
+ DDP_COMPONENT_OD1, DDP_COMPONENT_RDMA1,
+ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
+ DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
+ DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI2
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI3
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI0
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI2
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI3
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI0
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI1
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI1
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI3
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA2
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MTK_MMSYS_H */
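
With the routing moved into per-SoC tables, supporting a new SoC becomes largely a data-entry exercise. A hypothetical sketch (the mt1234 names are invented for illustration; the entries would sit next to the tables above, plus a matching of_device_id entry in mtk-mmsys.c):

	static const struct mtk_mmsys_routes mt1234_mmsys_routing_table[] = {
		{
			DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
			DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0
		},
	};

	static const struct mtk_mmsys_driver_data mt1234_mmsys_driver_data = {
		.clk_driver	= "clk-mt1234-mm",
		.routes		= mt1234_mmsys_routing_table,
		.num_routes	= ARRAY_SIZE(mt1234_mmsys_routing_table),
	};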
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index f531b119da7a..2e4bcc300576 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -14,6 +14,8 @@
#define MT2701_MUTEX0_MOD0 0x2c
#define MT2701_MUTEX0_SOF0 0x30
+#define MT8183_MUTEX0_MOD0 0x30
+#define MT8183_MUTEX0_SOF0 0x2c
#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
@@ -37,6 +39,18 @@
#define MT8167_MUTEX_MOD_DISP_DITHER 15
#define MT8167_MUTEX_MOD_DISP_UFOE 16
+#define MT8183_MUTEX_MOD_DISP_RDMA0 0
+#define MT8183_MUTEX_MOD_DISP_RDMA1 1
+#define MT8183_MUTEX_MOD_DISP_OVL0 9
+#define MT8183_MUTEX_MOD_DISP_OVL0_2L 10
+#define MT8183_MUTEX_MOD_DISP_OVL1_2L 11
+#define MT8183_MUTEX_MOD_DISP_WDMA0 12
+#define MT8183_MUTEX_MOD_DISP_COLOR0 13
+#define MT8183_MUTEX_MOD_DISP_CCORR0 14
+#define MT8183_MUTEX_MOD_DISP_AAL0 15
+#define MT8183_MUTEX_MOD_DISP_GAMMA0 16
+#define MT8183_MUTEX_MOD_DISP_DITHER0 17
+
#define MT8173_MUTEX_MOD_DISP_OVL0 11
#define MT8173_MUTEX_MOD_DISP_OVL1 12
#define MT8173_MUTEX_MOD_DISP_RDMA0 13
@@ -87,6 +101,11 @@
#define MT2712_MUTEX_SOF_DSI3 6
#define MT8167_MUTEX_SOF_DPI0 2
#define MT8167_MUTEX_SOF_DPI1 3
+#define MT8183_MUTEX_SOF_DSI0 1
+#define MT8183_MUTEX_SOF_DPI0 2
+
+#define MT8183_MUTEX_EOF_DSI0 (MT8183_MUTEX_SOF_DSI0 << 6)
+#define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6)
struct mtk_mutex {
int id;
@@ -181,6 +200,20 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
+static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8183_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_CCORR] = MT8183_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_COLOR0] = MT8183_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER] = MT8183_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_GAMMA] = MT8183_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_OVL0] = MT8183_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8183_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_OVL_2L1] = MT8183_MUTEX_MOD_DISP_OVL1_2L,
+ [DDP_COMPONENT_RDMA0] = MT8183_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8183_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
+};
+
static const unsigned int mt2712_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
@@ -198,6 +231,13 @@ static const unsigned int mt8167_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1,
};
+/* Add EOF setting so the overlay hardware can receive its frame-done interrupt */
+static const unsigned int mt8183_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0 | MT8183_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0,
+};
+
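+
On MT8183 the EOF source shares the SOF register, which is why the table entries OR in the MT8183_MUTEX_EOF_* values. A worked expansion of the DSI0 entry using only the #defines above (the exact hardware field layout is not spelled out here):

	/*
	 * MT8183_MUTEX_SOF_DSI0 == 1
	 * MT8183_MUTEX_EOF_DSI0 == 1 << 6 == 0x40
	 * so a single mutex_sof[] entry programs both the SOF and the EOF
	 * source in one register write.
	 */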
static const struct mtk_mutex_data mt2701_mutex_driver_data = {
.mutex_mod = mt2701_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
@@ -227,6 +267,14 @@ static const struct mtk_mutex_data mt8173_mutex_driver_data = {
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt8183_mutex_driver_data = {
+ .mutex_mod = mt8183_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .no_clk = true,
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev)
{
struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
@@ -457,11 +505,13 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8167_mutex_driver_data},
{ .compatible = "mediatek,mt8173-disp-mutex",
.data = &mt8173_mutex_driver_data},
+ { .compatible = "mediatek,mt8183-disp-mutex",
+ .data = &mt8183_mutex_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
-struct platform_driver mtk_mutex_driver = {
+static struct platform_driver mtk_mutex_driver = {
.probe = mtk_mutex_probe,
.remove = mtk_mutex_remove,
.driver = {
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index b7f697666bdd..0af00efa0ef8 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -438,7 +438,11 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
goto err_unprepare_subsys_clocks;
}
- pd->genpd.name = node->name;
+ if (!pd->data->name)
+ pd->genpd.name = node->name;
+ else
+ pd->genpd.name = pd->data->name;
+
pd->genpd.power_off = scpsys_power_off;
pd->genpd.power_on = scpsys_power_on;
@@ -487,8 +491,9 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
child_pd = scpsys_add_one_domain(scpsys, child);
if (IS_ERR(child_pd)) {
- dev_err_probe(scpsys->dev, PTR_ERR(child_pd),
- "%pOF: failed to get child domain id\n", child);
+ ret = PTR_ERR(child_pd);
+ dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
+ child);
goto err_put_node;
}
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
index 141dc76054e6..21a4e113bbec 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.h
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -76,6 +76,7 @@ struct scpsys_bus_prot_data {
/**
* struct scpsys_domain_data - scp domain data for power on/off flow
+ * @name: The name of the power domain.
* @sta_mask: The mask for power on/off status bit.
* @ctl_offs: The offset for main power control register.
* @sram_pdn_bits: The mask for sram power control bits.
@@ -85,6 +86,7 @@ struct scpsys_bus_prot_data {
* @bp_smi: bus protection for smi subsystem
*/
struct scpsys_domain_data {
+ const char *name;
u32 sta_mask;
int ctl_offs;
u32 sram_pdn_bits;
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 5d34e8b9c988..e4de75f35c33 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -25,10 +25,12 @@
/* macro for wrapper status */
#define PWRAP_GET_WACS_RDATA(x) (((x) >> 0) & 0x0000ffff)
+#define PWRAP_GET_WACS_ARB_FSM(x) (((x) >> 1) & 0x00000007)
#define PWRAP_GET_WACS_FSM(x) (((x) >> 16) & 0x00000007)
#define PWRAP_GET_WACS_REQ(x) (((x) >> 19) & 0x00000001)
-#define PWRAP_STATE_SYNC_IDLE0 (1 << 20)
-#define PWRAP_STATE_INIT_DONE0 (1 << 21)
+#define PWRAP_STATE_SYNC_IDLE0 BIT(20)
+#define PWRAP_STATE_INIT_DONE0 BIT(21)
+#define PWRAP_STATE_INIT_DONE1 BIT(15)
/* macro for WACS FSM */
#define PWRAP_WACS_FSM_IDLE 0x00
@@ -74,6 +76,7 @@
#define PWRAP_CAP_DCM BIT(2)
#define PWRAP_CAP_INT1_EN BIT(3)
#define PWRAP_CAP_WDT_SRC1 BIT(4)
+#define PWRAP_CAP_ARB BIT(5)
/* defines for slave device wrapper registers */
enum dew_regs {
@@ -340,6 +343,8 @@ enum pwrap_regs {
PWRAP_DCM_DBC_PRD,
PWRAP_EINT_STA0_ADR,
PWRAP_EINT_STA1_ADR,
+ PWRAP_SWINF_2_WDATA_31_0,
+ PWRAP_SWINF_2_RDATA_31_0,
/* MT2701 only regs */
PWRAP_ADC_CMD_ADDR,
@@ -627,6 +632,17 @@ static int mt6797_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x1D4,
};
+static int mt6873_regs[] = {
+ [PWRAP_INIT_DONE2] = 0x0,
+ [PWRAP_TIMER_EN] = 0x3E0,
+ [PWRAP_INT_EN] = 0x448,
+ [PWRAP_WACS2_CMD] = 0xC80,
+ [PWRAP_SWINF_2_WDATA_31_0] = 0xC84,
+ [PWRAP_SWINF_2_RDATA_31_0] = 0xC94,
+ [PWRAP_WACS2_VLDCLR] = 0xCA4,
+ [PWRAP_WACS2_RDATA] = 0xCA8,
+};
+
static int mt7622_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -1045,6 +1061,7 @@ enum pwrap_type {
PWRAP_MT6765,
PWRAP_MT6779,
PWRAP_MT6797,
+ PWRAP_MT6873,
PWRAP_MT7622,
PWRAP_MT8135,
PWRAP_MT8173,
@@ -1106,18 +1123,25 @@ static void pwrap_writel(struct pmic_wrapper *wrp, u32 val, enum pwrap_regs reg)
writel(val, wrp->base + wrp->master->regs[reg]);
}
-static bool pwrap_is_fsm_idle(struct pmic_wrapper *wrp)
+static u32 pwrap_get_fsm_state(struct pmic_wrapper *wrp)
{
- u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ u32 val;
- return PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_IDLE;
+ val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ return PWRAP_GET_WACS_ARB_FSM(val);
+ else
+ return PWRAP_GET_WACS_FSM(val);
}
-static bool pwrap_is_fsm_vldclr(struct pmic_wrapper *wrp)
+static bool pwrap_is_fsm_idle(struct pmic_wrapper *wrp)
{
- u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ return pwrap_get_fsm_state(wrp) == PWRAP_WACS_FSM_IDLE;
+}
- return PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR;
+static bool pwrap_is_fsm_vldclr(struct pmic_wrapper *wrp)
+{
+ return pwrap_get_fsm_state(wrp) == PWRAP_WACS_FSM_WFVLDCLR;
}
/*
@@ -1165,6 +1189,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
int ret;
+ u32 val;
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
if (ret) {
@@ -1172,13 +1197,21 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
return ret;
}
- pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
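+ /* Arbiter-capable wrappers take the PMIC register address as-is; legacy wrappers encode it in the upper 16 bits of the command */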
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ val = adr;
+ else
+ val = (adr >> 1) << 16;
+ pwrap_writel(wrp, val, PWRAP_WACS2_CMD);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
if (ret)
return ret;
- *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ val = pwrap_readl(wrp, PWRAP_SWINF_2_RDATA_31_0);
+ else
+ val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ *rdata = PWRAP_GET_WACS_RDATA(val);
pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
@@ -1228,8 +1261,13 @@ static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
return ret;
}
- pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
- PWRAP_WACS2_CMD);
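+ /* With the arbiter, the 32-bit write data is staged in a dedicated SWINF data register before the command is issued */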
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB)) {
+ pwrap_writel(wrp, wdata, PWRAP_SWINF_2_WDATA_31_0);
+ pwrap_writel(wrp, BIT(29) | adr, PWRAP_WACS2_CMD);
+ } else {
+ pwrap_writel(wrp, BIT(31) | ((adr >> 1) << 16) | wdata,
+ PWRAP_WACS2_CMD);
+ }
return 0;
}
@@ -1485,6 +1523,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
case PWRAP_MT7622:
pwrap_writel(wrp, 0, PWRAP_CIPHER_EN);
break;
+ case PWRAP_MT6873:
case PWRAP_MT8183:
break;
}
@@ -1921,6 +1960,19 @@ static const struct pmic_wrapper_type pwrap_mt6797 = {
.init_soc_specific = NULL,
};
+static const struct pmic_wrapper_type pwrap_mt6873 = {
+ .regs = mt6873_regs,
+ .type = PWRAP_MT6873,
+ .arb_en_all = 0x777f,
+ .int_en_all = BIT(4) | BIT(5),
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_ARB,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct pmic_wrapper_type pwrap_mt7622 = {
.regs = mt7622_regs,
.type = PWRAP_MT7622,
@@ -1999,6 +2051,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt6797-pwrap",
.data = &pwrap_mt6797,
}, {
+ .compatible = "mediatek,mt6873-pwrap",
+ .data = &pwrap_mt6873,
+ }, {
.compatible = "mediatek,mt7622-pwrap",
.data = &pwrap_mt7622,
}, {
@@ -2022,6 +2077,7 @@ MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
static int pwrap_probe(struct platform_device *pdev)
{
int ret, irq;
+ u32 mask_done;
struct pmic_wrapper *wrp;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_slave_id = NULL;
@@ -2116,14 +2172,21 @@ static int pwrap_probe(struct platform_device *pdev)
}
}
- if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_INIT_DONE0)) {
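+ /* Arbiter-based wrappers report init completion in a different WACS2_RDATA status bit */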
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ mask_done = PWRAP_STATE_INIT_DONE1;
+ else
+ mask_done = PWRAP_STATE_INIT_DONE0;
+
+ if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & mask_done)) {
dev_dbg(wrp->dev, "initialization isn't finished\n");
ret = -ENODEV;
goto err_out2;
}
/* Initialize watchdog, may not be done by the bootloader */
- pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
+ if (!HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
+
/*
* Since STAUPD was not used on the mt8173 platform,
* STAUPD of WDT_SRC should be turned off
@@ -2132,7 +2195,11 @@ static int pwrap_probe(struct platform_device *pdev)
if (HAS_CAP(wrp->master->caps, PWRAP_CAP_WDT_SRC1))
pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN_1);
- pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ pwrap_writel(wrp, 0x3, PWRAP_TIMER_EN);
+ else
+ pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
+
pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
/*
* We add INT1 interrupt to handle starvation and request exception
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 8403a77b59fe..15a36dcab990 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -109,6 +109,18 @@ static const struct llcc_slice_config sc7180_data[] = {
{ LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
};
+static const struct llcc_slice_config sc7280_data[] = {
+ { LLCC_CPUSS, 1, 768, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
+ { LLCC_MDMHPGRW, 7, 512, 2, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
+ { LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+};
+
static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
{ LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
@@ -179,6 +191,12 @@ static const struct qcom_llcc_config sc7180_cfg = {
.need_llcc_cfg = true,
};
+static const struct qcom_llcc_config sc7280_cfg = {
+ .sct_data = sc7280_data,
+ .size = ARRAY_SIZE(sc7280_data),
+ .need_llcc_cfg = true,
+};
+
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
@@ -606,6 +624,7 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
+ { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 24cd193dec55..eba7f76f9d61 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
break;
}
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ break;
+ }
+
ptr = mem_region + offset;
if (phdr->p_filesz && phdr->p_offset < fw->size) {
@@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
break;
}
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ release_firmware(seg_fw);
+ ret = -EINVAL;
+ break;
+ }
+
release_firmware(seg_fw);
}
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 209dcdca923f..915d5bc3d46e 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
pr_err("PDR: %s register listener failed: 0x%x\n",
pds->service_path, resp.resp.error);
- return ret;
+ return -EREMOTEIO;
}
pds->state = resp.curr_state;
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 53acb9423bd6..934fcc4d2b05 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -597,6 +597,7 @@ static int qmp_remove(struct platform_device *pdev)
static const struct of_device_id qmp_dt_match[] = {
{ .compatible = "qcom,sc7180-aoss-qmp", },
+ { .compatible = "qcom,sc7280-aoss-qmp", },
{ .compatible = "qcom,sdm845-aoss-qmp", },
{ .compatible = "qcom,sm8150-aoss-qmp", },
{ .compatible = "qcom,sm8250-aoss-qmp", },
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 3aaab71d1b2c..328cc8237191 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -451,11 +451,11 @@ static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
/**
* qmi_decode_struct_elem() - Decodes elements of struct data type
- * @ei_array: Struct info array descibing the struct element.
+ * @ei_array: Struct info array describing the struct element.
* @buf_dst: Buffer to store the decoded element.
* @buf_src: Buffer containing the elements in QMI wire format.
* @elem_len: Number of elements to be decoded.
- * @tlv_len: Total size of the encoded inforation corresponding to
+ * @tlv_len: Total size of the encoded information corresponding to
* this struct element.
* @dec_level: Depth of the nested structure from the main structure.
*
@@ -499,10 +499,10 @@ static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
/**
* qmi_decode_string_elem() - Decodes elements of string data type
- * @ei_array: Struct info array descibing the string element.
+ * @ei_array: Struct info array describing the string element.
* @buf_dst: Buffer to store the decoded element.
* @buf_src: Buffer containing the elements in QMI wire format.
- * @tlv_len: Total size of the encoded inforation corresponding to
+ * @tlv_len: Total size of the encoded information corresponding to
* this string element.
* @dec_level: Depth of the string element from the main structure.
*
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a84ab0d6a9d4..e749a2b285d8 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -195,22 +195,6 @@ static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
}
/**
- * tcs_is_free() - Return if a TCS is totally free.
- * @drv: The RSC controller.
- * @tcs_id: The global ID of this TCS.
- *
- * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
- *
- * Context: Must be called with the drv->lock held.
- *
- * Return: true if the given TCS is free.
- */
-static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
-{
- return !test_bit(tcs_id, drv->tcs_in_use);
-}
-
-/**
* tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
* @drv: The RSC controller.
* @type: SLEEP_TCS or WAKE_TCS
@@ -408,12 +392,10 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);
- for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
+ for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
req = get_req_from_tcs(drv, i);
- if (!req) {
- WARN_ON(1);
+ if (WARN_ON(!req))
goto skip;
- }
err = 0;
for (j = 0; j < req->num_cmds; j++) {
@@ -520,7 +502,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
*
* Return: 0 if nothing in flight or -EBUSY if we should try again later.
* The caller must re-enable interrupts between tries since that's
- * the only way tcs_is_free() will ever return true and the only way
+ * the only way tcs_in_use will ever be updated and the only way
* RSC_DRV_CMD_ENABLE will ever be cleared.
*/
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
@@ -528,17 +510,14 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
{
unsigned long curr_enabled;
u32 addr;
- int i, j, k;
- int tcs_id = tcs->offset;
-
- for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
- if (tcs_is_free(drv, tcs_id))
- continue;
+ int j, k;
+ int i = tcs->offset;
- curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
+ for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i);
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
- addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+ addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, i, j);
for (k = 0; k < msg->num_cmds; k++) {
if (addr == msg->cmds[k].addr)
return -EBUSY;
@@ -556,18 +535,19 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
*
* Must be called with the drv->lock held since that protects tcs_in_use.
*
- * Return: The first tcs that's free.
+ * Return: The first tcs that's free or -EBUSY if all in use.
*/
static int find_free_tcs(struct tcs_group *tcs)
{
- int i;
+ const struct rsc_drv *drv = tcs->drv;
+ unsigned long i;
+ unsigned long max = tcs->offset + tcs->num_tcs;
- for (i = 0; i < tcs->num_tcs; i++) {
- if (tcs_is_free(tcs->drv, tcs->offset + i))
- return tcs->offset + i;
- }
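+ /* Look for the first TCS in this group that is not marked in the in-use bitmap */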
+ i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
+ if (i >= max)
+ return -EBUSY;
- return -EBUSY;
+ return i;
}
/**
@@ -754,8 +734,9 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
*/
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
- int m;
- struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
+ unsigned long set;
+ const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
+ unsigned long max;
/*
* If we made an active request on a RSC that does not have a
@@ -766,12 +747,10 @@ static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
if (!tcs->num_tcs)
tcs = &drv->tcs[WAKE_TCS];
- for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
- if (!tcs_is_free(drv, m))
- return true;
- }
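+ /* The controller is busy if any TCS in the group is still marked in use */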
+ max = tcs->offset + tcs->num_tcs;
+ set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
- return false;
+ return set < max;
}
/**
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 7ce06356d24c..bb21c4f1c0c4 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -200,6 +200,42 @@ static const struct rpmhpd_desc sm8250_desc = {
.num_pds = ARRAY_SIZE(sm8250_rpmhpds),
};
+/* SM8350 Power domains */
+static struct rpmhpd sm8350_mxc_ao;
+static struct rpmhpd sm8350_mxc = {
+ .pd = { .name = "mxc", },
+ .peer = &sm8350_mxc_ao,
+ .res_name = "mxc.lvl",
+};
+
+static struct rpmhpd sm8350_mxc_ao = {
+ .pd = { .name = "mxc_ao", },
+ .active_only = true,
+ .peer = &sm8350_mxc,
+ .res_name = "mxc.lvl",
+};
+
+static struct rpmhpd *sm8350_rpmhpds[] = {
+ [SM8350_CX] = &sdm845_cx,
+ [SM8350_CX_AO] = &sdm845_cx_ao,
+ [SM8350_EBI] = &sdm845_ebi,
+ [SM8350_GFX] = &sdm845_gfx,
+ [SM8350_LCX] = &sdm845_lcx,
+ [SM8350_LMX] = &sdm845_lmx,
+ [SM8350_MMCX] = &sm8150_mmcx,
+ [SM8350_MMCX_AO] = &sm8150_mmcx_ao,
+ [SM8350_MX] = &sdm845_mx,
+ [SM8350_MX_AO] = &sdm845_mx_ao,
+ [SM8350_MXC] = &sm8350_mxc,
+ [SM8350_MXC_AO] = &sm8350_mxc_ao,
+ [SM8350_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sm8350_desc = {
+ .rpmhpds = sm8350_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8350_rpmhpds),
+};
+
/* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = {
[SC7180_CX] = &sdm845_cx,
@@ -217,12 +253,32 @@ static const struct rpmhpd_desc sc7180_desc = {
.num_pds = ARRAY_SIZE(sc7180_rpmhpds),
};
+/* SC7280 RPMH powerdomains */
+static struct rpmhpd *sc7280_rpmhpds[] = {
+ [SC7280_CX] = &sdm845_cx,
+ [SC7280_CX_AO] = &sdm845_cx_ao,
+ [SC7280_EBI] = &sdm845_ebi,
+ [SC7280_GFX] = &sdm845_gfx,
+ [SC7280_MX] = &sdm845_mx,
+ [SC7280_MX_AO] = &sdm845_mx_ao,
+ [SC7280_LMX] = &sdm845_lmx,
+ [SC7280_LCX] = &sdm845_lcx,
+ [SC7280_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sc7280_desc = {
+ .rpmhpds = sc7280_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc7280_rpmhpds),
+};
+
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
+ { .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
+ { .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index cc4e0655a47b..4fb5aeeb0843 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -84,7 +84,7 @@
#define SMEM_GLOBAL_HOST 0xfffe
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 11
+#define SMEM_HOST_COUNT 14
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index 32bed249f90e..2a06d631e415 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -199,6 +199,8 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
{
struct wcnss_download_nv_req *req;
const struct firmware *fw;
+ struct device *dev = wcnss->dev;
+ const char *nvbin = NVBIN_FILE;
const void *data;
ssize_t left;
int ret;
@@ -207,10 +209,13 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
if (!req)
return -ENOMEM;
- ret = request_firmware(&fw, NVBIN_FILE, wcnss->dev);
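+ /* An optional "firmware-name" DT property overrides the default NV firmware path; -EINVAL just means the property is absent */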
+ ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin);
+ if (ret < 0 && ret != -EINVAL)
+ goto free_req;
+
+ ret = request_firmware(&fw, nvbin, dev);
if (ret < 0) {
- dev_err(wcnss->dev, "Failed to load nv file %s: %d\n",
- NVBIN_FILE, ret);
+ dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret);
goto free_req;
}
@@ -235,7 +240,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
if (ret < 0) {
- dev_err(wcnss->dev, "failed to send smd packet\n");
+ dev_err(dev, "failed to send smd packet\n");
goto release_fw;
}
@@ -248,7 +253,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
if (!ret) {
- dev_err(wcnss->dev, "timeout waiting for nv upload ack\n");
+ dev_err(dev, "timeout waiting for nv upload ack\n");
ret = -ETIMEDOUT;
} else {
*expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
index 9046b8c933cb..204e6135180b 100644
--- a/drivers/soc/renesas/rmobile-sysc.c
+++ b/drivers/soc/renesas/rmobile-sysc.c
@@ -14,8 +14,6 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
@@ -344,6 +342,8 @@ static int __init rmobile_init_pm_domains(void)
of_node_put(np);
break;
}
+
+ fwnode_dev_initialized(&np->fwnode, true);
}
put_special_pds();
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index df9a5ca8c99c..6bd22359d411 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -39,6 +39,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reboot.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -102,6 +103,9 @@
#define PMC_PWR_DET_VALUE 0xe4
+#define PMC_USB_DEBOUNCE_DEL 0xec
+#define PMC_USB_AO 0xf0
+
#define PMC_SCRATCH41 0x140
#define PMC_WAKE2_MASK 0x160
@@ -133,6 +137,13 @@
#define IO_DPD2_STATUS 0x1c4
#define SEL_DPD_TIM 0x1c8
+#define PMC_UTMIP_UHSIC_TRIGGERS 0x1ec
+#define PMC_UTMIP_UHSIC_SAVED_STATE 0x1f0
+
+#define PMC_UTMIP_TERM_PAD_CFG 0x1f8
+#define PMC_UTMIP_UHSIC_SLEEP_CFG 0x1fc
+#define PMC_UTMIP_UHSIC_FAKE 0x218
+
#define PMC_SCRATCH54 0x258
#define PMC_SCRATCH54_DATA_SHIFT 8
#define PMC_SCRATCH54_ADDR_SHIFT 0
@@ -145,8 +156,18 @@
#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
+#define PMC_UTMIP_UHSIC_LINE_WAKEUP 0x26c
+
+#define PMC_UTMIP_BIAS_MASTER_CNTRL 0x270
+#define PMC_UTMIP_MASTER_CONFIG 0x274
+#define PMC_UTMIP_UHSIC2_TRIGGERS 0x27c
+#define PMC_UTMIP_MASTER2_CONFIG 0x29c
+
#define GPU_RG_CNTRL 0x2d4
+#define PMC_UTMIP_PAD_CFG0 0x4c0
+#define PMC_UTMIP_UHSIC_SLEEP_CFG1 0x4d0
+#define PMC_UTMIP_SLEEPWALK_P3 0x4e0
/* Tegra186 and later */
#define WAKE_AOWAKE_CNTRL(x) (0x000 + ((x) << 2))
#define WAKE_AOWAKE_CNTRL_LEVEL (1 << 3)
@@ -237,6 +258,7 @@ struct tegra_powergate {
unsigned int id;
struct clk **clks;
unsigned int num_clks;
+ unsigned long *clk_rates;
struct reset_control *reset;
};
@@ -317,6 +339,8 @@ struct tegra_pmc_soc {
bool invert);
int (*irq_set_wake)(struct irq_data *data, unsigned int on);
int (*irq_set_type)(struct irq_data *data, unsigned int type);
+ int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state);
const char * const *reset_sources;
unsigned int num_reset_sources;
@@ -334,6 +358,7 @@ struct tegra_pmc_soc {
const struct pmc_clk_init_data *pmc_clks_data;
unsigned int num_pmc_clks;
bool has_blink_output;
+ bool has_usb_sleepwalk;
};
/**
@@ -517,6 +542,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
return -ENODEV;
}
+static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
+{
+ unsigned int retries = 100;
+ bool status;
+ int ret;
+
+ /*
+ * As per TRM documentation, the toggle command will be dropped by PMC
+ * if there is contention with HW-initiated toggling (i.e. a CPU core
+ * being power-gated); the command should be retried in that case.
+ */
+ do {
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ /* wait for PMC to execute the command */
+ ret = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 1, 10);
+ } while (ret == -ETIMEDOUT && retries--);
+
+ return ret;
+}
+
+static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
+{
+ return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
+}
+
+static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
+{
+ bool status;
+ int err;
+
+ /* wait while PMC power gating is contended */
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
+ status == true, 1, 100);
+ if (err)
+ return err;
+
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ /* wait for PMC to accept the command */
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
+ status == true, 1, 100);
+ if (err)
+ return err;
+
+ /* wait for PMC to execute the command */
+ err = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 10, 100000);
+ if (err)
+ return err;
+
+ return 0;
+}
+
/**
* tegra_powergate_set() - set the state of a partition
* @pmc: power management controller
@@ -526,7 +608,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
bool new_state)
{
- bool status;
int err;
if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
@@ -539,10 +620,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
return 0;
}
- tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
-
- err = readx_poll_timeout(tegra_powergate_state, id, status,
- status == new_state, 10, 100000);
+ err = pmc->soc->powergate_set(pmc, id, new_state);
mutex_unlock(&pmc->powergates_lock);
@@ -586,6 +664,57 @@ out:
return 0;
}
+static int tegra_powergate_prepare_clocks(struct tegra_powergate *pg)
+{
+ unsigned long safe_rate = 100 * 1000 * 1000;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_clks; i++) {
+ pg->clk_rates[i] = clk_get_rate(pg->clks[i]);
+
+ if (!pg->clk_rates[i]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (pg->clk_rates[i] <= safe_rate)
+ continue;
+
+ /*
+ * We don't know whether voltage state is okay for the
+ * current clock rate, hence it's better to temporarily
+ * switch clock to a safe rate which is suitable for
+ * all voltages, before enabling the clock.
+ */
+ err = clk_set_rate(pg->clks[i], safe_rate);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ while (i--)
+ clk_set_rate(pg->clks[i], pg->clk_rates[i]);
+
+ return err;
+}
+
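+/* Restore the clock rates saved by tegra_powergate_prepare_clocks(). */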
+static int tegra_powergate_unprepare_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_clks; i++) {
+ err = clk_set_rate(pg->clks[i], pg->clk_rates[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static void tegra_powergate_disable_clocks(struct tegra_powergate *pg)
{
unsigned int i;
@@ -636,9 +765,13 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
usleep_range(10, 20);
+ err = tegra_powergate_prepare_clocks(pg);
+ if (err)
+ goto powergate_off;
+
err = tegra_powergate_enable_clocks(pg);
if (err)
- goto disable_clks;
+ goto unprepare_clks;
usleep_range(10, 20);
@@ -662,12 +795,19 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
if (disable_clocks)
tegra_powergate_disable_clocks(pg);
+ err = tegra_powergate_unprepare_clocks(pg);
+ if (err)
+ return err;
+
return 0;
disable_clks:
tegra_powergate_disable_clocks(pg);
usleep_range(10, 20);
+unprepare_clks:
+ tegra_powergate_unprepare_clocks(pg);
+
powergate_off:
tegra_powergate_set(pg->pmc, pg->id, false);
@@ -678,10 +818,14 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
{
int err;
- err = tegra_powergate_enable_clocks(pg);
+ err = tegra_powergate_prepare_clocks(pg);
if (err)
return err;
+ err = tegra_powergate_enable_clocks(pg);
+ if (err)
+ goto unprepare_clks;
+
usleep_range(10, 20);
err = reset_control_assert(pg->reset);
@@ -698,6 +842,10 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
if (err)
goto assert_resets;
+ err = tegra_powergate_unprepare_clocks(pg);
+ if (err)
+ return err;
+
return 0;
assert_resets:
@@ -709,6 +857,9 @@ assert_resets:
disable_clks:
tegra_powergate_disable_clocks(pg);
+unprepare_clks:
+ tegra_powergate_unprepare_clocks(pg);
+
return err;
}
@@ -739,7 +890,8 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain)
err = reset_control_acquire(pg->reset);
if (err < 0) {
- pr_err("failed to acquire resets: %d\n", err);
+ dev_err(dev, "failed to acquire resets for PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -826,6 +978,12 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
if (!pg)
return -ENOMEM;
+ pg->clk_rates = kzalloc(sizeof(*pg->clk_rates), GFP_KERNEL);
+ if (!pg->clk_rates) {
+ kfree(pg);
+ return -ENOMEM;
+ }
+
pg->id = id;
pg->clks = &clk;
pg->num_clks = 1;
@@ -837,6 +995,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id,
err);
+ kfree(pg->clk_rates);
kfree(pg);
return err;
@@ -987,6 +1146,12 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
if (!pg->clks)
return -ENOMEM;
+ pg->clk_rates = kcalloc(count, sizeof(*pg->clk_rates), GFP_KERNEL);
+ if (!pg->clk_rates) {
+ kfree(pg->clks);
+ return -ENOMEM;
+ }
+
for (i = 0; i < count; i++) {
pg->clks[i] = of_clk_get(np, i);
if (IS_ERR(pg->clks[i])) {
@@ -1003,6 +1168,7 @@ err:
while (i--)
clk_put(pg->clks[i]);
+ kfree(pg->clk_rates);
kfree(pg->clks);
return err;
@@ -2443,6 +2609,67 @@ static void tegra_pmc_clock_register(struct tegra_pmc *pmc,
err);
}
+static const struct regmap_range pmc_usb_sleepwalk_ranges[] = {
+ regmap_reg_range(PMC_USB_DEBOUNCE_DEL, PMC_USB_AO),
+ regmap_reg_range(PMC_UTMIP_UHSIC_TRIGGERS, PMC_UTMIP_UHSIC_SAVED_STATE),
+ regmap_reg_range(PMC_UTMIP_TERM_PAD_CFG, PMC_UTMIP_UHSIC_FAKE),
+ regmap_reg_range(PMC_UTMIP_UHSIC_LINE_WAKEUP, PMC_UTMIP_UHSIC_LINE_WAKEUP),
+ regmap_reg_range(PMC_UTMIP_BIAS_MASTER_CNTRL, PMC_UTMIP_MASTER_CONFIG),
+ regmap_reg_range(PMC_UTMIP_UHSIC2_TRIGGERS, PMC_UTMIP_MASTER2_CONFIG),
+ regmap_reg_range(PMC_UTMIP_PAD_CFG0, PMC_UTMIP_UHSIC_SLEEP_CFG1),
+ regmap_reg_range(PMC_UTMIP_SLEEPWALK_P3, PMC_UTMIP_SLEEPWALK_P3),
+};
+
+static const struct regmap_access_table pmc_usb_sleepwalk_table = {
+ .yes_ranges = pmc_usb_sleepwalk_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pmc_usb_sleepwalk_ranges),
+};
+
+static int tegra_pmc_regmap_readl(void *context, unsigned int offset, unsigned int *value)
+{
+ struct tegra_pmc *pmc = context;
+
+ *value = tegra_pmc_readl(pmc, offset);
+ return 0;
+}
+
+static int tegra_pmc_regmap_writel(void *context, unsigned int offset, unsigned int value)
+{
+ struct tegra_pmc *pmc = context;
+
+ tegra_pmc_writel(pmc, value, offset);
+ return 0;
+}
+
+static const struct regmap_config usb_sleepwalk_regmap_config = {
+ .name = "usb_sleepwalk",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+ .rd_table = &pmc_usb_sleepwalk_table,
+ .wr_table = &pmc_usb_sleepwalk_table,
+ .reg_read = tegra_pmc_regmap_readl,
+ .reg_write = tegra_pmc_regmap_writel,
+};
+
+static int tegra_pmc_regmap_init(struct tegra_pmc *pmc)
+{
+ struct regmap *regmap;
+ int err;
+
+ if (pmc->soc->has_usb_sleepwalk) {
+ regmap = devm_regmap_init(pmc->dev, NULL, pmc, &usb_sleepwalk_regmap_config);
+ if (IS_ERR(regmap)) {
+ err = PTR_ERR(regmap);
+ dev_err(pmc->dev, "failed to allocate register map (%d)\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int tegra_pmc_probe(struct platform_device *pdev)
{
void __iomem *base;
@@ -2548,6 +2775,10 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (err)
goto cleanup_restart_handler;
+ err = tegra_pmc_regmap_init(pmc);
+ if (err < 0)
+ goto cleanup_restart_handler;
+
err = tegra_powergate_init(pmc, pdev->dev.of_node);
if (err < 0)
goto cleanup_powergates;
@@ -2699,6 +2930,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra20_powergate_set,
.reset_sources = NULL,
.num_reset_sources = 0,
.reset_levels = NULL,
@@ -2706,6 +2938,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = true,
+ .has_usb_sleepwalk = false,
};
static const char * const tegra30_powergates[] = {
@@ -2757,6 +2990,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra20_powergate_set,
.reset_sources = tegra30_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
@@ -2764,6 +2998,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
+ .has_usb_sleepwalk = false,
};
static const char * const tegra114_powergates[] = {
@@ -2811,6 +3046,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
.reset_sources = tegra30_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
@@ -2818,6 +3054,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
+ .has_usb_sleepwalk = false,
};
static const char * const tegra124_powergates[] = {
@@ -2925,6 +3162,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
.reset_sources = tegra30_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
@@ -2932,6 +3170,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
+ .has_usb_sleepwalk = true,
};
static const char * const tegra210_powergates[] = {
@@ -3048,6 +3287,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
.irq_set_wake = tegra210_pmc_irq_set_wake,
.irq_set_type = tegra210_pmc_irq_set_type,
.reset_sources = tegra210_reset_sources,
@@ -3059,6 +3299,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
+ .has_usb_sleepwalk = true,
};
#define TEGRA186_IO_PAD_TABLE(_pad) \
@@ -3214,6 +3455,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = false,
+ .has_usb_sleepwalk = false,
};
#define TEGRA194_IO_PAD_TABLE(_pad) \
@@ -3347,6 +3589,7 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = false,
+ .has_usb_sleepwalk = false,
};
static const struct tegra_pmc_regs tegra234_pmc_regs = {
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
index 7f21f31de09d..0e776b20f625 100644
--- a/drivers/soc/tegra/regulators-tegra30.c
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
* survive the voltage drop if it's running on a higher frequency.
*/
if (!cpu_min_uV_consumers)
- cpu_min_uV = cpu_uV;
+ cpu_min_uV = max(cpu_uV, cpu_min_uV);
/*
* Bootloader shall set up voltages correctly, but if it
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 8b2b4771f420..e39897c38e6a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -2738,7 +2738,7 @@ static int vchiq_probe(struct platform_device *pdev)
return -ENOENT;
}
- drvdata->fw = rpi_firmware_get(fw_node);
+ drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
of_node_put(fw_node);
if (!drvdata->fw)
return -EPROBE_DEFER;
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 56263ae3b1d7..3aa33ea9e6a6 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -6,3 +6,6 @@ optee-objs += rpc.o
optee-objs += supp.o
optee-objs += shm_pool.o
optee-objs += device.o
+
+# for tracing framework to find optee_trace.h
+CFLAGS_call.o := -I$(src)
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 7a77e375b503..6132cc8d014c 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -14,6 +14,8 @@
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
+#define CREATE_TRACE_POINTS
+#include "optee_trace.h"
struct optee_call_waiter {
struct list_head list_node;
@@ -138,9 +140,11 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
while (true) {
struct arm_smccc_res res;
+ trace_optee_invoke_fn_begin(&param);
optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
param.a4, param.a5, param.a6, param.a7,
&res);
+ trace_optee_invoke_fn_end(&param, &res);
if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
/*
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 319a1e701163..ddb8f9ecf307 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
return rc;
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p->u.memref.shm = shm;
-
- /* Check that the memref is covered by the shm object */
- if (p->u.memref.size) {
- size_t o = p->u.memref.shm_offs +
- p->u.memref.size - 1;
-
- rc = tee_shm_get_pa(shm, o, NULL);
- if (rc)
- return rc;
- }
break;
case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
diff --git a/drivers/tee/optee/optee_trace.h b/drivers/tee/optee/optee_trace.h
new file mode 100644
index 000000000000..7c954eefa4bf
--- /dev/null
+++ b/drivers/tee/optee/optee_trace.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * optee trace points
+ *
+ * Copyright (C) 2021 Synaptics Incorporated
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM optee
+
+#if !defined(_TRACE_OPTEE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OPTEE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/tracepoint.h>
+#include "optee_private.h"
+
+TRACE_EVENT(optee_invoke_fn_begin,
+ TP_PROTO(struct optee_rpc_param *param),
+ TP_ARGS(param),
+
+ TP_STRUCT__entry(
+ __field(void *, param)
+ __array(u32, args, 8)
+ ),
+
+ TP_fast_assign(
+ __entry->param = param;
+ BUILD_BUG_ON(sizeof(*param) < sizeof(__entry->args));
+ memcpy(__entry->args, param, sizeof(__entry->args));
+ ),
+
+ TP_printk("param=%p (%x, %x, %x, %x, %x, %x, %x, %x)", __entry->param,
+ __entry->args[0], __entry->args[1], __entry->args[2],
+ __entry->args[3], __entry->args[4], __entry->args[5],
+ __entry->args[6], __entry->args[7])
+);
+
+TRACE_EVENT(optee_invoke_fn_end,
+ TP_PROTO(struct optee_rpc_param *param, struct arm_smccc_res *res),
+ TP_ARGS(param, res),
+
+ TP_STRUCT__entry(
+ __field(void *, param)
+ __array(unsigned long, rets, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->param = param;
+ BUILD_BUG_ON(sizeof(*res) < sizeof(__entry->rets));
+ memcpy(__entry->rets, res, sizeof(__entry->rets));
+ ),
+
+ TP_printk("param=%p ret (%lx, %lx, %lx, %lx)", __entry->param,
+ __entry->rets[0], __entry->rets[1], __entry->rets[2],
+ __entry->rets[3])
+);
+#endif /* _TRACE_OPTEE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE optee_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index d6a8604157ab..f81261cb52b8 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -261,11 +261,11 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
struct qe_bd *bdp = qe_port->tx_bd_base;
while (1) {
- if (qe_ioread16be(&bdp->status) & BD_SC_READY)
+ if (ioread16be(&bdp->status) & BD_SC_READY)
/* This BD is not done, so return "not done" */
return 0;
- if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+ if (ioread16be(&bdp->status) & BD_SC_WRAP)
/*
* This BD is done and it's the last one, so return
* "done"
@@ -344,10 +344,10 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
*p++ = port->x_char;
- qe_iowrite16be(1, &bdp->length);
+ iowrite16be(1, &bdp->length);
qe_setbits_be16(&bdp->status, BD_SC_READY);
/* Get next BD. */
- if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+ if (ioread16be(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->tx_bd_base;
else
bdp++;
@@ -366,7 +366,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
/* Pick next descriptor and fill from buffer */
bdp = qe_port->tx_cur;
- while (!(qe_ioread16be(&bdp->status) & BD_SC_READY) &&
+ while (!(ioread16be(&bdp->status) & BD_SC_READY) &&
(xmit->tail != xmit->head)) {
count = 0;
p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
@@ -379,11 +379,11 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
break;
}
- qe_iowrite16be(count, &bdp->length);
+ iowrite16be(count, &bdp->length);
qe_setbits_be16(&bdp->status, BD_SC_READY);
/* Get next BD. */
- if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+ if (ioread16be(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->tx_bd_base;
else
bdp++;
@@ -416,7 +416,7 @@ static void qe_uart_start_tx(struct uart_port *port)
container_of(port, struct uart_qe_port, port);
/* If we currently are transmitting, then just return */
- if (qe_ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
+ if (ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
return;
/* Otherwise, pump the port and start transmission */
@@ -471,14 +471,14 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
*/
bdp = qe_port->rx_cur;
while (1) {
- status = qe_ioread16be(&bdp->status);
+ status = ioread16be(&bdp->status);
/* If this one is empty, then we assume we've read them all */
if (status & BD_SC_EMPTY)
break;
/* get number of characters, and check space in RX buffer */
- i = qe_ioread16be(&bdp->length);
+ i = ioread16be(&bdp->length);
/* If we don't have enough room in RX buffer for the entire BD,
* then we try later, which will be the next RX interrupt.
@@ -512,7 +512,7 @@ error_return:
qe_clrsetbits_be16(&bdp->status,
BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID,
BD_SC_EMPTY);
- if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+ if (ioread16be(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->rx_bd_base;
else
bdp++;
@@ -569,8 +569,8 @@ static irqreturn_t qe_uart_int(int irq, void *data)
u16 events;
/* Clear the interrupts */
- events = qe_ioread16be(&uccp->ucce);
- qe_iowrite16be(events, &uccp->ucce);
+ events = ioread16be(&uccp->ucce);
+ iowrite16be(events, &uccp->ucce);
if (events & UCC_UART_UCCE_BRKE)
uart_handle_break(&qe_port->port);
@@ -601,17 +601,17 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
bdp = qe_port->rx_bd_base;
qe_port->rx_cur = qe_port->rx_bd_base;
for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) {
- qe_iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
- qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
- qe_iowrite16be(0, &bdp->length);
+ iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+ iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ iowrite16be(0, &bdp->length);
bd_virt += qe_port->rx_fifosize;
bdp++;
}
/* */
- qe_iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
- qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
- qe_iowrite16be(0, &bdp->length);
+ iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+ iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ iowrite16be(0, &bdp->length);
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
@@ -622,9 +622,9 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
qe_port->tx_cur = qe_port->tx_bd_base;
bdp = qe_port->tx_bd_base;
for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) {
- qe_iowrite16be(BD_SC_INTRPT, &bdp->status);
- qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
- qe_iowrite16be(0, &bdp->length);
+ iowrite16be(BD_SC_INTRPT, &bdp->status);
+ iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ iowrite16be(0, &bdp->length);
bd_virt += qe_port->tx_fifosize;
bdp++;
}
@@ -634,9 +634,9 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
qe_setbits_be16(&qe_port->tx_cur->status, BD_SC_P);
#endif
- qe_iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status);
- qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
- qe_iowrite16be(0, &bdp->length);
+ iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status);
+ iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ iowrite16be(0, &bdp->length);
}
/*
@@ -658,21 +658,21 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
/* Program the UCC UART parameter RAM */
- qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr);
- qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr);
- qe_iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr);
- qe_iowrite16be(0x10, &uccup->maxidl);
- qe_iowrite16be(1, &uccup->brkcr);
- qe_iowrite16be(0, &uccup->parec);
- qe_iowrite16be(0, &uccup->frmec);
- qe_iowrite16be(0, &uccup->nosec);
- qe_iowrite16be(0, &uccup->brkec);
- qe_iowrite16be(0, &uccup->uaddr[0]);
- qe_iowrite16be(0, &uccup->uaddr[1]);
- qe_iowrite16be(0, &uccup->toseq);
+ iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr);
+ iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr);
+ iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr);
+ iowrite16be(0x10, &uccup->maxidl);
+ iowrite16be(1, &uccup->brkcr);
+ iowrite16be(0, &uccup->parec);
+ iowrite16be(0, &uccup->frmec);
+ iowrite16be(0, &uccup->nosec);
+ iowrite16be(0, &uccup->brkec);
+ iowrite16be(0, &uccup->uaddr[0]);
+ iowrite16be(0, &uccup->uaddr[1]);
+ iowrite16be(0, &uccup->toseq);
for (i = 0; i < 8; i++)
- qe_iowrite16be(0xC000, &uccup->cchars[i]);
- qe_iowrite16be(0xc0ff, &uccup->rccm);
+ iowrite16be(0xC000, &uccup->cchars[i]);
+ iowrite16be(0xc0ff, &uccup->rccm);
/* Configure the GUMR registers for UART */
if (soft_uart) {
@@ -702,30 +702,30 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
#endif
/* Disable rx interrupts and clear all pending events. */
- qe_iowrite16be(0, &uccp->uccm);
- qe_iowrite16be(0xffff, &uccp->ucce);
- qe_iowrite16be(0x7e7e, &uccp->udsr);
+ iowrite16be(0, &uccp->uccm);
+ iowrite16be(0xffff, &uccp->ucce);
+ iowrite16be(0x7e7e, &uccp->udsr);
/* Initialize UPSMR */
- qe_iowrite16be(0, &uccp->upsmr);
+ iowrite16be(0, &uccp->upsmr);
if (soft_uart) {
- qe_iowrite16be(0x30, &uccup->supsmr);
- qe_iowrite16be(0, &uccup->res92);
- qe_iowrite32be(0, &uccup->rx_state);
- qe_iowrite32be(0, &uccup->rx_cnt);
- qe_iowrite8(0, &uccup->rx_bitmark);
- qe_iowrite8(10, &uccup->rx_length);
- qe_iowrite32be(0x4000, &uccup->dump_ptr);
- qe_iowrite8(0, &uccup->rx_temp_dlst_qe);
- qe_iowrite32be(0, &uccup->rx_frame_rem);
- qe_iowrite8(0, &uccup->rx_frame_rem_size);
+ iowrite16be(0x30, &uccup->supsmr);
+ iowrite16be(0, &uccup->res92);
+ iowrite32be(0, &uccup->rx_state);
+ iowrite32be(0, &uccup->rx_cnt);
+ iowrite8(0, &uccup->rx_bitmark);
+ iowrite8(10, &uccup->rx_length);
+ iowrite32be(0x4000, &uccup->dump_ptr);
+ iowrite8(0, &uccup->rx_temp_dlst_qe);
+ iowrite32be(0, &uccup->rx_frame_rem);
+ iowrite8(0, &uccup->rx_frame_rem_size);
/* Soft-UART requires TX to be 1X */
- qe_iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1,
+ iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1,
&uccup->tx_mode);
- qe_iowrite16be(0, &uccup->tx_state);
- qe_iowrite8(0, &uccup->resD4);
- qe_iowrite16be(0, &uccup->resD5);
+ iowrite16be(0, &uccup->tx_state);
+ iowrite8(0, &uccup->resD4);
+ iowrite16be(0, &uccup->resD5);
/* Set UART mode.
* Enable receive and transmit.
@@ -850,9 +850,9 @@ static void qe_uart_set_termios(struct uart_port *port,
struct ucc_slow __iomem *uccp = qe_port->uccp;
unsigned int baud;
unsigned long flags;
- u16 upsmr = qe_ioread16be(&uccp->upsmr);
+ u16 upsmr = ioread16be(&uccp->upsmr);
struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
- u16 supsmr = qe_ioread16be(&uccup->supsmr);
+ u16 supsmr = ioread16be(&uccup->supsmr);
u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
/* Character length programmed into the mode register is the
@@ -950,10 +950,10 @@ static void qe_uart_set_termios(struct uart_port *port,
/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
- qe_iowrite16be(upsmr, &uccp->upsmr);
+ iowrite16be(upsmr, &uccp->upsmr);
if (soft_uart) {
- qe_iowrite16be(supsmr, &uccup->supsmr);
- qe_iowrite8(char_length, &uccup->rx_length);
+ iowrite16be(supsmr, &uccup->supsmr);
+ iowrite8(char_length, &uccup->rx_length);
/* Soft-UART requires a 1X multiplier for TX */
qe_setbrg(qe_port->us_info.rx_clock, baud, 16);