author    Linus Torvalds <torvalds@linux-foundation.org>  2020-04-01 15:20:57 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-04-01 15:20:57 -0700
commit    4646de87d32526ee87b46c2e0130413367fb5362 (patch)
tree      97da47fbec9a71ab8c6a8e155b225d48af6866b2 /drivers
parent    c101e9bbce4ae2947b35a660f17d617fc3827595 (diff)
parent    0a67003b1985c79811160af1b01aca07cd5fbc53 (diff)
Merge tag 'mailbox-v5.7' of git://git.linaro.org/landing-teams/working/fujitsu/integration
Pull mailbox updates from Jassi Brar:

 - imx: add support for i.MX8/8X to the existing driver

 - mediatek: drop the atomic execution feature, add flush support

 - allwinner: new 'msgbox' controller driver

 - armada: misc: drop a redundant error print

 - bcm: misc: catch an error in probe and an snprintf buffer overflow

* tag 'mailbox-v5.7' of git://git.linaro.org/landing-teams/working/fujitsu/integration:
  mailbox: imx: add SCU MU support
  mailbox: imx: restructure code to make easy for new MU
  dt-bindings: mailbox: imx-mu: add SCU MU support
  mailbox: mediatek: remove implementation related to atomic_exec
  mailbox: mediatek: implement flush function
  dt-binding: gce: remove atomic_exec in mboxes property
  maillbox: bcm-flexrm-mailbox: handle cmpl_pool dma allocation failure
  mailbox: sun6i-msgbox: Add a new mailbox driver
  dt-bindings: mailbox: Add a binding for the sun6i msgbox
  mailbox: bcm-pdc: Use scnprintf() for avoiding potential buffer overflow
  mailbox:armada-37xx-rwtm:remove duplicate print in armada_37xx_mbox_probe()
Diffstat (limited to 'drivers')

 drivers/mailbox/Kconfig                    |   9
 drivers/mailbox/Makefile                   |   2
 drivers/mailbox/armada-37xx-rwtm-mailbox.c |   8
 drivers/mailbox/bcm-flexrm-mailbox.c       |   2
 drivers/mailbox/bcm-pdc-mailbox.c          |  20
 drivers/mailbox/imx-mailbox.c              | 288
 drivers/mailbox/mtk-cmdq-mailbox.c         | 128
 drivers/mailbox/sun6i-msgbox.c             | 326
 8 files changed, 647 insertions(+), 136 deletions(-)
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ab4eb750bbdd..5a577a6734cf 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -227,4 +227,13 @@ config ZYNQMP_IPI_MBOX
message to the IPI buffer and will access the IPI control
registers to kick the other processor or enquire status.
+config SUN6I_MSGBOX
+ tristate "Allwinner sun6i/sun8i/sun9i/sun50i Message Box"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default ARCH_SUNXI
+ help
+ Mailbox implementation for the hardware message box present in
+ various Allwinner SoCs. This mailbox is used for communication
+ between the application CPUs and the power management coprocessor.
+
endif
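
For reference, a client drives a controller like this through the generic mailbox framework rather than touching the registers directly. A minimal sketch of a hypothetical client driver follows; only the <linux/mailbox_client.h> calls are real kernel API, and everything named demo_* is invented for illustration.

#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>

static void demo_rx_callback(struct mbox_client *cl, void *msg)
{
	/* The sun6i msgbox delivers one 32-bit word per message. */
	dev_info(cl->dev, "received 0x%08x\n", *(u32 *)msg);
}

static int demo_probe(struct platform_device *pdev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	u32 msg = 0x12345678;
	int ret;

	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = &pdev->dev;
	cl->rx_callback = demo_rx_callback;
	cl->tx_block = true;	/* block until the remote end acks */
	cl->tx_tout = 100;	/* ms */

	/* Index 0 of the "mboxes" property in this client's DT node. */
	chan = mbox_request_channel(cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}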
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c22fad6f696b..2e4364ef5c47 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -48,3 +48,5 @@ obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
+
+obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
index 02b7b28e6969..9f2ce7f03c67 100644
--- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -156,16 +156,12 @@ static int armada_37xx_mbox_probe(struct platform_device *pdev)
return -ENOMEM;
mbox->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->base)) {
- dev_err(&pdev->dev, "ioremap failed\n");
+ if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
- }
mbox->irq = platform_get_irq(pdev, 0);
- if (mbox->irq < 0) {
- dev_err(&pdev->dev, "Cannot get irq\n");
+ if (mbox->irq < 0)
return mbox->irq;
- }
mbox->dev = &pdev->dev;
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 8ee9db274802..bee33abb5308 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1599,6 +1599,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
1 << RING_CMPL_ALIGN_ORDER, 0);
if (!mbox->cmpl_pool) {
ret = -ENOMEM;
+ goto fail_destroy_bd_pool;
}
/* Allocate platform MSIs for each ring */
@@ -1661,6 +1662,7 @@ fail_free_debugfs_root:
platform_msi_domain_free_irqs(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
+fail_destroy_bd_pool:
dma_pool_destroy(mbox->bd_pool);
fail:
return ret;
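
The added goto is the whole fix: previously a failed cmpl_pool allocation set ret but fell straight through into the rest of probe, and nothing released the bd_pool created just before it. Reduced to a self-contained sketch of the reverse-order unwind pattern (kmalloc() stands in for the DMA pool calls; the names are illustrative):

#include <linux/slab.h>

static int demo_alloc_pair(void **bd, void **cmpl)
{
	*bd = kmalloc(64, GFP_KERNEL);
	if (!*bd)
		return -ENOMEM;		/* nothing to unwind yet */

	*cmpl = kmalloc(64, GFP_KERNEL);
	if (!*cmpl)
		goto fail_free_bd;	/* the jump the patch adds */

	return 0;

fail_free_bd:
	kfree(*bd);
	*bd = NULL;
	return -ENOMEM;
}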
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index fcb3b18a0678..c10a9318a4b7 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -436,33 +436,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
pdcs = filp->private_data;
out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"SPU %u stats:\n", pdcs->pdc_idx);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC requests....................%u\n",
pdcs->pdc_requests);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC responses...................%u\n",
pdcs->pdc_replies);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx not done.....................%u\n",
pdcs->last_tx_not_done);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx ring full....................%u\n",
pdcs->tx_ring_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx ring full....................%u\n",
pdcs->rx_ring_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx desc write fail. Ring full...%u\n",
pdcs->txnobuf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx desc write fail. Ring full...%u\n",
pdcs->rxnobuf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Receive overflow................%u\n",
pdcs->rx_oflow);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Num frags in rx ring............%u\n",
NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
pdcs->nrxpost));
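
The mechanical snprintf() to scnprintf() substitution matters because of the return values: snprintf() returns the length the output would have had without truncation, so accumulating it into out_offset can push the offset past the end of the buffer, after which the next size argument (out_count - out_offset) underflows to a huge unsigned value. scnprintf() returns only what was actually stored. A self-contained illustration with toy sizes:

#include <linux/kernel.h>	/* snprintf(), scnprintf() */

static void demo_offset_accounting(void)
{
	char buf[8];
	int off = 0;

	/* snprintf() reports the untruncated length: off becomes 10,
	 * past the 8-byte buffer, and sizeof(buf) - off wraps. */
	off += snprintf(buf, sizeof(buf), "0123456789");

	/* scnprintf() reports what was stored (7 chars plus NUL), so
	 * off can never run past the buffer however much is dropped. */
	off = 0;
	off += scnprintf(buf, sizeof(buf), "0123456789");
}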
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 2cdcdc5f1119..7906624a731c 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -4,8 +4,10 @@
*/
#include <linux/clk.h>
+#include <linux/firmware/imx/ipc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
@@ -27,6 +29,8 @@
#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
#define IMX_MU_CHANS 16
+/* TX0/RX0/RXDB[0-3] */
+#define IMX_MU_SCU_CHANS 6
#define IMX_MU_CHAN_NAME_SIZE 20
enum imx_mu_chan_type {
@@ -36,11 +40,9 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RXDB, /* Rx doorbell */
};
-struct imx_mu_dcfg {
- u32 xTR[4]; /* Transmit Registers */
- u32 xRR[4]; /* Receive Registers */
- u32 xSR; /* Status Register */
- u32 xCR; /* Control Register */
+struct imx_sc_rpc_msg_max {
+ struct imx_sc_rpc_msg hdr;
+ u32 data[7];
};
struct imx_mu_con_priv {
@@ -67,18 +69,14 @@ struct imx_mu_priv {
bool side_b;
};
-static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
- .xTR = {0x0, 0x4, 0x8, 0xc},
- .xRR = {0x10, 0x14, 0x18, 0x1c},
- .xSR = 0x20,
- .xCR = 0x24,
-};
-
-static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
- .xTR = {0x20, 0x24, 0x28, 0x2c},
- .xRR = {0x40, 0x44, 0x48, 0x4c},
- .xSR = 0x60,
- .xCR = 0x64,
+struct imx_mu_dcfg {
+ int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
+ int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+ void (*init)(struct imx_mu_priv *priv);
+ u32 xTR[4]; /* Transmit Registers */
+ u32 xRR[4]; /* Receive Registers */
+ u32 xSR; /* Status Register */
+ u32 xCR; /* Control Register */
};
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -111,6 +109,117 @@ static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
return val;
}
+static int imx_mu_generic_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ u32 *arg = data;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_TXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
+ tasklet_schedule(&cp->txdb_tasklet);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_generic_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ u32 dat;
+
+ dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
+ mbox_chan_received_data(cp->chan, (void *)&dat);
+
+ return 0;
+}
+
+static int imx_mu_scu_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ struct imx_sc_rpc_msg_max *msg = data;
+ u32 *arg = data;
+ int i, ret;
+ u32 xsr;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ if (msg->hdr.size > sizeof(*msg)) {
+ /*
+ * The real message size can be different to
+ * struct imx_sc_rpc_msg_max size
+ */
+ dev_err(priv->dev, "Exceed max msg size (%zu) on TX, got: %i\n", sizeof(*msg), msg->hdr.size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4 && i < msg->hdr.size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ for (; i < msg->hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR,
+ xsr,
+ xsr & IMX_MU_xSR_TEn(i % 4),
+ 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "Send data index: %d timeout\n", i);
+ return ret;
+ }
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_scu_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ struct imx_sc_rpc_msg_max msg;
+ u32 *data = (u32 *)&msg;
+ int i, ret;
+ u32 xsr;
+
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
+
+ if (msg.hdr.size > sizeof(msg)) {
+ dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
+ sizeof(msg), msg.hdr.size);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < msg.hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR, xsr,
+ xsr & IMX_MU_xSR_RFn(i % 4), 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "timeout read idx %d\n", i);
+ return ret;
+ }
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(0), 0);
+ mbox_chan_received_data(cp->chan, (void *)&msg);
+
+ return 0;
+}
+
static void imx_mu_txdb_tasklet(unsigned long data)
{
struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
@@ -123,7 +232,7 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
struct mbox_chan *chan = p;
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- u32 val, ctrl, dat;
+ u32 val, ctrl;
ctrl = imx_mu_read(priv, priv->dcfg->xCR);
val = imx_mu_read(priv, priv->dcfg->xSR);
@@ -152,8 +261,7 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
mbox_chan_txdone(chan, 0);
} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
- dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
- mbox_chan_received_data(chan, (void *)&dat);
+ priv->dcfg->rx(priv, cp);
} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
mbox_chan_received_data(chan, NULL);
@@ -169,23 +277,8 @@ static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- u32 *arg = data;
- switch (cp->type) {
- case IMX_MU_TYPE_TX:
- imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
- break;
- case IMX_MU_TYPE_TXDB:
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
- tasklet_schedule(&cp->txdb_tasklet);
- break;
- default:
- dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
- return -EINVAL;
- }
-
- return 0;
+ return priv->dcfg->tx(priv, cp, data);
}
static int imx_mu_startup(struct mbox_chan *chan)
@@ -256,6 +349,42 @@ static const struct mbox_chan_ops imx_mu_ops = {
.shutdown = imx_mu_shutdown,
};
+static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+
+ switch (type) {
+ case IMX_MU_TYPE_TX:
+ case IMX_MU_TYPE_RX:
+ if (idx != 0)
+ dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
+ chan = type;
+ break;
+ case IMX_MU_TYPE_RXDB:
+ chan = 2 + idx;
+ break;
+ default:
+ dev_err(mbox->dev, "Invalid chan type: %d\n", type);
+ return NULL;
+ }
+
+ if (chan >= mbox->num_chans) {
+ dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
+
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
@@ -280,6 +409,22 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i % 4;
+ cp->type = i >> 2;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_CHANS;
+ priv->mbox.of_xlate = imx_mu_xlate;
+
if (priv->side_b)
return;
@@ -287,13 +432,34 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, 0, priv->dcfg->xCR);
}
+static void imx_mu_init_scu(struct imx_mu_priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i < 2 ? 0 : i - 2;
+ cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_SCU_CHANS;
+ priv->mbox.of_xlate = imx_mu_scu_xlate;
+
+ /* Set default MU configuration */
+ imx_mu_write(priv, 0, priv->dcfg->xCR);
+}
+
static int imx_mu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_mu_priv *priv;
const struct imx_mu_dcfg *dcfg;
- unsigned int i;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -329,32 +495,19 @@ static int imx_mu_probe(struct platform_device *pdev)
return ret;
}
- for (i = 0; i < IMX_MU_CHANS; i++) {
- struct imx_mu_con_priv *cp = &priv->con_priv[i];
-
- cp->idx = i % 4;
- cp->type = i >> 2;
- cp->chan = &priv->mbox_chans[i];
- priv->mbox_chans[i].con_priv = cp;
- snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
- }
-
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
+ priv->dcfg->init(priv);
+
spin_lock_init(&priv->xcr_lock);
priv->mbox.dev = dev;
priv->mbox.ops = &imx_mu_ops;
priv->mbox.chans = priv->mbox_chans;
- priv->mbox.num_chans = IMX_MU_CHANS;
- priv->mbox.of_xlate = imx_mu_xlate;
priv->mbox.txdone_irq = true;
platform_set_drvdata(pdev, priv);
- imx_mu_init_generic(priv);
-
return devm_mbox_controller_register(dev, &priv->mbox);
}
@@ -367,9 +520,40 @@ static int imx_mu_remove(struct platform_device *pdev)
return 0;
}
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x20, 0x24, 0x28, 0x2c},
+ .xRR = {0x40, 0x44, 0x48, 0x4c},
+ .xSR = 0x60,
+ .xCR = 0x64,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
+ .tx = imx_mu_scu_tx,
+ .rx = imx_mu_scu_rx,
+ .init = imx_mu_init_scu,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
+ { .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
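
For a sense of what travels over this transport: an SCU RPC message begins with a header word whose size field counts 32-bit words including the header itself, which is what imx_mu_scu_tx() validates and what bounds its polling loop. A hedged client-side sketch follows; the struct below mirrors the driver-private imx_sc_rpc_msg_max (which is not exported), and the service/function values are invented.

#include <linux/firmware/imx/ipc.h>	/* struct imx_sc_rpc_msg */
#include <linux/mailbox_client.h>

struct demo_scu_msg {
	struct imx_sc_rpc_msg hdr;
	u32 data[7];	/* same 8-word layout as imx_sc_rpc_msg_max */
};

static int demo_scu_send(struct mbox_chan *chan)
{
	struct demo_scu_msg msg = {
		.hdr.ver  = 1,		/* SCU RPC version */
		.hdr.svc  = 0x02,	/* invented service id */
		.hdr.func = 0x0b,	/* invented function id */
		.hdr.size = 3,		/* words incl. header: hdr + 2 data */
	};

	msg.data[0] = 0x1234;
	msg.data[1] = 0x5678;

	/* Words 0-3 land in TR0-TR3 directly; later words poll TEn. */
	return mbox_send_message(chan, &msg);
}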
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 9a6ce9f5a7db..b24822ad8409 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -56,7 +56,6 @@ struct cmdq_thread {
void __iomem *base;
struct list_head task_busy_list;
u32 priority;
- bool atomic_exec;
};
struct cmdq_task {
@@ -162,48 +161,11 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
cmdq_thread_invalidate_fetched_data(thread);
}
-static bool cmdq_command_is_wfe(u64 cmd)
-{
- u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
- u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
- u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
-
- return ((cmd & wfe_mask) == (wfe_op | wfe_option));
-}
-
-/* we assume tasks in the same display GCE thread are waiting the same event. */
-static void cmdq_task_remove_wfe(struct cmdq_task *task)
-{
- struct device *dev = task->cmdq->mbox.dev;
- u64 *base = task->pkt->va_base;
- int i;
-
- dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
- DMA_TO_DEVICE);
- for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
- if (cmdq_command_is_wfe(base[i]))
- base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
- CMDQ_JUMP_PASS;
- dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
- DMA_TO_DEVICE);
-}
-
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}
-static void cmdq_thread_wait_end(struct cmdq_thread *thread,
- unsigned long end_pa)
-{
- struct device *dev = thread->chan->mbox->dev;
- unsigned long curr_pa;
-
- if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
- curr_pa, curr_pa == end_pa, 1, 20))
- dev_err(dev, "GCE thread cannot run to end.\n");
-}
-
static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
{
struct cmdq_task_cb *cb = &task->pkt->async_cb;
@@ -383,36 +345,15 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
-
- /*
- * Atomic execution should remove the following wfe, i.e. only
- * wait event at first task, and prevent to pause when running.
- */
- if (thread->atomic_exec) {
- /* GCE is executing if command is not WFE */
- if (!cmdq_thread_is_in_wfe(thread)) {
- cmdq_thread_resume(thread);
- cmdq_thread_wait_end(thread, end_pa);
- WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- /* set to this task directly */
- writel(task->pa_base,
- thread->base + CMDQ_THR_CURR_ADDR);
- } else {
- cmdq_task_insert_into_thread(task);
- cmdq_task_remove_wfe(task);
- smp_mb(); /* modify jump before enable thread */
- }
+ /* check boundary */
+ if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+ curr_pa == end_pa) {
+ /* set to this task directly */
+ writel(task->pa_base,
+ thread->base + CMDQ_THR_CURR_ADDR);
} else {
- /* check boundary */
- if (curr_pa == end_pa - CMDQ_INST_SIZE ||
- curr_pa == end_pa) {
- /* set to this task directly */
- writel(task->pa_base,
- thread->base + CMDQ_THR_CURR_ADDR);
- } else {
- cmdq_task_insert_into_thread(task);
- smp_mb(); /* modify jump before enable thread */
- }
+ cmdq_task_insert_into_thread(task);
+ smp_mb(); /* modify jump before enable thread */
}
writel(task->pa_base + pkt->cmd_buf_size,
thread->base + CMDQ_THR_END_ADDR);
@@ -432,10 +373,62 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
}
+static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq_task_cb *cb;
+ struct cmdq_cb_data data;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task, *tmp;
+ unsigned long flags;
+ u32 enable;
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ if (list_empty(&thread->task_busy_list))
+ goto out;
+
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ if (!cmdq_thread_is_in_wfe(thread))
+ goto wait;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ cb = &task->pkt->async_cb;
+ if (cb->cb) {
+ data.sta = CMDQ_CB_ERROR;
+ data.data = cb->data;
+ cb->cb(data);
+ }
+ list_del(&task->list_entry);
+ kfree(task);
+ }
+
+ cmdq_thread_resume(thread);
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+
+out:
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ return 0;
+
+wait:
+ cmdq_thread_resume(thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
+ enable, enable == 0, 1, timeout)) {
+ dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
+ (u32)(thread->base - cmdq->base));
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
.send_data = cmdq_mbox_send_data,
.startup = cmdq_mbox_startup,
.shutdown = cmdq_mbox_shutdown,
+ .flush = cmdq_mbox_flush,
};
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
@@ -449,7 +442,6 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
thread->priority = sp->args[1];
- thread->atomic_exec = (sp->args[2] != 0);
thread->chan = &mbox->chans[ind];
return &mbox->chans[ind];
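
The new .flush hook is reached through mbox_flush(), the synchronous client-side API: if the GCE thread is still parked on its initial wait-for-event, every queued task is completed with CMDQ_CB_ERROR and freed; otherwise the thread is resumed and polled until it drains or the timeout expires. A minimal hedged sketch of a caller (the timeout value is illustrative, and its units are whatever the controller's flush implementation expects):

#include <linux/mailbox_client.h>
#include <linux/printk.h>

static int demo_cancel_pending(struct mbox_chan *chan)
{
	int ret;

	/* Block until queued GCE commands are flushed or aborted. */
	ret = mbox_flush(chan, 2000);
	if (ret)
		pr_err("cmdq flush failed: %d\n", ret);

	return ret;
}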
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
new file mode 100644
index 000000000000..ccecf2e5941d
--- /dev/null
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2017-2019 Samuel Holland <samuel@sholland.org>
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define NUM_CHANS 8
+
+#define CTRL_REG(n) (0x0000 + 0x4 * ((n) / 4))
+#define CTRL_RX(n) BIT(0 + 8 * ((n) % 4))
+#define CTRL_TX(n) BIT(4 + 8 * ((n) % 4))
+
+#define REMOTE_IRQ_EN_REG 0x0040
+#define REMOTE_IRQ_STAT_REG 0x0050
+#define LOCAL_IRQ_EN_REG 0x0060
+#define LOCAL_IRQ_STAT_REG 0x0070
+
+#define RX_IRQ(n) BIT(0 + 2 * (n))
+#define RX_IRQ_MASK 0x5555
+#define TX_IRQ(n) BIT(1 + 2 * (n))
+#define TX_IRQ_MASK 0xaaaa
+
+#define FIFO_STAT_REG(n) (0x0100 + 0x4 * (n))
+#define FIFO_STAT_MASK GENMASK(0, 0)
+
+#define MSG_STAT_REG(n) (0x0140 + 0x4 * (n))
+#define MSG_STAT_MASK GENMASK(2, 0)
+
+#define MSG_DATA_REG(n) (0x0180 + 0x4 * (n))
+
+#define mbox_dbg(mbox, ...) dev_dbg((mbox)->controller.dev, __VA_ARGS__)
+
+struct sun6i_msgbox {
+ struct mbox_controller controller;
+ struct clk *clk;
+ spinlock_t lock;
+ void __iomem *regs;
+};
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan);
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan);
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline struct sun6i_msgbox *to_sun6i_msgbox(struct mbox_chan *chan)
+{
+ return chan->con_priv;
+}
+
+static irqreturn_t sun6i_msgbox_irq(int irq, void *dev_id)
+{
+ struct sun6i_msgbox *mbox = dev_id;
+ uint32_t status;
+ int n;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mbox->regs + LOCAL_IRQ_EN_REG) &
+ readl(mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < NUM_CHANS; ++n) {
+ struct mbox_chan *chan = &mbox->controller.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ while (sun6i_msgbox_peek_data(chan)) {
+ uint32_t msg = readl(mbox->regs + MSG_DATA_REG(n));
+
+ mbox_dbg(mbox, "Channel %d received 0x%08x\n", n, msg);
+ mbox_chan_received_data(chan, &msg);
+ }
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun6i_msgbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+ uint32_t msg = *(uint32_t *)data;
+
+ /* Using a channel backwards gets the hardware into a bad state. */
+ if (WARN_ON_ONCE(!(readl(mbox->regs + CTRL_REG(n)) & CTRL_TX(n))))
+ return 0;
+
+ writel(msg, mbox->regs + MSG_DATA_REG(n));
+ mbox_dbg(mbox, "Channel %d sent 0x%08x\n", n, msg);
+
+ return 0;
+}
+
+static int sun6i_msgbox_startup(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /* The coprocessor is responsible for setting channel directions. */
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Flush the receive FIFO. */
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ /* Enable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) | RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+ }
+
+ mbox_dbg(mbox, "Channel %d startup complete\n", n);
+
+ return 0;
+}
+
+static void sun6i_msgbox_shutdown(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Disable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) & ~RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+
+ /* Attempt to flush the FIFO until the IRQ is cleared. */
+ do {
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ } while (readl(mbox->regs + LOCAL_IRQ_STAT_REG) & RX_IRQ(n));
+ }
+
+ mbox_dbg(mbox, "Channel %d shutdown complete\n", n);
+}
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /*
+ * The hardware allows snooping on the remote user's IRQ statuses.
+ * We consider a message to be acknowledged only once the receive IRQ
+ * for that channel is cleared. Since the receive IRQ for a channel
+ * cannot be cleared until the FIFO for that channel is empty, this
+ * ensures that the message has actually been read. It also gives the
+ * recipient an opportunity to perform minimal processing before
+ * acknowledging the message.
+ */
+ return !(readl(mbox->regs + REMOTE_IRQ_STAT_REG) & RX_IRQ(n));
+}
+
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ return readl(mbox->regs + MSG_STAT_REG(n)) & MSG_STAT_MASK;
+}
+
+static const struct mbox_chan_ops sun6i_msgbox_chan_ops = {
+ .send_data = sun6i_msgbox_send_data,
+ .startup = sun6i_msgbox_startup,
+ .shutdown = sun6i_msgbox_shutdown,
+ .last_tx_done = sun6i_msgbox_last_tx_done,
+ .peek_data = sun6i_msgbox_peek_data,
+};
+
+static int sun6i_msgbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_chan *chans;
+ struct reset_control *reset;
+ struct resource *res;
+ struct sun6i_msgbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, NUM_CHANS, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_CHANS; ++i)
+ chans[i].con_priv = mbox;
+
+ mbox->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mbox->clk)) {
+ ret = PTR_ERR(mbox->clk);
+ dev_err(dev, "Failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mbox->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ dev_err(dev, "Failed to get reset control: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /*
+ * NOTE: We rely on platform firmware to preconfigure the channel
+ * directions, and we share this hardware block with other firmware
+ * that runs concurrently with Linux (e.g. a trusted monitor).
+ *
+ * Therefore, we do *not* assert the reset line if probing fails or
+ * when removing the device.
+ */
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ dev_err(dev, "Failed to deassert reset: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto err_disable_unprepare;
+ }
+
+ mbox->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /* Disable all IRQs for this end of the msgbox. */
+ writel(0, mbox->regs + LOCAL_IRQ_EN_REG);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ sun6i_msgbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register IRQ handler: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.ops = &sun6i_msgbox_chan_ops;
+ mbox->controller.chans = chans;
+ mbox->controller.num_chans = NUM_CHANS;
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+
+ spin_lock_init(&mbox->lock);
+ platform_set_drvdata(pdev, mbox);
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(dev, "Failed to register controller: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ return 0;
+
+err_disable_unprepare:
+ clk_disable_unprepare(mbox->clk);
+
+ return ret;
+}
+
+static int sun6i_msgbox_remove(struct platform_device *pdev)
+{
+ struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ /* See the comment in sun6i_msgbox_probe about the reset line. */
+ clk_disable_unprepare(mbox->clk);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_msgbox_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-msgbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_msgbox_of_match);
+
+static struct platform_driver sun6i_msgbox_driver = {
+ .driver = {
+ .name = "sun6i-msgbox",
+ .of_match_table = sun6i_msgbox_of_match,
+ },
+ .probe = sun6i_msgbox_probe,
+ .remove = sun6i_msgbox_remove,
+};
+module_platform_driver(sun6i_msgbox_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner sun6i/sun8i/sun9i/sun50i Message Box");
+MODULE_LICENSE("GPL v2");