author    Linaro CI <ci_notify@linaro.org>    2023-04-16 22:47:45 +0000
committer Linaro CI <ci_notify@linaro.org>    2023-04-16 22:47:45 +0000
commit    1d89b41a23df8b105edd401201c6053efd2c54d2
tree      a1d1f8e7bc155aea3c377d47745ca1dc55f731ac
parent    1d7b6b8d0842962c8d4db6fc55c25438f6144280
parent    c575a10a1cd696de081bbf4285ec7882edff00b6
Merge remote-tracking branch 'pci-mhi-edma/tracking-pci-mhi-edma' into integration-linux-qcomlt
-rw-r--r--  MAINTAINERS                                       1
-rw-r--r--  drivers/bus/mhi/ep/main.c                        12
-rw-r--r--  drivers/net/Kconfig                               9
-rw-r--r--  drivers/net/Makefile                              1
-rw-r--r--  drivers/net/mhi_ep_net.c                        331
-rw-r--r--  drivers/net/mhi_net.c                             2
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c 108
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c        12
-rw-r--r--  drivers/pci/endpoint/functions/Kconfig           10
-rw-r--r--  drivers/pci/endpoint/functions/Makefile           1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c    699
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-ntb.c      3
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c     2
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c                 3
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c              52
-rw-r--r--  drivers/pci/endpoint/pci-epc-mem.c               10
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c               8
-rw-r--r--  include/linux/mhi_ep.h                           11
-rw-r--r--  include/linux/pci-epc.h                           2
-rw-r--r--  include/linux/pci-epf.h                           8
20 files changed, 1214 insertions(+), 71 deletions(-)
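
The functional core of this merge is a split of the MHI endpoint controller's host-access hooks in include/linux/mhi_ep.h: read_from_host()/write_to_host() remain the synchronous control path (ring and TRE accesses over iATU), while the new transfer_from_host()/transfer_to_host() carry channel payloads and may be backed by the DWC eDMA. A minimal sketch of how the MHI EP core is expected to consume the split (illustrative only; fetch_tre()/fetch_payload() are not names from this merge):

/* Illustrative sketch, not part of the merge: control vs. data path */
static int fetch_tre(struct mhi_ep_cntrl *mhi_cntrl, u64 host_addr,
		     struct mhi_ring_element *el)
{
	/* Control path: small, latency-sensitive, always an iATU memcpy */
	return mhi_cntrl->read_from_host(mhi_cntrl, host_addr, el, sizeof(*el));
}

static int fetch_payload(struct mhi_ep_cntrl *mhi_cntrl, u64 host_addr,
			 void *buf, size_t len)
{
	/* Data path: the controller driver may back this with eDMA */
	return mhi_cntrl->transfer_from_host(mhi_cntrl, host_addr, buf, len);
}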
diff --git a/MAINTAINERS b/MAINTAINERS
index d0c8baade611..785001b039af 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13538,6 +13538,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git
F: Documentation/ABI/stable/sysfs-bus-mhi
F: Documentation/mhi/
F: drivers/bus/mhi/
+F: drivers/net/mhi_*
F: include/linux/mhi.h
MICROBLAZE ARCHITECTURE
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index a6a48e515478..ba062df4fe25 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -126,7 +126,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
/* Check if the channel is supported by the controller */
if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
- dev_err(dev, "Channel (%u) not supported!\n", ch_id);
+ dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
return -ENODEV;
}
@@ -327,8 +327,8 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
read_addr = mhi_chan->tre_loc + read_offset;
write_addr = result->buf_addr + write_offset;
- dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ dev_dbg(dev, "Transferring %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->transfer_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
return ret;
@@ -488,8 +488,8 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
read_addr = skb->data + read_offset;
write_addr = MHI_TRE_DATA_GET_PTR(el);
- dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ dev_dbg(dev, "Transferring %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->transfer_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
if (ret < 0) {
dev_err(dev, "Error writing to the channel\n");
goto err_exit;
@@ -702,7 +702,7 @@ static void mhi_ep_cmd_ring_worker(struct work_struct *work)
el = &ring->ring_cache[ring->rd_offset];
ret = mhi_ep_process_cmd_ring(ring, el);
- if (ret)
+ if (ret && ret != -ENODEV)
dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
mhi_ep_ring_inc_index(ring);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c34bd432da27..474cd5afd789 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -451,6 +451,15 @@ config MHI_NET
QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55).
Say Y or M.
+config MHI_EP_NET
+ tristate "MHI Endpoint network driver"
+ depends on MHI_BUS_EP
+ help
+ This is the network driver for the MHI bus implementation in endpoint
+ devices. It is used to provide the network interface for QCOM modems
+ such as SDX55.
+ Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e26f98f897c5..b8e706a4150e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
+obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
#
# Networking Drivers
diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
new file mode 100644
index 000000000000..5891d17bf4e4
--- /dev/null
+++ b/drivers/net/mhi_ep_net.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI Endpoint Network driver
+ *
+ * Based on drivers/net/mhi_net.c
+ *
+ * Copyright (c) 2022, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MHI_NET_MIN_MTU ETH_MIN_MTU
+#define MHI_NET_MAX_MTU 0xffff
+
+struct mhi_ep_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_ep_net_dev {
+ struct mhi_ep_device *mdev;
+ struct net_device *ndev;
+ struct mhi_ep_net_stats stats;
+ struct workqueue_struct *xmit_wq;
+ struct work_struct xmit_work;
+ struct sk_buff_head tx_buffers;
+ spinlock_t tx_lock; /* Lock for protecting tx_buffers */
+ u32 mru;
+};
+
+static void mhi_ep_net_dev_process_queue_packets(struct work_struct *work)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = container_of(work,
+ struct mhi_ep_net_dev, xmit_work);
+ struct mhi_ep_device *mdev = mhi_ep_netdev->mdev;
+ struct sk_buff_head q;
+ struct sk_buff *skb;
+ int ret;
+
+ if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+ netif_stop_queue(mhi_ep_netdev->ndev);
+ return;
+ }
+
+ __skb_queue_head_init(&q);
+
+ spin_lock_bh(&mhi_ep_netdev->tx_lock);
+ skb_queue_splice_init(&mhi_ep_netdev->tx_buffers, &q);
+ spin_unlock_bh(&mhi_ep_netdev->tx_lock);
+
+ while ((skb = __skb_dequeue(&q))) {
+ ret = mhi_ep_queue_skb(mdev, skb);
+ if (ret) {
+ kfree_skb(skb);
+ goto exit_drop;
+ }
+
+ u64_stats_update_begin(&mhi_ep_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.tx_packets);
+ u64_stats_add(&mhi_ep_netdev->stats.tx_bytes, skb->len);
+ u64_stats_update_end(&mhi_ep_netdev->stats.tx_syncp);
+
+ consume_skb(skb);
+
+ /* If the host queue went empty, return the backlog and stop TX */
+ if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+ netif_stop_queue(mhi_ep_netdev->ndev);
+ spin_lock_bh(&mhi_ep_netdev->tx_lock);
+ skb_queue_splice(&q, &mhi_ep_netdev->tx_buffers);
+ spin_unlock_bh(&mhi_ep_netdev->tx_lock);
+ break;
+ }
+
+ cond_resched();
+ }
+
+ return;
+
+exit_drop:
+ /* Drop and free anything still pending in the local queue */
+ __skb_queue_purge(&q);
+ u64_stats_update_begin(&mhi_ep_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.tx_dropped);
+ u64_stats_update_end(&mhi_ep_netdev->stats.tx_syncp);
+}
+
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+
+ spin_lock(&mhi_ep_netdev->tx_lock);
+ skb_queue_tail(&mhi_ep_netdev->tx_buffers, skb);
+ spin_unlock(&mhi_ep_netdev->tx_lock);
+
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_ep_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_ep_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_ep_netdev->stats.rx_errors);
+ } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_ep_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_ep_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_ep_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_ep_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.tx_syncp, start));
+}
+
+static const struct net_device_ops mhi_ep_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+static void mhi_ep_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_RAWIP;
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_ep_netdev_ops;
+ ndev->mtu = MHI_EP_DEFAULT_MTU;
+ ndev->min_mtu = MHI_NET_MIN_MTU;
+ ndev->max_mtu = MHI_NET_MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
+
+static void mhi_ep_net_ul_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_ep_netdev->ndev;
+ struct sk_buff *skb;
+ size_t size;
+
+ size = mhi_ep_netdev->mru ? mhi_ep_netdev->mru : READ_ONCE(ndev->mtu);
+
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ return;
+ }
+
+ skb_copy_to_linear_data(skb, mhi_res->buf_addr, mhi_res->bytes_xferd);
+ skb->dev = mhi_ep_netdev->ndev;
+
+ if (unlikely(mhi_res->transaction_status)) {
+ switch (mhi_res->transaction_status) {
+ case -ENOTCONN:
+ /* MHI layer stopping/resetting the UL channel */
+ dev_kfree_skb_any(skb);
+ return;
+ default:
+ /* Unknown error, simply drop */
+ dev_kfree_skb_any(skb);
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ }
+ } else {
+ skb_put(skb, mhi_res->bytes_xferd);
+
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_ep_netdev->stats.rx_bytes, skb->len);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ netif_rx(skb);
+ }
+}
+
+static void mhi_ep_net_dl_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ if (unlikely(mhi_res->transaction_status == -ENOTCONN))
+ return;
+
+ /* Since we got enough buffers to queue, wake the queue if stopped */
+ if (netif_queue_stopped(mhi_ep_netdev->ndev)) {
+ netif_wake_queue(mhi_ep_netdev->ndev);
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+ }
+}
+
+static int mhi_ep_net_newlink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev;
+ int ret;
+
+ mhi_ep_netdev = netdev_priv(ndev);
+
+ dev_set_drvdata(&mhi_dev->dev, mhi_ep_netdev);
+ mhi_ep_netdev->ndev = ndev;
+ mhi_ep_netdev->mdev = mhi_dev;
+ mhi_ep_netdev->mru = mhi_dev->mhi_cntrl->mru;
+
+ skb_queue_head_init(&mhi_ep_netdev->tx_buffers);
+ spin_lock_init(&mhi_ep_netdev->tx_lock);
+
+ u64_stats_init(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_ep_netdev->stats.tx_syncp);
+
+ mhi_ep_netdev->xmit_wq = alloc_workqueue("mhi_ep_net_xmit_wq", WQ_HIGHPRI, 0);
+ if (!mhi_ep_netdev->xmit_wq)
+ return -ENOMEM;
+ INIT_WORK(&mhi_ep_netdev->xmit_work, mhi_ep_net_dev_process_queue_packets);
+
+ ret = register_netdev(ndev);
+ if (ret)
+ destroy_workqueue(mhi_ep_netdev->xmit_wq);
+
+ return ret;
+}
+
+static void mhi_ep_net_dellink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ destroy_workqueue(mhi_ep_netdev->xmit_wq);
+ free_netdev(ndev);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static int mhi_ep_net_probe(struct mhi_ep_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct net_device *ndev;
+ int ret;
+
+ ndev = alloc_netdev(sizeof(struct mhi_ep_net_dev), (const char *)id->driver_data,
+ NET_NAME_PREDICTABLE, mhi_ep_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ ret = mhi_ep_net_newlink(mhi_dev, ndev);
+ if (ret) {
+ free_netdev(ndev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhi_ep_net_remove(struct mhi_ep_device *mhi_dev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mhi_ep_net_dellink(mhi_dev, mhi_ep_netdev->ndev);
+}
+
+static const struct mhi_device_id mhi_ep_net_id_table[] = {
+ /* Software data PATH (from modem CPU) */
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_ep_net_id_table);
+
+static struct mhi_ep_driver mhi_ep_net_driver = {
+ .probe = mhi_ep_net_probe,
+ .remove = mhi_ep_net_remove,
+ .dl_xfer_cb = mhi_ep_net_dl_callback,
+ .ul_xfer_cb = mhi_ep_net_ul_callback,
+ .id_table = mhi_ep_net_id_table,
+ .driver = {
+ .name = "mhi_ep_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_ep_driver(mhi_ep_net_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("MHI Endpoint Network driver");
+MODULE_LICENSE("GPL v2");
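
A note on the TX flow control in mhi_ep_net.c above: the TX queue is stopped whenever the host has no receive buffers queued on the DL channel, and restarted from the DL transfer-completion callback. A condensed sketch of that contract (illustrative; error handling elided, priv stands in for the driver state above):

/* Sketch of the flow-control contract between the xmit worker and the
 * DL callback in mhi_ep_net.c; not a drop-in replacement. */
static void tx_drain_one(struct mhi_ep_net_dev *priv, struct sk_buff *skb)
{
	if (mhi_ep_queue_is_empty(priv->mdev, DMA_FROM_DEVICE)) {
		netif_stop_queue(priv->ndev);	/* host RX ring is empty */
		return;
	}
	mhi_ep_queue_skb(priv->mdev, skb);	/* error handling elided */
}

static void dl_complete(struct mhi_ep_net_dev *priv)
{
	/* Host queued fresh buffers: resume TX and drain the backlog */
	if (netif_queue_stopped(priv->ndev)) {
		netif_wake_queue(priv->ndev);
		queue_work(priv->xmit_wq, &priv->xmit_work);
	}
}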
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index 3d322ac4f6a5..eddc2c701da4 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -14,7 +14,7 @@
#define MHI_NET_MIN_MTU ETH_MIN_MTU
#define MHI_NET_MAX_MTU 0xffff
-#define MHI_NET_DEFAULT_MTU 0x4000
+#define MHI_NET_DEFAULT_MTU 0x8000
struct mhi_net_stats {
u64_stats_t rx_packets;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index f9182f8d552f..26b77ef363c4 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -644,11 +644,19 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
unsigned int offset, ptm_cap_base;
unsigned int nbars;
u8 hdr_type;
+ u8 func_no;
+ void *addr;
u32 reg;
- int i;
+ int ret, i;
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -659,6 +667,48 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
+
+ ep->ib_window_map = devm_kcalloc(dev,
+ BITS_TO_LONGS(pci->num_ib_windows),
+ sizeof(long),
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ return -ENOMEM;
+
+ ep->ob_window_map = devm_kcalloc(dev,
+ BITS_TO_LONGS(pci->num_ob_windows),
+ sizeof(long),
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ return -ENOMEM;
+
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ep->outbound_addr = addr;
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ return -ENOMEM;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
@@ -699,8 +749,6 @@ EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
- void *addr;
- u8 func_no;
struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -708,7 +756,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
INIT_LIST_HEAD(&ep->func_list);
@@ -723,26 +770,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- dw_pcie_version_detect(pci);
-
- dw_pcie_iatu_detect(pci);
-
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
-
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
-
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
@@ -756,23 +783,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ret < 0)
epc->max_functions = 1;
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
-
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
-
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
@@ -788,25 +798,25 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- ret = dw_pcie_edma_detect(pci);
- if (ret)
- goto err_free_epc_mem;
-
if (ep->ops->get_features) {
epc_features = ep->ops->get_features(ep);
if (epc_features->core_init_notifier)
return 0;
}
+ /*
+ * NOTE: Avoid accessing the hardware (e.g. DBI space) before this
+ * step, as platforms that implement the 'core_init_notifier' feature
+ * may not have the hardware ready (i.e. core initialized) for access
+ * yet (e.g. tegra194). Any hardware access on such platforms results
+ * in a system hard hang.
+ */
ret = dw_pcie_ep_init_complete(ep);
if (ret)
- goto err_remove_edma;
+ goto err_free_epc_mem;
return 0;
-err_remove_edma:
- dw_pcie_edma_remove(pci);
-
err_free_epc_mem:
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
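
With the reordering above, dw_pcie_ep_init() no longer touches DBI/iATU/eDMA at probe time; all register access now sits in dw_pcie_ep_init_complete(). A sketch of the intended calling pattern for a 'core_init_notifier' platform (assumed pattern; pcie-qcom-ep.c in the next hunk follows it from its PERST# deassert path):

/* Probe time: no hardware access, safe while the core is unpowered */
ret = dw_pcie_ep_init(&pci->ep);
if (ret)
	return ret;

/* Later (e.g. on PERST# deassert, refclk running): DBI access is safe */
ret = dw_pcie_ep_init_complete(&pci->ep);
if (ret)
	return ret;

/* Let bound EPF drivers program header/BARs/MSI via their core_init hook */
pci_epc_init_notify(pci->ep.epc);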
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 19b32839ea26..fcc85f5f2b49 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -74,6 +74,7 @@
#define PARF_INT_ALL_PLS_ERR BIT(15)
#define PARF_INT_ALL_PME_LEGACY BIT(16)
#define PARF_INT_ALL_PLS_PME BIT(17)
+#define PARF_INT_ALL_EDMA BIT(22)
/* PARF_BDF_TO_SID_CFG register fields */
#define PARF_BDF_TO_SID_BYPASS BIT(0)
@@ -395,7 +396,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
- PARF_INT_ALL_LINK_UP;
+ PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
@@ -569,9 +570,11 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ pci_epc_linkdown(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
dev_dbg(dev, "Received BME event. Link is enabled!\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ pci_epc_bme_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -741,18 +744,13 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.dev = dev;
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
+ pcie_ep->pci.edma.nr_irqs = 1;
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 9fd560886871..5d1066b1040b 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -37,3 +37,13 @@ config PCI_EPF_VNTB
between PCI Root Port and PCIe Endpoint.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_MHI
+ tristate "PCI Endpoint driver for MHI bus"
+ depends on PCI_ENDPOINT && MHI_BUS_EP
+ help
+ Enable this configuration option to enable the PCI Endpoint
+ driver for Modem Host Interface (MHI) bus found in Qualcomm
+ modems such as SDX55.
+
+ If in doubt, say "N" to disable Endpoint driver for MHI bus.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 5c13001deaba..696473fce50e 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o
+obj-$(CONFIG_PCI_EPF_MHI) += pci-epf-mhi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
new file mode 100644
index 000000000000..3add841a3ceb
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI EPF driver for MHI Endpoint devices
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/mhi_ep.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#define MHI_VERSION_1_0 0x01000000
+
+/* Platform specific flags */
+#define MHI_EPF_USE_DMA BIT(0)
+
+struct pci_epf_mhi_ep_info {
+ const struct mhi_ep_cntrl_config *config;
+ struct pci_epf_header *epf_header;
+ enum pci_barno bar_num;
+ u32 epf_flags;
+ u32 msi_count;
+ u32 mru;
+ u32 flags;
+};
+
+#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_TO_DEVICE, \
+ }
+
+#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_FROM_DEVICE, \
+ }
+
+static const struct mhi_ep_channel_config mhi_v1_channels[] = {
+ MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_UL(36, "IP_SW0"),
+ MHI_EP_CHANNEL_CONFIG_DL(37, "IP_SW0"),
+};
+
+static const struct mhi_ep_cntrl_config mhi_v1_config = {
+ .max_channels = 128,
+ .num_channels = ARRAY_SIZE(mhi_v1_channels),
+ .ch_cfg = mhi_v1_channels,
+ .mhi_version = MHI_VERSION_1_0,
+};
+
+static struct pci_epf_header sdx55_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306,
+ .baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
+ .subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sdx55_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sdx55_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+};
+
+static struct pci_epf_header sm8450_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306,
+ .baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
+ .subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sm8450_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sm8450_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+ .flags = MHI_EPF_USE_DMA,
+};
+
+struct pci_epf_mhi {
+ const struct pci_epf_mhi_ep_info *info;
+ struct mhi_ep_cntrl mhi_cntrl;
+ struct pci_epf *epf;
+ struct mutex lock;
+ void __iomem *mmio;
+ resource_size_t mmio_phys;
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
+ u32 mmio_size;
+ int irq;
+ bool mhi_registered;
+};
+
+static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
+ phys_addr_t *phys_ptr, void __iomem **virt, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf_mhi->epf->epc;
+ size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+ void __iomem *virt_addr;
+ phys_addr_t phys_addr;
+ int ret;
+
+ virt_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, size + offset);
+ if (!virt_addr)
+ return -ENOMEM;
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, pci_addr - offset, size + offset);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, phys_addr, virt_addr, size + offset);
+
+ return ret;
+ }
+
+ *phys_ptr = phys_addr + offset;
+ *virt = virt_addr + offset;
+
+ return 0;
+}
+
+static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
+ phys_addr_t phys_addr, void __iomem *virt_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+ size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr - offset);
+ pci_epc_mem_free_addr(epc, phys_addr - offset, virt_addr - offset, size + offset);
+}
+
+static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ /*
+ * Vector is incremented by 1 here as the DWC core will decrement it before
+ * writing to iATU.
+ */
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, vector + 1);
+}
+
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to,
+ size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf_mhi->epf->epc;
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ size_t offset = from % 0x1000;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ tre_buf = pci_epc_mem_alloc_addr(epc, &tre_phys, size + offset);
+ if (!tre_buf) {
+ mutex_unlock(&epf_mhi->lock);
+ return -ENOMEM;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, tre_phys, from - offset,
+ size + offset);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+ memcpy_fromio(to, tre_buf + offset, size);
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, tre_phys);
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+
+ mutex_unlock(&epf_mhi->lock);
+
+ return 0;
+}
+
+static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to,
+ size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf_mhi->epf->epc;
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ size_t offset = to % 0x1000;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ tre_buf = pci_epc_mem_alloc_addr(epc, &tre_phys, size + offset);
+ if (!tre_buf) {
+ mutex_unlock(&epf_mhi->lock);
+ return -ENOMEM;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, tre_phys, to - offset,
+ size + offset);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+ memcpy_toio(tre_buf + offset, from, size);
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, tre_phys);
+ pci_epc_mem_free_addr(epc, tre_phys, tre_buf, size + offset);
+
+ mutex_unlock(&epf_mhi->lock);
+
+ return 0;
+}
+
+static void pci_epf_mhi_dma_callback(void *param)
+{
+ complete(param);
+}
+
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct dma_chan *chan = epf_mhi->dma_chan_rx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t dst_addr;
+ int ret;
+
+ if (size < 0x1000)
+ return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = from;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ } else {
+ /* wait_for_completion_timeout() returns time left; normalize */
+ ret = 0;
+ }
+
+err_unmap:
+ dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct dma_chan *chan = epf_mhi->dma_chan_tx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t src_addr;
+ int ret;
+
+ if (size < 0x1000)
+ return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = to;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ } else {
+ /* wait_for_completion_timeout() returns time left; normalize */
+ ret = 0;
+ }
+
+err_unmap:
+ dma_unmap_single(dma_dev, src_addr, size, DMA_TO_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+struct epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
+{
+ struct epf_dma_filter *filter = node;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev && filter->dma_mask & caps.directions;
+}
+
+static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
+{
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct device *dev = &epf_mhi->epf->dev;
+ struct epf_dma_filter filter;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ filter.dev = dma_dev;
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter, &filter);
+ if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
+ dev_err(dev, "Failed to request tx channel\n");
+ return -ENODEV;
+ }
+
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+ epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter, &filter);
+ if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
+ dev_err(dev, "Failed to request rx channel\n");
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ epf_mhi->dma_chan_tx = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
+{
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ dma_release_channel(epf_mhi->dma_chan_rx);
+ epf_mhi->dma_chan_tx = NULL;
+ epf_mhi->dma_chan_rx = NULL;
+}
+
+static int pci_epf_mhi_core_init(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ epf_bar->phys_addr = epf_mhi->mmio_phys;
+ epf_bar->size = epf_mhi->mmio_size;
+ epf_bar->barno = info->bar_num;
+ epf_bar->flags = info->epf_flags;
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Failed to set BAR: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ order_base_2(info->msi_count));
+ if (ret) {
+ dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Failed to set Configuration header: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pci_epf_mhi_link_up(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (info->flags & MHI_EPF_USE_DMA) {
+ ret = pci_epf_mhi_dma_init(epf_mhi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
+ mhi_cntrl->mmio = epf_mhi->mmio;
+ mhi_cntrl->irq = epf_mhi->irq;
+ mhi_cntrl->mru = info->mru;
+
+ /* Assign the struct dev of PCI EP as MHI controller device */
+ mhi_cntrl->cntrl_dev = epc->dev.parent;
+ mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
+ mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
+ mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
+ mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
+ mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+ if (info->flags & MHI_EPF_USE_DMA) {
+ mhi_cntrl->transfer_from_host = pci_epf_mhi_edma_read;
+ mhi_cntrl->transfer_to_host = pci_epf_mhi_edma_write;
+ } else {
+ mhi_cntrl->transfer_from_host = pci_epf_mhi_iatu_read;
+ mhi_cntrl->transfer_to_host = pci_epf_mhi_iatu_write;
+ }
+
+ /* Register the MHI EP controller */
+ ret = mhi_ep_register_controller(mhi_cntrl, info->config);
+ if (ret) {
+ dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+
+ return ret;
+ }
+
+ epf_mhi->mhi_registered = true;
+
+ return 0;
+}
+
+static int pci_epf_mhi_link_down(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+
+ if (epf_mhi->mhi_registered) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ }
+
+ return 0;
+}
+
+static int pci_epf_mhi_bme(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ /* Power up the MHI EP stack if link is up and stack is in power down state */
+ if (!mhi_cntrl->enabled && epf_mhi->mhi_registered) {
+ ret = mhi_ep_power_up(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_mhi_bind(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ struct platform_device *pdev;
+ struct device *dev = &epf->dev;
+ struct resource *res;
+ int ret;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ pdev = to_platform_device(epc->dev.parent);
+
+ /* Get MMIO base address from Endpoint controller */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+ if (!res)
+ return -ENODEV;
+
+ epf_mhi->mmio_phys = res->start;
+ epf_mhi->mmio_size = resource_size(res);
+
+ epf_mhi->mmio = ioremap_wc(epf_mhi->mmio_phys, epf_mhi->mmio_size);
+ if (!epf_mhi->mmio)
+ return -ENOMEM;
+
+ ret = platform_get_irq_byname(pdev, "doorbell");
+ if (ret < 0) {
+ dev_err(dev, "Failed to get Doorbell IRQ\n");
+ iounmap(epf_mhi->mmio);
+ return ret;
+ }
+
+ epf_mhi->irq = ret;
+
+ return 0;
+}
+
+static void pci_epf_mhi_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+
+ /*
+ * Forcefully power down the MHI EP stack. The only way to bring the MHI EP
+ * stack back to a working state after a subsequent bind is a BME from the host.
+ */
+ if (epf_mhi->mhi_registered) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ }
+
+ iounmap(epf_mhi->mmio);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
+static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
+ .core_init = pci_epf_mhi_core_init,
+ .link_up = pci_epf_mhi_link_up,
+ .link_down = pci_epf_mhi_link_down,
+ .bme = pci_epf_mhi_bme,
+};
+
+static int pci_epf_mhi_probe(struct pci_epf *epf, const struct pci_epf_device_id *id)
+{
+ const struct pci_epf_mhi_ep_info *info = (const struct pci_epf_mhi_ep_info *)id->driver_data;
+ struct pci_epf_mhi *epf_mhi;
+ struct device *dev = &epf->dev;
+
+ epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
+ if (!epf_mhi)
+ return -ENOMEM;
+
+ epf->header = info->epf_header;
+ epf_mhi->info = info;
+ epf_mhi->epf = epf;
+
+ epf->event_ops = &pci_epf_mhi_event_ops;
+
+ mutex_init(&epf_mhi->lock);
+
+ epf_set_drvdata(epf, epf_mhi);
+
+ return 0;
+}
+
+static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
+ { .name = "sdx55", .driver_data = (kernel_ulong_t) &sdx55_info },
+ { .name = "sm8450", .driver_data = (kernel_ulong_t) &sm8450_info },
+ {},
+};
+
+static struct pci_epf_ops pci_epf_mhi_ops = {
+ .unbind = pci_epf_mhi_unbind,
+ .bind = pci_epf_mhi_bind,
+};
+
+static struct pci_epf_driver pci_epf_mhi_driver = {
+ .driver.name = "pci_epf_mhi",
+ .probe = pci_epf_mhi_probe,
+ .id_table = pci_epf_mhi_ids,
+ .ops = &pci_epf_mhi_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_mhi_init(void)
+{
+ return pci_epf_register_driver(&pci_epf_mhi_driver);
+}
+module_init(pci_epf_mhi_init);
+
+static void __exit pci_epf_mhi_exit(void)
+{
+ pci_epf_unregister_driver(&pci_epf_mhi_driver);
+}
+module_exit(pci_epf_mhi_exit);
+
+MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
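
Taken together with the EPC notifier plumbing later in this merge, the runtime sequence this function driver assumes is roughly the following (sketch; the left-hand entry points are from pci-epc-core.c below, triggered by pcie-qcom-ep.c above):

pci_epc_init_notify(epc);  /* -> .core_init: pci_epf_mhi_core_init()
			    *    programs config header, BAR0 (MMIO), MSI */
pci_epc_linkup(epc);       /* -> .link_up: pci_epf_mhi_link_up()
			    *    requests eDMA channels, registers the MHI EP controller */
pci_epc_bme_notify(epc);   /* -> .bme: pci_epf_mhi_bme()
			    *    host enabled bus mastering: mhi_ep_power_up() */
pci_epc_linkdown(epc);     /* -> .link_down: pci_epf_mhi_link_down()
			    *    mhi_ep_power_down(), eDMA teardown */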
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 9a00448c7e61..980b4ecf19a2 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -2075,11 +2075,12 @@ static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
/**
* epf_ntb_probe() - Probe NTB function driver
* @epf: NTB endpoint function device
+ * @id: NTB endpoint function device ID
*
* Probe NTB function driver when endpoint function bus detects a NTB
* endpoint function.
*/
-static int epf_ntb_probe(struct pci_epf *epf)
+static int epf_ntb_probe(struct pci_epf *epf, const struct pci_epf_device_id *id)
{
struct epf_ntb *ntb;
struct device *dev;
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 0f9d2ec822ac..d5fcc78a5b73 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -980,7 +980,7 @@ static const struct pci_epf_device_id pci_epf_test_ids[] = {
{},
};
-static int pci_epf_test_probe(struct pci_epf *epf)
+static int pci_epf_test_probe(struct pci_epf *epf, const struct pci_epf_device_id *id)
{
struct pci_epf_test *epf_test;
struct device *dev = &epf->dev;
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 4b8ac0ac84d5..62c8e09c59f4 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -178,6 +178,9 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
if (kstrtobool(page, &start) < 0)
return -EINVAL;
+ if (WARN_ON_ONCE(start == epc_group->start))
+ return -EALREADY;
+
if (!start) {
pci_epc_stop(epc);
epc_group->start = 0;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 9440d9811eea..11bd873a7997 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -707,6 +707,32 @@ void pci_epc_linkup(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
+ * pci_epc_linkdown() - Notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ * @epc: the EPC device which has dropped the link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ */
+void pci_epc_linkdown(struct pci_epc *epc)
+{
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->link_down)
+ epf->event_ops->link_down(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkdown);
+
+/**
* pci_epc_init_notify() - Notify the EPF device that EPC device's core
* initialization is completed.
* @epc: the EPC device whose core initialization is completed
@@ -733,6 +759,32 @@ void pci_epc_init_notify(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
+ * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
+ * the BME event from the Root Complex
+ * @epc: the EPC device that received the BME event
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Bus
+ * Master Enable (BME) event from the Root Complex.
+ */
+void pci_epc_bme_notify(struct pci_epc *epc)
+{
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->bme)
+ epf->event_ops->bme(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+
+/**
* pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 7dcf6f480b82..a9c028f58da1 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -115,6 +115,16 @@ err_mem:
}
EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init);
+/**
+ * pci_epc_mem_init() - Initialize the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_init
+ * @base: Physical address of the window region
+ * @size: Total Size of the window region
+ * @page_size: Page size of the window region
+ *
+ * Invoke to initialize a single pci_epc_mem structure used by the
+ * endpoint functions to allocate memory for mapping the PCI host memory
+ */
int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
size_t size, size_t page_size)
{
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 2036e38be093..924564288c9a 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -494,11 +494,13 @@ static const struct device_type pci_epf_type = {
};
static int
-pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
+pci_epf_match_id(const struct pci_epf_device_id *id, struct pci_epf *epf)
{
while (id->name[0]) {
- if (strcmp(epf->name, id->name) == 0)
+ if (strcmp(epf->name, id->name) == 0) {
+ epf->id = id;
return true;
+ }
id++;
}
@@ -526,7 +528,7 @@ static int pci_epf_device_probe(struct device *dev)
epf->driver = driver;
- return driver->probe(epf);
+ return driver->probe(epf, epf->id);
}
static void pci_epf_device_remove(struct device *dev)
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index f198a8ac7ee7..8bd92be4ba9a 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -134,8 +134,15 @@ struct mhi_ep_cntrl {
void __iomem **virt, size_t size);
void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
void __iomem *virt, size_t size);
- int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
- int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+
+ int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to,
+ size_t size);
+ int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to,
+ size_t size);
+ int (*transfer_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to,
+ size_t size);
+ int (*transfer_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to,
+ size_t size);
enum mhi_state mhi_state;
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 301bb0e53707..5cb694031072 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -203,7 +203,9 @@ void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_bme_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index a215dc8ce693..f34b3b32a0e7 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -71,10 +71,14 @@ struct pci_epf_ops {
* struct pci_epf_event_ops - Callbacks for capturing the EPC events
* @core_init: Callback for the EPC initialization complete event
* @link_up: Callback for the EPC link up event
+ * @link_down: Callback for the EPC link down event
+ * @bme: Callback for the EPC BME (Bus Master Enable) event
*/
struct pci_epc_event_ops {
int (*core_init)(struct pci_epf *epf);
int (*link_up)(struct pci_epf *epf);
+ int (*link_down)(struct pci_epf *epf);
+ int (*bme)(struct pci_epf *epf);
};
/**
@@ -89,7 +93,7 @@ struct pci_epc_event_ops {
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
- int (*probe)(struct pci_epf *epf);
+ int (*probe)(struct pci_epf *epf, const struct pci_epf_device_id *id);
void (*remove)(struct pci_epf *epf);
struct device_driver driver;
@@ -131,6 +135,7 @@ struct pci_epf_bar {
* @epc: the EPC device to which this EPF device is bound
* @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
+ * @id: Pointer to the EPF device ID
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @lock: mutex to protect pci_epf_ops
* @sec_epc: the secondary EPC device to which this EPF device is bound
@@ -158,6 +163,7 @@ struct pci_epf {
struct pci_epc *epc;
struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
+ const struct pci_epf_device_id *id;
struct list_head list;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
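
For EPF drivers outside this merge, the probe() prototype change is mechanical. A hedged sketch of the conversion (the foo_* names are hypothetical):

#include <linux/pci-epf.h>

struct foo_info { u32 bar_num; };
static const struct foo_info foo_data = { .bar_num = 0 };

static const struct pci_epf_device_id foo_ids[] = {
	{ .name = "foo", .driver_data = (kernel_ulong_t)&foo_data },
	{},
};

/* Before this merge: static int foo_probe(struct pci_epf *epf) */
static int foo_probe(struct pci_epf *epf, const struct pci_epf_device_id *id)
{
	/* The matched table entry is now passed in, so per-device data
	 * needs no second lookup by epf->name. */
	const struct foo_info *info = (const struct foo_info *)id->driver_data;

	dev_dbg(&epf->dev, "probed, using BAR%u\n", info->bar_num);
	return 0;
}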