diff options
Diffstat (limited to 'drivers')
23 files changed, 2106 insertions, 1 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig index 95b9ccc08165..3ed6ede9d021 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -217,4 +217,6 @@ source "drivers/siox/Kconfig" source "drivers/slimbus/Kconfig" +source "drivers/interconnect/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 24cd47014657..0cca95740d9b 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -185,3 +185,4 @@ obj-$(CONFIG_TEE) += tee/ obj-$(CONFIG_MULTIPLEXER) += mux/ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ obj-$(CONFIG_SIOX) += siox/ +obj-$(CONFIG_INTERCONNECT) += interconnect/ diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c index 841fee845ec9..48f2ef138d4a 100644 --- a/drivers/char/hw_random/msm-rng.c +++ b/drivers/char/hw_random/msm-rng.c @@ -14,6 +14,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/hw_random.h> +#include <linux/interconnect.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> @@ -39,6 +40,7 @@ struct msm_rng { void __iomem *base; struct clk *clk; struct hwrng hwrng; + struct icc_path *path; }; #define to_msm_rng(p) container_of(p, struct msm_rng, hwrng) @@ -94,6 +96,8 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) if (ret) return ret; + icc_set(rng->path, 0, 800); + /* read random data from hardware */ do { val = readl_relaxed(rng->base + PRNG_STATUS); @@ -112,6 +116,8 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) break; } while (currsize < maxsize); + icc_set(rng->path, 0, 0); + clk_disable_unprepare(rng->clk); return currsize; @@ -148,6 +154,10 @@ static int msm_rng_probe(struct platform_device *pdev) if (IS_ERR(rng->clk)) return PTR_ERR(rng->clk); + rng->path = of_icc_get(&pdev->dev, "cpu"); + if (IS_ERR(rng->path)) + return PTR_ERR(rng->path); + rng->hwrng.name = KBUILD_MODNAME, rng->hwrng.init = msm_rng_init, rng->hwrng.cleanup = msm_rng_cleanup, diff --git 
a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 6d8e3a9a6fc0..01b1891ee434 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -16,6 +16,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/interconnect.h> #include <linux/of_irq.h> #include "msm_drv.h" @@ -1001,6 +1002,19 @@ static const struct component_ops mdp5_ops = { static int mdp5_dev_probe(struct platform_device *pdev) { + struct icc_path *path0 = of_icc_get(&pdev->dev, "port0"); + struct icc_path *path1 = of_icc_get(&pdev->dev, "port1"); + struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator"); + + if (IS_ERR(path0)) + return PTR_ERR(path0); + icc_set(path0, 0, 6400000); + + if (!IS_ERR(path1)) + icc_set(path1, 0, 6400000); + if (!IS_ERR(path_rot)) + icc_set(path_rot, 0, 6400000); + DBG(""); return component_add(&pdev->dev, &mdp5_ops); } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 1c09acfb4028..463f84c56623 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -84,6 +84,30 @@ static struct devfreq_dev_profile msm_devfreq_profile = { .get_cur_freq = msm_devfreq_get_cur_freq, }; +static int bs_init(struct msm_gpu *gpu) +{ + gpu->path = of_icc_get(&gpu->pdev->dev, "gfx"); + if (IS_ERR(gpu->path)) + return PTR_ERR(gpu->path); + + return 0; +} + +static void bs_fini(struct msm_gpu *gpu) +{ + icc_put(gpu->path); +} + +static void bs_set(struct msm_gpu *gpu, int idx) +{ + u32 peak_bw = 0; + + if (idx > 0) + peak_bw = 14432000; + + icc_set(gpu->path, 0, peak_bw); +} + static void msm_devfreq_init(struct msm_gpu *gpu) { /* We need target support to do devfreq */ @@ -191,6 +215,7 @@ static int enable_axi(struct msm_gpu *gpu) { if (gpu->ebi1_clk) clk_prepare_enable(gpu->ebi1_clk); + bs_set(gpu, 1); return 0; } @@ -198,6 +223,7 @@ static int disable_axi(struct msm_gpu *gpu) { if (gpu->ebi1_clk) 
clk_disable_unprepare(gpu->ebi1_clk); + bs_set(gpu, 0); return 0; } @@ -753,6 +779,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); gpu->dev = drm; + gpu->pdev = pdev; gpu->funcs = funcs; gpu->name = name; @@ -792,6 +819,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (ret) goto fail; + ret = bs_init(gpu); + if (ret) + goto fail; + gpu->ebi1_clk = msm_clk_get(pdev, "bus"); DBG("ebi1_clk: %p", gpu->ebi1_clk); if (IS_ERR(gpu->ebi1_clk)) @@ -808,7 +839,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (IS_ERR(gpu->gpu_cx)) gpu->gpu_cx = NULL; - gpu->pdev = pdev; platform_set_drvdata(pdev, gpu); msm_devfreq_init(gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index b8241179175a..307c0711da49 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -20,6 +20,7 @@ #include <linux/clk.h> #include <linux/regulator/consumer.h> +#include <linux/interconnect.h> #include "msm_drv.h" #include "msm_fence.h" @@ -112,6 +113,7 @@ struct msm_gpu { int nr_clocks; struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; uint32_t fast_rate; + struct icc_path *path; /* Hang and Inactivity Detection: */ diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 904dfec7ab96..0138c2120d14 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -14,6 +14,7 @@ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/i2c.h> +#include <linux/interconnect.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -271,6 +272,9 @@ struct qup_i2c_dev { void (*read_rx_fifo)(struct qup_i2c_dev *qup); /* function to write tags in tx fifo for i2c read transfer */ void (*write_rx_tags)(struct qup_i2c_dev *qup); + + /* The interconnect path */ + struct icc_path *path; }; static irqreturn_t qup_i2c_interrupt(int irq, void *dev) @@ -1752,6 
+1756,10 @@ nodma: return qup->irq; } + qup->path = of_icc_get(qup->dev, "ddr"); + if (IS_ERR(qup->path)) + return PTR_ERR(qup->path); + if (has_acpi_companion(qup->dev)) { ret = device_property_read_u32(qup->dev, "src-clock-hz", &src_clk_freq); @@ -1911,15 +1919,19 @@ static int qup_i2c_pm_suspend_runtime(struct device *device) dev_dbg(device, "pm_runtime: suspending...\n"); qup_i2c_disable_clocks(qup); + icc_set(qup->path, 0, 0); return 0; } static int qup_i2c_pm_resume_runtime(struct device *device) { struct qup_i2c_dev *qup = dev_get_drvdata(device); + u32 freq; dev_dbg(device, "pm_runtime: resuming...\n"); qup_i2c_enable_clocks(qup); + freq = clk_get_rate(qup->clk); + icc_set(qup->path, 0, freq * 8); return 0; } #endif diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig new file mode 100644 index 000000000000..07a8276fa35a --- /dev/null +++ b/drivers/interconnect/Kconfig @@ -0,0 +1,15 @@ +menuconfig INTERCONNECT + tristate "On-Chip Interconnect management support" + help + Support for management of the on-chip interconnects. + + This framework is designed to provide a generic interface for + managing the interconnects in a SoC. + + If unsure, say no. + +if INTERCONNECT + +source "drivers/interconnect/qcom/Kconfig" + +endif diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile new file mode 100644 index 000000000000..7944cbca0527 --- /dev/null +++ b/drivers/interconnect/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_INTERCONNECT) += core.o +obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/ diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c new file mode 100644 index 000000000000..5e14b7a23142 --- /dev/null +++ b/drivers/interconnect/core.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Interconnect framework core driver + * + * Copyright (c) 2018, Linaro Ltd. 
+ * Author: Georgi Djakov <georgi.djakov@linaro.org> + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/interconnect.h> +#include <linux/interconnect-provider.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/uaccess.h> + +static DEFINE_IDR(icc_idr); +static LIST_HEAD(icc_provider_list); +static DEFINE_MUTEX(icc_provider_list_mutex); +static DEFINE_MUTEX(icc_path_mutex); +static struct dentry *icc_debugfs_dir; + +/** + * struct icc_req - constraints that are attached to each node + * + * @req_node: entry in list of requests for the particular @node + * @node: the interconnect node to which this constraint applies + * @dev: reference to the device that sets the constraints + * @avg_bw: an integer describing the average bandwidth in kbps + * @peak_bw: an integer describing the peak bandwidth in kbps + */ +struct icc_req { + struct hlist_node req_node; + struct icc_node *node; + struct device *dev; + u32 avg_bw; + u32 peak_bw; +}; + +/** + * struct icc_path - interconnect path structure + * @num_nodes: number of hops (nodes) + * @reqs: array of the requests applicable to this path of nodes + */ +struct icc_path { + size_t num_nodes; + struct icc_req reqs[0]; +}; + +#ifdef CONFIG_DEBUG_FS + +static void icc_summary_show_one(struct seq_file *s, struct icc_node *n) +{ + if (!n) + return; + + seq_printf(s, "%-30s %12d %12d\n", + n->name, n->avg_bw, n->peak_bw); +} + +static int icc_summary_show(struct seq_file *s, void *data) +{ + struct icc_provider *provider; + + seq_puts(s, " node avg peak\n"); + seq_puts(s, "--------------------------------------------------------\n"); + + mutex_lock(&icc_provider_list_mutex); + + list_for_each_entry(provider, &icc_provider_list, provider_list) { + struct icc_node *n; + + mutex_lock(&provider->lock); + list_for_each_entry(n, &provider->nodes, node_list) { + struct 
icc_req *r; + icc_summary_show_one(s, n); + + hlist_for_each_entry(r, &n->req_list, req_node) { + if (!r->dev) + continue; + + seq_printf(s, " %-26s %12d %12d\n", + dev_name(r->dev), r->avg_bw, r->peak_bw); + } + } + mutex_unlock(&provider->lock); + } + + mutex_unlock(&icc_provider_list_mutex); + + return 0; +} + +static int icc_summary_open(struct inode *inode, struct file *file) +{ + return single_open(file, icc_summary_show, inode->i_private); +} + +static const struct file_operations icc_summary_fops = { + .open = icc_summary_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init icc_debugfs_init(void) +{ + struct dentry *file; + + icc_debugfs_dir = debugfs_create_dir("interconnect", NULL); + if (!icc_debugfs_dir) { + pr_err("interconnect: error creating debugfs directory\n"); + return -ENODEV; + } + + file = debugfs_create_file("interconnect_summary", 0444, + icc_debugfs_dir, NULL, &icc_summary_fops); + if (!file) + return -ENODEV; + + return 0; +} +late_initcall(icc_debugfs_init); +#endif + +static struct icc_node *node_find(const int id) +{ + struct icc_node *node; + + node = idr_find(&icc_idr, id); + + return node; +} + +static struct icc_path *path_allocate(struct icc_node *node, ssize_t num_nodes) +{ + struct icc_path *path; + size_t i; + + path = kzalloc(sizeof(*path) + num_nodes * sizeof(*path->reqs), + GFP_KERNEL); + if (!path) + return ERR_PTR(-ENOMEM); + + path->num_nodes = num_nodes; + + for (i = 0; i < num_nodes; i++) { + hlist_add_head(&path->reqs[i].req_node, &node->req_list); + + path->reqs[i].node = node; + /* reference to previous node was saved during path traversal */ + node = node->reverse; + } + + return path; +} + +static struct icc_path *path_find(struct icc_node *src, struct icc_node *dst) +{ + struct icc_node *tmp_node, *node = NULL; + struct icc_provider *provider; + struct list_head traverse_list; + struct list_head edge_list; + struct list_head route_list; + size_t i, number = 0; + 
bool found = false; + + INIT_LIST_HEAD(&traverse_list); + INIT_LIST_HEAD(&edge_list); + INIT_LIST_HEAD(&route_list); + + list_add_tail(&src->search_list, &traverse_list); + + do { + list_for_each_entry_safe(node, tmp_node, &traverse_list, search_list) { + if (node == dst) { + found = true; + list_add(&node->search_list, &route_list); + break; + } + for (i = 0; i < node->num_links; i++) { + struct icc_node *tmp = node->links[i]; + + if (!tmp) + return ERR_PTR(-ENOENT); + + if (tmp->is_traversed) + continue; + + tmp->is_traversed = true; + tmp->reverse = node; + list_add(&tmp->search_list, &edge_list); + } + } + if (found) + break; + + list_splice_init(&traverse_list, &route_list); + list_splice_init(&edge_list, &traverse_list); + + /* count the number of nodes */ + number++; + + } while (!list_empty(&traverse_list)); + + /* reset the traversed state */ + mutex_lock(&icc_provider_list_mutex); + list_for_each_entry(provider, &icc_provider_list, provider_list) { + mutex_lock(&provider->lock); + list_for_each_entry(tmp_node, &provider->nodes, node_list) + if (tmp_node->is_traversed) + tmp_node->is_traversed = false; + mutex_unlock(&provider->lock); + } + mutex_unlock(&icc_provider_list_mutex); + + if (found) + return path_allocate(dst, number); + + return ERR_PTR(-EPROBE_DEFER); +} + +static int path_init(struct device *dev, struct icc_path *path) +{ + struct icc_node *node; + size_t i; + + for (i = 0; i < path->num_nodes; i++) { + node = path->reqs[i].node; + path->reqs[i].dev = dev; + + mutex_lock(&node->provider->lock); + node->provider->users++; + mutex_unlock(&node->provider->lock); + } + + return 0; +} + +static int aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw, + u32 *agg_avg, u32 *agg_peak) +{ + *agg_avg += node->avg_bw + avg_bw; + *agg_peak = max(node->peak_bw, peak_bw); + + return 0; +} + +static void provider_aggregate(struct icc_provider *provider, u32 *avg_bw, + u32 *peak_bw) +{ + struct icc_node *n; + u32 agg_avg = 0; + u32 agg_peak = 0; + + /* 
aggregate for the interconnect provider */ + list_for_each_entry(n, &provider->nodes, node_list) { + if (provider->aggregate) + provider->aggregate(n, agg_avg, agg_peak, + &agg_avg, &agg_peak); + else + aggregate(n, agg_avg, agg_peak, + &agg_avg, &agg_peak); + } + + *avg_bw = agg_avg; + *peak_bw = agg_peak; +} + +static int constraints_apply(struct icc_path *path) +{ + struct icc_node *next, *prev = NULL; + int i; + + for (i = 0; i < path->num_nodes; i++, prev = next) { + struct icc_provider *provider; + u32 avg_bw = 0; + u32 peak_bw = 0; + int ret; + + next = path->reqs[i].node; + /* + * Both endpoints should be valid master-slave pairs of the + * same interconnect provider that will be configured. + */ + if (!next || !prev) + continue; + + if (next->provider != prev->provider) + continue; + + provider = next->provider; + mutex_lock(&provider->lock); + + /* aggregate requests for the provider */ + provider_aggregate(provider, &avg_bw, &peak_bw); + + if (provider->set) { + /* set the constraints */ + ret = provider->set(prev, next, avg_bw, peak_bw); + } + + mutex_unlock(&provider->lock); + + if (ret) + return ret; + } + + return 0; +} + +struct icc_path *of_icc_get(struct device *dev, const char *name) +{ + struct device_node *np = NULL; + u32 src_id, dst_id; + int index = 0; + int ret; + + if (dev->of_node) + np = dev->of_node; + + if (name) { + index = of_property_match_string(np, "interconnect-names", name); + if (index < 0) + return ERR_PTR(index); + } + + /* + * We use a combination of phandle and specifier for endpoint. For now + * lets support only global ids and extend this is the future if needed + * without breaking DT compatibility. 
+ */ + ret = of_property_read_u32_index(np, "interconnects", index * 4 + 1, + &src_id); + if (ret) { + pr_err("%s: %s src port is invalid (%d)\n", __func__, np->name, + ret); + return ERR_PTR(ret); + } + ret = of_property_read_u32_index(np, "interconnects", index * 4 + 3, + &dst_id); + if (ret) { + pr_err("%s: %s dst port is invalid (%d)\n", __func__, np->name, + ret); + return ERR_PTR(ret); + } + + return icc_get(dev, src_id, dst_id); +} +EXPORT_SYMBOL_GPL(of_icc_get); + +/** + * icc_set() - set constraints on an interconnect path between two endpoints + * @path: reference to the path returned by icc_get() + * @avg_bw: average bandwidth in kbps + * @peak_bw: peak bandwidth in kbps + * + * This function is used by an interconnect consumer to express its own needs + * in term of bandwidth and QoS for a previously requested path between two + * endpoints. The requests are aggregated and each node is updated accordingly. + * + * Returns 0 on success, or an approproate error code otherwise. 
+ */ +int icc_set(struct icc_path *path, u32 avg_bw, u32 peak_bw) +{ + struct icc_node *node; + struct icc_provider *p; + size_t i; + int ret; + + if (!path) + return 0; + + for (i = 0; i < path->num_nodes; i++) { + struct icc_req *r; + u32 agg_avg = 0; + u32 agg_peak = 0; + + node = path->reqs[i].node; + p = node->provider; + + mutex_lock(&icc_path_mutex); + + /* update the consumer request for this path */ + path->reqs[i].avg_bw = avg_bw; + path->reqs[i].peak_bw = peak_bw; + + /* aggregate requests for this node */ + if (p->aggregate) { + hlist_for_each_entry(r, &node->req_list, req_node) { + p->aggregate(node, r->avg_bw, r->peak_bw, + &agg_avg, &agg_peak); + } + node->avg_bw = agg_avg; + node->peak_bw = agg_peak; + } else { + hlist_for_each_entry(r, &node->req_list, req_node) { + /* sum(averages) and max(peaks) */ + agg_avg += r->avg_bw; + agg_peak = max(agg_peak, r->peak_bw); + } + node->avg_bw = agg_avg; + node->peak_bw = agg_peak; + } + + mutex_unlock(&icc_path_mutex); + } + + ret = constraints_apply(path); + if (ret) + pr_err("interconnect: error applying constraints (%d)", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(icc_set); + +/** + * icc_get() - return a handle for path between two endpoints + * @dev: the device requesting the path + * @src_id: source device port id + * @dst_id: destination device port id + * + * This function will search for a path between two endpoints and return an + * icc_path handle on success. Use icc_put() to release + * constraints when the they are not needed anymore. 
+ * + * Return: icc_path pointer on success, or ERR_PTR() on error + */ +struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id) +{ + struct icc_node *src, *dst; + struct icc_path *path = ERR_PTR(-EPROBE_DEFER); + + src = node_find(src_id); + if (!src) { + dev_err(dev, "%s: invalid src=%d\n", __func__, src_id); + goto out; + } + + dst = node_find(dst_id); + if (!dst) { + dev_err(dev, "%s: invalid dst=%d\n", __func__, dst_id); + goto out; + } + + mutex_lock(&icc_path_mutex); + path = path_find(src, dst); + mutex_unlock(&icc_path_mutex); + if (IS_ERR(path)) { + dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path)); + goto out; + } + + path_init(dev, path); + +out: + return path; +} +EXPORT_SYMBOL_GPL(icc_get); + +/** + * icc_put() - release the reference to the icc_path + * @path: interconnect path + * + * Use this function to release the constraints on a path when the path is + * no longer needed. The constraints will be re-aggregated. + */ +void icc_put(struct icc_path *path) +{ + struct icc_node *node; + size_t i; + int ret; + + if (!path || WARN_ON_ONCE(IS_ERR(path))) + return; + + ret = icc_set(path, 0, 0); + if (ret) + pr_err("%s: error (%d)\n", __func__, ret); + + for (i = 0; i < path->num_nodes; i++) { + node = path->reqs[i].node; + hlist_del(&path->reqs[i].req_node); + + mutex_lock(&node->provider->lock); + node->provider->users--; + mutex_unlock(&node->provider->lock); + } + + kfree(path); +} +EXPORT_SYMBOL_GPL(icc_put); + +/** + * icc_node_create() - create a node + * @id: node id + * + * Return: icc_node pointer on success, or ERR_PTR() on error + */ +struct icc_node *icc_node_create(int id) +{ + struct icc_node *node; + + /* check if node already exists */ + node = node_find(id); + if (node) + return node; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return ERR_PTR(-ENOMEM); + + id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL); + if (WARN(id < 0, "couldn't get idr")) + return ERR_PTR(id); + + 
node->id = id; + + return node; +} +EXPORT_SYMBOL_GPL(icc_node_create); + +/** + * icc_link_create() - create a link between two nodes + * @src_id: source node id + * @dst_id: destination node id + * + * Return: 0 on success, or an error code otherwise + */ +int icc_link_create(struct icc_node *node, const int dst_id) +{ + struct icc_node *dst; + struct icc_node **new; + int ret = 0; + + if (IS_ERR_OR_NULL(node)) + return PTR_ERR(node); + + mutex_lock(&node->provider->lock); + + dst = node_find(dst_id); + if (!dst) + dst = icc_node_create(dst_id); + + new = krealloc(node->links, + (node->num_links + 1) * sizeof(*node->links), + GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + goto out; + } + + node->links = new; + node->links[node->num_links++] = dst; + +out: + mutex_unlock(&node->provider->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(icc_link_create); + +/** + * icc_add_node() - add an interconnect node to interconnect provider + * @node: pointer to the interconnect node + * @provider: pointer to the interconnect provider + * + * Return: 0 on success, or an error code otherwise + */ +int icc_node_add(struct icc_node *node, struct icc_provider *provider) +{ + if (WARN_ON(!node)) + return -EINVAL; + + if (WARN_ON(!provider)) + return -EINVAL; + + node->provider = provider; + + mutex_lock(&provider->lock); + list_add(&node->node_list, &provider->nodes); + mutex_unlock(&provider->lock); + + return 0; +} + +/** + * icc_provider_add() - add a new interconnect provider + * @icc_provider: the interconnect provider that will be added into topology + * + * Return: 0 on success, or an error code otherwise + */ +int icc_provider_add(struct icc_provider *provider) +{ + if (WARN_ON(!provider)) + return -EINVAL; + + if (WARN_ON(!provider->set)) + return -EINVAL; + + mutex_init(&provider->lock); + INIT_LIST_HEAD(&provider->nodes); + + mutex_lock(&icc_provider_list_mutex); + list_add(&provider->provider_list, &icc_provider_list); + mutex_unlock(&icc_provider_list_mutex); + + 
dev_dbg(provider->dev, "interconnect provider added to topology\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(icc_provider_add); + +/** + * icc_provider_del() - delete previously added interconnect provider + * @icc_provider: the interconnect provider that will be removed from topology + * + * Return: 0 on success, or an error code otherwise + */ +int icc_provider_del(struct icc_provider *provider) +{ + mutex_lock(&provider->lock); + if (provider->users) { + pr_warn("interconnect provider still has %d users\n", + provider->users); + } + mutex_unlock(&provider->lock); + + mutex_lock(&icc_provider_list_mutex); + list_del(&provider->provider_list); + mutex_unlock(&icc_provider_list_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(icc_provider_del); + +MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org"); +MODULE_DESCRIPTION("Interconnect Driver Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig new file mode 100644 index 000000000000..ca03803c7433 --- /dev/null +++ b/drivers/interconnect/qcom/Kconfig @@ -0,0 +1,17 @@ +config INTERCONNECT_QCOM + bool "Qualcomm Network-on-Chip interconnect drivers" + depends on INTERCONNECT + depends on ARCH_QCOM || COMPILE_TEST + default y + +config INTERCONNECT_QCOM_MSM8916 + tristate "Qualcomm MSM8916 interconnect driver" + depends on INTERCONNECT_QCOM + help + This is a driver for the Qualcomm Network-on-Chip on msm8916-based platforms. + +config INTERCONNECT_QCOM_MSM8996 + tristate "Qualcomm MSM8996 interconnect driver" + depends on INTERCONNECT_QCOM + help + This is a driver for the Qualcomm Network-on-Chip on msm8996-based platforms. 
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile new file mode 100644 index 000000000000..23c8c7157161 --- /dev/null +++ b/drivers/interconnect/qcom/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += smd-rpm.o + +obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += msm8916.o +obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += msm8996.o diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c new file mode 100644 index 000000000000..d4b44eee9524 --- /dev/null +++ b/drivers/interconnect/qcom/msm8916.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Linaro Ltd + * Author: Georgi Djakov <georgi.djakov@linaro.org> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "smd-rpm.h" + +#define RPM_MASTER_FIELD_BW 0x00007762 +#define RPM_BUS_MASTER_REQ 0x73616d62 +#define RPM_BUS_SLAVE_REQ 0x766c7362 + +#define to_qcom_provider(_provider) \ + container_of(_provider, struct qcom_icc_provider, provider) + +#define DEFINE_QNODE(_name, _id, _port, _buswidth, _ap_owned, \ + _mas_rpm_id, _slv_rpm_id, _qos_mode, \ + _numlinks, ...) 
\ + static struct qcom_icc_node _name = { \ + .id = _id, \ + .name = #_name, \ + .port = _port, \ + .buswidth = _buswidth, \ + .qos_mode = _qos_mode, \ + .ap_owned = _ap_owned, \ + .mas_rpm_id = _mas_rpm_id, \ + .slv_rpm_id = _slv_rpm_id, \ + .num_links = _numlinks, \ + .links = { __VA_ARGS__ }, \ + } + +enum qcom_qos_mode { + QCOM_QOS_MODE_BYPASS = 0, + QCOM_QOS_MODE_FIXED, + QCOM_QOS_MODE_MAX, +}; + +struct qcom_icc_provider { + struct icc_provider provider; + void __iomem *base; + struct clk *bus_clk; + struct clk *bus_a_clk; +}; + +#define MSM8916_MAX_LINKS 8 + +/** + * struct qcom_icc_node - Qualcomm specific interconnect nodes + * @name: the node name used in debugfs + * @links: an array of nodes where we can go next while traversing + * @id: a unique node identifier + * @num_links: the total number of @links + * @port: the offset index into the masters QoS register space + * @buswidth: width of the interconnect between a node and the bus + * @ap_owned: the AP CPU does the writing to QoS registers + * @rpm: reference to the RPM SMD driver + * @qos_mode: QoS mode for ap_owned resources + * @mas_rpm_id: RPM id for devices that are bus masters + * @slv_rpm_id: RPM id for devices that are bus slaves + * @rate: current bus clock rate in Hz + */ +struct qcom_icc_node { + unsigned char *name; + u16 links[MSM8916_MAX_LINKS]; + u16 id; + u16 num_links; + u16 port; + u16 buswidth; + bool ap_owned; + struct qcom_smd_rpm *rpm; + enum qcom_qos_mode qos_mode; + int mas_rpm_id; + int slv_rpm_id; + u64 rate; +}; + +struct qcom_icc_desc { + struct qcom_icc_node **nodes; + size_t num_nodes; +}; + +DEFINE_QNODE(mas_video, 63, 8, 16, 1, -1, -1, QCOM_QOS_MODE_BYPASS, 2, 10000, 10002); +DEFINE_QNODE(mas_jpeg, 62, 6, 16, 1, -1, -1, QCOM_QOS_MODE_BYPASS, 2, 10000, 10002); +DEFINE_QNODE(mas_vfe, 29, 9, 16, 1, -1, -1, QCOM_QOS_MODE_BYPASS, 2, 10001, 10002); +DEFINE_QNODE(mas_mdp, 22, 7, 16, 1, -1, -1, QCOM_QOS_MODE_BYPASS, 2, 10000, 10002); +DEFINE_QNODE(mas_qdss_bam, 53, 11, 16, 1, 
-1, -1, QCOM_QOS_MODE_FIXED, 1, 10009); +DEFINE_QNODE(mas_snoc_cfg, 54, 0, 16, 0, 20, -1, QCOM_QOS_MODE_BYPASS, 1, 10009); +DEFINE_QNODE(mas_qdss_etr, 60, 10, 16, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10009); +DEFINE_QNODE(mm_int_0, 10000, 0, 16, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10003); +DEFINE_QNODE(mm_int_1, 10001, 0, 16, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10003); +DEFINE_QNODE(mm_int_2, 10002, 0, 16, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10004); +DEFINE_QNODE(mm_int_bimc, 10003, 0, 16, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10008); +DEFINE_QNODE(snoc_int_0, 10004, 0, 8, 0, 99, 130, QCOM_QOS_MODE_FIXED, 3, 588, 519, 10027); +DEFINE_QNODE(snoc_int_1, 10005, 0, 8, 0, 100, 131, QCOM_QOS_MODE_FIXED, 3, 517, 663, 664); +DEFINE_QNODE(snoc_int_bimc, 10006, 0, 8, 0, 101, 132, QCOM_QOS_MODE_FIXED, 1, 10007); +DEFINE_QNODE(snoc_bimc_0_mas, 10007, 0, 8, 0, 3, -1, QCOM_QOS_MODE_FIXED, 1, 10025); +DEFINE_QNODE(snoc_bimc_1_mas, 10008, 0, 16, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10026); +DEFINE_QNODE(qdss_int, 10009, 0, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 2, 10004, 10006); +DEFINE_QNODE(bimc_snoc_slv, 10017, 0, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 2, 10004, 10005); +DEFINE_QNODE(snoc_pnoc_mas, 10027, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10028); +DEFINE_QNODE(pnoc_snoc_slv, 10011, 0, 8, 0, -1, 45, QCOM_QOS_MODE_FIXED, 3, 10004, 10006, 10005); +DEFINE_QNODE(slv_srvc_snoc, 587, 0, 8, 0, -1, 29, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_qdss_stm, 588, 0, 4, 0, -1, 30, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_imem, 519, 0, 8, 0, -1, 26, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_apss, 517, 0, 4, 0, -1, 20, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_cats_0, 663, 0, 16, 0, -1, 106, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_cats_1, 664, 0, 8, 0, -1, 107, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(mas_apss, 1, 0, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 3, 512, 10016, 514); +DEFINE_QNODE(mas_tcu0, 104, 5, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 3, 512, 10016, 514); +DEFINE_QNODE(mas_tcu1, 
105, 6, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 3, 512, 10016, 514); +DEFINE_QNODE(mas_gfx, 26, 2, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 3, 512, 10016, 514); +DEFINE_QNODE(bimc_snoc_mas, 10016, 0, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10017); +DEFINE_QNODE(snoc_bimc_0_slv, 10025, 0, 8, 0, -1, 24, QCOM_QOS_MODE_FIXED, 1, 512); +DEFINE_QNODE(snoc_bimc_1_slv, 10026, 0, 8, 1, -1, -1, QCOM_QOS_MODE_FIXED, 1, 512); +DEFINE_QNODE(slv_ebi_ch0, 512, 0, 8, 0, -1, 0, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_apps_l2, 514, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(snoc_pnoc_slv, 10028, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10012); +DEFINE_QNODE(pnoc_int_0, 10012, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 8, 10010, 10018, 10019, 10020, 10021, 10022, 10023, 10024); +DEFINE_QNODE(pnoc_int_1, 10013, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10010); +DEFINE_QNODE(pnoc_m_0, 10014, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10012); +DEFINE_QNODE(pnoc_m_1, 10015, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10010); +DEFINE_QNODE(pnoc_s_0, 10018, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 5, 620, 624, 579, 622, 521); +DEFINE_QNODE(pnoc_s_1, 10019, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 5, 627, 625, 535, 577, 618); +DEFINE_QNODE(pnoc_s_2, 10020, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 5, 533, 630, 629, 641, 632); +DEFINE_QNODE(pnoc_s_3, 10021, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 5, 536, 647, 636, 635, 634); +DEFINE_QNODE(pnoc_s_4, 10022, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 3, 596, 589, 590); +DEFINE_QNODE(pnoc_s_8, 10023, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 3, 614, 606, 613); +DEFINE_QNODE(pnoc_s_9, 10024, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 3, 609, 522, 598); +DEFINE_QNODE(slv_imem_cfg, 627, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_crypto_0_cfg, 625, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_msg_ram, 535, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_pdm, 577, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); 
+DEFINE_QNODE(slv_prng, 618, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_clk_ctl, 620, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_mss, 521, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_tlmm, 624, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_tcsr, 579, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_security, 622, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_spdm, 533, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_pnoc_cfg, 641, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_pmic_arb, 632, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_bimc_cfg, 629, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_boot_rom, 630, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_mpm, 536, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_qdss_cfg, 635, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_rbcpr_cfg, 636, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_snoc_cfg, 647, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_dehr_cfg, 634, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_venus_cfg, 596, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_display_cfg, 590, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_camera_cfg, 589, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_usb_hs, 614, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_sdcc_1, 606, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_blsp_1, 613, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_sdcc_2, 609, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_gfx_cfg, 598, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(slv_audio, 522, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 0, 0); +DEFINE_QNODE(mas_blsp_1, 86, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10015); 
+DEFINE_QNODE(mas_spdm, 36, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10014); +DEFINE_QNODE(mas_dehr, 75, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10014); +DEFINE_QNODE(mas_audio, 15, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10014); +DEFINE_QNODE(mas_usb_hs, 87, 0, 4, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10015); +DEFINE_QNODE(mas_pnoc_crypto_0, 55, 0, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10013); +DEFINE_QNODE(mas_pnoc_sdcc_1, 78, 7, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10013); +DEFINE_QNODE(mas_pnoc_sdcc_2, 81, 8, 8, 0, -1, -1, QCOM_QOS_MODE_FIXED, 1, 10013); +DEFINE_QNODE(pnoc_snoc_mas, 10010, 0, 8, 0, 29, -1, QCOM_QOS_MODE_FIXED, 1, 10011); + +static struct qcom_icc_node *msm8916_snoc_nodes[] = { + &mas_video, + &mas_jpeg, + &mas_vfe, + &mas_mdp, + &mas_qdss_bam, + &mas_snoc_cfg, + &mas_qdss_etr, + &mm_int_0, + &mm_int_1, + &mm_int_2, + &mm_int_bimc, + &snoc_int_0, + &snoc_int_1, + &snoc_int_bimc, + &snoc_bimc_0_mas, + &snoc_bimc_1_mas, + &qdss_int, + &bimc_snoc_slv, + &snoc_pnoc_mas, + &pnoc_snoc_slv, + &slv_srvc_snoc, + &slv_qdss_stm, + &slv_imem, + &slv_apss, + &slv_cats_0, + &slv_cats_1, +}; + +static struct qcom_icc_desc msm8916_snoc = { + .nodes = msm8916_snoc_nodes, + .num_nodes = ARRAY_SIZE(msm8916_snoc_nodes), +}; + +static struct qcom_icc_node *msm8916_bimc_nodes[] = { + &mas_apss, + &mas_tcu0, + &mas_tcu1, + &mas_gfx, + &bimc_snoc_mas, + &snoc_bimc_0_slv, + &snoc_bimc_1_slv, + &slv_ebi_ch0, + &slv_apps_l2, +}; + +static struct qcom_icc_desc msm8916_bimc = { + .nodes = msm8916_bimc_nodes, + .num_nodes = ARRAY_SIZE(msm8916_bimc_nodes), +}; + +static struct qcom_icc_node *msm8916_pnoc_nodes[] = { + &snoc_pnoc_slv, + &pnoc_int_0, + &pnoc_int_1, + &pnoc_m_0, + &pnoc_m_1, + &pnoc_s_0, + &pnoc_s_1, + &pnoc_s_2, + &pnoc_s_3, + &pnoc_s_4, + &pnoc_s_8, + &pnoc_s_9, + &slv_imem_cfg, + &slv_crypto_0_cfg, + &slv_msg_ram, + &slv_pdm, + &slv_prng, + &slv_clk_ctl, + &slv_mss, + &slv_tlmm, + &slv_tcsr, + &slv_security, + &slv_spdm, + &slv_pnoc_cfg, + &slv_pmic_arb, + 
&slv_bimc_cfg, + &slv_boot_rom, + &slv_mpm, + &slv_qdss_cfg, + &slv_rbcpr_cfg, + &slv_snoc_cfg, + &slv_dehr_cfg, + &slv_venus_cfg, + &slv_display_cfg, + &slv_camera_cfg, + &slv_usb_hs, + &slv_sdcc_1, + &slv_blsp_1, + &slv_sdcc_2, + &slv_gfx_cfg, + &slv_audio, + &mas_blsp_1, + &mas_spdm, + &mas_dehr, + &mas_audio, + &mas_usb_hs, + &mas_pnoc_crypto_0, + &mas_pnoc_sdcc_1, + &mas_pnoc_sdcc_2, + &pnoc_snoc_mas, +}; + +static struct qcom_icc_desc msm8916_pnoc = { + .nodes = msm8916_pnoc_nodes, + .num_nodes = ARRAY_SIZE(msm8916_pnoc_nodes), +}; + +static int qcom_icc_init(struct icc_node *node) +{ + struct qcom_icc_provider *qp = to_qcom_provider(node->provider); + int ret; + + /* TODO: init qos and priority */ + + clk_set_rate(qp->bus_clk, INT_MAX); + ret = clk_prepare_enable(qp->bus_clk); + if (ret) + pr_info("%s: error enabling bus clk (%d)\n", __func__, ret); + clk_set_rate(qp->bus_a_clk, INT_MAX); + ret = clk_prepare_enable(qp->bus_a_clk); + if (ret) + pr_info("%s: error enabling bus_a clk (%d)\n", __func__, ret); + + return 0; +} + +static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw, + u32 *agg_avg, u32 *agg_peak) +{ + /* sum(averages) and max(peaks) */ + *agg_avg = node->avg_bw + avg_bw; + *agg_peak = max(node->peak_bw, peak_bw); + + return 0; +} + +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst, + u32 avg, u32 peak) +{ + struct qcom_icc_provider *qp; + struct qcom_icc_node *qn; + struct icc_node *node; + struct icc_provider *provider; + u64 avg_bw; + u64 peak_bw; + u64 rate = 0; + int ret = 0; + + if (!src) + node = dst; + else + node = src; + + qn = node->data; + provider = node->provider; + qp = to_qcom_provider(node->provider); + + /* convert from kbps to bps */ + avg_bw = avg * 1000ULL; + peak_bw = peak * 1000ULL; + + /* set bandwidth */ + if (qn->ap_owned) { + /* TODO: set QoS */ + } else { + /* send message to the RPM processor */ + if (qn->mas_rpm_id != -1) { + ret = 
qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, + RPM_BUS_MASTER_REQ, + qn->mas_rpm_id, + avg_bw); + } + + if (qn->slv_rpm_id != -1) { + ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, + RPM_BUS_SLAVE_REQ, + qn->slv_rpm_id, + avg_bw); + } + } + + rate = max(avg_bw, peak_bw); + + do_div(rate, qn->buswidth); + + if (qn->rate != rate) { + ret = clk_set_rate(qp->bus_clk, rate); + if (ret) { + pr_err("set clk rate %lld error %d\n", rate, ret); + return ret; + } + + ret = clk_set_rate(qp->bus_a_clk, rate); + if (ret) { + pr_err("set clk rate %lld error %d\n", rate, ret); + return ret; + } + + qn->rate = rate; + } + + return ret; +} + +static int qnoc_probe(struct platform_device *pdev) +{ + const struct qcom_icc_desc *desc; + struct qcom_icc_node **qnodes; + struct qcom_icc_provider *qp; + struct resource *res; + struct icc_provider *provider; + size_t num_nodes, i; + int ret; + + /* wait for RPM */ + if (!qcom_icc_rpm_smd_available()) + return -EPROBE_DEFER; + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + qnodes = desc->nodes; + num_nodes = desc->num_nodes; + + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); + if (!qp) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + qp->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qp->base)) + return PTR_ERR(qp->base); + + qp->bus_clk = devm_clk_get(&pdev->dev, "bus_clk"); + if (IS_ERR(qp->bus_clk)) + return PTR_ERR(qp->bus_clk); + + qp->bus_a_clk = devm_clk_get(&pdev->dev, "bus_a_clk"); + if (IS_ERR(qp->bus_a_clk)) + return PTR_ERR(qp->bus_a_clk); + + provider = &qp->provider; + provider->dev = &pdev->dev; + provider->set = &qcom_icc_set; + provider->aggregate = &qcom_icc_aggregate; + INIT_LIST_HEAD(&provider->nodes); + provider->data = qp; + + ret = icc_provider_add(provider); + if (ret) { + dev_err(&pdev->dev, "error adding interconnect provider\n"); + return ret; + } + + for (i = 0; i < num_nodes; i++) { + struct icc_node *node; + int 
ret; + size_t j; + + node = icc_node_create(qnodes[i]->id); + if (IS_ERR(node)) { + ret = PTR_ERR(node); + goto err; + } + + node->name = qnodes[i]->name; + node->data = qnodes[i]; + icc_node_add(node, provider); + + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, + qnodes[i]->name, node->id); + + /* populate links */ + for (j = 0; j < qnodes[i]->num_links; j++) + if (qnodes[i]->links[j]) + icc_link_create(node, qnodes[i]->links[j]); + + ret = qcom_icc_init(node); + if (ret) + dev_err(&pdev->dev, "%s init error (%d)\n", node->name, + ret); + } + + platform_set_drvdata(pdev, provider); + + return ret; +err: + icc_provider_del(provider); + return ret; +} + +static int qnoc_remove(struct platform_device *pdev) +{ + struct icc_provider *provider = platform_get_drvdata(pdev); + + icc_provider_del(provider); + + return 0; +} + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,msm8916-pnoc", .data = &msm8916_pnoc }, + { .compatible = "qcom,msm8916-snoc", .data = &msm8916_snoc }, + { .compatible = "qcom,msm8916-bimc", .data = &msm8916_bimc }, + { }, +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-msm8916", + .of_match_table = qnoc_of_match, + }, +}; +module_platform_driver(qnoc_driver); +MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm msm8916 NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c new file mode 100644 index 000000000000..983eaf3778fc --- /dev/null +++ b/drivers/interconnect/qcom/msm8996.c @@ -0,0 +1,652 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Linaro Ltd + * Author: Georgi Djakov <georgi.djakov@linaro.org> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include 
<linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "smd-rpm.h" + +#define RPM_MASTER_FIELD_BW 0x00007762 +#define RPM_BUS_MASTER_REQ 0x73616d62 +#define RPM_BUS_SLAVE_REQ 0x766c7362 + +#define to_qcom_provider(_provider) \ + container_of(_provider, struct qcom_icc_provider, provider) + +#define DEFINE_QNODE(_name, _id, _port, _agg_ports, _buswidth, \ + _qos_mode, _ap_owned, _mas_rpm_id, _slv_rpm_id, \ + _numlinks, ...) \ + static struct qcom_icc_node _name = { \ + .id = _id, \ + .name = #_name, \ + .port = _port, \ + .agg_ports = _agg_ports, \ + .buswidth = _buswidth, \ + .qos_mode = _qos_mode, \ + .ap_owned = _ap_owned, \ + .mas_rpm_id = _mas_rpm_id, \ + .slv_rpm_id = _slv_rpm_id, \ + .num_links = _numlinks, \ + .links = { __VA_ARGS__ }, \ + } + +enum qcom_qos_mode { + QCOM_QOS_MODE_BYPASS = 0, + QCOM_QOS_MODE_FIXED, + QCOM_QOS_MODE_MAX, +}; + +struct qcom_icc_provider { + struct icc_provider provider; + void __iomem *base; + struct clk *bus_clk; + struct clk *bus_a_clk; + u32 base_offset; + u32 qos_offset; +}; + +#define MSM8996_MAX_LINKS 38 + +/** + * struct qcom_icc_node - Qualcomm specific interconnect nodes + * @name: the node name used in debugfs + * @links: an array of nodes where we can go next while traversing + * @id: a unique node identifier + * @num_links: the total number of @links + * @port: the offset index into the masters QoS register space + * @agg_ports: the number of aggregation ports on the bus + * @buswidth: width of the interconnect between a node and the bus + * @ap_owned: the AP CPU does the writing to QoS registers + * @rpm: reference to the RPM SMD driver + * @qos_mode: QoS mode for ap_owned resources + * @mas_rpm_id: RPM id for devices that are bus masters + * @slv_rpm_id: RPM id for devices that are bus slaves + * @rate: current bus clock rate in Hz + */ +struct qcom_icc_node { + unsigned char *name; + u16 links[MSM8996_MAX_LINKS]; + u16 id; + u16 num_links; + 
u16 port; + u16 agg_ports; /* The number of aggregation ports on the bus */ + u16 buswidth; /* width of the interconnect between a node and the bus */ + bool ap_owned; /* the AP CPU does the writing to QoS registers */ + struct qcom_smd_rpm *rpm; /* reference to the RPM driver */ + enum qcom_qos_mode qos_mode; + int mas_rpm_id; + int slv_rpm_id; + u64 rate; +}; + +struct qcom_icc_desc { + struct qcom_icc_node **nodes; + size_t num_nodes; +}; + +DEFINE_QNODE(mas_pcie_0, 45, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, 65, -1, 1, 10061); +DEFINE_QNODE(mas_pcie_1, 100, 1, 1, 8, QCOM_QOS_MODE_FIXED, 1, 66, -1, 1, 10061); +DEFINE_QNODE(mas_pcie_2, 108, 2, 1, 8, QCOM_QOS_MODE_FIXED, 1, 119, -1, 1, 10061); +DEFINE_QNODE(mas_cnoc_a1noc, 10059, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, 116, -1, 1, 10062); +DEFINE_QNODE(mas_crypto_c0, 55, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, 23, -1, 1, 10062); +DEFINE_QNODE(mas_pnoc_a1noc, 10057, 1, 1, 8, QCOM_QOS_MODE_FIXED, 0, 117, -1, 1, 10062); +DEFINE_QNODE(mas_usb3, 61, 3, 1, 8, QCOM_QOS_MODE_FIXED, 1, 32, -1, 1, 10065); +DEFINE_QNODE(mas_ipa, 90, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, 59, -1, 1, 10065); +DEFINE_QNODE(mas_ufs, 95, 2, 1, 8, QCOM_QOS_MODE_FIXED, 1, 68, -1, 1, 10065); +DEFINE_QNODE(mas_apps_proc, 1, 0, 2, 8, QCOM_QOS_MODE_FIXED, 1, 0, -1, 3, 10056, 512, 10017); +DEFINE_QNODE(mas_oxili, 26, 1, 2, 8, QCOM_QOS_MODE_BYPASS, 1, 6, -1, 4, 10056, 680, 512, 10017); +DEFINE_QNODE(mas_mnoc_bimc, 10027, 2, 2, 8, QCOM_QOS_MODE_BYPASS, 1, 2, -1, 4, 10056, 680, 512, 10017); +DEFINE_QNODE(mas_snoc_bimc, 10031, 0, 2, 8, QCOM_QOS_MODE_BYPASS, 0, 3, -1, 2, 680, 512); +DEFINE_QNODE(mas_snoc_cnoc, 10035, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 0, 52, -1, 37, 620, 716, 693, 707, 628, 631, 667, 624, 536, 691, 645, 629, 681, 715, 618, 685, 690, 635, 688, 686, 650, 625, 668, 642, 638, 689, 692, 684, 640, 683, 632, 627, 687, 697, 623, 694, 682); +DEFINE_QNODE(mas_qdss_dap, 76, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 1, 49, -1, 38, 683, 716, 693, 707, 628, 667, 624, 536, 691, 645, 629, 681, 715, 
620, 618, 685, 690, 635, 688, 686, 650, 625, 10034, 668, 642, 638, 689, 692, 684, 640, 631, 632, 627, 687, 697, 623, 694, 682); +DEFINE_QNODE(mas_cnoc_mnoc_mmss_cfg, 102, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 1, 4, -1, 21, 695, 699, 599, 709, 596, 706, 594, 701, 598, 700, 696, 589, 590, 592, 704, 698, 705, 708, 702, 703, 601); +DEFINE_QNODE(mas_cnoc_mnoc_cfg, 103, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 1, 5, -1, 1, 603); +DEFINE_QNODE(mas_cpp, 106, 5, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 115, -1, 1, 10028); +DEFINE_QNODE(mas_jpeg, 62, 7, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 7, -1, 1, 10028); +DEFINE_QNODE(mas_mdp_p0, 22, 1, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 8, -1, 1, 10028); +DEFINE_QNODE(mas_mdp_p1, 23, 2, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 61, -1, 1, 10028); +DEFINE_QNODE(mas_rotator, 25, 0, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 120, -1, 1, 10028); +DEFINE_QNODE(mas_venus, 63, 3, 2, 32, QCOM_QOS_MODE_BYPASS, 1, 9, -1, 1, 10028); +DEFINE_QNODE(mas_vfe, 29, 6, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 11, -1, 1, 10028); +DEFINE_QNODE(mas_snoc_vmem, 40, 0, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 114, -1, 1, 708); +DEFINE_QNODE(mas_venus_vmem, 68, 0, 1, 32, QCOM_QOS_MODE_BYPASS, 1, 121, -1, 1, 708); +DEFINE_QNODE(mas_snoc_pnoc, 10041, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 0, 44, -1, 9, 613, 611, 614, 606, 608, 609, 575, 615, 711); +DEFINE_QNODE(mas_sdcc_1, 78, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 0, 33, -1, 1, 10058); +DEFINE_QNODE(mas_sdcc_2, 81, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 0, 35, -1, 1, 10058); +DEFINE_QNODE(mas_sdcc_4, 80, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 0, 36, -1, 1, 10058); +DEFINE_QNODE(mas_usb_hs, 87, 0, 1, 8, QCOM_QOS_MODE_BYPASS, 0, 42, -1, 1, 10058); +DEFINE_QNODE(mas_blsp_1, 86, 0, 1, 4, QCOM_QOS_MODE_BYPASS, 0, 41, -1, 1, 10058); +DEFINE_QNODE(mas_blsp_2, 84, 0, 1, 4, QCOM_QOS_MODE_BYPASS, 0, 39, -1, 1, 10058); +DEFINE_QNODE(mas_tsif, 82, 0, 1, 4, QCOM_QOS_MODE_BYPASS, 0, 37, -1, 1, 10058); +DEFINE_QNODE(mas_hmss, 43, 4, 1, 8, QCOM_QOS_MODE_FIXED, 1, 118, -1, 3, 712, 585, 10032); +DEFINE_QNODE(mas_qdss_bam, 53, 2, 1, 
16, QCOM_QOS_MODE_FIXED, 1, 19, -1, 5, 712, 583, 585, 10032, 10042); +DEFINE_QNODE(mas_snoc_cfg, 54, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, 20, -1, 1, 587); +DEFINE_QNODE(mas_bimc_snoc_0, 10016, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, 21, -1, 9, 713, 583, 712, 522, 673, 10036, 10042, 585, 588); +DEFINE_QNODE(mas_bimc_snoc_1, 10055, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, 109, -1, 3, 714, 666, 665); +DEFINE_QNODE(mas_a0noc_snoc, 10060, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, 110, -1, 5, 10042, 585, 673, 10032, 712); +DEFINE_QNODE(mas_a1noc_snoc, 10063, 0, 1, 16, QCOM_QOS_MODE_FIXED, 0, 111, -1, 13, 713, 583, 665, 712, 714, 522, 666, 673, 10032, 10036, 10042, 585, 588); +DEFINE_QNODE(mas_a2noc_snoc, 10064, 0, 1, 16, QCOM_QOS_MODE_FIXED, 0, 112, -1, 12, 713, 583, 666, 712, 714, 588, 522, 10032, 10036, 10042, 585, 665); +DEFINE_QNODE(mas_qdss_etr, 60, 3, 1, 16, QCOM_QOS_MODE_FIXED, 1, 31, -1, 5, 712, 583, 585, 10032, 10042); +DEFINE_QNODE(slv_a0noc_snoc, 10061, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 141, 1, 10060); +DEFINE_QNODE(slv_a1noc_snoc, 10062, 0, 1, 8, QCOM_QOS_MODE_FIXED, 0, -1, 142, 1, 10063); +DEFINE_QNODE(slv_a2noc_snoc, 10065, 0, 1, 8, QCOM_QOS_MODE_FIXED, 0, -1, 143, 1, 10064); +DEFINE_QNODE(slv_ebi, 512, 0, 2, 8, QCOM_QOS_MODE_FIXED, 0, -1, 0, 0, 0); +DEFINE_QNODE(slv_hmss_l3, 680, 0, 1, 8, QCOM_QOS_MODE_FIXED, 0, -1, 160, 0, 0); +DEFINE_QNODE(slv_bimc_snoc_0, 10017, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 2, 1, 10016); +DEFINE_QNODE(slv_bimc_snoc_1, 10056, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 138, 1, 10055); +DEFINE_QNODE(slv_cnoc_a1noc, 10034, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 75, 1, 10059); +DEFINE_QNODE(slv_clk_ctl, 620, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 47, 0, 0); +DEFINE_QNODE(slv_tcsr, 623, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 50, 0, 0); +DEFINE_QNODE(slv_tlmm, 624, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 51, 0, 0); +DEFINE_QNODE(slv_crypto0_cfg, 625, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 52, 0, 0); +DEFINE_QNODE(slv_mpm, 536, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 62, 0, 
0); +DEFINE_QNODE(slv_pimem_cfg, 681, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 167, 0, 0); +DEFINE_QNODE(slv_imem_cfg, 627, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 54, 0, 0); +DEFINE_QNODE(slv_message_ram, 628, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 55, 0, 0); +DEFINE_QNODE(slv_bimc_cfg, 629, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 56, 0, 0); +DEFINE_QNODE(slv_pmic_arb, 632, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 59, 0, 0); +DEFINE_QNODE(slv_prng, 618, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 44, 0, 0); +DEFINE_QNODE(slv_dcc_cfg, 682, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 155, 0, 0); +DEFINE_QNODE(slv_rbcpr_mx, 715, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 170, 0, 0); +DEFINE_QNODE(slv_qdss_cfg, 635, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 63, 0, 0); +DEFINE_QNODE(slv_rbcpr_cx, 716, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 169, 0, 0); +DEFINE_QNODE(slv_cpr_apu_cfg, 683, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 168, 0, 0); +DEFINE_QNODE(slv_cnoc_mnoc_cfg, 640, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 66, 1, 103); +DEFINE_QNODE(slv_snoc_cfg, 642, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 70, 0, 0); +DEFINE_QNODE(slv_snoc_mpu_cfg, 638, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 67, 0, 0); +DEFINE_QNODE(slv_ebi1_phy_cfg, 645, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 73, 0, 0); +DEFINE_QNODE(slv_a0noc_cfg, 686, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 144, 0, 0); +DEFINE_QNODE(slv_pcie_1_cfg, 668, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 89, 0, 0); +DEFINE_QNODE(slv_pcie_2_cfg, 684, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 165, 0, 0); +DEFINE_QNODE(slv_pcie_0_cfg, 667, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 88, 0, 0); +DEFINE_QNODE(slv_pcie20_ahb2phy, 685, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 163, 0, 0); +DEFINE_QNODE(slv_a0noc_mpu_cfg, 707, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 145, 0, 0); +DEFINE_QNODE(slv_ufs_cfg, 650, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 92, 0, 0); +DEFINE_QNODE(slv_a1noc_cfg, 687, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 147, 0, 0); +DEFINE_QNODE(slv_a1noc_mpu_cfg, 689, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 148, 0, 0); 
+DEFINE_QNODE(slv_a2noc_cfg, 688, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 150, 0, 0); +DEFINE_QNODE(slv_a2noc_mpu_cfg, 690, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 151, 0, 0); +DEFINE_QNODE(slv_ssc_cfg, 697, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 177, 0, 0); +DEFINE_QNODE(slv_a0noc_smmu_cfg, 691, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 146, 0, 0); +DEFINE_QNODE(slv_a1noc_smmu_cfg, 692, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 149, 0, 0); +DEFINE_QNODE(slv_a2noc_smmu_cfg, 693, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 152, 0, 0); +DEFINE_QNODE(slv_lpass_smmu_cfg, 694, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 161, 0, 0); +DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, 631, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 58, 1, 102); +DEFINE_QNODE(slv_mmagic_cfg, 695, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 162, 0, 0); +DEFINE_QNODE(slv_cpr_cfg, 592, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 6, 0, 0); +DEFINE_QNODE(slv_misc_cfg, 594, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 8, 0, 0); +DEFINE_QNODE(slv_venus_throttle_cfg, 696, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 178, 0, 0); +DEFINE_QNODE(slv_venus_cfg, 596, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 10, 0, 0); +DEFINE_QNODE(slv_vmem_cfg, 708, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 180, 0, 0); +DEFINE_QNODE(slv_dsa_cfg, 698, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 157, 0, 0); +DEFINE_QNODE(slv_mnoc_clocks_cfg, 599, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 12, 0, 0); +DEFINE_QNODE(slv_dsa_mpu_cfg, 699, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 158, 0, 0); +DEFINE_QNODE(slv_mnoc_mpu_cfg, 601, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 14, 0, 0); +DEFINE_QNODE(slv_display_cfg, 590, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 4, 0, 0); +DEFINE_QNODE(slv_display_throttle_cfg, 700, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 156, 0, 0); +DEFINE_QNODE(slv_camera_cfg, 589, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 3, 0, 0); +DEFINE_QNODE(slv_camera_throttle_cfg, 709, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 154, 0, 0); +DEFINE_QNODE(slv_oxili_cfg, 598, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 11, 0, 0); +DEFINE_QNODE(slv_smmu_mdp_cfg, 
703, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 173, 0, 0); +DEFINE_QNODE(slv_smmu_rot_cfg, 704, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 174, 0, 0); +DEFINE_QNODE(slv_smmu_venus_cfg, 705, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 175, 0, 0); +DEFINE_QNODE(slv_smmu_cpp_cfg, 701, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 171, 0, 0); +DEFINE_QNODE(slv_smmu_jpeg_cfg, 702, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 172, 0, 0); +DEFINE_QNODE(slv_smmu_vfe_cfg, 706, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 176, 0, 0); +DEFINE_QNODE(slv_mnoc_bimc, 10028, 0, 2, 32, QCOM_QOS_MODE_FIXED, 1, -1, 16, 1, 10027); +DEFINE_QNODE(slv_vmem, 710, 0, 1, 32, QCOM_QOS_MODE_FIXED, 1, -1, 179, 0, 0); +DEFINE_QNODE(slv_srvc_mnoc, 603, 0, 1, 8, QCOM_QOS_MODE_FIXED, 1, -1, 17, 0, 0); +DEFINE_QNODE(slv_pnoc_a1noc, 10058, 0, 1, 8, QCOM_QOS_MODE_FIXED, 0, -1, 139, 1, 10057); +DEFINE_QNODE(slv_usb_hs, 614, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 40, 0, 0); +DEFINE_QNODE(slv_sdcc_2, 608, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 33, 0, 0); +DEFINE_QNODE(slv_sdcc_4, 609, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 34, 0, 0); +DEFINE_QNODE(slv_tsif, 575, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 35, 0, 0); +DEFINE_QNODE(slv_blsp_2, 611, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 37, 0, 0); +DEFINE_QNODE(slv_sdcc_1, 606, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 31, 0, 0); +DEFINE_QNODE(slv_blsp_1, 613, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 39, 0, 0); +DEFINE_QNODE(slv_pdm, 615, 0, 1, 4, QCOM_QOS_MODE_FIXED, 0, -1, 41, 0, 0); +DEFINE_QNODE(slv_ahb2phy, 711, 0, 1, 4, QCOM_QOS_MODE_FIXED, 1, -1, 153, 0, 0); +DEFINE_QNODE(slv_hmss, 673, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 20, 0, 0); +DEFINE_QNODE(slv_lpass, 522, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 21, 0, 0); +DEFINE_QNODE(slv_usb3, 583, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 22, 0, 0); +DEFINE_QNODE(slv_snoc_bimc, 10032, 0, 2, 32, QCOM_QOS_MODE_FIXED, 0, -1, 24, 1, 10031); +DEFINE_QNODE(slv_snoc_cnoc, 10036, 0, 1, 16, QCOM_QOS_MODE_FIXED, 0, -1, 25, 1, 10035); +DEFINE_QNODE(slv_imem, 585, 0, 1, 16, 
QCOM_QOS_MODE_FIXED, 0, -1, 26, 0, 0); +DEFINE_QNODE(slv_pimem, 712, 0, 1, 16, QCOM_QOS_MODE_FIXED, 0, -1, 166, 0, 0); +DEFINE_QNODE(slv_snoc_vmem, 713, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 140, 1, 40); +DEFINE_QNODE(slv_snoc_pnoc, 10042, 0, 1, 16, QCOM_QOS_MODE_FIXED, 0, -1, 28, 1, 10041); +DEFINE_QNODE(slv_qdss_stm, 588, 0, 1, 16, QCOM_QOS_MODE_FIXED, 0, -1, 30, 0, 0); +DEFINE_QNODE(slv_pcie_0, 665, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 84, 0, 0); +DEFINE_QNODE(slv_pcie_1, 666, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 85, 0, 0); +DEFINE_QNODE(slv_pcie_2, 714, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 164, 0, 0); +DEFINE_QNODE(slv_srvc_snoc, 587, 0, 1, 16, QCOM_QOS_MODE_FIXED, 1, -1, 29, 0, 0); + +static struct qcom_icc_node *msm8996_snoc_nodes[] = { + &mas_hmss, + &mas_qdss_bam, + &mas_snoc_cfg, + &mas_bimc_snoc_0, + &mas_bimc_snoc_1, + &mas_a0noc_snoc, + &mas_a1noc_snoc, + &mas_a2noc_snoc, + &mas_qdss_etr, + &slv_a0noc_snoc, + &slv_a1noc_snoc, + &slv_a2noc_snoc, + &slv_hmss, + &slv_lpass, + &slv_usb3, + &slv_snoc_bimc, + &slv_snoc_cnoc, + &slv_imem, + &slv_pimem, + &slv_snoc_vmem, + &slv_snoc_pnoc, + &slv_qdss_stm, + &slv_pcie_0, + &slv_pcie_1, + &slv_pcie_2, + &slv_srvc_snoc, +}; + +static struct qcom_icc_desc msm8996_snoc = { + .nodes = msm8996_snoc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_snoc_nodes), +}; + +static struct qcom_icc_node *msm8996_bimc_nodes[] = { + &mas_apps_proc, + &mas_oxili, + &mas_mnoc_bimc, + &mas_snoc_bimc, + &slv_ebi, + &slv_hmss_l3, + &slv_bimc_snoc_0, + &slv_bimc_snoc_1, +}; + +static struct qcom_icc_desc msm8996_bimc = { + .nodes = msm8996_bimc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_bimc_nodes), +}; + +static struct qcom_icc_node *msm8996_pnoc_nodes[] = { + &mas_snoc_pnoc, + &mas_sdcc_1, + &mas_sdcc_2, + &mas_sdcc_4, + &mas_usb_hs, + &mas_blsp_1, + &mas_blsp_2, + &mas_tsif, + &slv_pnoc_a1noc, + &slv_usb_hs, + &slv_sdcc_2, + &slv_sdcc_4, + &slv_tsif, + &slv_blsp_2, + &slv_sdcc_1, + &slv_blsp_1, + &slv_pdm, + &slv_ahb2phy, +}; + +static 
struct qcom_icc_desc msm8996_pnoc = { + .nodes = msm8996_pnoc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_pnoc_nodes), +}; + +static struct qcom_icc_node *msm8996_cnoc_nodes[] = { + &mas_snoc_cnoc, + &mas_qdss_dap, + &slv_cnoc_a1noc, + &slv_clk_ctl, + &slv_tcsr, + &slv_tlmm, + &slv_crypto0_cfg, + &slv_mpm, + &slv_pimem_cfg, + &slv_imem_cfg, + &slv_message_ram, + &slv_bimc_cfg, + &slv_pmic_arb, + &slv_prng, + &slv_dcc_cfg, + &slv_rbcpr_mx, + &slv_qdss_cfg, + &slv_rbcpr_cx, + &slv_cpr_apu_cfg, + &slv_cnoc_mnoc_cfg, + &slv_snoc_cfg, + &slv_snoc_mpu_cfg, + &slv_ebi1_phy_cfg, + &slv_a0noc_cfg, + &slv_pcie_1_cfg, + &slv_pcie_2_cfg, + &slv_pcie_0_cfg, + &slv_pcie20_ahb2phy, + &slv_a0noc_mpu_cfg, + &slv_ufs_cfg, + &slv_a1noc_cfg, + &slv_a1noc_mpu_cfg, + &slv_a2noc_cfg, + &slv_a2noc_mpu_cfg, + &slv_ssc_cfg, + &slv_a0noc_smmu_cfg, + &slv_a1noc_smmu_cfg, + &slv_a2noc_smmu_cfg, + &slv_lpass_smmu_cfg, + &slv_cnoc_mnoc_mmss_cfg, +}; + +static struct qcom_icc_desc msm8996_cnoc = { + .nodes = msm8996_cnoc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_cnoc_nodes), +}; + +static struct qcom_icc_node *msm8996_mnoc_nodes[] = { + &mas_cnoc_mnoc_mmss_cfg, + &mas_cnoc_mnoc_cfg, + &mas_cpp, + &mas_jpeg, + &mas_mdp_p0, + &mas_mdp_p1, + &mas_rotator, + &mas_venus, + &mas_vfe, + &mas_snoc_vmem, + &mas_venus_vmem, + &slv_mmagic_cfg, + &slv_cpr_cfg, + &slv_misc_cfg, + &slv_venus_throttle_cfg, + &slv_venus_cfg, + &slv_vmem_cfg, + &slv_dsa_cfg, + &slv_mnoc_clocks_cfg, + &slv_dsa_mpu_cfg, + &slv_mnoc_mpu_cfg, + &slv_display_cfg, + &slv_display_throttle_cfg, + &slv_camera_cfg, + &slv_camera_throttle_cfg, + &slv_oxili_cfg, + &slv_smmu_mdp_cfg, + &slv_smmu_rot_cfg, + &slv_smmu_venus_cfg, + &slv_smmu_cpp_cfg, + &slv_smmu_jpeg_cfg, + &slv_smmu_vfe_cfg, + &slv_mnoc_bimc, + &slv_vmem, + &slv_srvc_mnoc, +}; + +static struct qcom_icc_desc msm8996_mnoc = { + .nodes = msm8996_mnoc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_mnoc_nodes), +}; + +static struct qcom_icc_node *msm8996_a0noc_nodes[] = { + &mas_pcie_0, + 
&mas_pcie_1, + &mas_pcie_2, +}; + +static struct qcom_icc_desc msm8996_a0noc = { + .nodes = msm8996_a0noc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_a0noc_nodes), +}; + +static struct qcom_icc_node *msm8996_a1noc_nodes[] = { + &mas_cnoc_a1noc, + &mas_crypto_c0, + &mas_pnoc_a1noc, +}; + +static struct qcom_icc_desc msm8996_a1noc = { + .nodes = msm8996_a1noc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_a1noc_nodes), +}; + +static struct qcom_icc_node *msm8996_a2noc_nodes[] = { + &mas_usb3, + &mas_ipa, + &mas_ufs, +}; + +static struct qcom_icc_desc msm8996_a2noc = { + .nodes = msm8996_a2noc_nodes, + .num_nodes = ARRAY_SIZE(msm8996_a2noc_nodes), +}; + + +static int qcom_icc_init(struct icc_node *node) +{ + struct qcom_icc_provider *qp = to_qcom_provider(node->provider); + int ret; + + /* TODO: init qos and priority */ + + clk_set_rate(qp->bus_clk, INT_MAX); + ret = clk_prepare_enable(qp->bus_clk); + if (ret) + pr_info("%s: error enabling bus clk (%d)\n", __func__, ret); + clk_set_rate(qp->bus_a_clk, INT_MAX); + ret = clk_prepare_enable(qp->bus_a_clk); + if (ret) + pr_info("%s: error enabling bus_a clk (%d)\n", __func__, ret); + + return 0; +} + +static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw, + u32 *agg_avg, u32 *agg_peak) +{ + /* sum(averages) and max(peaks) */ + *agg_avg = node->avg_bw + avg_bw; + *agg_peak = max(node->peak_bw, peak_bw); + + return 0; +} + +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst, + u32 avg, u32 peak) +{ + struct qcom_icc_provider *qp; + struct qcom_icc_node *qn; + struct icc_node *node; + struct icc_provider *provider; + u64 avg_bw = 0; + u64 peak_bw = 0; + u64 rate = 0; + int ret = 0; + + if (!src) + node = dst; + else + node = src; + + qn = node->data; + provider = node->provider; + qp = to_qcom_provider(node->provider); + + /* convert from kbps to bps */ + avg_bw = avg * 1000ULL; + peak_bw = peak * 1000ULL; + + /* set bandwidth */ + if (qn->ap_owned) { + /* TODO: set QoS */ + } else { + /* send 
message to the RPM processor */ + + if (qn->mas_rpm_id != -1) { + ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, + RPM_BUS_MASTER_REQ, + qn->mas_rpm_id, + avg_bw); + } + + if (qn->slv_rpm_id != -1) { + ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, + RPM_BUS_SLAVE_REQ, + qn->slv_rpm_id, + avg_bw); + } + } + + rate = max(avg_bw, peak_bw); + + do_div(rate, qn->buswidth); + + if (qn->rate != rate) { + ret = clk_set_rate(qp->bus_clk, rate); + if (ret) { + pr_err("set clk rate %lld error %d\n", rate, ret); + return ret; + } + + ret = clk_set_rate(qp->bus_a_clk, rate); + if (ret) { + pr_err("set clk rate %lld error %d\n", rate, ret); + return ret; + } + + qn->rate = rate; + } + + return ret; +} + +static int qnoc_probe(struct platform_device *pdev) +{ + const struct qcom_icc_desc *desc; + struct qcom_icc_node **qnodes; + struct qcom_icc_provider *qp; + struct resource *res; + struct icc_provider *provider; + size_t num_nodes, i; + int ret; + + /* wait for RPM */ + if (!qcom_icc_rpm_smd_available()) + return -EPROBE_DEFER; + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + qnodes = desc->nodes; + num_nodes = desc->num_nodes; + + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); + if (!qp) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + qp->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qp->base)) + return PTR_ERR(qp->base); + + qp->bus_clk = devm_clk_get(&pdev->dev, "bus_clk"); + if (IS_ERR(qp->bus_clk)) + return PTR_ERR(qp->bus_clk); + + qp->bus_a_clk = devm_clk_get(&pdev->dev, "bus_a_clk"); + if (IS_ERR(qp->bus_a_clk)) + return PTR_ERR(qp->bus_a_clk); + + provider = &qp->provider; + provider->dev = &pdev->dev; + provider->set = &qcom_icc_set; + provider->aggregate = &qcom_icc_aggregate; + INIT_LIST_HEAD(&provider->nodes); + provider->data = qp; + + ret = icc_provider_add(provider); + if (ret) { + dev_err(&pdev->dev, "error adding interconnect provider\n"); + return ret; + } 
+ + for (i = 0; i < num_nodes; i++) { + struct icc_node *node; + int ret; + size_t j; + + node = icc_node_create(qnodes[i]->id); + if (IS_ERR(node)) { + ret = PTR_ERR(node); + goto err; + } + node->name = qnodes[i]->name; + node->data = qnodes[i]; + icc_node_add(node, provider); + + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, + qnodes[i]->name, node->id); + /* populate links */ + for (j = 0; j < qnodes[i]->num_links; j++) + if (qnodes[i]->links[j]) + icc_link_create(node, qnodes[i]->links[j]); + + ret = qcom_icc_init(node); + if (ret) + dev_err(&pdev->dev, "%s init error (%d)\n", node->name, + ret); + } + + platform_set_drvdata(pdev, provider); + + return ret; +err: + icc_provider_del(provider); + return ret; +} + +static int qnoc_remove(struct platform_device *pdev) +{ + struct icc_provider *provider = platform_get_drvdata(pdev); + + icc_provider_del(provider); + + return 0; +} + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,msm8996-bimc", .data = &msm8996_bimc }, + { .compatible = "qcom,msm8996-cnoc", .data = &msm8996_cnoc }, + { .compatible = "qcom,msm8996-snoc", .data = &msm8996_snoc }, + { .compatible = "qcom,msm8996-a0noc", .data = &msm8996_a0noc }, + { .compatible = "qcom,msm8996-a1noc", .data = &msm8996_a1noc }, + { .compatible = "qcom,msm8996-a2noc", .data = &msm8996_a2noc }, + { .compatible = "qcom,msm8996-mmnoc", .data = &msm8996_mnoc }, + { .compatible = "qcom,msm8996-pnoc", .data = &msm8996_pnoc }, + { }, +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-msm8996", + .of_match_table = qnoc_of_match, + }, +}; +module_platform_driver(qnoc_driver); +MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm msm8996 NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/smd-rpm.c b/drivers/interconnect/qcom/smd-rpm.c new file mode 100644 
index 000000000000..0cf772f51642 --- /dev/null +++ b/drivers/interconnect/qcom/smd-rpm.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RPM over SMD communication wrapper for interconects + * + * Copyright (C) 2018 Linaro Ltd + * Author: Georgi Djakov <georgi.djakov@linaro.org> + */ + +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/soc/qcom/smd-rpm.h> +#include "smd-rpm.h" + +#define RPM_KEY_BW 0x00007762 + +static struct qcom_icc_rpm { + struct qcom_smd_rpm *rpm; +} icc_rpm_smd; + +struct icc_rpm_smd_req { + __le32 key; + __le32 nbytes; + __le32 value; +}; + +bool qcom_icc_rpm_smd_available(void) +{ + if (!icc_rpm_smd.rpm) + return false; + + return true; +} + +int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val) +{ + struct icc_rpm_smd_req req = { + .key = cpu_to_le32(RPM_KEY_BW), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(val), + }; + + return qcom_rpm_smd_write(icc_rpm_smd.rpm, ctx, rsc_type, id, &req, + sizeof(req)); +} +EXPORT_SYMBOL(qcom_icc_rpm_smd_send); + +static int qcom_icc_rpm_smd_probe(struct platform_device *pdev) +{ + icc_rpm_smd.rpm = dev_get_drvdata(pdev->dev.parent); + if (!icc_rpm_smd.rpm) { + dev_err(&pdev->dev, "unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + return 0; +} + +static const struct of_device_id qcom_icc_rpm_smd_dt_match[] = { + { .compatible = "qcom,interconnect-smd-rpm", }, + { }, +}; + +MODULE_DEVICE_TABLE(of, qcom_interconnect_rpm_smd_dt_match); + +static struct platform_driver qcom_interconnect_rpm_smd_driver = { + .driver = { + .name = "qcom-interconnect-smd-rpm", + .of_match_table = qcom_icc_rpm_smd_dt_match, + }, + .probe = qcom_icc_rpm_smd_probe, +}; + +static int __init rpm_smd_interconnect_init(void) +{ + return platform_driver_register(&qcom_interconnect_rpm_smd_driver); +} +subsys_initcall(rpm_smd_interconnect_init); + +static void 
__exit rpm_smd_interconnect_exit(void) +{ + platform_driver_unregister(&qcom_interconnect_rpm_smd_driver); +} +module_exit(rpm_smd_interconnect_exit); + +MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm SMD RPM interconnect driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/interconnect/qcom/smd-rpm.h b/drivers/interconnect/qcom/smd-rpm.h new file mode 100644 index 000000000000..0f4a3da31cf6 --- /dev/null +++ b/drivers/interconnect/qcom/smd-rpm.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, Linaro Ltd. + * Author: Georgi Djakov <georgi.djakov@linaro.org> + */ + +#ifndef __LINUX_INTERCONNECT_QCOM_RPM_H +#define __LINUX_INTERCONNECT_QCOM_RPM_H + +#include <linux/soc/qcom/smd-rpm.h> + +bool qcom_icc_rpm_smd_available(void); +int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val); + +#endif diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 41eef376eb2d..3e5e60a8ac62 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -14,6 +14,7 @@ */ #include <linux/clk.h> #include <linux/init.h> +#include <linux/interconnect.h> #include <linux/ioctl.h> #include <linux/list.h> #include <linux/module.h> @@ -171,6 +172,22 @@ static int venus_probe(struct platform_device *pdev) if (IS_ERR(core->base)) return PTR_ERR(core->base); + core->path = of_icc_get(dev, "video"); + if (IS_ERR(core->path)) + return PTR_ERR(core->path); + + core->path_mdp0 = of_icc_get(dev, "mdp0"); + if (IS_ERR(core->path_mdp0)) + return PTR_ERR(core->path_mdp0); + + core->path_mdp1 = of_icc_get(dev, "mdp1"); + if (IS_ERR(core->path_mdp1)) + return PTR_ERR(core->path_mdp1); + + core->path_gpu = of_icc_get(dev, "gpu"); + if (IS_ERR(core->path_gpu)) + return PTR_ERR(core->path_gpu); + core->irq = platform_get_irq(pdev, 0); if (core->irq < 0) return core->irq; @@ -275,6 +292,10 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev) struct venus_core *core = dev_get_drvdata(dev); int ret; + icc_set(core->path, 0, 0); 
+ icc_set(core->path_mdp0, 0, 1000); + icc_set(core->path_mdp1, 0, 1000); + ret = hfi_core_suspend(core); venus_clks_disable(core); @@ -287,6 +304,11 @@ static __maybe_unused int venus_runtime_resume(struct device *dev) struct venus_core *core = dev_get_drvdata(dev); int ret; + icc_set(core->path, 677600, 1331000); + icc_set(core->path_mdp0, 0, 6400000); + icc_set(core->path_mdp1, 0, 6400000); + icc_set(core->path_gpu, 1066000, 4264000); + ret = venus_clks_enable(core); if (ret) return ret; diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 0360d295f4c8..d97f1fb1fd75 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -120,6 +120,10 @@ struct venus_core { void *priv; const struct hfi_ops *ops; struct delayed_work work; + struct icc_path *path; + struct icc_path *path_mdp0; + struct icc_path *path_mdp1; + struct icc_path *path_gpu; }; struct vdec_controls { diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 49bbd1861d3a..4867edba8b6a 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -767,6 +767,8 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) if (ret) goto deinit_sess; + /* TODO: determine parameters and set interconnect bandwidth */ + ret = venus_helper_vb2_start_streaming(inst); if (ret) goto deinit_sess; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index c283291db705..b2d875afae5f 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -20,6 +20,7 @@ #include <linux/mmc/mmc.h> #include <linux/pm_runtime.h> #include <linux/slab.h> +#include <linux/interconnect.h> #include <linux/iopoll.h> #include "sdhci-pltfm.h" @@ -148,6 +149,7 @@ struct sdhci_msm_host { u32 curr_io_level; wait_queue_head_t pwr_irq_wait; bool pwr_irq_flag; + struct icc_path *path; }; static unsigned int 
msm_get_clock_rate_for_bus_mode(struct sdhci_host *host, @@ -1313,6 +1315,28 @@ static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_msm_check_power_status(host, req_type); } +static int sdhci_msm_set_icc(struct sdhci_msm_host *msm_host, unsigned int rate) +{ + + if (IS_ERR(msm_host->path)) { + WARN_ON(1); + return 0; + } + + if (rate == INT_MAX) + icc_set(msm_host->path, 2048000, 4096000); + else + icc_set(msm_host->path, 0, 0); + + return 0; +} + +static void sdhci_msm_deinit_icc(struct sdhci_msm_host *msm_host) +{ + if (!IS_ERR(msm_host->path)) + icc_put(msm_host->path); +} + static const struct of_device_id sdhci_msm_dt_match[] = { { .compatible = "qcom,sdhci-msm-v4" }, {}, @@ -1420,6 +1444,13 @@ static int sdhci_msm_probe(struct platform_device *pdev) if (ret) goto bus_clk_disable; + msm_host->path = of_icc_get(&pdev->dev, "ddr"); + if (IS_ERR(msm_host->path)) { + ret = PTR_ERR(msm_host->path); + goto clk_disable; + } + sdhci_msm_set_icc(msm_host, INT_MAX); + /* * xo clock is needed for FLL feature of cm_dll. * In case if xo clock is not mentioned in DT, warn and proceed. 
@@ -1567,6 +1598,8 @@ static int sdhci_msm_remove(struct platform_device *pdev) clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), msm_host->bulk_clks); + sdhci_msm_deinit_icc(msm_host); + if (!IS_ERR(msm_host->bus_clk)) clk_disable_unprepare(msm_host->bus_clk); sdhci_pltfm_free(pdev); diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 75896b49741e..585f6e3a88ca 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c @@ -11,6 +11,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> +#include <linux/interconnect.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -110,6 +111,7 @@ struct qcom_pcie_resources_2_3_2 { struct clk *cfg_clk; struct clk *pipe_clk; struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; + struct icc_path *path; }; struct qcom_pcie_resources_2_4_0 { @@ -522,6 +524,10 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) if (ret) return ret; + res->path = of_icc_get(dev, "ddr"); + if (IS_ERR(res->path)) + return PTR_ERR(res->path); + res->aux_clk = devm_clk_get(dev, "aux"); if (IS_ERR(res->aux_clk)) return PTR_ERR(res->aux_clk); @@ -552,6 +558,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) clk_disable_unprepare(res->aux_clk); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + icc_put(res->path); } static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) @@ -599,6 +606,8 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) goto err_slave_clk; } + icc_set(res->path, 500, 800); + /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); val &= ~BIT(0); diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c index 2d0c70b5589f..81ff6f2b26b6 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c @@ -15,6 +15,7 @@ #include <linux/reset.h> #include <linux/extcon.h> 
#include <linux/notifier.h> +#include <linux/interconnect.h> #define ULPI_PWR_CLK_MNG_REG 0x88 # define ULPI_PWR_OTG_COMP_DISABLE BIT(0) @@ -39,6 +40,7 @@ struct qcom_usb_hs_phy { struct reset_control *reset; struct ulpi_seq *init_seq; struct extcon_dev *vbus_edev; + struct icc_path *path; struct notifier_block vbus_notify; }; @@ -154,6 +156,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy) goto err_ulpi; } + icc_set(uphy->path, 80000, 6000); + if (uphy->vbus_edev) { state = extcon_get_state(uphy->vbus_edev, EXTCON_USB); /* setup initial state */ @@ -181,6 +185,8 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy) { struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy); + icc_set(uphy->path, 0, 0); + regulator_disable(uphy->v3p3); regulator_disable(uphy->v1p8); clk_disable_unprepare(uphy->sleep_clk); @@ -249,6 +255,10 @@ static int qcom_usb_hs_phy_probe(struct ulpi *ulpi) uphy->reset = NULL; } + uphy->path = of_icc_get(&ulpi->dev, "ddr"); + if (IS_ERR(uphy->path)) + return PTR_ERR(uphy->path); + uphy->phy = devm_phy_create(&ulpi->dev, ulpi->dev.of_node, &qcom_usb_hs_phy_ops); if (IS_ERR(uphy->phy)) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 479b36d0e465..3f41f051ac7a 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -16,6 +16,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> +#include <linux/interconnect.h> #include "ufshcd.h" #include "ufshcd-pltfrm.h" @@ -913,6 +914,7 @@ static int ufs_qcom_bus_register(struct ufs_qcom_host *host) } #endif /* CONFIG_MSM_BUS_SCALING */ + static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) { if (host->dev_ref_clk_ctrl_mmio && @@ -1713,6 +1715,19 @@ static int ufs_qcom_probe(struct platform_device *pdev) { int err; struct device *dev = &pdev->dev; + struct icc_path *path = of_icc_get(dev, "ddr"); + struct icc_path *path_cfg = of_icc_get(dev, "cfg"); + + if (IS_ERR(path)) + return PTR_ERR(path); + 
+ if (IS_ERR(path_cfg)) { + icc_put(path); + return PTR_ERR(path_cfg); + } + + icc_set(path, 4096000, 0); + icc_set(path_cfg, 1000, 0); /* Perform generic probe */ err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); |