author	Yin, Fengwei <fengwei.yin@linaro.org>	2015-05-12 21:30:30 +0800
committer	Yin, Fengwei <fengwei.yin@linaro.org>	2015-06-11 11:17:38 +0800
commit	f61498803e2198c13fe948077f2d5eafe800a27f (patch)
tree	1fef627be2454004c78c17e5214f68db54dd83e7
parent	0d37c3b430a56f5e0ce03630a7171382433178e2 (diff)
Port the removed DMA ops from msm-3.10 so the workaround in PIL for wcnss can be dropped.
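
A "removed" DMA pool is a reserved-memory region that carries the no-map
property and the "removed-dma-pool" compatible string. The kernel never
maps such a region, so the generic CMA allocator (which hands out struct
pages) cannot manage it; these ops instead track the range with a plain
pfn bitmap and ioremap() each allocation on demand.

A minimal consumer sketch (hypothetical; it assumes the device node
carries a memory-region phandle to such a pool and the driver has called
of_reserved_mem_device_init() so the removed DMA ops are installed):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	dma_addr_t phys;
	void *va;

	/* served by removed_alloc(): bitmap reservation + ioremap() */
	va = dma_alloc_coherent(dev, SZ_1M, &phys, GFP_KERNEL);
	if (!va)
		return -ENOMEM;
	/* ... hand phys to the remote processor ... */
	dma_free_coherent(dev, SZ_1M, va, phys);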
Signed-off-by: Yin, Fengwei <fengwei.yin@linaro.org>
-rw-r--r--	drivers/base/Makefile	2
-rw-r--r--	drivers/base/dma-contiguous.c	99
-rw-r--r--	drivers/base/dma-removed.c	156
-rw-r--r--	include/linux/cma.h	2
-rw-r--r--	include/linux/dma-contiguous.h	30
-rw-r--r--	include/linux/dma-removed.h	16
-rw-r--r--	mm/cma.c	63
7 files changed, 365 insertions, 3 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 527d291706e8..29c4b2d2f7e8 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
+obj-$(CONFIG_DMA_CMA) += dma-contiguous.o dma-removed.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 950fff9ce453..6fde3aff5847 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -22,10 +22,12 @@
#include <asm/page.h>
#include <asm/dma-contiguous.h>
+#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
+#include <linux/dma-removed.h>
#include <linux/cma.h>
#ifdef CONFIG_CMA_SIZE_MBYTES
@@ -212,6 +214,21 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
return cma_release(dev_get_cma_area(dev), pages, count);
}
+unsigned long dma_alloc_from_contiguous_nomap(struct device *dev, int count,
+ unsigned int align)
+{
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ return cma_alloc_nomap(dev_get_cma_area(dev), count, align);
+}
+
+bool dma_release_from_contiguous_nomap(struct device *dev, unsigned long pfn,
+ int count)
+{
+ return cma_release_nomap(dev_get_cma_area(dev), pfn, count);
+}
+
/*
* Support for reserved memory regions defined in device tree
*/
@@ -277,4 +294,86 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+
+static int rmem_nomap_cma_device_init(struct reserved_mem *rmem,
+				      struct device *dev)
+{
+ struct cma *cma = (struct cma *)rmem->priv;
+ int bitmap_size =
+ BITS_TO_LONGS(cma->count >> cma->order_per_bit) * sizeof(long);
+
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!cma->bitmap) {
+		pr_err("no memory for cma bitmap\n");
+		/* cma points into the static nomap_cma[] array; don't free it */
+		return -ENOMEM;
+	}
+
+ mutex_init(&cma->lock);
+
+ dev_set_cma_area(dev, rmem->priv);
+ dev->archdata.dma_ops = &removed_dma_ops;
+ return 0;
+}
+
+static void rmem_nomap_cma_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct cma *cma = (struct cma *)rmem->priv;
+
+ kfree(cma->bitmap);
+ dev->archdata.dma_ops = NULL;
+ dev_set_cma_area(dev, NULL);
+}
+
+static const struct reserved_mem_ops rmem_nomap_cma_ops = {
+ .device_init = rmem_nomap_cma_device_init,
+ .device_release = rmem_nomap_cma_device_release,
+};
+
+#define NOMAP_CMA_MAX_NUM 16
+static struct cma nomap_cma[NOMAP_CMA_MAX_NUM];
+static int next_nomap_cma;
+
+static int __init rmem_nomap_cma_setup(struct reserved_mem *rmem)
+{
+ phys_addr_t mask = PAGE_SIZE - 1;
+ unsigned long node = rmem->fdt_node;
+ struct cma *cma;
+
+ if (!of_get_flat_dt_prop(node, "no-map", NULL))
+ return -EINVAL;
+
+ if ((rmem->base & mask) || (rmem->size & mask)) {
+ pr_err("Reserved memory: incorrect alignment of CMA region\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * A nomap range has no struct pages backing it, so it cannot take
+	 * the generic CMA activation path; track it here with a statically
+	 * allocated struct cma instead (sl[au]b is not up yet at this point
+	 * in boot).  This early-init code runs single-threaded, so no lock
+	 * is needed.
+	 */
+	if (next_nomap_cma >= NOMAP_CMA_MAX_NUM) {
+		pr_err("Reserved memory: too many nomap CMA regions\n");
+		return -ENOMEM;
+	}
+	cma = &nomap_cma[next_nomap_cma++];
+
+ cma->base_pfn = PFN_DOWN(rmem->base);
+ cma->count = rmem->size >> PAGE_SHIFT;
+ cma->order_per_bit = 0;
+	/*
+	 * Allocating the cma bitmap is deferred to .device_init, which
+	 * runs once sl[au]b is available.
+	 */
+
+ rmem->ops = &rmem_nomap_cma_ops;
+ rmem->priv = cma;
+
+	pr_info("Reserved memory: created CMA memory pool at %pa, size %lu MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(nomap_cma, "removed-dma-pool", rmem_nomap_cma_setup);
+
#endif
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
new file mode 100644
index 000000000000..9287c4458478
--- /dev/null
+++ b/drivers/base/dma-removed.c
@@ -0,0 +1,156 @@
+/*
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+
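+/*
+ * Dummy VA cookie handed back when the buffer gets no kernel mapping
+ * (DMA_ATTR_NO_KERNEL_MAPPING); it must never be dereferenced.
+ */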
+#define NO_KERNEL_MAPPING_DUMMY 0x2222
+
+void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
+{
+	unsigned long pfn;
+	unsigned long order;
+	void *addr = NULL;
+
+	size = PAGE_ALIGN(size);
+	/* align the allocation to its own size, expressed as a page order */
+	order = get_order(size);
+
+	if (!(gfp & __GFP_WAIT))
+		return NULL;
+
+	pfn = dma_alloc_from_contiguous_nomap(dev, size >> PAGE_SHIFT, order);
+
+ if (pfn) {
+		/*
+		 * Not supported on Stan's branch yet: msm-3.10 skipped the
+		 * ioremap() and zeroing below when both no_kernel_mapping
+		 * and skip_zeroing were requested:
+		 *
+		 *	if (no_kernel_mapping && skip_zeroing) {
+		 *		*handle = __pfn_to_phys(pfn);
+		 *		return (void *)NO_KERNEL_MAPPING_DUMMY;
+		 *	}
+		 */
+
+ addr = ioremap(__pfn_to_phys(pfn), size);
+ if (WARN_ON(!addr)) {
+			dma_release_from_contiguous_nomap(dev, pfn,
+							  size >> PAGE_SHIFT);
+ } else {
+ memset_io(addr, 0, size);
+ *handle = __pfn_to_phys(pfn);
+ }
+ }
+
+ return addr;
+}
+
+int removed_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ return -ENXIO;
+}
+
+void removed_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+ attrs);
+
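+	/*
+	 * With DMA_ATTR_NO_KERNEL_MAPPING, cpu_addr is only the dummy
+	 * cookie, so there is nothing to iounmap().
+	 */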
+ if (!no_kernel_mapping)
+ iounmap(cpu_addr);
+ dma_release_from_contiguous_nomap(dev, __phys_to_pfn(handle),
+ size >> PAGE_SHIFT);
+}
+
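+/*
+ * Buffers in a removed pool cannot be mapped for streaming DMA and need
+ * no cache maintenance, so map_page/map_sg fail and the unmap/sync
+ * callbacks are no-ops.
+ */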
+static dma_addr_t removed_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return ~(dma_addr_t)0;
+}
+
+static void removed_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+}
+
+static int removed_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	return 0;
+}
+
+static void removed_unmap_sg(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+}
+
+static void removed_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction dir)
+{
+}
+
+static void removed_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction dir)
+{
+}
+
+static void removed_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+}
+
+static void removed_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+}
+
+struct dma_map_ops removed_dma_ops = {
+ .alloc = removed_alloc,
+ .free = removed_free,
+ .mmap = removed_mmap,
+ .map_page = removed_map_page,
+ .unmap_page = removed_unmap_page,
+ .map_sg = removed_map_sg,
+ .unmap_sg = removed_unmap_sg,
+ .sync_single_for_cpu = removed_sync_single_for_cpu,
+ .sync_single_for_device = removed_sync_single_for_device,
+ .sync_sg_for_cpu = removed_sync_sg_for_cpu,
+ .sync_sg_for_device = removed_sync_sg_for_device,
+};
+EXPORT_SYMBOL(removed_dma_ops);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9384ba66e975..658214d9e2c8 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -28,4 +28,6 @@ extern int cma_init_reserved_mem(phys_addr_t base,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
+extern unsigned long cma_alloc_nomap(struct cma *cma, int count,
+				     unsigned int align);
+extern bool cma_release_nomap(struct cma *cma, unsigned long pfn, int count);
#endif
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 569bbd039896..4c83b6aaf30b 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -54,8 +54,16 @@
#ifdef __KERNEL__
#include <linux/device.h>
-
-struct cma;
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+/* Mirror of the definition in mm/cma.c; the two must be kept in sync. */
+struct cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+ unsigned int order_per_bit; /* Order of pages represented by one bit */
+ struct mutex lock;
+};
struct page;
#ifdef CONFIG_DMA_CMA
@@ -116,6 +124,10 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
+unsigned long dma_alloc_from_contiguous_nomap(struct device *dev, int count,
+					      unsigned int align);
+bool dma_release_from_contiguous_nomap(struct device *dev, unsigned long pfn,
+ int count);
#else
static inline struct cma *dev_get_cma_area(struct device *dev)
@@ -157,6 +169,20 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
return false;
}
+static inline
+unsigned long dma_alloc_from_contiguous_nomap(struct device *dev, int count,
+					      unsigned int align)
+{
+	return 0;
+}
+
+static inline
+bool dma_release_from_contiguous_nomap(struct device *dev, unsigned long pfn,
+ int count)
+{
+ return false;
+}
+
#endif
#endif
diff --git a/include/linux/dma-removed.h b/include/linux/dma-removed.h
new file mode 100644
index 000000000000..3a0f1a4b45e7
--- /dev/null
+++ b/include/linux/dma-removed.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-mapping.h>
+
+extern struct dma_map_ops removed_dma_ops;
diff --git a/mm/cma.c b/mm/cma.c
index 68ecb7a42983..b3f2368a0dd9 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -450,3 +450,66 @@ bool cma_release(struct cma *cma, struct page *pages, int count)
return true;
}
+
+/*
+ * A nomap CMA range has no struct pages backing it, so allocations are
+ * tracked by pfn rather than by page.
+ */
+unsigned long cma_alloc_nomap(struct cma *cma, int count, unsigned int align)
+{
+ unsigned long mask, offset, pfn = 0, start = 0;
+ unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+
+ if (!cma || !cma->count)
+ return 0;
+
+	pr_debug("%s(cma %p, count %d, align %u)\n", __func__, (void *)cma,
+		 count, align);
+
+ if (!count)
+ return 0;
+
+ mask = cma_bitmap_aligned_mask(cma, align);
+ offset = cma_bitmap_aligned_offset(cma, align);
+ bitmap_maxno = cma_bitmap_maxno(cma);
+ bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+
+	mutex_lock(&cma->lock);
+	bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, bitmap_maxno,
+			start, bitmap_count, mask, offset);
+	if (bitmap_no < bitmap_maxno) {
+		/*
+		 * Unlike cma_alloc(), there is no migration step that can
+		 * fail, so marking the bits reserves the range for good.
+		 */
+		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
+		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
+	}
+	mutex_unlock(&cma->lock);
+
+	pr_debug("%s(): returned pfn %lu\n", __func__, pfn);
+ return pfn;
+}
+
+bool cma_release_nomap(struct cma *cma, unsigned long pfn, int count)
+{
+ if (!cma || (pfn == 0))
+ return false;
+
+	pr_debug("%s(pfn %lu)\n", __func__, pfn);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+ cma_clear_bitmap(cma, pfn, count);
+
+ return true;
+}