path: root/arch/tile/kernel/pci-dma.c
author    Chris Metcalf <cmetcalf@tilera.com>    2012-06-15 15:23:06 -0400
committer Chris Metcalf <cmetcalf@tilera.com>    2012-07-18 16:40:17 -0400
commit    41bb38fc5398ae878c799647f3c4b25374029afb (patch)
tree      5d7e01bd4176db1241b801f83cf92f32231b8e8b /arch/tile/kernel/pci-dma.c
parent    eef015c8aa74451f848307fe5f65485070533bbb (diff)
tile pci: enable IOMMU to support DMA for legacy devices
This change uses the TRIO IOMMU to map the PCI DMA space and physical
memory at different addresses. We also now use the dma_mapping_ops to
provide support for non-PCI DMA, PCIe DMA (64-bit) and legacy PCI DMA
(32-bit).

We use the kernel's software I/O TLB framework (i.e. bounce buffers)
for the legacy 32-bit PCI device support since there are a limited
number of TLB entries in the IOMMU and it is non-trivial to handle
indexing, searching, matching, etc. For 32-bit devices the performance
impact of bounce buffers should not be a concern.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
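For context, a hedged sketch of the generic API surface these per-device
ops sit behind (hypothetical driver code, not part of this patch; the
function name example_dma_setup is illustrative): dma_alloc_coherent()
and friends now dispatch through whichever dma_map_ops the device carries.

    /*
     * Hypothetical driver snippet, for illustration only: the generic
     * DMA API calls below dispatch through the per-device dma_map_ops
     * that this patch installs (tile, gx_pci, or gx_legacy_pci ops).
     */
    #include <linux/dma-mapping.h>

    static int example_dma_setup(struct device *dev)
    {
    	dma_addr_t handle;
    	void *buf;

    	/* Lands in tile_dma_alloc_coherent() or a PCI variant. */
    	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;

    	/* ... program the device with "handle" ... */

    	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
    	return 0;
    }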
Diffstat (limited to 'arch/tile/kernel/pci-dma.c')
-rw-r--r--    arch/tile/kernel/pci-dma.c    369
1 file changed, 298 insertions, 71 deletions
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index edd856a000c5..b9fe80ec1089 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
@@ -31,10 +32,9 @@
#define PAGE_HOME_DMA PAGE_HOME_HASH
#endif
-void *dma_alloc_coherent(struct device *dev,
- size_t size,
- dma_addr_t *dma_handle,
- gfp_t gfp)
+static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
int node = dev_to_node(dev);
@@ -68,19 +68,19 @@ void *dma_alloc_coherent(struct device *dev,
}
*dma_handle = addr;
+
return page_address(pg);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
- * Free memory that was allocated with dma_alloc_coherent.
+ * Free memory that was allocated with tile_dma_alloc_coherent.
*/
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void tile_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
homecache_free_pages((unsigned long)vaddr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
* The map routines "map" the specified address range for DMA
@@ -199,38 +199,182 @@ static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
}
}
+static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
-/*
- * dma_map_single can be passed any memory address, and there appear
- * to be no alignment constraints.
- *
- * There is a chance that the start of the buffer will share a cache
- * line with some other data that has been touched in the meantime.
- */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
+ BUG_ON(!valid_dma_direction(direction));
+
+ WARN_ON(nents == 0 || sglist->length == 0);
+
+ for_each_sg(sglist, sg, nents, i) {
+ sg->dma_address = sg_phys(sg);
+ __dma_prep_pa_range(sg->dma_address, sg->length, direction);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
+ }
+
+ return nents;
+}
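(As an aside, a hedged usage sketch of the routine above from a driver's
side; names are illustrative and assume <linux/scatterlist.h> and
<linux/dma-mapping.h>:

    /* Hypothetical caller: dma_map_sg() ends up in tile_dma_map_sg(). */
    static int example_map_sg(struct device *dev, struct scatterlist *sgl,
    			  int nents)
    {
    	struct scatterlist *sg;
    	int i, count;

    	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    	if (count == 0)
    		return -EIO;

    	for_each_sg(sgl, sg, count, i) {
    		/* Hand sg_dma_address(sg) / sg_dma_len(sg) to the HW. */
    	}

    	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
    	return 0;
    }
)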
+
+static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+ for_each_sg(sglist, sg, nents, i) {
+ sg->dma_address = sg_phys(sg);
+ __dma_complete_pa_range(sg->dma_address, sg->length,
+ direction);
+ }
+}
+
+static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- dma_addr_t dma_addr = __pa(ptr);
+ BUG_ON(!valid_dma_direction(direction));
+
+ BUG_ON(offset + size > PAGE_SIZE);
+ __dma_prep_page(page, offset, size, direction);
+
+ return page_to_pa(page) + offset;
+}
+
+static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ BUG_ON(!valid_dma_direction(direction));
+
+ __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
+ dma_address & PAGE_OFFSET, size, direction);
+}
+
+static void tile_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+{
BUG_ON(!valid_dma_direction(direction));
- WARN_ON(size == 0);
- __dma_prep_pa_range(dma_addr, size, direction);
+ __dma_complete_pa_range(dma_handle, size, direction);
+}
- return dma_addr;
+static void tile_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ __dma_prep_pa_range(dma_handle, size, direction);
}
-EXPORT_SYMBOL(dma_map_single);
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+static void tile_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
+ struct scatterlist *sg;
+ int i;
+
BUG_ON(!valid_dma_direction(direction));
- __dma_complete_pa_range(dma_addr, size, direction);
+ WARN_ON(nelems == 0 || sglist->length == 0);
+
+ for_each_sg(sglist, sg, nelems, i) {
+ dma_sync_single_for_cpu(dev, sg->dma_address,
+ sg_dma_len(sg), direction);
+ }
}
-EXPORT_SYMBOL(dma_unmap_single);
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static void tile_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(nelems == 0 || sglist->length == 0);
+
+ for_each_sg(sglist, sg, nelems, i) {
+ dma_sync_single_for_device(dev, sg->dma_address,
+ sg_dma_len(sg), direction);
+ }
+}
+
+static inline int
+tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int
+tile_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static struct dma_map_ops tile_default_dma_map_ops = {
+ .alloc = tile_dma_alloc_coherent,
+ .free = tile_dma_free_coherent,
+ .map_page = tile_dma_map_page,
+ .unmap_page = tile_dma_unmap_page,
+ .map_sg = tile_dma_map_sg,
+ .unmap_sg = tile_dma_unmap_sg,
+ .sync_single_for_cpu = tile_dma_sync_single_for_cpu,
+ .sync_single_for_device = tile_dma_sync_single_for_device,
+ .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = tile_dma_sync_sg_for_device,
+ .mapping_error = tile_dma_mapping_error,
+ .dma_supported = tile_dma_supported
+};
+
+struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+EXPORT_SYMBOL(tile_dma_map_ops);
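A minimal sketch of the dispatch that consults this pointer; the real
tile version lives in arch/tile/include/asm/dma-mapping.h, and the exact
field names here are assumptions following the common per-arch pattern:

    /* Sketch only: per-device ops if the bus code installed any,
     * otherwise the arch-wide default defined above. */
    static inline struct dma_map_ops *example_get_dma_ops(struct device *dev)
    {
    	if (dev && dev->archdata.dma_ops)
    		return dev->archdata.dma_ops;
    	return tile_dma_map_ops;
    }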
+
+/* Generic PCI DMA mapping functions */
+
+static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ int node = dev_to_node(dev);
+ int order = get_order(size);
+ struct page *pg;
+ dma_addr_t addr;
+
+ gfp |= __GFP_ZERO;
+
+ pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
+ if (pg == NULL)
+ return NULL;
+
+ addr = page_to_phys(pg);
+
+ *dma_handle = phys_to_dma(dev, addr);
+
+ return page_address(pg);
+}
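The phys_to_dma()/dma_to_phys() calls used throughout the PCI ops apply
the fixed offset between physical memory and the TRIO-mapped PCI DMA
window. A simplified sketch under that assumption; the offset constant
EXAMPLE_PCI_DMA_OFFSET is a placeholder name, not the tile header's:

    /* Sketch of the address translation assumed above: the PCI DMA
     * window sits at a constant offset from physical memory, so the
     * conversion reduces to an offset add/subtract. */
    static inline dma_addr_t example_phys_to_dma(struct device *dev,
    					     phys_addr_t pa)
    {
    	return pa + EXAMPLE_PCI_DMA_OFFSET;
    }

    static inline phys_addr_t example_dma_to_phys(struct device *dev,
    					      dma_addr_t da)
    {
    	return da - EXAMPLE_PCI_DMA_OFFSET;
    }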
+
+/*
+ * Free memory that was allocated with tile_pci_dma_alloc_coherent.
+ */
+static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ homecache_free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
@@ -242,14 +386,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);
__dma_prep_pa_range(sg->dma_address, sg->length, direction);
+
+ sg->dma_address = phys_to_dma(dev, sg->dma_address);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
}
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static void tile_pci_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
@@ -261,46 +411,60 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
direction);
}
}
-EXPORT_SYMBOL(dma_unmap_sg);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
BUG_ON(!valid_dma_direction(direction));
BUG_ON(offset + size > PAGE_SIZE);
__dma_prep_page(page, offset, size, direction);
- return page_to_pa(page) + offset;
+
+ return phys_to_dma(dev, page_to_pa(page) + offset);
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
BUG_ON(!valid_dma_direction(direction));
+
+ dma_address = dma_to_phys(dev, dma_address);
+
__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
dma_address & PAGE_OFFSET, size, direction);
}
-EXPORT_SYMBOL(dma_unmap_page);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
+
+ dma_handle = dma_to_phys(dev, dma_handle);
+
__dma_complete_pa_range(dma_handle, size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void tile_pci_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction
+ direction)
{
+ dma_handle = dma_to_phys(dev, dma_handle);
+
__dma_prep_pa_range(dma_handle, size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
+static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems,
+ enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;
@@ -313,10 +477,11 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
sg_dma_len(sg), direction);
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
+static void tile_pci_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems,
+ enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;
@@ -329,31 +494,93 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
sg_dma_len(sg), direction);
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline int
+tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
+ return 0;
}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline int
+tile_pci_dma_supported(struct device *dev, u64 mask)
{
- dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
+ return 1;
}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-/*
- * dma_alloc_noncoherent() is #defined to return coherent memory,
- * so there's no need to do any flushing here.
- */
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
+static struct dma_map_ops tile_pci_default_dma_map_ops = {
+ .alloc = tile_pci_dma_alloc_coherent,
+ .free = tile_pci_dma_free_coherent,
+ .map_page = tile_pci_dma_map_page,
+ .unmap_page = tile_pci_dma_unmap_page,
+ .map_sg = tile_pci_dma_map_sg,
+ .unmap_sg = tile_pci_dma_unmap_sg,
+ .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
+ .sync_single_for_device = tile_pci_dma_sync_single_for_device,
+ .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
+ .mapping_error = tile_pci_dma_mapping_error,
+ .dma_supported = tile_pci_dma_supported
+};
+
+struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+EXPORT_SYMBOL(gx_pci_dma_map_ops);
+
+/* PCI DMA mapping functions for legacy PCI devices */
+
+#ifdef CONFIG_SWIOTLB
+static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ gfp |= GFP_DMA;
+ return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+}
+
+static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
{
+ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
-EXPORT_SYMBOL(dma_cache_sync);
+
+static struct dma_map_ops pci_swiotlb_dma_ops = {
+ .alloc = tile_swiotlb_alloc_coherent,
+ .free = tile_swiotlb_free_coherent,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+
+struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+#else
+struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+#endif
+EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
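A hedged sketch of how these ops might be attached per device; the real
assignment happens in tile's PCI setup code, and the function name below
is hypothetical:

    /* Hypothetical hook-up: pick the ops that match the device's DMA
     * reach.  Legacy 32-bit devices get the swiotlb (bounce-buffer)
     * ops; full 64-bit PCIe devices get the offset-mapped ops. */
    static void example_pci_dma_dev_setup(struct pci_dev *pdev)
    {
    	if (pdev->dev.coherent_dma_mask <= DMA_BIT_MASK(32))
    		pdev->dev.archdata.dma_ops = gx_legacy_pci_dma_map_ops;
    	else
    		pdev->dev.archdata.dma_ops = gx_pci_dma_map_ops;
    }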
+
+#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ /* Handle legacy PCI devices with limited memory addressability. */
+ if (((dma_ops == gx_pci_dma_map_ops) ||
+ (dma_ops == gx_legacy_pci_dma_map_ops)) &&
+ (mask <= DMA_BIT_MASK(32))) {
+ if (mask > dev->archdata.max_direct_dma_addr)
+ mask = dev->archdata.max_direct_dma_addr;
+ }
+
+ if (!dma_supported(dev, mask))
+ return -EIO;
+ dev->coherent_dma_mask = mask;
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_coherent_mask);
+#endif
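
From a driver's point of view the clamping above is invisible; a
hypothetical probe path (name illustrative) just negotiates the widest
mask it can:

    /* Hypothetical driver probe: ask for 64-bit DMA, fall back to
     * 32-bit.  On tile, a legacy device's mask is clamped to its
     * max_direct_dma_addr by dma_set_coherent_mask() above, keeping
     * the swiotlb bounce path reachable. */
    static int example_probe(struct device *dev)
    {
    	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0 &&
    	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) != 0)
    		return -EIO;
    	return 0;
    }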