author     Akinobu Mita <akinobu.mita@gmail.com>  2015-06-24 16:55:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 17:49:40 -0700
commit     3693a84d3b8b2fd4db1f1b22f33793eb84a66420 (patch)
tree       4f8e5305fda1c5e15daaa01bec5e4935bd1a371c /arch/xtensa
parent     f51c0eaee39e306458d2bf8a30e010615fa451cc (diff)
xtensa: use for_each_sg()
This replaces the plain loop over the sglist array with the for_each_sg() macro, which consists of sg_next() function calls. Since xtensa doesn't select ARCH_HAS_SG_CHAIN, it is not necessary to use for_each_sg() in order to loop over each sg element. But this can help find problems with drivers that do not properly initialize their sg tables when CONFIG_DEBUG_SG is enabled.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
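For context, for_each_sg() walks the list through sg_next() rather than plain pointer increments. A minimal sketch of the pattern (the macro below approximates the include/linux/scatterlist.h definition and is shown for illustration only, not quoted from this patch):

    /* Iterate over nr entries starting at sglist, advancing via sg_next(). */
    #define for_each_sg(sglist, sg, nr, __i) \
            for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

With CONFIG_DEBUG_SG enabled, sg_next() and sg_page() verify the scatterlist debug magic, so iterating this way catches sg tables that were never set up with sg_init_table().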
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h | 19
1 file changed, 12 insertions, 7 deletions
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index ba78ccf651e7..1f5f6dc09736 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -52,14 +52,15 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
}
static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
int i;
+ struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++, sg++ ) {
+ for_each_sg(sglist, sg, nents, i) {
BUG_ON(!sg_page(sg));
sg->dma_address = sg_phys(sg);
@@ -124,20 +125,24 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}
static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir)
{
int i;
- for (i = 0; i < nelems; i++, sg++)
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction dir)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction dir)
{
int i;
- for (i = 0; i < nelems; i++, sg++)
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline int
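For readability, this is roughly how dma_map_sg() reads after the patch. The tail of the function (the consistent_sync() call and the return) falls outside the hunk shown above, so those lines are an assumption based on the surrounding file rather than part of the quoted diff:

    static inline int
    dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
               enum dma_data_direction direction)
    {
            int i;
            struct scatterlist *sg;

            BUG_ON(direction == DMA_NONE);

            for_each_sg(sglist, sg, nents, i) {
                    BUG_ON(!sg_page(sg));

                    sg->dma_address = sg_phys(sg);

                    /* Assumed from the pre-patch body: sync the cache for
                     * each mapped element. */
                    consistent_sync(sg_virt(sg), sg->length, direction);
            }

            return nents;
    }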