Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig           |   46
-rw-r--r--  arch/arm/mm/Makefile          |    4
-rw-r--r--  arch/arm/mm/cache-tauros2.c   |   24
-rw-r--r--  arch/arm/mm/cache-v3.S        |    1
-rw-r--r--  arch/arm/mm/cache-v4.S        |    1
-rw-r--r--  arch/arm/mm/cache-v4wb.S      |    6
-rw-r--r--  arch/arm/mm/cache-v4wt.S      |    1
-rw-r--r--  arch/arm/mm/cache-v6.S        |   10
-rw-r--r--  arch/arm/mm/cache-v7.S        |   10
-rw-r--r--  arch/arm/mm/context.c         |   57
-rw-r--r--  arch/arm/mm/copypage-v3.c     |   81
-rw-r--r--  arch/arm/mm/dma-mapping.c     | 1348
-rw-r--r--  arch/arm/mm/fault.c           |    3
-rw-r--r--  arch/arm/mm/init.c            |   23
-rw-r--r--  arch/arm/mm/mm.h              |    3
-rw-r--r--  arch/arm/mm/mmu.c             |   31
-rw-r--r--  arch/arm/mm/proc-arm1020.S    |    1
-rw-r--r--  arch/arm/mm/proc-arm1020e.S   |    1
-rw-r--r--  arch/arm/mm/proc-arm1022.S    |    1
-rw-r--r--  arch/arm/mm/proc-arm1026.S    |    1
-rw-r--r--  arch/arm/mm/proc-arm6_7.S     |  327
-rw-r--r--  arch/arm/mm/proc-arm920.S     |    1
-rw-r--r--  arch/arm/mm/proc-arm922.S     |    1
-rw-r--r--  arch/arm/mm/proc-arm925.S     |    1
-rw-r--r--  arch/arm/mm/proc-arm926.S     |    1
-rw-r--r--  arch/arm/mm/proc-arm940.S     |    6
-rw-r--r--  arch/arm/mm/proc-arm946.S     |    1
-rw-r--r--  arch/arm/mm/proc-feroceon.S   |    1
-rw-r--r--  arch/arm/mm/proc-mohawk.S     |   36
-rw-r--r--  arch/arm/mm/proc-v7-2level.S  |    9
-rw-r--r--  arch/arm/mm/tlb-v3.S          |   48
-rw-r--r--  arch/arm/mm/vmregion.h        |    2
32 files changed, 1310 insertions, 777 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c8a7d8467b..101b9681c08 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -4,23 +4,6 @@ comment "Processor Type"
# which CPUs we support in the kernel image, and the compiler instruction
# optimiser behaviour.
-# ARM610
-config CPU_ARM610
- bool "Support ARM610 processor" if ARCH_RPC
- select CPU_32v3
- select CPU_CACHE_V3
- select CPU_CACHE_VIVT
- select CPU_CP15_MMU
- select CPU_COPY_V3 if MMU
- select CPU_TLB_V3 if MMU
- select CPU_PABRT_LEGACY
- help
- The ARM610 is the successor to the ARM3 processor
- and was produced by VLSI Technology Inc.
-
- Say Y if you want support for the ARM610 processor.
- Otherwise, say N.
-
# ARM7TDMI
config CPU_ARM7TDMI
bool "Support ARM7TDMI processor"
@@ -36,25 +19,6 @@ config CPU_ARM7TDMI
Say Y if you want support for the ARM7TDMI processor.
Otherwise, say N.
-# ARM710
-config CPU_ARM710
- bool "Support ARM710 processor" if ARCH_RPC
- select CPU_32v3
- select CPU_CACHE_V3
- select CPU_CACHE_VIVT
- select CPU_CP15_MMU
- select CPU_COPY_V3 if MMU
- select CPU_TLB_V3 if MMU
- select CPU_PABRT_LEGACY
- help
- A 32-bit RISC microprocessor based on the ARM7 processor core
- designed by Advanced RISC Machines Ltd. The ARM710 is the
- successor to the ARM610 processor. It was released in
- July 1994 by VLSI Technology Inc.
-
- Say Y if you want support for the ARM710 processor.
- Otherwise, say N.
-
# ARM720T
config CPU_ARM720T
bool "Support ARM720T processor" if ARCH_INTEGRATOR
@@ -530,9 +494,6 @@ config CPU_CACHE_FA
if MMU
# The copy-page model
-config CPU_COPY_V3
- bool
-
config CPU_COPY_V4WT
bool
@@ -549,11 +510,6 @@ config CPU_COPY_V6
bool
# This selects the TLB model
-config CPU_TLB_V3
- bool
- help
- ARM Architecture Version 3 TLB.
-
config CPU_TLB_V4WT
bool
help
@@ -731,7 +687,7 @@ config CPU_HIGH_VECTOR
config CPU_ICACHE_DISABLE
bool "Disable I-Cache (I-bit)"
- depends on CPU_CP15 && !(CPU_ARM610 || CPU_ARM710 || CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
+ depends on CPU_CP15 && !(CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index bca7e61928c..8a9c4cb50a9 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
AFLAGS_cache-v6.o :=-Wa,-march=armv6
AFLAGS_cache-v7.o :=-Wa,-march=armv7-a
-obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
obj-$(CONFIG_CPU_COPY_FEROCEON) += copypage-feroceon.o
@@ -54,7 +53,6 @@ obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o
obj-$(CONFIG_CPU_COPY_FA) += copypage-fa.o
-obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o
@@ -66,8 +64,6 @@ obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o
AFLAGS_tlb-v6.o :=-Wa,-march=armv6
AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a
-obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o
-obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o
obj-$(CONFIG_CPU_ARM740T) += proc-arm740.o
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 1fbca05fe90..23a7643e9a8 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -108,6 +108,26 @@ static void tauros2_flush_range(unsigned long start, unsigned long end)
dsb();
}
+
+static void tauros2_disable(void)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t"
+ "mrc p15, 0, %0, c1, c0, 0\n\t"
+ "bic %0, %0, #(1 << 26)\n\t"
+ "mcr p15, 0, %0, c1, c0, 0 @Disable L2 Cache\n\t"
+ : : "r" (0x0));
+}
+
+static void tauros2_resume(void)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t"
+ "mrc p15, 0, %0, c1, c0, 0\n\t"
+ "orr %0, %0, #(1 << 26)\n\t"
+ "mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
+ : : "r" (0x0));
+}
#endif
static inline u32 __init read_extra_features(void)
@@ -194,6 +214,8 @@ void __init tauros2_init(void)
outer_cache.inv_range = tauros2_inv_range;
outer_cache.clean_range = tauros2_clean_range;
outer_cache.flush_range = tauros2_flush_range;
+ outer_cache.disable = tauros2_disable;
+ outer_cache.resume = tauros2_resume;
}
#endif
@@ -219,6 +241,8 @@ void __init tauros2_init(void)
outer_cache.inv_range = tauros2_inv_range;
outer_cache.clean_range = tauros2_clean_range;
outer_cache.flush_range = tauros2_flush_range;
+ outer_cache.disable = tauros2_disable;
+ outer_cache.resume = tauros2_resume;
}
#endif
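The two new callbacks above complete the outer_cache descriptor for Tauros2, so generic code can cleanly shut the L2 down (clean all, clear bit 26 of the control register) and bring it back after suspend (invalidate all, set bit 26). A minimal sketch of how the hooks are reached, assuming the outer_disable()/outer_resume() wrappers from <asm/outercache.h> of this era — callers never touch tauros2_* directly:
	static inline void outer_disable(void)
	{
		if (outer_cache.disable)
			outer_cache.disable();
	}
	static inline void outer_resume(void)
	{
		if (outer_cache.resume)
			outer_cache.resume();
	}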
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2301f22610..52e35f32eef 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -78,6 +78,7 @@ ENTRY(v3_coherent_kern_range)
* - end - virtual end address
*/
ENTRY(v3_coherent_user_range)
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index fd9bb7addc8..022135d2b7e 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -88,6 +88,7 @@ ENTRY(v4_coherent_kern_range)
* - end - virtual end address
*/
ENTRY(v4_coherent_user_range)
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 4f2c14151cc..8f1eeae340c 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -167,9 +167,9 @@ ENTRY(v4wb_coherent_user_range)
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- mov ip, #0
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 4d7b467631c..b34a5f908a8 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -125,6 +125,7 @@ ENTRY(v4wt_coherent_user_range)
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 74c2e5a33a4..4b10760c56d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/errno.h>
#include <asm/unwind.h>
#include "proc-macros.S"
@@ -135,7 +136,6 @@ ENTRY(v6_coherent_user_range)
1:
USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line
add r0, r0, #CACHE_LINE_SIZE
-2:
cmp r0, r1
blo 1b
#endif
@@ -154,13 +154,11 @@ ENTRY(v6_coherent_user_range)
/*
* Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
*/
9001:
- mov r0, r0, lsr #12
- mov r0, r0, lsl #12
- add r0, r0, #4096
- b 2b
+ mov r0, #-EFAULT
+ mov pc, lr
UNWIND(.fnend )
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a655d3da386..39e3fb3db80 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/errno.h>
#include <asm/unwind.h>
#include "proc-macros.S"
@@ -198,7 +199,6 @@ ENTRY(v7_coherent_user_range)
add r12, r12, r2
cmp r12, r1
blo 2b
-3:
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
@@ -208,13 +208,11 @@ ENTRY(v7_coherent_user_range)
/*
* Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
*/
9001:
- mov r12, r12, lsr #12
- mov r12, r12, lsl #12
- add r12, r12, #4096
- b 3b
+ mov r0, #-EFAULT
+ mov pc, lr
UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
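Both the v6 and v7 versions (together with the smaller one-line fixups to the v3/v4/v4wb/v4wt variants earlier in this diff) turn coherent_user_range into a fallible operation: r0 now carries 0 on success or -EFAULT when the user address is unmapped, instead of silently skipping to the next page. A hedged sketch of a C-level caller, assuming the prototype is switched to return int elsewhere in this series (helper name illustrative):
	/* illustrative only: propagate the fault to the caller */
	static int sync_user_icache(unsigned long start, unsigned long end)
	{
		return __cpuc_coherent_user_range(start, end); /* 0 or -EFAULT */
	}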
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d60..806cc4f6351 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -18,30 +18,39 @@
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
#ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) { \
- unsigned long ttbl, ttbh; \
- asm volatile( \
- " mrrc p15, 0, %0, %1, c2 @ read TTBR0\n" \
- " mov %1, %2, lsl #(48 - 32) @ set ASID\n" \
- " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" \
- : "=&r" (ttbl), "=&r" (ttbh) \
- : "r" (asid & ~ASID_MASK)); \
+void cpu_set_reserved_ttbr0(void)
+{
+ unsigned long ttbl = __pa(swapper_pg_dir);
+ unsigned long ttbh = 0;
+
+ /*
+ * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+ * ASID is set to 0.
+ */
+ asm volatile(
+ " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n"
+ :
+ : "r" (ttbl), "r" (ttbh));
+ isb();
}
#else
-#define cpu_set_asid(asid) \
- asm(" mcr p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+void cpu_set_reserved_ttbr0(void)
+{
+ u32 ttb;
+ /* Copy TTBR1 into TTBR0 */
+ asm volatile(
+ " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
+ " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"
+ : "=r" (ttb));
+ isb();
+}
#endif
/*
* We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
*/
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -51,9 +60,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
- /* set the reserved ASID before flushing the TLB */
- cpu_set_asid(0);
- isb();
+ cpu_set_reserved_ttbr0();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
__flush_icache_all();
@@ -98,14 +105,7 @@ static void reset_context(void *info)
{
unsigned int asid;
unsigned int cpu = smp_processor_id();
- struct mm_struct *mm = per_cpu(current_mm, cpu);
-
- /*
- * Check if a current_mm was set on this CPU as it might still
- * be in the early booting stages and using the reserved ASID.
- */
- if (!mm)
- return;
+ struct mm_struct *mm = current->active_mm;
smp_rmb();
asid = cpu_last_asid + cpu + 1;
@@ -114,8 +114,7 @@ static void reset_context(void *info)
set_mm_context(mm, asid);
/* set the new ASID */
- cpu_set_asid(mm->context.id);
- isb();
+ cpu_switch_mm(mm->pgd, mm);
}
#else
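The net effect of the context.c hunks: instead of parking the hardware on a magic "reserved ASID", rollover now points TTBR0 at page tables containing only global entries, flushes, and then switches back with the freshly allocated ASID. Condensed from flush_context()/reset_context() above (not new code):
	cpu_set_reserved_ttbr0();	/* global-only tables, ASID 0 */
	local_flush_tlb_all();		/* drop stale translations */
	/* ... allocate a new ASID for mm ... */
	cpu_switch_mm(mm->pgd, mm);	/* install pgd and new ASID together */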
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
deleted file mode 100644
index 3935bddd476..00000000000
--- a/arch/arm/mm/copypage-v3.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * linux/arch/arm/mm/copypage-v3.c
- *
- * Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/init.h>
-#include <linux/highmem.h>
-
-/*
- * ARMv3 optimised copy_user_highpage
- *
- * FIXME: do we need to handle cache stuff...
- */
-static void __naked
-v3_copy_user_page(void *kto, const void *kfrom)
-{
- asm("\n\
- stmfd sp!, {r4, lr} @ 2\n\
- mov r2, %2 @ 1\n\
- ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
-1: stmia %1!, {r3, r4, ip, lr} @ 4\n\
- ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
- stmia %1!, {r3, r4, ip, lr} @ 4\n\
- ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
- stmia %1!, {r3, r4, ip, lr} @ 4\n\
- ldmia %0!, {r3, r4, ip, lr} @ 4\n\
- subs r2, r2, #1 @ 1\n\
- stmia %1!, {r3, r4, ip, lr} @ 4\n\
- ldmneia %0!, {r3, r4, ip, lr} @ 4\n\
- bne 1b @ 1\n\
- ldmfd sp!, {r4, pc} @ 3"
- :
- : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
-}
-
-void v3_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- void *kto, *kfrom;
-
- kto = kmap_atomic(to);
- kfrom = kmap_atomic(from);
- v3_copy_user_page(kto, kfrom);
- kunmap_atomic(kfrom);
- kunmap_atomic(kto);
-}
-
-/*
- * ARMv3 optimised clear_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *ptr, *kaddr = kmap_atomic(page);
- asm volatile("\n\
- mov r1, %2 @ 1\n\
- mov r2, #0 @ 1\n\
- mov r3, #0 @ 1\n\
- mov ip, #0 @ 1\n\
- mov lr, #0 @ 1\n\
-1: stmia %0!, {r2, r3, ip, lr} @ 4\n\
- stmia %0!, {r2, r3, ip, lr} @ 4\n\
- stmia %0!, {r2, r3, ip, lr} @ 4\n\
- stmia %0!, {r2, r3, ip, lr} @ 4\n\
- subs r1, r1, #1 @ 1\n\
- bne 1b @ 1"
- : "=r" (ptr)
- : "0" (kaddr), "I" (PAGE_SIZE / 64)
- : "r1", "r2", "r3", "ip", "lr");
- kunmap_atomic(kaddr);
-}
-
-struct cpu_user_fns v3_user_fns __initdata = {
- .cpu_clear_user_highpage = v3_clear_user_highpage,
- .cpu_copy_user_highpage = v3_copy_user_highpage,
-};
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index db23ae4aaaa..ea6b4315409 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,8 +17,12 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@@ -26,9 +30,112 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
#include "mm.h"
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+ handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = arm_dma_map_page,
+ .unmap_page = arm_dma_unmap_page,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
+ .sync_single_for_device = arm_dma_sync_single_for_device,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)arm_dma_limit;
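Note that arm_dma_ops is consumed through the per-device dispatch of the common DMA code, so existing drivers keep calling the generic helpers unchanged. A sketch of that dispatch, assuming the asm-generic dma-mapping-common.h style wrappers of this kernel generation (simplified; not part of this patch):
	static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
			size_t offset, size_t size, enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev); /* arm_dma_ops or iommu_ops */

		return ops->map_page(dev, page, offset, size, dir, NULL);
	}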
@@ -56,6 +163,21 @@ static u64 get_coherent_dma_mask(struct device *dev)
return mask;
}
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+ void *ptr;
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+ if (ptr) {
+ memset(ptr, 0, size);
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ }
+}
+
/*
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
@@ -64,23 +186,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
- void *ptr;
- u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
- u64 limit = (mask + 1) & ~mask;
- if (limit && size >= limit) {
- dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
- size, mask);
- return NULL;
- }
-#endif
-
- if (!mask)
- return NULL;
-
- if (mask < 0xffffffffULL)
- gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
@@ -93,14 +198,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- /*
- * Ensure that the allocated pages are zeroed, and that any data
- * lurking in the kernel direct-mapped region is invalidated.
- */
- ptr = page_address(page);
- memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ __dma_clear_buffer(page, size);
return page;
}
@@ -170,6 +268,11 @@ static int __init consistent_init(void)
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+#ifndef CONFIG_ARM_DMA_USE_IOMMU
+ if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ return 0;
+#endif
+
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
@@ -184,14 +287,14 @@ static int __init consistent_init(void)
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
- printk(KERN_ERR "%s: no pud tables\n", __func__);
+ pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
- printk(KERN_ERR "%s: no pmd tables\n", __func__);
+ pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -199,7 +302,7 @@ static int __init consistent_init(void)
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
- printk(KERN_ERR "%s: no pte tables\n", __func__);
+ pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -210,9 +313,101 @@ static int __init consistent_init(void)
return ret;
}
-
core_initcall(consistent_init);
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page);
+
+static struct arm_vmregion_head coherent_head = {
+ .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+ .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+static int __init early_coherent_pool(char *p)
+{
+ coherent_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+ pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ size_t size = coherent_pool_size;
+ struct page *page;
+ void *ptr;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return 0;
+
+ ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+ if (ptr) {
+ coherent_head.vm_start = (unsigned long) ptr;
+ coherent_head.vm_end = (unsigned long) ptr + size;
+ printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)size / 1024);
+ return 0;
+ }
+ printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+ (unsigned)size / 1024);
+ return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(coherent_init);
+
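On ARMv6+ the pool above backs dma_alloc_coherent() calls made with GFP_ATOMIC, where the CMA path cannot be used because it may sleep; the default size is DEFAULT_CONSISTENT_DMA_SIZE / 8 and can be overridden on the command line, e.g. coherent_pool=4M. Driver-side nothing changes; a hedged sketch (helper name illustrative):
	/* safe in IRQ context: __dma_alloc() sends GFP_ATOMIC requests
	 * to __alloc_from_pool() instead of the CMA allocator */
	static void *grab_atomic_dma_buf(struct device *dev, dma_addr_t *dma)
	{
		return dma_alloc_coherent(dev, SZ_4K, dma, GFP_ATOMIC);
	}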
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
+}
+
+void __init dma_contiguous_remap(void)
+{
+ int i;
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t start = dma_mmu_remap[i].base;
+ phys_addr_t end = start + dma_mmu_remap[i].size;
+ struct map_desc map;
+ unsigned long addr;
+
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+ return;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_DMA_READY;
+
+ /*
+ * Clear previous low-memory mapping
+ */
+ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ iotable_init(&map, 1);
+ }
+}
+
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
@@ -222,7 +417,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
int bit;
if (!consistent_pte) {
- printk(KERN_ERR "%s: not initialised\n", __func__);
+ pr_err("%s: not initialised\n", __func__);
dump_stack();
return NULL;
}
@@ -249,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
pte = consistent_pte[idx] + off;
- c->vm_pages = page;
+ c->priv = page;
do {
BUG_ON(!pte_none(*pte));
@@ -281,14 +476,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
if (!c) {
- printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+ pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
}
if ((c->vm_end - c->vm_start) != size) {
- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
@@ -310,8 +505,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
}
if (pte_none(pte) || !pte_present(pte))
- printk(KERN_CRIT "%s: bad page in kernel page table\n",
- __func__);
+ pr_crit("%s: bad page in kernel page table\n",
+ __func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -319,20 +514,182 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
arm_vmregion_free(&consistent_head, c);
}
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page *page = virt_to_page(addr);
+ pgprot_t prot = *(pgprot_t *)data;
+
+ set_pte_ext(pte, mk_pte(page, prot), 0);
+ return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+ unsigned long start = (unsigned long) page_address(page);
+ unsigned end = start + size;
+
+ apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+ dsb();
+ flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
+{
+ struct page *page;
+ void *ptr;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (!ptr) {
+ __dma_free_buffer(page, size);
+ return NULL;
+ }
+
+ *ret_page = page;
+ return ptr;
+}
+
+static void *__alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, const void *caller)
+{
+ struct arm_vmregion *c;
+ size_t align;
+
+ if (!coherent_head.vm_start) {
+ printk(KERN_ERR "%s: coherent pool not initialised!\n",
+ __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the region allocation - allocations from the pool are rather
+ * small, so align them to their order in pages; the minimum is one
+ * page. This helps reduce fragmentation of the DMA space.
+ */
+ align = PAGE_SIZE << get_order(size);
+ c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+ if (c) {
+ void *ptr = (void *)c->vm_start;
+ struct page *page = virt_to_page(ptr);
+ *ret_page = page;
+ return ptr;
+ }
+ return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+ unsigned long start = (unsigned long)cpu_addr;
+ unsigned long end = start + size;
+ struct arm_vmregion *c;
+
+ if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ return 0;
+
+ c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+
+ if ((c->vm_end - c->vm_start) != size) {
+ printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ __func__, c->vm_end - c->vm_start, size);
+ dump_stack();
+ size = c->vm_end - c->vm_start;
+ }
+
+ arm_vmregion_free(&coherent_head, c);
+ return 1;
+}
+
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page)
+{
+ unsigned long order = get_order(size);
+ size_t count = size >> PAGE_SHIFT;
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ return NULL;
+
+ __dma_clear_buffer(page, size);
+ __dma_remap(page, size, prot);
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __dma_remap(page, size, pgprot_kernel);
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+ prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+ pgprot_writecombine(prot) :
+ pgprot_dmacoherent(prot);
+ return prot;
+}
+
+#define nommu() 0
+
#else /* !CONFIG_MMU */
-#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
-#define __dma_free_remap(addr, size) do { } while (0)
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot) __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __alloc_from_pool(dev, size, ret_page, c) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __free_from_pool(cpu_addr, size) 0
+#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
- pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+ struct page **ret_page)
+{
+ struct page *page;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, pgprot_t prot, const void *caller)
{
+ u64 mask = get_coherent_dma_mask(dev);
struct page *page;
void *addr;
+#ifdef CONFIG_DMA_API_DEBUG
+ u64 limit = (mask + 1) & ~mask;
+ if (limit && size >= limit) {
+ dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+ size, mask);
+ return NULL;
+ }
+#endif
+
+ if (!mask)
+ return NULL;
+
+ if (mask < 0xffffffffULL)
+ gfp |= GFP_DMA;
+
/*
* Following is a work-around (a.k.a. hack) to prevent pages
* with __GFP_COMP being passed to split_page() which cannot
@@ -342,22 +699,20 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
*/
gfp &= ~(__GFP_COMP);
- *handle = ~0;
+ *handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- page = __dma_alloc_buffer(dev, size, gfp);
- if (!page)
- return NULL;
-
- if (!arch_is_coherent())
- addr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (arch_is_coherent() || nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (cpu_architecture() < CPU_ARCH_ARMv6)
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(dev, size, &page, caller);
else
- addr = page_address(page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
- else
- __dma_free_buffer(page, size);
return addr;
}
@@ -366,138 +721,71 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_dmacoherent(pgprot_kernel),
+ return __dma_alloc(dev, size, handle, gfp, prot,
__builtin_return_address(0));
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create userspace mapping for the DMA-coherent memory.
*/
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_writecombine(pgprot_kernel),
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
- unsigned long user_size, kern_size;
- struct arm_vmregion *c;
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
- user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- unsigned long off = vma->vm_pgoff;
-
- kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
- if (off < kern_size &&
- user_size <= (kern_size - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(c->vm_pages) + off,
- user_size << PAGE_SHIFT,
- vma->vm_page_prot);
- }
- }
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
#endif /* CONFIG_MMU */
return ret;
}
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
/*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
{
- WARN_ON(irqs_disabled());
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
size = PAGE_ALIGN(size);
- if (!arch_is_coherent())
+ if (arch_is_coherent() || nommu()) {
+ __dma_free_buffer(page, size);
+ } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
__dma_free_remap(cpu_addr, size);
-
- __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long paddr;
-
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- dmac_map_area(kaddr, size, dir);
-
- paddr = __pa(kaddr);
- if (dir == DMA_FROM_DEVICE) {
- outer_inv_range(paddr, paddr + size);
+ __dma_free_buffer(page, size);
} else {
- outer_clean_range(paddr, paddr + size);
- }
- /* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- /* FIXME: non-speculating: not required */
- /* don't bother invalidating if DMA to device */
- if (dir != DMA_TO_DEVICE) {
- unsigned long paddr = __pa(kaddr);
- outer_inv_range(paddr, paddr + size);
+ if (__free_from_pool(cpu_addr, size))
+ return;
+ /*
+ * Non-atomic allocations cannot be freed with IRQs disabled
+ */
+ WARN_ON(irqs_disabled());
+ __free_from_contiguous(dev, page, size);
}
-
- dmac_unmap_area(kaddr, size, dir);
}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
@@ -543,7 +831,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
} while (left);
}
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
@@ -558,9 +852,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + off;
@@ -578,10 +871,9 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
set_bit(PG_dcache_clean, &page->flags);
}
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
@@ -596,32 +888,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
* Device ownership issues as mentioned for dma_map_single are the same
* here.
*/
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;
- BUG_ON(!valid_dma_direction(dir));
-
for_each_sg(sg, s, nents, i) {
- s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
- s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+ s->length, dir, attrs);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
- debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
return 0;
}
-EXPORT_SYMBOL(dma_map_sg);
/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -630,70 +922,55 @@ EXPORT_SYMBOL(dma_map_sg);
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
- int i;
- debug_dma_unmap_sg(dev, sg, nents, dir);
+ int i;
for_each_sg(sg, s, nents, i)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
-EXPORT_SYMBOL(dma_unmap_sg);
/**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
/**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_cpu_to_dev(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
/*
* Return whether the given device DMA address mask can be supported
@@ -709,18 +986,15 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
-#ifndef CONFIG_DMABOUNCE
*dev->dma_mask = dma_mask;
-#endif
return 0;
}
-EXPORT_SYMBOL(dma_set_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
@@ -733,3 +1007,679 @@ static int __init dma_debug_do_init(void)
return 0;
}
fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+ unsigned long flags;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+
+ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ while (count) {
+ int j, order = __ffs(count);
+
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order)
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return NULL;
+}
+
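A quick trace of the allocation loop above: each pass grabs the largest power-of-two block that still divides the remaining count, splitting it with split_page() so the buffer can later be released page by page. For a 5-page buffer:
	count = 5 (101b): __ffs = 0 -> one order-0 page              (pages[0])
	count = 4 (100b): __ffs = 2 -> one order-2 block, split      (pages[1..4])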
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+ struct arm_vmregion *c;
+ size_t align;
+ size_t count = size >> PAGE_SHIFT;
+ int bit;
+
+ if (!consistent_pte[0]) {
+ pr_err("%s: not initialised\n", __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the virtual region allocation - maximum alignment is
+ * a section size, minimum is a page size. This helps reduce
+ * fragmentation of the DMA space, and also prevents allocations
+ * smaller than a section from crossing a section boundary.
+ */
+ bit = fls(size - 1);
+ if (bit > SECTION_SHIFT)
+ bit = SECTION_SHIFT;
+ align = 1 << bit;
+
+ /*
+ * Allocate a virtual address in the consistent mapping region.
+ */
+ c = arm_vmregion_alloc(&consistent_head, align, size,
+ gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+ if (c) {
+ pte_t *pte;
+ int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+ int i = 0;
+ u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+ pte = consistent_pte[idx] + off;
+ c->priv = pages;
+
+ do {
+ BUG_ON(!pte_none(*pte));
+
+ set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+ pte++;
+ off++;
+ i++;
+ if (off >= PTRS_PER_PTE) {
+ off = 0;
+ pte = consistent_pte[++idx];
+ }
+ } while (i < count);
+
+ dsb();
+
+ return (void *)c->vm_start;
+ }
+ return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t dma_addr, iova;
+ int i, ret = DMA_ERROR_CODE;
+
+ dma_addr = __alloc_iova(mapping, size);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ iova = dma_addr;
+ for (i = 0; i < count; ) {
+ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+ phys_addr_t phys = page_to_phys(pages[i]);
+ unsigned int len, j;
+
+ for (j = i + 1; j < count; j++, next_pfn++)
+ if (page_to_pfn(pages[j]) != next_pfn)
+ break;
+
+ len = (j - i) << PAGE_SHIFT;
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ iova += len;
+ i = j;
+ }
+ return dma_addr;
+fail:
+ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+ __free_iova(mapping, dma_addr, size);
+ return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ /*
+ * add optional in-page offset from iova to size and align
+ * result to page size
+ */
+ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, size);
+ __free_iova(mapping, iova, size);
+ return 0;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ struct page **pages;
+ void *addr = NULL;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ pages = __iommu_alloc_buffer(dev, size, gfp);
+ if (!pages)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, pages, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot);
+ if (!addr)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+ __iommu_free_buffer(dev, pages, size);
+ return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+ if (c) {
+ struct page **pages = c->priv;
+
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int i = 0;
+
+ do {
+ int ret;
+
+ ret = vm_insert_page(vma, uaddr, pages[i++]);
+ if (ret) {
+ pr_err("Remapping memory, error: %d\n", ret);
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+ }
+ return 0;
+}
+
+/*
+ * Free a buffer as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+ size = PAGE_ALIGN(size);
+
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+ if (c) {
+ struct page **pages = c->priv;
+ __dma_free_remap(cpu_addr, size);
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size);
+ }
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova, iova_base;
+ int ret = 0;
+ unsigned int count;
+ struct scatterlist *s;
+
+ size = PAGE_ALIGN(size);
+ *handle = DMA_ERROR_CODE;
+
+ iova_base = iova = __alloc_iova(mapping, size);
+ if (iova == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ count += len >> PAGE_SHIFT;
+ iova += len;
+ }
+ *handle = iova_base;
+
+ return 0;
+fail:
+ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+ __free_iova(mapping, iova_base, size);
+ return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s = sg, *dma = sg, *start = sg;
+ int i, count = 0;
+ unsigned int offset = s->offset;
+ unsigned int size = s->offset + s->length;
+ unsigned int max = dma_get_max_seg_size(dev);
+
+ for (i = 1; i < nents; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
+ s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+ dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count += 1;
+ }
+ size += s->length;
+ }
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count+1;
+
+bad_mapping:
+ for_each_sg(sg, s, count, i)
+ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+ return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_len(s))
+ __iommu_remove_mapping(dev, sg_dma_address(s),
+ sg_dma_len(s));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t dma_addr;
+ int ret, len = PAGE_ALIGN(size + offset);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
+ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
+ .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
+ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: allocation granularity of IO addresses, as a power-of-two page order
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges; it is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct dma_iommu_mapping *mapping;
+ int err = -ENOMEM;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+ if (!mapping)
+ goto err;
+
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mapping->bitmap)
+ goto err2;
+
+ mapping->base = base;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
+ mapping->order = order;
+ spin_lock_init(&mapping->lock);
+
+ mapping->domain = iommu_domain_alloc(bus);
+ if (!mapping->domain)
+ goto err3;
+
+ kref_init(&mapping->kref);
+ return mapping;
+err3:
+ kfree(mapping->bitmap);
+err2:
+ kfree(mapping);
+err:
+ return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+ struct dma_iommu_mapping *mapping =
+ container_of(kref, struct dma_iommu_mapping, kref);
+
+ iommu_domain_free(mapping->domain);
+ kfree(mapping->bitmap);
+ kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+ if (mapping)
+ kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ * arm_iommu_create_mapping)
+ *
+ * Attaches the specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+ set_dma_ops(dev, &iommu_ops);
+
+ pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+
+#endif
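For orientation, a minimal consumer sketch of the API added above; it is not part
of the patch, and the bus type, IOVA base, window size and order are illustrative
assumptions (the prototypes come from the asm/dma-iommu.h header introduced by
this series). Once attached, ordinary dma_map_page()/dma_map_sg() calls on the
device are routed through iommu_ops, i.e. through arm_iommu_map_page() and friends.

	#include <linux/err.h>
	#include <linux/device.h>
	#include <linux/platform_device.h>
	#include <asm/dma-iommu.h>

	static int example_enable_iommu_dma(struct device *dev)
	{
		struct dma_iommu_mapping *mapping;
		int err;

		/* one 256 MiB IOVA window at 0x40000000, page-sized granularity */
		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x40000000, 0x10000000, 0);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);

		err = arm_iommu_attach_device(dev, mapping);
		if (err) {
			/* drop the initial reference; frees domain and bitmap */
			arm_iommu_release_mapping(mapping);
			return err;
		}
		return 0;
	}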
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5bb48356d21..c3bd8345022 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -432,9 +432,6 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
index = pgd_index(addr);
- /*
- * FIXME: CP15 C1 is write only on ARMv3 architectures.
- */
pgd = cpu_get_pgd() + index;
pgd_k = init_mm.pgd + index;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813bbffb..c21d06c7dd7 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
@@ -226,6 +227,17 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
}
#endif
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ } else
+ arm_dma_limit = 0xffffffff;
+#endif
+}
+
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
@@ -273,12 +285,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
* Adjust the sizes according to any special requirements for
* this machine type.
*/
- if (arm_dma_zone_size) {
+ if (arm_dma_zone_size)
arm_adjust_dma_zone(zone_size, zhole_size,
arm_dma_zone_size >> PAGE_SHIFT);
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
- } else
- arm_dma_limit = 0xffffffff;
#endif
free_area_init_node(0, zone_size, min, zhole_size);
@@ -364,6 +373,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
+	/*
+	 * Reserve memory for DMA contiguous allocations; it must come
+	 * from the DMA area inside low memory.
+	 */
+ dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
arm_memblock_steal_permitted = false;
memblock_allow_resize();
memblock_dump_all();
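As a worked example of the path above (the board name and values are hypothetical):
a machine descriptor publishes its DMA addressing restriction via dma_zone_size,
setup_dma_zone() turns that into arm_dma_limit early during boot, and
dma_contiguous_reserve() then carves the CMA area out below
min(arm_dma_limit, arm_lowmem_limit).

	#include <asm/mach/arch.h>
	#include <asm/sizes.h>

	MACHINE_START(EXAMPLEBOARD, "Board whose DMA masters reach only 64 MiB")
		/* first 64 MiB of RAM become ZONE_DMA; arm_dma_limit follows */
		.dma_zone_size	= SZ_64M,
		/* .map_io, .init_irq, .timer, .init_machine, ... as usual */
	MACHINE_END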
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27f4a619b35..93dc0c17cdc 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -67,5 +67,8 @@ extern u32 arm_dma_limit;
#define arm_dma_limit ((u32)~0)
#endif
+extern phys_addr_t arm_lowmem_limit;
+
void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index aa78de8bfdd..e5dad60b558 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
PMD_SECT_UNCACHED | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_DMA_READY] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .domain = DOMAIN_KERNEL,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
@@ -512,6 +519,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -596,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
- if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@ static int __init early_vmalloc(char *arg)
}
early_param("vmalloc", early_vmalloc);
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void)
{
@@ -897,8 +905,8 @@ void __init sanity_check_meminfo(void)
bank->size = newsize;
}
#endif
- if (!bank->highmem && bank->start + bank->size > lowmem_limit)
- lowmem_limit = bank->start + bank->size;
+ if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+ arm_lowmem_limit = bank->start + bank->size;
j++;
}
@@ -923,8 +931,8 @@ void __init sanity_check_meminfo(void)
}
#endif
meminfo.nr_banks = j;
- high_memory = __va(lowmem_limit - 1) + 1;
- memblock_set_current_limit(lowmem_limit);
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+ memblock_set_current_limit(arm_lowmem_limit);
}
static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@ static inline void prepare_page_table(void)
* Find the end of the first block of lowmem.
*/
end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
- if (end >= lowmem_limit)
- end = lowmem_limit;
+ if (end >= arm_lowmem_limit)
+ end = arm_lowmem_limit;
/*
* Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@ static void __init map_lowmem(void)
phys_addr_t end = start + reg->size;
struct map_desc map;
- if (end > lowmem_limit)
- end = lowmem_limit;
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
if (start >= end)
break;
@@ -1115,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- memblock_set_current_limit(lowmem_limit);
+ memblock_set_current_limit(arm_lowmem_limit);
build_mem_type_table();
prepare_page_table();
map_lowmem();
+ dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
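The new MT_MEMORY_DMA_READY type deliberately defines no prot_sect, so the
alloc_init_section() change above forces such regions to be mapped with
individual ptes instead of section entries; dma_contiguous_remap() relies on
this to retarget per-page attributes of CMA memory later. A sketch of that
use, under the usual map_desc/iotable_init() flow (the real implementation
lives in dma-mapping.c):

	#include <asm/mach/map.h>

	static void __init example_remap_cma_region(phys_addr_t start, size_t size)
	{
		struct map_desc map;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = size;
		map.type = MT_MEMORY_DMA_READY;	/* no prot_sect -> pte mappings */
		iotable_init(&map, 1);
	}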
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 234951345eb..0650bb87c1e 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -241,6 +241,7 @@ ENTRY(arm1020_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index c244b06caac..4188478325a 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -235,6 +235,7 @@ ENTRY(arm1020e_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 38fe22efd18..33c68824bff 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -224,6 +224,7 @@ ENTRY(arm1022_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 3eb9c3c26c7..fbc1d5fc24d 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -218,6 +218,7 @@ ENTRY(arm1026_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
deleted file mode 100644
index 4fbeb5b8e6c..00000000000
--- a/arch/arm/mm/proc-arm6_7.S
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * linux/arch/arm/mm/proc-arm6,7.S
- *
- * Copyright (C) 1997-2000 Russell King
- * hacked for non-paged-MM by Hyok S. Choi, 2003.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * These are the low level assembler for performing cache and TLB
- * functions on the ARM610 & ARM710.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/hwcap.h>
-#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
-#include <asm/ptrace.h>
-
-#include "proc-macros.S"
-
-ENTRY(cpu_arm6_dcache_clean_area)
-ENTRY(cpu_arm7_dcache_clean_area)
- mov pc, lr
-
-/*
- * Function: arm6_7_data_abort ()
- *
- * Params : r2 = pt_regs
- * : r4 = aborted context pc
- * : r5 = aborted context psr
- *
- * Purpose : obtain information about current aborted instruction
- *
- * Returns : r4-r5, r10-r11, r13 preserved
- */
-
-ENTRY(cpu_arm7_data_abort)
- mrc p15, 0, r1, c5, c0, 0 @ get FSR
- mrc p15, 0, r0, c6, c0, 0 @ get FAR
- ldr r8, [r4] @ read arm instruction
- tst r8, #1 << 20 @ L = 0 -> write?
- orreq r1, r1, #1 << 11 @ yes.
- and r7, r8, #15 << 24
- add pc, pc, r7, lsr #22 @ Now branch to the relevant processing routine
- nop
-
-/* 0 */ b .data_unknown
-/* 1 */ b do_DataAbort @ swp
-/* 2 */ b .data_unknown
-/* 3 */ b .data_unknown
-/* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m
-/* 5 */ b .data_arm_lateldrpreconst @ ldr rd, [rn, #m]
-/* 6 */ b .data_arm_lateldrpostreg @ ldr rd, [rn], rm
-/* 7 */ b .data_arm_lateldrprereg @ ldr rd, [rn, rm]
-/* 8 */ b .data_arm_ldmstm @ ldm*a rn, <rlist>
-/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist>
-/* a */ b .data_unknown
-/* b */ b .data_unknown
-/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
-/* d */ b do_DataAbort @ ldc rd, [rn, #m]
-/* e */ b .data_unknown
-/* f */
-.data_unknown: @ Part of jumptable
- mov r0, r4
- mov r1, r8
- b baddataabort
-
-ENTRY(cpu_arm6_data_abort)
- mrc p15, 0, r1, c5, c0, 0 @ get FSR
- mrc p15, 0, r0, c6, c0, 0 @ get FAR
- ldr r8, [r4] @ read arm instruction
- tst r8, #1 << 20 @ L = 0 -> write?
- orreq r1, r1, #1 << 11 @ yes.
- and r7, r8, #14 << 24
- teq r7, #8 << 24 @ was it ldm/stm
- bne do_DataAbort
-
-.data_arm_ldmstm:
- tst r8, #1 << 21 @ check writeback bit
- beq do_DataAbort @ no writeback -> no fixup
- mov r7, #0x11
- orr r7, r7, #0x1100
- and r6, r8, r7
- and r9, r8, r7, lsl #1
- add r6, r6, r9, lsr #1
- and r9, r8, r7, lsl #2
- add r6, r6, r9, lsr #2
- and r9, r8, r7, lsl #3
- add r6, r6, r9, lsr #3
- add r6, r6, r6, lsr #8
- add r6, r6, r6, lsr #4
- and r6, r6, #15 @ r6 = no. of registers to transfer.
- and r9, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
- tst r8, #1 << 23 @ Check U bit
- subne r7, r7, r6, lsl #2 @ Undo increment
- addeq r7, r7, r6, lsl #2 @ Undo decrement
- str r7, [r2, r9, lsr #14] @ Put register 'Rn'
- b do_DataAbort
-
-.data_arm_apply_r6_and_rn:
- and r9, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
- tst r8, #1 << 23 @ Check U bit
-	subne	r7, r7, r6			@ Undo increment
- addeq r7, r7, r6 @ Undo decrement
- str r7, [r2, r9, lsr #14] @ Put register 'Rn'
- b do_DataAbort
-
-.data_arm_lateldrpreconst:
- tst r8, #1 << 21 @ check writeback bit
- beq do_DataAbort @ no writeback -> no fixup
-.data_arm_lateldrpostconst:
- movs r6, r8, lsl #20 @ Get offset
- beq do_DataAbort @ zero -> no fixup
- and r9, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
- tst r8, #1 << 23 @ Check U bit
- subne r7, r7, r6, lsr #20 @ Undo increment
- addeq r7, r7, r6, lsr #20 @ Undo decrement
- str r7, [r2, r9, lsr #14] @ Put register 'Rn'
- b do_DataAbort
-
-.data_arm_lateldrprereg:
- tst r8, #1 << 21 @ check writeback bit
- beq do_DataAbort @ no writeback -> no fixup
-.data_arm_lateldrpostreg:
- and r7, r8, #15 @ Extract 'm' from instruction
- ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
- mov r9, r8, lsr #7 @ get shift count
- ands r9, r9, #31
- and r7, r8, #0x70 @ get shift type
- orreq r7, r7, #8 @ shift count = 0
- add pc, pc, r7
- nop
-
- mov r6, r6, lsl r9 @ 0: LSL #!0
- b .data_arm_apply_r6_and_rn
- b .data_arm_apply_r6_and_rn @ 1: LSL #0
- nop
- b .data_unknown @ 2: MUL?
- nop
- b .data_unknown @ 3: MUL?
- nop
- mov r6, r6, lsr r9 @ 4: LSR #!0
- b .data_arm_apply_r6_and_rn
- mov r6, r6, lsr #32 @ 5: LSR #32
- b .data_arm_apply_r6_and_rn
- b .data_unknown @ 6: MUL?
- nop
- b .data_unknown @ 7: MUL?
- nop
- mov r6, r6, asr r9 @ 8: ASR #!0
- b .data_arm_apply_r6_and_rn
- mov r6, r6, asr #32 @ 9: ASR #32
- b .data_arm_apply_r6_and_rn
- b .data_unknown @ A: MUL?
- nop
- b .data_unknown @ B: MUL?
- nop
- mov r6, r6, ror r9 @ C: ROR #!0
- b .data_arm_apply_r6_and_rn
- mov r6, r6, rrx @ D: RRX
- b .data_arm_apply_r6_and_rn
- b .data_unknown @ E: MUL?
- nop
- b .data_unknown @ F: MUL?
-
-/*
- * Function: arm6_7_proc_init (void)
- * : arm6_7_proc_fin (void)
- *
- * Notes : This processor does not require these
- */
-ENTRY(cpu_arm6_proc_init)
-ENTRY(cpu_arm7_proc_init)
- mov pc, lr
-
-ENTRY(cpu_arm6_proc_fin)
-ENTRY(cpu_arm7_proc_fin)
- mov r0, #0x31 @ ....S..DP...M
- mcr p15, 0, r0, c1, c0, 0 @ disable caches
- mov pc, lr
-
-ENTRY(cpu_arm6_do_idle)
-ENTRY(cpu_arm7_do_idle)
- mov pc, lr
-
-/*
- * Function: arm6_7_switch_mm(unsigned long pgd_phys)
- * Params : pgd_phys Physical address of page table
- * Purpose : Perform a task switch, saving the old processes state, and restoring
- * the new.
- */
-ENTRY(cpu_arm6_switch_mm)
-ENTRY(cpu_arm7_switch_mm)
-#ifdef CONFIG_MMU
- mov r1, #0
- mcr p15, 0, r1, c7, c0, 0 @ flush cache
- mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
- mcr p15, 0, r1, c5, c0, 0 @ flush TLBs
-#endif
- mov pc, lr
-
-/*
- * Function: arm6_7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
- * Params : r0 = Address to set
- * : r1 = value to set
- * Purpose : Set a PTE and flush it out of any WB cache
- */
- .align 5
-ENTRY(cpu_arm6_set_pte_ext)
-ENTRY(cpu_arm7_set_pte_ext)
-#ifdef CONFIG_MMU
- armv3_set_pte_ext wc_disable=0
-#endif /* CONFIG_MMU */
- mov pc, lr
-
-/*
- * Function: _arm6_7_reset
- * Params : r0 = address to jump to
- * Notes : This sets up everything for a reset
- */
- .pushsection .idmap.text, "ax"
-ENTRY(cpu_arm6_reset)
-ENTRY(cpu_arm7_reset)
- mov r1, #0
- mcr p15, 0, r1, c7, c0, 0 @ flush cache
-#ifdef CONFIG_MMU
- mcr p15, 0, r1, c5, c0, 0 @ flush TLB
-#endif
- mov r1, #0x30
- mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
- mov pc, r0
-ENDPROC(cpu_arm6_reset)
-ENDPROC(cpu_arm7_reset)
- .popsection
-
- __CPUINIT
-
- .type __arm6_setup, #function
-__arm6_setup: mov r0, #0
- mcr p15, 0, r0, c7, c0 @ flush caches on v3
-#ifdef CONFIG_MMU
- mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
- mov r0, #0x3d @ . ..RS BLDP WCAM
- orr r0, r0, #0x100 @ . ..01 0011 1101
-#else
- mov r0, #0x3c @ . ..RS BLDP WCA.
-#endif
- mov pc, lr
- .size __arm6_setup, . - __arm6_setup
-
- .type __arm7_setup, #function
-__arm7_setup: mov r0, #0
- mcr p15, 0, r0, c7, c0 @ flush caches on v3
-#ifdef CONFIG_MMU
- mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
- mcr p15, 0, r0, c3, c0 @ load domain access register
- mov r0, #0x7d @ . ..RS BLDP WCAM
- orr r0, r0, #0x100 @ . ..01 0111 1101
-#else
- mov r0, #0x7c @ . ..RS BLDP WCA.
-#endif
- mov pc, lr
- .size __arm7_setup, . - __arm7_setup
-
- __INITDATA
-
- @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
- define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort
- define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort
-
- .section ".rodata"
-
- string cpu_arch_name, "armv3"
- string cpu_elf_name, "v3"
- string cpu_arm6_name, "ARM6"
- string cpu_arm610_name, "ARM610"
- string cpu_arm7_name, "ARM7"
- string cpu_arm710_name, "ARM710"
-
- .align
-
- .section ".proc.info.init", #alloc, #execinstr
-
-.macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
- cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req
- .type __\name\()_proc_info, #object
-__\name\()_proc_info:
- .long \cpu_val
- .long \cpu_mask
- .long \cpu_mm_mmu_flags
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b \cpu_flush
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_26BIT
- .long \cpu_name
- .long \cpu_proc_funcs
- .long v3_tlb_fns
- .long v3_user_fns
- .long v3_cache_fns
- .size __\name\()_proc_info, . - __\name\()_proc_info
-.endm
-
- arm67_proc_info arm6, 0x41560600, 0xfffffff0, cpu_arm6_name, \
- 0x00000c1e, __arm6_setup, arm6_processor_functions
- arm67_proc_info arm610, 0x41560610, 0xfffffff0, cpu_arm610_name, \
- 0x00000c1e, __arm6_setup, arm6_processor_functions
- arm67_proc_info arm7, 0x41007000, 0xffffff00, cpu_arm7_name, \
- 0x00000c1e, __arm7_setup, arm7_processor_functions
- arm67_proc_info arm710, 0x41007100, 0xfff8ff00, cpu_arm710_name, \
- PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ, \
- __arm7_setup, arm7_processor_functions
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index cb941ae95f6..1a8c138eb89 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -210,6 +210,7 @@ ENTRY(arm920_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4ec0e074dd5..4c44d7e1c3c 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -212,6 +212,7 @@ ENTRY(arm922_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 9dccd9a365b..ec5b1180994 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -258,6 +258,7 @@ ENTRY(arm925_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 820259b81a1..c31e62c606c 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -221,6 +221,7 @@ ENTRY(arm926_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 9fdc0a17097..a613a7dd714 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -160,7 +160,7 @@ ENTRY(arm940_coherent_user_range)
* - size - region size
*/
ENTRY(arm940_flush_kern_dcache_area)
- mov ip, #0
+ mov r0, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
@@ -168,8 +168,8 @@ ENTRY(arm940_flush_kern_dcache_area)
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 4
bcs 1b @ segments 7 to 0
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f684cfedcca..9f4f2999fdd 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -190,6 +190,7 @@ ENTRY(arm946_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index ba3c500584a..23a8e4c7f2b 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -232,6 +232,7 @@ ENTRY(feroceon_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index cdfedc5b8ad..fbb2124a547 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -193,6 +193,7 @@ ENTRY(mohawk_coherent_user_range)
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov r0, #0
mov pc, lr
/*
@@ -344,6 +345,41 @@ ENTRY(cpu_mohawk_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+.globl cpu_mohawk_suspend_size
+.equ cpu_mohawk_suspend_size, 4 * 6
+#ifdef CONFIG_PM_SLEEP
+ENTRY(cpu_mohawk_do_suspend)
+ stmfd sp!, {r4 - r9, lr}
+ mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
+ mrc p15, 0, r5, c15, c1, 0 @ CP access reg
+ mrc p15, 0, r6, c13, c0, 0 @ PID
+ mrc p15, 0, r7, c3, c0, 0 @ domain ID
+ mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
+ mrc p15, 0, r9, c1, c0, 0 @ control reg
+ bic r4, r4, #2 @ clear frequency change bit
+ stmia r0, {r4 - r9} @ store cp regs
+ ldmia sp!, {r4 - r9, pc}
+ENDPROC(cpu_mohawk_do_suspend)
+
+ENTRY(cpu_mohawk_do_resume)
+ ldmia r0, {r4 - r9} @ load cp regs
+ mov ip, #0
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
+ mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer
+ mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+ mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode.
+ mcr p15, 0, r5, c15, c1, 0 @ CP access reg
+ mcr p15, 0, r6, c13, c0, 0 @ PID
+ mcr p15, 0, r7, c3, c0, 0 @ domain ID
+ orr r1, r1, #0x18 @ cache the page table in L2
+ mcr p15, 0, r1, c2, c0, 0 @ translation table base addr
+ mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
+ mov r0, r9 @ control register
+ b cpu_resume_mmu
+ENDPROC(cpu_mohawk_do_resume)
+#endif
+
__CPUINIT
.type __mohawk_setup, #function
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7b888..42ac069c801 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -49,15 +49,10 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
- mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
- isb
-1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
- isb
-#ifdef CONFIG_ARM_ERRATA_754322
- dsb
-#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
+ mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S
deleted file mode 100644
index d253995ec4c..00000000000
--- a/arch/arm/mm/tlb-v3.S
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/arch/arm/mm/tlbv3.S
- *
- * Copyright (C) 1997-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ARM architecture version 3 TLB handling functions.
- *
- * Processors: ARM610, ARM710.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-#include <asm/tlbflush.h>
-#include "proc-macros.S"
-
- .align 5
-/*
- * v3_flush_user_tlb_range(start, end, mm)
- *
- * Invalidate a range of TLB entries in the specified address space.
- *
- * - start - range start address
- * - end - range end address
- * - mm - mm_struct describing address space
- */
- .align 5
-ENTRY(v3_flush_user_tlb_range)
- vma_vm_mm r2, r2
- act_mm r3 @ get current->active_mm
- teq r2, r3 @ == mm ?
-	movne	pc, lr				@ no, we don't do anything
-ENTRY(v3_flush_kern_tlb_range)
- bic r0, r0, #0x0ff
- bic r0, r0, #0xf00
-1: mcr p15, 0, r0, c6, c0, 0 @ invalidate TLB entry
- add r0, r0, #PAGE_SZ
- cmp r0, r1
- blo 1b
- mov pc, lr
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v3, v3_tlb_flags
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be662c08..bf312c354a2 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@ struct arm_vmregion {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
- struct page *vm_pages;
+ void *priv;
int vm_active;
const void *caller;
};