Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |  24
-rw-r--r--  arch/arm/mm/Makefile             |   3
-rw-r--r--  arch/arm/mm/alignment.c          |  13
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |   1
-rw-r--r--  arch/arm/mm/cache-l2x0.c         |  11
-rw-r--r--  arch/arm/mm/cache-v3.S           | 137
-rw-r--r--  arch/arm/mm/cache-v4.S           |   2
-rw-r--r--  arch/arm/mm/cache-v7.S           |  46
-rw-r--r--  arch/arm/mm/context.c            |  35
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 131
-rw-r--r--  arch/arm/mm/fault.c              |  13
-rw-r--r--  arch/arm/mm/idmap.c              |  56
-rw-r--r--  arch/arm/mm/ioremap.c            | 135
-rw-r--r--  arch/arm/mm/mm.h                 |  12
-rw-r--r--  arch/arm/mm/mmu.c                | 152
-rw-r--r--  arch/arm/mm/proc-arm740.S        |  30
-rw-r--r--  arch/arm/mm/proc-arm920.S        |   2
-rw-r--r--  arch/arm/mm/proc-arm926.S        |   2
-rw-r--r--  arch/arm/mm/proc-macros.S        |   5
-rw-r--r--  arch/arm/mm/proc-mohawk.S        |   2
-rw-r--r--  arch/arm/mm/proc-sa1100.S        |   2
-rw-r--r--  arch/arm/mm/proc-syms.c          |   2
-rw-r--r--  arch/arm/mm/proc-v6.S            |   4
-rw-r--r--  arch/arm/mm/proc-v7-2level.S     |   6
-rw-r--r--  arch/arm/mm/proc-v7-3level.S     |   8
-rw-r--r--  arch/arm/mm/proc-v7.S            |  19
-rw-r--r--  arch/arm/mm/proc-xsc3.S          |   2
-rw-r--r--  arch/arm/mm/proc-xscale.S        |   2
-rw-r--r--  arch/arm/mm/tcm.h                |  17
-rw-r--r--  arch/arm/mm/vmregion.c           | 205
-rw-r--r--  arch/arm/mm/vmregion.h           |  31
31 files changed, 537 insertions, 573 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3fd629d5a513..35955b54944c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
- select CPU_CACHE_V3 # although the core is v4t
+ select CPU_CACHE_V4
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
help
@@ -397,6 +397,13 @@ config CPU_V7
select CPU_PABRT_V7
select CPU_TLB_V7 if MMU
+config CPU_THUMBONLY
+ bool
+ # There are no CPUs available with MMU that don't implement an ARM ISA:
+ depends on !MMU
+ help
+	  Select this if your CPU doesn't support the 32-bit ARM instruction set.
+
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
@@ -469,9 +476,6 @@ config CPU_PABRT_V7
bool
# The cache model
-config CPU_CACHE_V3
- bool
-
config CPU_CACHE_V4
bool
@@ -608,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT
bool
config ARM_THUMB
- bool "Support Thumb user binaries"
+ bool "Support Thumb user binaries" if !CPU_THUMBONLY
depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
default y
help
@@ -629,8 +633,9 @@ config ARM_THUMBEE
make use of it. Say N for code that can run on CPUs without ThumbEE.
config ARM_VIRT_EXT
- bool "Native support for the ARM Virtualization Extensions"
- depends on MMU && CPU_V7
+ bool
+ depends on MMU
+ default y if CPU_V7
help
Enable the kernel to make use of the ARM Virtualization
Extensions to install hypervisors without run-time firmware
@@ -640,11 +645,6 @@ config ARM_VIRT_EXT
use of this feature. Refer to Documentation/arm/Booting for
details.
- It is safe to enable this option even if the kernel may not be
- booted in HYP mode, may not have support for the
- virtualization extensions, or may be booted with a
- non-compliant bootloader.
-
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
depends on !CPU_USE_DOMAINS && CPU_V7
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 8a9c4cb50a93..9e51be96f635 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
iomap.o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
- mmap.o pgd.o mmu.o vmregion.o
+ mmap.o pgd.o mmu.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
-obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b820edaf3184..6f4585b89078 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
- mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
int isize = 4;
@@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
instrptr = instruction_pointer(regs);
- fs = get_fs();
- set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
- fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
+ u16 *ptr = (u16 *)(instrptr & ~1);
+ fault = probe_kernel_address(ptr, tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
- fault = __get_user(tinst2, (u16 *)(instrptr+2));
+ fault = probe_kernel_address(ptr + 1, tinst2);
instr = (tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
@@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
} else
- fault = __get_user(instr, (u32 *)instrptr);
- set_fs(fs);
+ fault = probe_kernel_address(instrptr, instr);
if (fault) {
type = TYPE_FAULT;
@@ -964,12 +961,14 @@ static int __init alignment_init(void)
return -ENOMEM;
#endif
+#ifdef CONFIG_CPU_CP15
if (cpu_is_v6_unaligned()) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = safe_usermode(ai_usermode, false);
}
+#endif
hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc3..48bc3c0a87ce 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
outer_cache.inv_range = feroceon_l2_inv_range;
outer_cache.clean_range = feroceon_l2_clean_range;
outer_cache.flush_range = feroceon_l2_flush_range;
+ outer_cache.inv_all = l2_inv_all;
enable_l2();
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
int lockregs;
int i;
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
lockregs = 8;
break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
- & L2X0_CACHE_ID_PART_MASK;
+ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
.flush_all = l2x0_flush_all,
.inv_all = l2x0_inv_all,
.disable = l2x0_disable,
- .set_debug = pl310_set_debug,
},
};
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
data->save();
of_init = true;
- l2x0_init(l2x0_base, aux_val, aux_mask);
-
memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+ l2x0_init(l2x0_base, aux_val, aux_mask);
return 0;
}
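
Both hunks above follow the same pattern: cache_id now keeps the full CACHE_ID register (including the RTL revision bits), and L2X0_CACHE_ID_PART_MASK is applied only where the part number is actually compared. In miniature, assuming the usual l2x0 register definitions:

	/* Keep the raw ID around; mask only at the comparison. */
	static int l2x0_lock_regs(u32 cache_id)
	{
		switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
		case L2X0_CACHE_ID_PART_L310:
			return 8;	/* PL310 has eight lockdown registers */
		default:
			return 1;
		}
	}
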
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d3..000000000000
--- a/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/mm/cache-v3.S
- *
- * Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- * flush_icache_all()
- *
- * Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
- mov pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- * flush_user_cache_all()
- *
- * Invalidate all cache entries in a particular address
- * space.
- *
- * - mm - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
- /* FALLTHROUGH */
-/*
- * flush_kern_cache_all()
- *
- * Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
- /* FALLTHROUGH */
-
-/*
- * flush_user_cache_range(start, end, flags)
- *
- * Invalidate a range of cache entries in the specified
- * address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
- mov ip, #0
- mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * coherent_kern_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
- /* FALLTHROUGH */
-
-/*
- * coherent_user_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_user_range)
- mov r0, #0
- mov pc, lr
-
-/*
- * flush_kern_dcache_area(void *page, size_t size)
- *
- * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
- *
- * - addr - kernel address
- * - size - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
- /* FALLTHROUGH */
-
-/*
- * dma_flush_range(start, end)
- *
- * Clean and invalidate the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_flush_range)
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
- teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
- /* FALLTHROUGH */
-
-/*
- * dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_map_area)
- mov pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
- .globl v3_flush_kern_cache_louis
- .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 43e5d77be677..a7ba68f59f0c 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
- mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
+ mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
#else
/* FALLTHROUGH */
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 7539ec275065..15451ee4acc8 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -19,6 +19,52 @@
#include "proc-macros.S"
/*
+ * The secondary kernel init calls v7_flush_dcache_all before it enables
+ * the L1; however, the L1 comes out of reset in an undefined state, so
+ * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
+ * of cache lines with uninitialized data and uninitialized tags to get
+ * written out to memory, which does really unpleasant things to the main
+ * processor. We fix this by performing an invalidate, rather than a
+ * clean + invalidate, before jumping into the kernel.
+ *
+ * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
+ * to be called for both secondary core startup and primary core resume
+ * procedures.
+ */
+ENTRY(v7_invalidate_l1)
+ mov r0, #0
+ mcr p15, 2, r0, c0, c0, 0
+ mrc p15, 1, r0, c0, c0, 0
+
+ ldr r1, =0x7fff
+ and r2, r1, r0, lsr #13
+
+ ldr r1, =0x3ff
+
+ and r3, r1, r0, lsr #3 @ NumWays - 1
+ add r2, r2, #1 @ NumSets
+
+ and r0, r0, #0x7
+ add r0, r0, #4 @ SetShift
+
+ clz r1, r3 @ WayShift
+ add r4, r3, #1 @ NumWays
+1: sub r2, r2, #1 @ NumSets--
+ mov r3, r4 @ Temp = NumWays
+2: subs r3, r3, #1 @ Temp--
+ mov r5, r3, lsl r1
+ mov r6, r2, lsl r0
+ orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+ mcr p15, 0, r5, c7, c6, 2
+ bgt 2b
+ cmp r2, #0
+ bgt 1b
+ dsb
+ isb
+ mov pc, lr
+ENDPROC(v7_invalidate_l1)
+
+/*
* v7_flush_icache_all()
*
* Flush the whole I-cache.
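
The loop above builds, for every (set, way) pair, the operand of the invalidate-by-set/way operation: the way index packed into the top bits (WayShift = clz(NumWays - 1)) and the set index shifted by log2 of the line size (SetShift = LineSize field + 4). A rough C model of that arithmetic, assuming CCSIDR has already been read for the selected cache level (field layout per the ARMv7 ARM):

	#include <stdint.h>

	/* CCSIDR: LineSize in [2:0], Associativity-1 in [12:3],
	 * NumSets-1 in [27:13]. Assumes at least two ways, as the
	 * clz in the assembly does. */
	static uint32_t dcisw_operand(uint32_t ccsidr, uint32_t set, uint32_t way)
	{
		uint32_t num_ways  = ((ccsidr >> 3) & 0x3ff) + 1;
		uint32_t set_shift = (ccsidr & 0x7) + 4;	    /* log2(line bytes) */
		uint32_t way_shift = __builtin_clz(num_ways - 1);   /* ways at the top */

		return (way << way_shift) | (set << set_shift);	    /* level bits = 0 */
	}
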
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb78..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
* The ASID is used to tag entries in the CPU caches and TLBs.
* The context ID is used by debuggers and trace logic, and
* should be unique within all running processes.
+ *
+ * In big-endian operation, the two 32-bit words are swapped if accessed by
+ * non-64-bit operations.
*/
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
@@ -45,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
@@ -149,9 +152,9 @@ static int is_reserved_asid(u64 asid)
return 0;
}
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
- u64 asid = mm->context.id;
+ u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
if (asid != 0 && is_reserved_asid(asid)) {
@@ -178,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
cpumask_clear(mm_cpumask(mm));
}
- mm->context.id = asid;
+ return asid;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
+ u64 asid;
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
@@ -195,20 +199,27 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
*/
cpu_set_reserved_ttbr0();
- if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+ asid = atomic64_read(&mm->context.id);
+ if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
- if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- new_context(mm, cpu);
-
- atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
- cpumask_set_cpu(cpu, mm_cpumask(mm));
+ asid = atomic64_read(&mm->context.id);
+ if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+ local_flush_bp_all();
local_flush_tlb_all();
+ dummy_flush_tlb_a15_erratum();
+ }
+
+ atomic64_set(&per_cpu(active_asids, cpu), asid);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
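
The fast path above avoids the spinlock whenever the mm's ASID still belongs to the current generation; the generation number lives in the bits above ASID_BITS of the 64-bit context id, so the staleness test is a single xor-and-shift. A simplified model of that check (constants as in the patch):

	#include <linux/atomic.h>

	#define ASID_BITS	8

	/* Non-zero when the asid was allocated under an older generation
	 * and must be replaced by new_context() under cpu_asid_lock. */
	static inline bool asid_is_stale(u64 asid, atomic64_t *generation)
	{
		return (asid ^ atomic64_read(generation)) >> ASID_BITS;
	}
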
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b2fb87c8698..e9db6b4bf65a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -186,13 +186,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
static void __dma_clear_buffer(struct page *page, size_t size)
{
- void *ptr;
/*
* Ensure that the allocated pages are zeroed, and that any data
* lurking in the kernel direct-mapped region is invalidated.
*/
- ptr = page_address(page);
- if (ptr) {
+ if (PageHighMem(page)) {
+ phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+ phys_addr_t end = base + size;
+ while (size > 0) {
+ void *ptr = kmap_atomic(page);
+ memset(ptr, 0, PAGE_SIZE);
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ kunmap_atomic(ptr);
+ page++;
+ size -= PAGE_SIZE;
+ }
+ outer_flush_range(base, end);
+ } else {
+ void *ptr = page_address(page);
memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -243,7 +254,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page);
+ pgprot_t prot, struct page **ret_page,
+ const void *caller);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
@@ -330,6 +342,7 @@ static int __init atomic_pool_init(void)
{
struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ gfp_t gfp = GFP_KERNEL | GFP_DMA;
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
@@ -346,10 +359,11 @@ static int __init atomic_pool_init(void)
goto no_pages;
if (IS_ENABLED(CONFIG_CMA))
- ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+ atomic_pool_init);
else
- ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
- &page, NULL);
+ ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+ atomic_pool_init);
if (ptr) {
int i;
@@ -542,27 +556,41 @@ static int __free_from_pool(void *start, size_t size)
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page)
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
+ void *ptr;
page = dma_alloc_from_contiguous(dev, count, order);
if (!page)
return NULL;
__dma_clear_buffer(page, size);
- __dma_remap(page, size, prot);
+ if (PageHighMem(page)) {
+ ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+ if (!ptr) {
+ dma_release_from_contiguous(dev, page, count);
+ return NULL;
+ }
+ } else {
+ __dma_remap(page, size, prot);
+ ptr = page_address(page);
+ }
*ret_page = page;
- return page_address(page);
+ return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
- size_t size)
+ void *cpu_addr, size_t size)
{
- __dma_remap(page, size, pgprot_kernel);
+ if (PageHighMem(page))
+ __dma_free_remap(cpu_addr, size);
+ else
+ __dma_remap(page, size, pgprot_kernel);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
@@ -583,9 +611,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
#define __free_from_pool(cpu_addr, size) 0
-#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
@@ -640,12 +668,12 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (is_coherent || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
- else if (gfp & GFP_ATOMIC)
+ else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
- addr = __alloc_from_contiguous(dev, size, prot, &page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -739,7 +767,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
* Non-atomic allocations cannot be freed with IRQs disabled
*/
WARN_ON(irqs_disabled());
- __free_from_contiguous(dev, page, size);
+ __free_from_contiguous(dev, page, cpu_addr, size);
}
}
@@ -774,25 +802,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
{
+ unsigned long pfn;
+ size_t left = size;
+
+ pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
/*
* A single sg entry may refer to multiple physically contiguous
* pages. But we still need to process highmem pages individually.
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
- size_t left = size;
do {
size_t len = left;
void *vaddr;
+ page = pfn_to_page(pfn);
+
if (PageHighMem(page)) {
- if (len + offset > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset / PAGE_SIZE;
- offset %= PAGE_SIZE;
- }
+ if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- }
vaddr = kmap_high_get(page);
if (vaddr) {
vaddr += offset;
@@ -809,7 +839,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
op(vaddr, len, dir);
}
offset = 0;
- page++;
+ pfn++;
left -= len;
} while (left);
}
@@ -1000,6 +1030,9 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
unsigned int count, start;
unsigned long flags;
+ if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
+ order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
+
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
@@ -1066,12 +1099,17 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
return pages;
}
+ /*
+	 * IOMMU can map any pages, so highmem can also be used here
+ */
+ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
while (count) {
int j, order = __fls(count);
- pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ pages[i] = alloc_pages(gfp, order);
while (!pages[i] && order)
- pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+ pages[i] = alloc_pages(gfp, --order);
if (!pages[i])
goto error;
@@ -1255,11 +1293,11 @@ err_mapping:
return NULL;
}
-static void __iommu_free_atomic(struct device *dev, struct page **pages,
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
dma_addr_t handle, size_t size)
{
__iommu_remove_mapping(dev, handle, size);
- __free_from_pool(page_address(pages[0]), size);
+ __free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
@@ -1342,7 +1380,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
if (__in_atomic_pool(cpu_addr, size)) {
- __iommu_free_atomic(dev, pages, handle, size);
+ __iommu_free_atomic(dev, cpu_addr, handle, size);
return;
}
@@ -1730,6 +1768,8 @@ struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+ .set_dma_mask = arm_dma_set_mask,
};
struct dma_map_ops iommu_coherent_ops = {
@@ -1743,6 +1783,8 @@ struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
+
+ .set_dma_mask = arm_dma_set_mask,
};
/**
@@ -1797,6 +1839,7 @@ err2:
err:
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
@@ -1813,6 +1856,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
if (mapping)
kref_put(&mapping->kref, release_iommu_mapping);
}
+EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
/**
* arm_iommu_attach_device
@@ -1841,5 +1885,32 @@ int arm_iommu_attach_device(struct device *dev,
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
}
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (the dma_map_ops pointer).
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
+ iommu_detach_device(mapping->domain, dev);
+ kref_put(&mapping->kref, release_iommu_mapping);
+ mapping = NULL;
+ set_dma_ops(dev, NULL);
+
+ pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
#endif
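
Highmem pages have no permanent kernel mapping, so __dma_clear_buffer() above zeroes and flushes them one page at a time through a transient kmap_atomic() mapping, finishing with a single outer-cache flush over the physical range. The per-page pattern, sketched without the cache maintenance calls:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* kmap_atomic() gives a short-lived mapping that must be dropped
	 * before the next page is mapped; size is assumed page-aligned. */
	static void clear_highmem_buffer(struct page *page, size_t size)
	{
		while (size > 0) {
			void *ptr = kmap_atomic(page);

			memset(ptr, 0, PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
	}
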
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f954f6..e207aa5f846f 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -446,8 +446,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (pud_none(*pud_k))
goto bad_area;
- if (!pud_present(*pud))
+ if (!pud_present(*pud)) {
set_pud(pud, *pud_k);
+ /*
+ * There is a small window during free_pgtables() where the
+ * user *pud entry is 0 but the TLB has not been invalidated
+ * and we get a level 2 (pmd) translation fault caused by the
+ * intermediate TLB caching of the old level 1 (pud) entry.
+ */
+ flush_tlb_kernel_page(addr);
+ }
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
@@ -470,8 +478,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
#endif
if (pmd_none(pmd_k[index]))
goto bad_area;
+ if (!pmd_present(pmd[index]))
+ copy_pmd(pmd, pmd_k);
- copy_pmd(pmd, pmd_k);
return 0;
bad_area:
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 99db769307ec..5ee505c937d1 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,4 +1,6 @@
+#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
@@ -6,6 +8,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>
+#include <asm/virt.h>
pgd_t *idmap_pgd;
@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
-static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+ const char *text_end, unsigned long prot)
{
- unsigned long prot, next;
+ unsigned long addr, end;
+ unsigned long next;
+
+ addr = virt_to_phys(text_start);
+ end = virt_to_phys(text_end);
+
+ prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
- prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
prot |= PMD_BIT4;
@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
} while (pgd++, addr = next, addr != end);
}
+#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
+pgd_t *hyp_pgd;
+
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static int __init init_static_idmap_hyp(void)
+{
+ hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ if (!hyp_pgd)
+ return -ENOMEM;
+
+ pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
+ __hyp_idmap_text_start, __hyp_idmap_text_end);
+ identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
+ __hyp_idmap_text_end, PMD_SECT_AP1);
+
+ return 0;
+}
+#else
+static int __init init_static_idmap_hyp(void)
+{
+ return 0;
+}
+#endif
+
extern char __idmap_text_start[], __idmap_text_end[];
static int __init init_static_idmap(void)
{
- phys_addr_t idmap_start, idmap_end;
+ int ret;
idmap_pgd = pgd_alloc(&init_mm);
if (!idmap_pgd)
return -ENOMEM;
- /* Add an identity mapping for the physical address of the section. */
- idmap_start = virt_to_phys((void *)__idmap_text_start);
- idmap_end = virt_to_phys((void *)__idmap_text_end);
+ pr_info("Setting up static identity map for 0x%p - 0x%p\n",
+ __idmap_text_start, __idmap_text_end);
+ identity_mapping_add(idmap_pgd, __idmap_text_start,
+ __idmap_text_end, 0);
- pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
- (long long)idmap_start, (long long)idmap_end);
- identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+ ret = init_static_idmap_hyp();
/* Flush L1 for the hardware to see this page table content */
flush_cache_louis();
- return 0;
+ return ret;
}
early_initcall(init_static_idmap);
@@ -108,6 +141,7 @@ void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
+ local_flush_bp_all();
#ifdef CONFIG_CPU_HAS_ASID
/*
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88fd86cf3d9a..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -39,6 +39,70 @@
#include <asm/mach/pci.h>
#include "mm.h"
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+ size_t size, unsigned int mtype)
+{
+ struct static_vm *svm;
+ struct vm_struct *vm;
+
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
+ if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ continue;
+ if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+ continue;
+
+ if (vm->phys_addr > paddr ||
+ paddr + size - 1 > vm->phys_addr + vm->size - 1)
+ continue;
+
+ return svm;
+ }
+
+ return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+ struct static_vm *svm;
+ struct vm_struct *vm;
+
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
+
+		/* static_vmlist is kept in ascending address order */
+ if (vm->addr > vaddr)
+ break;
+
+ if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+ return svm;
+ }
+
+ return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+ struct static_vm *curr_svm;
+ struct vm_struct *vm;
+ void *vaddr;
+
+ vm = &svm->vm;
+ vm_area_add_early(vm);
+ vaddr = vm->addr;
+
+ list_for_each_entry(curr_svm, &static_vmlist, list) {
+ vm = &curr_svm->vm;
+
+ if (vm->addr > vaddr)
+ break;
+ }
+ list_add_tail(&svm->list, &curr_svm->list);
+}
+
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
const struct mem_type *type;
int err;
unsigned long addr;
- struct vm_struct * area;
+ struct vm_struct *area;
+ phys_addr_t paddr = __pfn_to_phys(pfn);
#ifndef CONFIG_ARM_LPAE
/*
* High mappings must be supersection aligned
*/
- if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+ if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
return NULL;
#endif
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
/*
* Try to reuse one of the static mapping whenever possible.
*/
- read_lock(&vmlist_lock);
- for (area = vmlist; area; area = area->next) {
- if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
- break;
- if (!(area->flags & VM_ARM_STATIC_MAPPING))
- continue;
- if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
- continue;
- if (__phys_to_pfn(area->phys_addr) > pfn ||
- __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
- continue;
- /* we can drop the lock here as we know *area is static */
- read_unlock(&vmlist_lock);
- addr = (unsigned long)area->addr;
- addr += __pfn_to_phys(pfn) - area->phys_addr;
- return (void __iomem *) (offset + addr);
+ if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+ struct static_vm *svm;
+
+ svm = find_static_vm_paddr(paddr, size, mtype);
+ if (svm) {
+ addr = (unsigned long)svm->vm.addr;
+ addr += paddr - svm->vm.phys_addr;
+ return (void __iomem *) (offset + addr);
+ }
}
- read_unlock(&vmlist_lock);
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
if (!area)
return NULL;
addr = (unsigned long)area->addr;
- area->phys_addr = __pfn_to_phys(pfn);
+ area->phys_addr = paddr;
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
if (DOMAIN_IO == 0 &&
(((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
cpu_is_xsc3()) && pfn >= 0x100000 &&
- !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+ !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
area->flags |= VM_ARM_SECTION_MAPPING;
err = remap_area_supersections(addr, pfn, size, type);
- } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+ } else if (!((paddr | size | addr) & ~PMD_MASK)) {
area->flags |= VM_ARM_SECTION_MAPPING;
err = remap_area_sections(addr, pfn, size, type);
} else
#endif
- err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+ err = ioremap_page_range(addr, addr + size, paddr,
__pgprot(type->prot_pte));
if (err) {
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
- struct vm_struct *vm;
+ struct static_vm *svm;
+
+ /* If this is a static mapping, we must leave it alone */
+ svm = find_static_vm_vaddr(addr);
+ if (svm)
+ return;
- read_lock(&vmlist_lock);
- for (vm = vmlist; vm; vm = vm->next) {
- if (vm->addr > addr)
- break;
- if (!(vm->flags & VM_IOREMAP))
- continue;
- /* If this is a static mapping we must leave it alone */
- if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
- (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
- read_unlock(&vmlist_lock);
- return;
- }
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+ {
+ struct vm_struct *vm;
+
+ vm = find_vm_area(addr);
+
/*
* If this is a section based mapping we need to handle it
* specially as the VM subsystem does not know how to handle
* such a beast.
*/
- if ((vm->addr == addr) &&
- (vm->flags & VM_ARM_SECTION_MAPPING)) {
+ if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
unmap_area_sections((unsigned long)vm->addr, vm->size);
- break;
- }
-#endif
}
- read_unlock(&vmlist_lock);
+#endif
vunmap(addr);
}
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a8ee92da3544..d5a4e9ad8f0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,4 +1,6 @@
#ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* consistent regions used by dma_alloc_attrs() */
#define VM_ARM_DMA_CONSISTENT 0x20000000
+
+struct static_vm {
+ struct vm_struct vm;
+ struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
#endif
#ifdef CONFIG_ZONE_DMA
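
struct static_vm simply pairs a vm_struct with a list node so that boot-time static mappings can live on the sorted static_vmlist instead of the generic vmlist. A hedged usage sketch (all addresses hypothetical), mirroring what iotable_init() does in mmu.c below:

	/* Register one boot-time I/O mapping; add_static_vm_early()
	 * keeps static_vmlist sorted by virtual address. */
	static void __init map_example_io(struct static_vm *svm)
	{
		svm->vm.addr      = (void *)0xf8000000;	/* hypothetical */
		svm->vm.size      = SZ_1M;
		svm->vm.phys_addr = 0x10000000;		/* hypothetical */
		svm->vm.flags     = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		svm->vm.caller    = map_example_io;
		add_static_vm_early(svm);
	}
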
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd6..e0d8565671a6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
#include <asm/mach/pci.h>
#include "mm.h"
+#include "tcm.h"
/*
* empty_zero_page is a special page that is used for
@@ -57,6 +58,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
@@ -66,37 +70,50 @@ struct cachepolicy {
unsigned int cr_mask;
pmdval_t pmd;
pteval_t pte;
+ pteval_t pte_s2;
};
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy) policy
+#else
+#define s2_policy(policy) 0
+#endif
+
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
.pte = L_PTE_MT_UNCACHED,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
.pte = L_PTE_MT_BUFFERABLE,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
.pte = L_PTE_MT_WRITETHROUGH,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
.pte = L_PTE_MT_WRITEBACK,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
.pte = L_PTE_MT_WRITEALLOC,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}
};
+#ifdef CONFIG_CPU_CP15
/*
* These are useful for identifying cache coherency
* problems by allowing the cache or the cache and
@@ -195,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
}
#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+ pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+ pr_warning("noalign kernel parameter not supported without cp15\n");
+}
+__setup("noalign", noalign_setup);
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
@@ -283,7 +316,7 @@ static struct mem_type mem_types[] = {
},
[MT_MEMORY_SO] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_MT_UNCACHED,
+ L_PTE_MT_UNCACHED | L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
PMD_SECT_UNCACHED | PMD_SECT_XN,
@@ -310,6 +343,7 @@ static void __init build_mem_type_table(void)
struct cachepolicy *cp;
unsigned int cr = get_cr();
pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+ pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -421,6 +455,8 @@ static void __init build_mem_type_table(void)
*/
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+ s2_pgprot = cp->pte_s2;
+ hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
/*
* ARMv6 and above have extended page tables.
@@ -444,6 +480,7 @@ static void __init build_mem_type_table(void)
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
+ s2_pgprot |= L_PTE_SHARED;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +535,9 @@ static void __init build_mem_type_table(void)
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | kern_pgprot);
+ pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+ pgprot_s2_device = __pgprot(s2_device_pgprot);
+ pgprot_hyp_device = __pgprot(hyp_device_pgprot);
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -576,39 +616,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
{
- pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
/*
- * Try a section mapping - end, addr and phys must all be aligned
- * to a section boundary. Note that PMDs refer to the individual
- * L1 entries, whereas PGDs refer to a group of L1 entries making
- * up one logical pointer to an L2 table.
+	 * In classic MMU format, puds and pmds are folded into
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), whereas PMDs refer to the individual
+ * L1 entries (1MB). Hence increment to get the correct
+ * offset for odd 1MB sections.
+ * (See arch/arm/include/asm/pgtable-2level.h)
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
- pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
- if (addr & SECTION_SIZE)
- pmd++;
+ if (addr & SECTION_SIZE)
+ pmd++;
#endif
+ do {
+ *pmd = __pmd(phys | type->prot_sect);
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
- do {
- *pmd = __pmd(phys | type->prot_sect);
- phys += SECTION_SIZE;
- } while (pmd++, addr += SECTION_SIZE, addr != end);
+ flush_pmd_entry(pmd);
+}
- flush_pmd_entry(p);
- } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
/*
- * No need to loop; pte's aren't interested in the
- * individual L1 entries.
+ * With LPAE, we must loop over to map
+ * all the pmds for the given range.
*/
- alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
- }
+ next = pmd_addr_end(addr, end);
+
+ /*
+ * Try a section mapping - addr, next and phys must all be
+ * aligned to a section boundary.
+ */
+ if (type->prot_sect &&
+ ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ map_init_section(pmd, addr, next, phys, type);
+ } else {
+ alloc_init_pte(pmd, addr, next,
+ __phys_to_pfn(phys), type);
+ }
+
+ phys += next - addr;
+
+ } while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -619,7 +680,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -757,21 +818,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
{
struct map_desc *md;
struct vm_struct *vm;
+ struct static_vm *svm;
if (!nr)
return;
- vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+ svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
+
+ vm = &svm->vm;
vm->addr = (void *)(md->virtual & PAGE_MASK);
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
vm->phys_addr = __pfn_to_phys(md->pfn);
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
vm->flags |= VM_ARM_MTYPE(md->type);
vm->caller = iotable_init;
- vm_area_add_early(vm++);
+ add_static_vm_early(svm++);
}
}
@@ -779,13 +843,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
void *caller)
{
struct vm_struct *vm;
+ struct static_vm *svm;
+
+ svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
- vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+ vm = &svm->vm;
vm->addr = (void *)addr;
vm->size = size;
vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = caller;
- vm_area_add_early(vm);
+ add_static_vm_early(svm);
}
#ifndef CONFIG_ARM_LPAE
@@ -810,14 +877,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
static void __init fill_pmd_gaps(void)
{
+ struct static_vm *svm;
struct vm_struct *vm;
unsigned long addr, next = 0;
pmd_t *pmd;
- /* we're still single threaded hence no lock needed here */
- for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
- continue;
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
addr = (unsigned long)vm->addr;
if (addr < next)
continue;
@@ -857,19 +923,12 @@ static void __init fill_pmd_gaps(void)
#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
- struct vm_struct *vm;
- unsigned long addr;
+ struct static_vm *svm;
- /* we're still single threaded hence no lock needed here */
- for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
- continue;
- addr = (unsigned long)vm->addr;
- addr &= ~(SZ_2M - 1);
- if (addr == PCI_IO_VIRT_BASE)
- return;
+ svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+ if (svm)
+ return;
- }
vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
@@ -1236,6 +1295,7 @@ void __init paging_init(struct machine_desc *mdesc)
dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
+ tcm_init();
top_pmd = pmd_off_k(0xffff0000);
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index dc5de5d53f20..fde2d2a794cf 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
mcr p15, 0, r0, c6, c0 @ set area 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
+ cmp r3, #0
+ moveq r0, #0
+ beq 2f
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
+2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
mov r0, #0x06
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
+ .long 0
b __arm740_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
.long cpu_arm740_name
.long arm740_processor_functions
.long 0
.long 0
- .long v3_cache_fns @ cache model
+ .long v4_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5e..2556cf1c2da1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e2972..344c8a548cc0 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index eb6aa73bc8b7..f9a0aa725ea9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -38,9 +38,14 @@
/*
* mmid - get context id from mm pointer (mm->context.id)
+ * Note: this field is 64-bit; on big-endian the two 32-bit words are swapped too.
*/
.macro mmid, rd, rn
+#ifdef __ARMEB__
+ ldr \rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
ldr \rd, [\rn, #MM_CONTEXT_ID]
+#endif
.endm
/*
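
The mmid macro wants the low 32 bits of the 64-bit mm->context.id; on big-endian ARM the least-significant word sits at the higher address, hence the +4. A quick C illustration of the layout assumption behind the #ifdef:

	#include <stdint.h>
	#include <string.h>

	/* Low 32 bits of a u64: offset 0 on little-endian, offset 4 on
	 * big-endian - exactly what the assembler macro hard-codes. */
	static uint32_t low_word(uint64_t id)
	{
		uint32_t w[2];

		memcpy(w, &id, sizeof(id));
	#ifdef __ARMEB__
		return w[1];
	#else
		return w[0];
	#endif
	}
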
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d6..0b60dd3d742a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd84..d92dfd081429 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 3e6210b4d6d4..054b491ff764 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
#else
EXPORT_SYMBOL(processor);
#endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233f4dfc..5c07ee4fe3eb 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
- ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ mmid r1, r1 @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 6d98c13ab827..2f76880060d4 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
- ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ mmid r1, r1 @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
@@ -110,7 +110,9 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ mrc p15, 0, r3, c0, c1, 7 @ read ID_MMFR3
+ tst r3, #0xf << 20 @ check the coherent walk bits
+ mcreq p15, 0, r0, c7, c10, 1 @ clean D-cache to PoU
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 7b56386f9496..2098e026632f 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -47,8 +47,8 @@
*/
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
- ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- and r3, r1, #0xff
+ mmid r1, r1 @ get mm->context.id
+ asid r3, r1
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
isb
@@ -73,7 +73,9 @@ ENTRY(cpu_v7_set_pte_ext)
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0]
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ mrc p15, 0, r3, c0, c1, 7 @ read ID_MMFR3
+ tst r3, #0xf << 20 @ check the coherent walk bits
+ mcreq p15, 0, r0, c7, c10, 1 @ clean D-cache to PoU
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5c..f584d3f5b37c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
- __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca15mp_setup
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
+ * Qualcomm Inc. Krait processors.
+ */
+ .type __krait_proc_info, #object
+__krait_proc_info:
+ .long 0x510f0400 @ Required ID value
+ .long 0xff0ffc00 @ Mask for ID
+ /*
+ * Some Krait processors don't indicate support for SDIV and UDIV
+ * instructions in the ARM instruction set, even though they actually
+ * do support them.
+ */
+ __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+ .size __krait_proc_info, . - __krait_proc_info
+
+ /*
* Match any ARMv7 processor core.
*/
.type __v7_proc_info, #object
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f35..e8efd83b6f25 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa18..e766f889bfd6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
new file mode 100644
index 000000000000..8015ad434a40
--- /dev/null
+++ b/arch/arm/mm/tcm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
deleted file mode 100644
index a631016e1f8f..000000000000
--- a/arch/arm/mm/vmregion.c
+++ /dev/null
@@ -1,205 +0,0 @@
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include "vmregion.h"
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- * struct vm_struct {
- * struct vmregion region;
- * unsigned long flags;
- * struct page **pages;
- * unsigned int nr_pages;
- * unsigned long phys_addr;
- * };
- *
- * get_vm_area() would then call vmregion_alloc with an appropriate
- * struct vmregion head (eg):
- *
- * struct vmregion vmalloc_head = {
- * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
- * .vm_start = VMALLOC_START,
- * .vm_end = VMALLOC_END,
- * };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.) I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vmregion_alloc().
- */
-
-struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
- size_t size, gfp_t gfp, const void *caller)
-{
- unsigned long start = head->vm_start, addr = head->vm_end;
- unsigned long flags;
- struct arm_vmregion *c, *new;
-
- if (head->vm_end - head->vm_start < size) {
- printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
- __func__, size);
- goto out;
- }
-
- new = kmalloc(sizeof(struct arm_vmregion), gfp);
- if (!new)
- goto out;
-
- new->caller = caller;
-
- spin_lock_irqsave(&head->vm_lock, flags);
-
- addr = rounddown(addr - size, align);
- list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
- if (addr >= c->vm_end)
- goto found;
- addr = rounddown(c->vm_start - size, align);
- if (addr < start)
- goto nospc;
- }
-
- found:
- /*
- * Insert this entry after the one we found.
- */
- list_add(&new->vm_list, &c->vm_list);
- new->vm_start = addr;
- new->vm_end = addr + size;
- new->vm_active = 1;
-
- spin_unlock_irqrestore(&head->vm_lock, flags);
- return new;
-
- nospc:
- spin_unlock_irqrestore(&head->vm_lock, flags);
- kfree(new);
- out:
- return NULL;
-}
-
-static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
- struct arm_vmregion *c;
-
- list_for_each_entry(c, &head->vm_list, vm_list) {
- if (c->vm_active && c->vm_start == addr)
- goto out;
- }
- c = NULL;
- out:
- return c;
-}
-
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
- struct arm_vmregion *c;
- unsigned long flags;
-
- spin_lock_irqsave(&head->vm_lock, flags);
- c = __arm_vmregion_find(head, addr);
- spin_unlock_irqrestore(&head->vm_lock, flags);
- return c;
-}
-
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
-{
- struct arm_vmregion *c;
- unsigned long flags;
-
- spin_lock_irqsave(&head->vm_lock, flags);
- c = __arm_vmregion_find(head, addr);
- if (c)
- c->vm_active = 0;
- spin_unlock_irqrestore(&head->vm_lock, flags);
- return c;
-}
-
-void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&head->vm_lock, flags);
- list_del(&c->vm_list);
- spin_unlock_irqrestore(&head->vm_lock, flags);
-
- kfree(c);
-}
-
-#ifdef CONFIG_PROC_FS
-static int arm_vmregion_show(struct seq_file *m, void *p)
-{
- struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
-
- seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
- c->vm_end - c->vm_start);
- if (c->caller)
- seq_printf(m, " %pS", (void *)c->caller);
- seq_putc(m, '\n');
- return 0;
-}
-
-static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
-{
- struct arm_vmregion_head *h = m->private;
- spin_lock_irq(&h->vm_lock);
- return seq_list_start(&h->vm_list, *pos);
-}
-
-static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
-{
- struct arm_vmregion_head *h = m->private;
- return seq_list_next(p, &h->vm_list, pos);
-}
-
-static void arm_vmregion_stop(struct seq_file *m, void *p)
-{
- struct arm_vmregion_head *h = m->private;
- spin_unlock_irq(&h->vm_lock);
-}
-
-static const struct seq_operations arm_vmregion_ops = {
- .start = arm_vmregion_start,
- .stop = arm_vmregion_stop,
- .next = arm_vmregion_next,
- .show = arm_vmregion_show,
-};
-
-static int arm_vmregion_open(struct inode *inode, struct file *file)
-{
- struct arm_vmregion_head *h = PDE(inode)->data;
- int ret = seq_open(file, &arm_vmregion_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = h;
- }
- return ret;
-}
-
-static const struct file_operations arm_vmregion_fops = {
- .open = arm_vmregion_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
- proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
- return 0;
-}
-#else
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
- return 0;
-}
-#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
deleted file mode 100644
index 0f5a5f2a2c7b..000000000000
--- a/arch/arm/mm/vmregion.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef VMREGION_H
-#define VMREGION_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-struct page;
-
-struct arm_vmregion_head {
- spinlock_t vm_lock;
- struct list_head vm_list;
- unsigned long vm_start;
- unsigned long vm_end;
-};
-
-struct arm_vmregion {
- struct list_head vm_list;
- unsigned long vm_start;
- unsigned long vm_end;
- int vm_active;
- const void *caller;
-};
-
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
-void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
-
-int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
-
-#endif