Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/Kconfig          | 18
-rw-r--r-- | arch/arm/mm/cache-l2x0.c     | 41
-rw-r--r-- | arch/arm/mm/copypage-v4mc.c  |  2
-rw-r--r-- | arch/arm/mm/copypage-v6.c    |  2
-rw-r--r-- | arch/arm/mm/copypage-xscale.c|  2
-rw-r--r-- | arch/arm/mm/dma-mapping.c    |  6
-rw-r--r-- | arch/arm/mm/fault-armv.c     |  8
-rw-r--r-- | arch/arm/mm/flush.c          | 50
-rw-r--r-- | arch/arm/mm/mmu.c            | 20
-rw-r--r-- | arch/arm/mm/proc-macros.S    |  7
-rw-r--r-- | arch/arm/mm/proc-v7.S        |  5
11 files changed, 120 insertions, 41 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 87ec141fcaa..92366f5990b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
-	depends on CPU_V6
+	depends on CPU_V6 || CPU_V7
 	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
@@ -599,6 +599,14 @@ config CPU_CP15_MPU
 	help
 	  Processor has the CP15 register, which has MPU related registers.
 
+config CPU_USE_DOMAINS
+	bool
+	depends on MMU
+	default y if !HAS_TLS_REG
+	help
+	  This option enables or disables the use of domain switching
+	  via the set_fs() function.
+
 #
 # CPU supports 36-bit I/O
 #
@@ -778,6 +786,14 @@ config CACHE_L2X0
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_PL310
+	bool
+	depends on CACHE_L2X0
+	default y if CPU_V7 && !CPU_V6
+	help
+	  This option enables optimisations for the PL310 cache
+	  controller.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index df4955885b2..edb43ff7aee 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,17 +29,26 @@ static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
-	/* wait for the operation to complete */
-	while (readl(reg) & mask)
+	/* wait for cache operation by line or way to complete */
+	while (readl_relaxed(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
-	writel(0, base + L2X0_CACHE_SYNC);
+	writel_relaxed(0, base + L2X0_CACHE_SYNC);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -47,14 +56,14 @@ static inline void l2x0_clean_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 }
 
 static inline void l2x0_inv_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +84,9 @@ static inline void l2x0_flush_line(unsigned long addr)
 
 	/* Clean by PA followed by Invalidate by PA */
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #else
@@ -90,7 +99,7 @@ static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 }
 #endif
 
@@ -109,8 +118,8 @@ static inline void l2x0_inv_all(void)
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
@@ -215,8 +224,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 	l2x0_base = base;
 
-	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
-	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
@@ -248,15 +257,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
 	 */
-	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
 
 		/* l2x0 controller is disabled */
-		writel(aux, l2x0_base + L2X0_AUX_CTRL);
+		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
 		l2x0_inv_all();
 
 		/* enable L2X0 */
-		writel(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
 	}
 
 	outer_cache.inv_range = l2x0_inv_range;
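The PL310-specific cache_wait() above compiles to a no-op because background operations by cache line complete atomically on that controller; only operations by way still need the polling loop in cache_wait_way(). A standalone C sketch of the same compile-time specialisation (not kernel code; the register and the CACHE_IS_PL310 switch are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define CACHE_IS_PL310 1			/* stands in for CONFIG_CACHE_PL310 */

static volatile uint32_t fake_inv_way_reg;	/* invented MMIO register */

static void cache_wait_way(volatile uint32_t *reg, uint32_t mask)
{
	/* poll until the controller clears the busy bits */
	while (*reg & mask)
		;
}

#if CACHE_IS_PL310
static void cache_wait(volatile uint32_t *reg, uint32_t mask)
{
	/* line operations complete atomically on PL310: nothing to poll */
	(void)reg;
	(void)mask;
}
#else
#define cache_wait cache_wait_way
#endif

int main(void)
{
	fake_inv_way_reg = 0;			 /* controller idle */
	cache_wait(&fake_inv_way_reg, 0xff);	 /* empty function on PL310 */
	cache_wait_way(&fake_inv_way_reg, 0xff); /* way-wide ops still poll */
	puts("both waits completed");
	return 0;
}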
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 598c51ad507..b8061519ce7 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index f55fa1044f7..bdba6c65c90 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	/* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0ae209..649bbcd325b 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9e7742f0a10..fa3d07ddaaf 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -508,6 +508,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
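Each copypage conversion above flips the sense of the lazy-flush flag: "flush if PG_dcache_dirty was set" becomes "flush if PG_dcache_clean was not yet set", which also lets ___dma_page_dev_to_cpu() mark a freshly DMA'd page clean with a plain set_bit(). A standalone C sketch of why both forms flush exactly once (not kernel code; the kernel's bit operations are atomic, this demo is not, and the bit number is invented):

#include <stdbool.h>
#include <stdio.h>

#define PG_DCACHE_CLEAN	(1u << 0)	/* invented bit number */

/* Old: flush if test_and_clear_bit(PG_dcache_dirty) returned true.
 * New: flush if !test_and_set_bit(PG_dcache_clean), i.e. the clean
 * bit was not yet set. Both flush exactly once per dirtying. */
static bool needs_flush_and_mark_clean(unsigned *flags)
{
	bool was_clean = *flags & PG_DCACHE_CLEAN;

	*flags |= PG_DCACHE_CLEAN;	/* the test_and_set_bit() part */
	return !was_clean;		/* the leading ! in the patch */
}

int main(void)
{
	unsigned page_flags = 0;	/* stands in for page->flags */

	printf("first access flushes:  %d\n", needs_flush_and_mark_clean(&page_flags));
	printf("second access flushes: %d\n", needs_flush_and_mark_clean(&page_flags));
	return 0;
}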
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca..8440d952ba6 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
 
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
+#if __LINUX_ARM_ARCH__ < 6
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
@@ -141,7 +142,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE.  Basically, there are two
  * things that we need to take care of:
  *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		return;
 
 	mapping = page_mapping(page);
-#ifndef CONFIG_SMP
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
-#endif
 	if (mapping) {
 		if (cache_is_vivt())
 			make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 			__flush_icache_all();
 	}
 }
+#endif	/* __LINUX_ARM_ARCH__ < 6 */
 
 /*
  * Check whether the write buffer has physical address aliasing
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6844cb9b50..99ec00b0761 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
+#include <asm/smp_plat.h>
 
 #include "mm.h"
 
@@ -93,12 +94,10 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 #define flush_pfn_alias(pfn,vaddr)	do { } while (0)
 #endif
 
-#ifdef CONFIG_SMP
 static void flush_ptrace_access_other(void *args)
 {
 	__flush_icache_all();
 }
-#endif
 
 static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -120,13 +119,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 	/* VIPT non-aliasing cache */
 	if (vma->vm_flags & VM_EXEC) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-#ifdef CONFIG_SMP
+		__cpuc_flush_dcache_area(kaddr, len);
+		__flush_icache_all();
 		if (cache_ops_need_broadcast())
 			smp_call_function(flush_ptrace_access_other,
 					  NULL, 1);
-#endif
 	}
 }
 
@@ -215,6 +212,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	flush_dcache_mmap_unlock(mapping);
 }
 
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+	unsigned long pfn;
+	struct page *page;
+	struct address_space *mapping;
+
+	if (!pte_present_user(pteval))
+		return;
+	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+		/* only flush non-aliasing VIPT caches for exec mappings */
+		return;
+	pfn = pte_pfn(pteval);
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (cache_is_vipt_aliasing())
+		mapping = page_mapping(page);
+	else
+		mapping = NULL;
+
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		__flush_dcache_page(mapping, page);
+	/* pte_exec() already checked above for non-aliasing VIPT cache */
+	if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+		__flush_icache_all();
+}
+#endif
+
 /*
  * Ensure cache coherency between kernel mapping and userspace mapping
  * of this page.
@@ -246,17 +273,16 @@ void flush_dcache_page(struct page *page)
 
 	mapping = page_mapping(page);
 
-#ifndef CONFIG_SMP
-	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
-	else
-#endif
-	{
+	if (!cache_ops_need_broadcast() &&
+	    !PageHighMem(page) && mapping && !mapping_mapped(mapping))
+		clear_bit(PG_dcache_clean, &page->flags);
+	else {
 		__flush_dcache_page(mapping, page);
 		if (mapping && cache_is_vivt())
 			__flush_dcache_aliases(mapping, page);
 		else if (mapping)
 			__flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
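The flush.c changes above implement lazy D-cache flushing around PG_dcache_clean: flush_dcache_page() only clears the bit when the page has no user mappings, and __sync_icache_dcache() pays the deferred flush when the page is finally mapped into a process. A standalone C model of that state machine (not kernel code; all demo_* names are invented):

#include <stdbool.h>
#include <stdio.h>

struct demo_page {
	bool dcache_clean;	/* stands in for PG_dcache_clean */
	bool user_mapped;	/* stands in for mapping_mapped() */
};

static void demo_flush(struct demo_page *pg)
{
	printf("flushing D-cache for page\n");
	pg->dcache_clean = true;
}

/* kernel wrote to the page, e.g. via the page cache */
static void demo_flush_dcache_page(struct demo_page *pg)
{
	if (!pg->user_mapped)
		pg->dcache_clean = false;	/* defer: flush later */
	else
		demo_flush(pg);			/* visible now: flush now */
}

/* page is being mapped into userspace (set_pte_at() time) */
static void demo_sync_icache_dcache(struct demo_page *pg)
{
	if (!pg->dcache_clean)
		demo_flush(pg);			/* pay the deferred cost once */
	pg->user_mapped = true;
}

int main(void)
{
	struct demo_page pg = { .dcache_clean = true, .user_mapped = false };

	demo_flush_dcache_page(&pg);	/* deferred, nothing printed */
	demo_flush_dcache_page(&pg);	/* still deferred */
	demo_sync_icache_dcache(&pg);	/* one flush, at map time */
	return 0;
}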
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9c202168f6c..93555c992be 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -24,6 +25,7 @@
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/traps.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -485,6 +487,19 @@ static void __init build_mem_type_table(void)
 	}
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn))
+		return pgprot_noncached(vma_prot);
+	else if (file->f_flags & O_SYNC)
+		return pgprot_writecombine(vma_prot);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)
@@ -870,12 +885,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors_page = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -915,7 +929,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
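With CONFIG_ARM_DMA_MEM_BUFFERABLE, the phys_mem_access_prot() hook added above decides the memory type an mmap() of /dev/mem receives: invalid pfns become non-cached, valid RAM opened with O_SYNC becomes write-combining, and anything else stays cacheable. A userspace sketch of exercising that path (the physical address is a placeholder; running it needs root and /dev/mem access):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	off_t phys = 0x10000000;	/* placeholder physical address */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	/* With the hook above: invalid pfn -> noncached mapping;
	 * valid RAM + O_SYNC -> write-combining; otherwise cacheable. */
	munmap(p, 4096);
	close(fd);
	return 0;
}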
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 7d63beaf974..337f10256cd 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -99,6 +99,10 @@
  *  110x   0   1   0	r/w	r/o
  *  11x0   0   1   0	r/w	r/o
  *  1111   0   1   1	r/w	r/w
+ *
+ * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ *  110x   1   1   1	r/o	r/o
+ *  11x0   1   1   1	r/o	r/o
  */
 	.macro	armv6_mt_table pfx
 \pfx\()_mt_table:
@@ -138,8 +142,11 @@
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+	@ allow kernel read/write access to read-only user pages
 	tstne	r3, #PTE_EXT_APX
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
 
 	tst	r1, #L_PTE_EXEC
 	orreq	r3, r3, #PTE_EXT_XN
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 8071bcd4c99..416c87bc663 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -152,8 +152,11 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+	@ allow kernel read/write access to read-only user pages
 	tstne	r3, #PTE_EXT_APX
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
 
 	tst	r1, #L_PTE_EXEC
 	orreq	r3, r3, #PTE_EXT_XN
 
@@ -240,8 +243,6 @@ __v7_setup:
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
 	orr	r4, r4, #TTB_FLAGS
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
-	mov	r10, #0x1f			@ domains 0, 1 = manager
-	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
 	/*
 	 * Memory region attributes with SCTLR.TRE=1
 	 *
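Both set_pte_ext changes above make the APX/AP0 fixup conditional on CONFIG_CPU_USE_DOMAINS, and __v7_setup no longer forces domains 0 and 1 to manager mode. A standalone C sketch of the access-permission logic being guarded (not kernel code; demo_user_ap_bits() is invented, though the AP bit values match the ARMv6/v7 hardware PTE encoding):

#include <stdio.h>

#define PTE_EXT_AP0	(1u << 4)
#define PTE_EXT_AP1	(2u << 4)
#define PTE_EXT_APX	(1u << 9)

static unsigned int demo_user_ap_bits(unsigned int hw_pte, int use_domains)
{
	hw_pte |= PTE_EXT_AP1;		/* L_PTE_USER: user-accessible */

	/* With domains enabled, a read-only user page keeps kernel write
	 * access in the hardware PTE (APX/AP0 cleared); kernel accesses
	 * are policed by the domain access control register instead.
	 * Without domains, the PTE stays read-only for the kernel too. */
	if (use_domains && (hw_pte & PTE_EXT_APX))
		hw_pte &= ~(PTE_EXT_APX | PTE_EXT_AP0);

	return hw_pte;
}

int main(void)
{
	unsigned int ro_user = PTE_EXT_APX | PTE_EXT_AP0; /* read-only user page */

	printf("with CPU_USE_DOMAINS:    %#x\n", demo_user_ap_bits(ro_user, 1));
	printf("without CPU_USE_DOMAINS: %#x\n", demo_user_ap_bits(ro_user, 0));
	return 0;
}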