author		Will Deacon <will.deacon@arm.com>	2015-10-06 18:46:23 +0100
committer	Alex Shi <alex.shi@linaro.org>		2016-05-23 21:09:01 +0800
commit		3a3fcbe69f048f87aa59d72b73d9958af800bd2a (patch)
tree		0ca30cfbd98069e986f9fa4ff498e272dd5903da /arch
parent		cffbfed25fd96d4db65450b2206fc6c6e0b24e9a (diff)
arm64: flush: use local TLB and I-cache invalidation
There are a number of places where a single CPU is running with a
private page-table and we need to perform maintenance on the TLB and
I-cache in order to ensure correctness, but do not require the operation
to be broadcast to other CPUs.

This patch adds local variants of flush_tlb_all and __flush_icache_all
to support these use-cases and updates the callers accordingly.
__local_flush_icache_all also implies an isb, since it is intended to be
used synchronously.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Daney <david.daney@cavium.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit 8e63d38876691756f9bc6930850f1fb77809be1b)
Signed-off-by: Alex Shi <alex.shi@linaro.org>

Conflicts:
	arch/arm64/kernel/suspend.c
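For context, the broadcast and local maintenance helpers differ only in
their shareability domain: the existing __flush_icache_all() and
flush_tlb_all() use the inner-shareable instruction forms ("ic ialluis",
"tlbi vmalle1is") with dsb(ish*) barriers, so the operation is broadcast
to every CPU in the inner-shareable domain, while the local variants use
the non-suffixed instructions with dsb(nsh*) barriers and affect only
the executing CPU. Below is an annotated sketch of the two helpers added
by this patch; the code matches the hunks that follow, and the comments
are explanatory additions rather than part of the patch:

static inline void __local_flush_icache_all(void)
{
	asm("ic iallu");	/* invalidate the entire I-cache, this CPU only */
	dsb(nsh);		/* complete the invalidation (non-shareable domain) */
	isb();			/* resynchronize the local instruction stream */
}

static inline void local_flush_tlb_all(void)
{
	dsb(nshst);		/* make prior page-table stores visible to the local walker */
	asm("tlbi vmalle1");	/* invalidate all EL1&0 TLB entries, this CPU only */
	dsb(nsh);		/* wait for the TLB invalidation to complete */
	isb();			/* resynchronize the local instruction stream */
}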
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/cacheflush.h	|  7 +++++++
-rw-r--r--	arch/arm64/include/asm/tlbflush.h	|  8 ++++++++
-rw-r--r--	arch/arm64/kernel/efi.c			|  4 ++--
-rw-r--r--	arch/arm64/kernel/smp.c			|  2 +-
-rw-r--r--	arch/arm64/kernel/suspend.c		| 10 +++++-----
-rw-r--r--	arch/arm64/mm/context.c			|  4 ++--
-rw-r--r--	arch/arm64/mm/mmu.c			|  2 +-
7 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index c75b8d027eb1..54efedaf331f 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -115,6 +115,13 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
+static inline void __local_flush_icache_all(void)
+{
+	asm("ic iallu");
+	dsb(nsh);
+	isb();
+}
+
 static inline void __flush_icache_all(void)
 {
 	asm("ic ialluis");
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a173d4abcd64..fca74efe1fc5 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -65,6 +65,14 @@
  *	only require the D-TLB to be invalidated.
  *	- kaddr - Kernel virtual memory address
  */
+static inline void local_flush_tlb_all(void)
+{
+	dsb(nshst);
+	asm("tlbi vmalle1");
+	dsb(nsh);
+	isb();
+}
+
 static inline void flush_tlb_all(void)
 {
 	dsb(ishst);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 5170fd5c8e97..8e8ced810104 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -343,9 +343,9 @@ static void efi_set_pgd(struct mm_struct *mm)
 	else
 		cpu_switch_mm(mm->pgd, mm);
-	flush_tlb_all();
+	local_flush_tlb_all();
 	if (icache_is_aivivt())
-		__flush_icache_all();
+		__local_flush_icache_all();
 }
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a1e6ed5f0d06..9c1d3fef8735 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,7 +151,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 	preempt_disable();
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index dd6ad81d53aa..874ced5f5fac 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -97,13 +97,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 * them back to complete the address space configuration
 	 * restoration before returning.
 	 */
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	cpu_set_default_tcr_t0sz();
-
-	if (mm != &init_mm)
+	if (mm == &init_mm)
+		cpu_set_reserved_ttbr0();
+	else
 		cpu_switch_mm(mm->pgd, mm);
+	local_flush_tlb_all();
+
 	/*
 	 * Restore per-cpu offset before any kernel
 	 * subsystem relying on it has a chance to run.
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index d70ff14dbdbd..48b53fb381af 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -48,9 +48,9 @@ static void flush_context(void)
 {
 	/* set the reserved TTBR0 before flushing the TLB */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	if (icache_is_aivivt())
-		__flush_icache_all();
+		__local_flush_icache_all();
 }
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 650e28e6042f..4a83eb63ec23 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -466,7 +466,7 @@ void __init paging_init(void)
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 }