Diffstat (limited to 'arch/arm64/include/asm/tlbflush.h')
-rw-r--r--	arch/arm64/include/asm/tlbflush.h	55
1 file changed, 29 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c3bb05b98616..fca74efe1fc5 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -65,6 +65,14 @@
  *	only require the D-TLB to be invalidated.
  *	- kaddr - Kernel virtual memory address
  */
+static inline void local_flush_tlb_all(void)
+{
+	dsb(nshst);
+	asm("tlbi	vmalle1");
+	dsb(nsh);
+	isb();
+}
+
 static inline void flush_tlb_all(void)
 {
 	dsb(ishst);
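The new local_flush_tlb_all() mirrors flush_tlb_all() but uses non-shareable (nsh) barriers and drops the IS (inner-shareable) suffix from the TLBI, so the invalidation applies only to the issuing CPU instead of being broadcast to all cores. A minimal sketch of the intended usage pattern, assuming a hypothetical per-CPU init path (cpu_local_setup() is illustrative, not part of this patch):

/*
 * Hypothetical illustration only: a path that runs strictly on one CPU
 * (e.g. early secondary-CPU bring-up) cannot have leaked stale entries
 * into other cores' TLBs, so the cheaper local invalidation suffices.
 */
static void cpu_local_setup(void)
{
	/* Only this core's TLB can hold stale entries at this point. */
	local_flush_tlb_all();

	/* Mappings visible to other CPUs still need flush_tlb_all(). */
}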
@@ -93,11 +101,23 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-					unsigned long start, unsigned long end)
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
 	start = asid | (start >> 12);
 	end = asid | (end >> 12);
 
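The asid | (addr >> 12) arithmetic builds the operand register value for a TLBI-by-VA operation: the low bits carry VA[55:12] (hence the unconditional shift by 12, independent of PAGE_SIZE), and the ASID sits in bits [63:48]. A standalone user-space sketch of that encoding, purely for illustration (tlbi_operand() is a made-up helper, not kernel code):

/* Illustration of the operand layout assumed above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t tlbi_operand(uint64_t asid, uint64_t vaddr)
{
	/* ASID in bits [63:48], VA[55:12] in the low bits. */
	return (asid << 48) | (vaddr >> 12);
}

int main(void)
{
	/* ASID 5, VA 0x400000: expect 0x0005000000000400 */
	printf("%#018llx\n",
	       (unsigned long long)tlbi_operand(5, 0x400000));
	return 0;
}

With the > MAX_TLB_RANGE cut-off above, a single call issues at most 1024 per-page TLBI operations (4 MiB with 4 KiB pages); anything larger invalidates the whole ASID via flush_tlb_mm() instead.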
@@ -107,9 +127,15 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_all();
+		return;
+	}
+
 	start >>= 12;
 	end >>= 12;
 
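Kernel mappings have no ASID, so the over-sized-range fallback here is a full flush_tlb_all() rather than a per-mm flush. A hedged, standalone sketch of the cut-off arithmetic itself, assuming 4 KiB pages (PAGE_SHIFT of 12; not kernel code):

#include <stdio.h>

#define PAGE_SHIFT	12
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)	/* 4 MiB with 4 KiB pages */

int main(void)
{
	unsigned long start = 0;
	unsigned long end = 8UL << 20;		/* an 8 MiB range */

	/* Same decision flush_tlb_kernel_range() makes above. */
	if ((end - start) > MAX_TLB_RANGE)
		printf("%lu bytes: flush everything\n", end - start);
	else
		printf("%lu bytes: flush page by page\n", end - start);
	return 0;
}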
@@ -121,29 +147,6 @@ static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 }
 
 /*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
-		flush_tlb_all();
-}
-
-/*
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
  */