author     Linus Torvalds <torvalds@linux-foundation.org>  2019-12-04 19:06:18 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-12-04 19:06:18 -0800
commit     056df578c2dcac1e624254567f5df5ddaa223234 (patch)
tree       a19197bded5e2d2ee6219d502d6e9a3e42dd1e6f /arch/arc/mm
parent     aedc0650f9135f3b92b39cbed1a8fe98d8088825 (diff)
parent     9fbea0b7e842890a76acffce9be9e430b9e11194 (diff)
Merge tag 'arc-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:

 - Jump Label support for ARC

 - kmemleak enabled

 - arc mm backend TLB Miss / flush optimizations

 - nSIM platform switching to dwuart (vs. arcuart) and ensuing defconfig
   updates and cleanups

 - axs platform pll / video-mode updates

* tag 'arc-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: add kmemleak support
  ARC: [plat-axs10x]: remove hardcoded video mode from bootargs
  ARC: [plat-axs10x]: use pgu pll instead of fixed clock
  ARC: ARCv2: jump label: implement jump label patching
  ARC: mm: tlb flush optim: elide redundant uTLB invalidates for MMUv3
  ARC: mm: tlb flush optim: elide repeated uTLB invalidate in loop
  ARC: mm: tlb flush optim: Make TLBWriteNI fallback to TLBWrite if not available
  ARC: mm: TLB Miss optim: avoid re-reading ECR
  ARCv2: mm: TLB Miss optim: Use double world load/stores LDD/STD
  ARCv2: mm: TLB Miss optim: SMP builds can cache pgd pointer in mmu scratch reg
  ARC: nSIM_700: remove unused network options
  ARC: nSIM_700: switch to DW UART usage
  ARC: merge HAPS-HS with nSIM-HS configs
  ARC: HAPS: cleanup defconfigs from unused ETH drivers
  ARC: HAPS: add HIGHMEM memory zone to DTS
  ARC: HAPS: use same UART configuration everywhere
  ARC: HAPS: cleanup defconfigs from unused IO-related options
  ARC: regenerate nSIM and HAPS defconfigs
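A note on the "TLBWriteNI fallback" item: TLBWriteNI writes a J-TLB entry without also invalidating the uTLBs, and the command only exists on MMU v2 and later, so older MMUs alias it back to plain TLBWrite. The sketch below shows the shape of that arrangement; the command encodings and the header they would live in (arch/arc/include/asm/mmu.h) are assumptions, not quoted from this merge.

/* Sketch only: how TLBWriteNI could fall back to TLBWrite on MMU v1.
 * Command encodings and header placement are assumed, not verbatim.
 */
#define TLBWrite    0x1		/* write J-TLB entry, uTLBs invalidated too */
#define TLBRead     0x2
#define TLBGetIndex 0x3
#define TLBProbe    0x4

#if (CONFIG_ARC_MMU_VER >= 2)
#define TLBWriteNI  0x5		/* write J-TLB entry, leave uTLBs alone */
#define TLBIVUTLB   0x6		/* explicitly invalidate all uTLBs */
#else
#define TLBWriteNI  TLBWrite	/* MMU v1 lacks the command: fall back */
#endif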
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--   arch/arc/mm/tlb.c    81
-rw-r--r--   arch/arc/mm/tlbex.S  18
2 files changed, 41 insertions(+), 58 deletions(-)
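The tlb.c hunks below carry the flush optimizations from the shortlog: the per-entry TLBWrite in the flush-all loops becomes TLBWriteNI, and the explicit utlb_invalidate() calls after tlb_entry_erase() go away because TLBWrite already nukes the uTLBs. A rough sketch of the resulting loop pattern, assuming a single uTLB invalidate outside the loop; the helper name flush_all_jtlb_entries is made up for illustration.

/* Illustrative only, not the kernel source: write blank entries with the
 * NI (no-invalidate) command and issue one uTLB invalidate at the end.
 */
static void flush_all_jtlb_entries(int num_tlb)
{
	int entry;

	/* blank (invalid) entry template */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);	/* no per-iteration uTLB flush */
	}

	utlb_invalidate();	/* one explicit uTLB invalidate covers the loop */
}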
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 10025e199353..c340acd989a0 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -118,6 +118,33 @@ static inline void __tlb_entry_erase(void)
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
+static void utlb_invalidate(void)
+{
+#if (CONFIG_ARC_MMU_VER >= 2)
+
+#if (CONFIG_ARC_MMU_VER == 2)
+ /* MMU v2 introduced the uTLB Flush command.
+ * There was however an obscure hardware bug, where uTLB flush would
+ * fail when a prior probe for J-TLB (both totally unrelated) would
+ * return lkup err - because the entry didn't exist in MMU.
+ * The Workaround was to set Index reg with some valid value, prior to
+ * flush. This was fixed in MMU v3
+ */
+ unsigned int idx;
+
+ /* make sure INDEX Reg is valid */
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /* If not write some dummy val */
+ if (unlikely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBINDEX, 0xa);
+#endif
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+#endif
+
+}
+
#if (CONFIG_ARC_MMU_VER < 4)
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
@@ -149,44 +176,6 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
}
}
-/****************************************************************************
- * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
- *
- * New IVUTLB cmd in MMU v2 explictly invalidates the uTLB
- *
- * utlb_invalidate ( )
- * -For v2 MMU calls Flush uTLB Cmd
- * -For v1 MMU does nothing (except for Metal Fix v1 MMU)
- * This is because in v1 TLBWrite itself invalidate uTLBs
- ***************************************************************************/
-
-static void utlb_invalidate(void)
-{
-#if (CONFIG_ARC_MMU_VER >= 2)
-
-#if (CONFIG_ARC_MMU_VER == 2)
- /* MMU v2 introduced the uTLB Flush command.
- * There was however an obscure hardware bug, where uTLB flush would
- * fail when a prior probe for J-TLB (both totally unrelated) would
- * return lkup err - because the entry didn't exist in MMU.
- * The Workround was to set Index reg with some valid value, prior to
- * flush. This was fixed in MMU v3 hence not needed any more
- */
- unsigned int idx;
-
- /* make sure INDEX Reg is valid */
- idx = read_aux_reg(ARC_REG_TLBINDEX);
-
- /* If not write some dummy val */
- if (unlikely(idx & TLB_LKUP_ERR))
- write_aux_reg(ARC_REG_TLBINDEX, 0xa);
-#endif
-
- write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
-#endif
-
-}
-
static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
unsigned int idx;
@@ -219,11 +208,6 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
#else /* CONFIG_ARC_MMU_VER >= 4) */
-static void utlb_invalidate(void)
-{
- /* No need since uTLB is always in sync with JTLB */
-}
-
static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
@@ -267,7 +251,7 @@ noinline void local_flush_tlb_all(void)
for (entry = 0; entry < num_tlb; entry++) {
/* write this entry to the TLB */
write_aux_reg(ARC_REG_TLBINDEX, entry);
- write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
}
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
@@ -278,7 +262,7 @@ noinline void local_flush_tlb_all(void)
for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
write_aux_reg(ARC_REG_TLBINDEX, entry);
- write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
}
}
@@ -355,8 +339,6 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
}
- utlb_invalidate();
-
local_irq_restore(flags);
}
@@ -385,8 +367,6 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
start += PAGE_SIZE;
}
- utlb_invalidate();
-
local_irq_restore(flags);
}
@@ -407,7 +387,6 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
- utlb_invalidate();
}
local_irq_restore(flags);
@@ -868,7 +847,7 @@ void arc_mmu_init(void)
write_aux_reg(ARC_REG_PID, MMU_ENABLE);
/* In smp we use this reg for interrupt 1 scratch */
-#ifndef CONFIG_SMP
+#ifdef ARC_USE_SCRATCH_REG
/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
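The CONFIG_SMP to ARC_USE_SCRATCH_REG switch above (and in the tlbex.S hunk further down) matches the "SMP builds can cache pgd pointer in mmu scratch reg" item: ARCv2 has a dedicated MMU scratch register, so even SMP kernels can keep the pgd pointer in SCRATCH_DATA0 instead of reserving it for interrupt scratch. A plausible definition of the gate, assumed rather than quoted from this merge:

/* Assumed sketch (likely arch/arc/include/asm/mmu.h): ARCompact SMP needs
 * SCRATCH_DATA0 for interrupt scratch; everyone else can cache the pgd there.
 */
#if defined(CONFIG_ISA_ARCV2) || !defined(CONFIG_SMP)
#define ARC_USE_SCRATCH_REG
#endif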
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index c55d95dd2f39..2efaf6ca0c06 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -122,17 +122,27 @@ ex_saved_reg1:
#else /* ARCv2 */
.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_ARC_HAS_LL64
+ std r0, [sp, -16]
+ std r2, [sp, -8]
+#else
PUSH r0
PUSH r1
PUSH r2
PUSH r3
+#endif
.endm
.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_ARC_HAS_LL64
+ ldd r0, [sp, -16]
+ ldd r2, [sp, -8]
+#else
POP r3
POP r2
POP r1
POP r0
+#endif
.endm
#endif
@@ -193,7 +203,7 @@ ex_saved_reg1:
lr r2, [efa]
-#ifndef CONFIG_SMP
+#ifdef ARC_USE_SCRATCH_REG
lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
#else
GET_CURR_TASK_ON_CPU r1
@@ -282,11 +292,7 @@ ex_saved_reg1:
sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
/* Commit the Write */
-#if (CONFIG_ARC_MMU_VER >= 2) /* introduced in v2 */
sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
-#else
- sr TLBWrite, [ARC_REG_TLBCOMMAND]
-#endif
#else
sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
@@ -370,9 +376,7 @@ ENTRY(EV_TLBMissD)
;----------------------------------------------------------------
; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
- lr r3, [ecr]
or r0, r0, _PAGE_ACCESSED ; Accessed bit always
- btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well
st_s r0, [r1] ; Write back PTE