Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--  arch/mips/mm/tlbex.c  34
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a08dd53a1cc5..3978a3d81366 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -637,7 +637,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
if (cpu_has_rixi) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
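
The rotate here is the whole trick: on RIXI-capable CPUs the RI/XI bits live at the top of EntryLo, and the pte keeps their software counterparts in its low bits, so a single rotate-right by ilog2(_PAGE_GLOBAL) both strips the software bits and lands RI/XI where the hardware expects them; without RIXI a plain shift suffices. A minimal standalone sketch, with an illustrative shift value and bit layout rather than the real pgtable-bits.h encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define PG_GLOBAL_SHIFT 6    /* stand-in for ilog2(_PAGE_GLOBAL) */

    static uint32_t rotr32(uint32_t v, unsigned k)
    {
        return (v >> k) | (v << (32 - k));
    }

    int main(void)
    {
        uint32_t pte = 0x12345ac0 | 0x3;    /* low bits: software RI/XI stand-ins */

        /* Non-RIXI case: a plain shift (UASM_i_SRL) just discards them. */
        printf("srl:  %08x\n", pte >> PG_GLOBAL_SHIFT);

        /* RIXI case: the same distance rotated (UASM_i_ROTR) parks the
         * low bits in EntryLo's top bits instead. */
        printf("rotr: %08x\n", rotr32(pte, PG_GLOBAL_SHIFT));
        return 0;
    }
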
@@ -1009,7 +1009,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
* 64bit address support (36bit on a 32bit CPU) in a 32bit
* Kernel is a special case. Only a few CPUs use it.
*/
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits) {
uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
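
This is the one place a 32-bit kernel with 64-bit physical addresses (the renamed CONFIG_PHYS_ADDR_T_64BIT) gets special handling: pte_t widens to 64 bits, so when the CPU has 64-bit loads the handler fetches each pte of the even/odd pair with a single ld. A standalone sketch of the layout the two uasm_i_ld offsets rely on, with the pte_t definition assumed for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pte_t;    /* CONFIG_PHYS_ADDR_T_64BIT on a 32-bit kernel */

    int main(void)
    {
        pte_t pair[2] = { 0x111111111ULL, 0x122222222ULL };

        /* The generated handler issues:
         *   uasm_i_ld(p, tmp,  0,             ptep);   -- even pte
         *   uasm_i_ld(p, ptep, sizeof(pte_t), ptep);   -- odd pte
         * i.e. one 8-byte load per pte instead of two 4-byte halves.
         */
        printf("even pte %#llx at +0, odd pte %#llx at +%zu\n",
               (unsigned long long)pair[0],
               (unsigned long long)pair[1], sizeof(pte_t));
        return 0;
    }
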
@@ -1062,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
struct mips_huge_tlb_info {
int huge_pte;
int restore_scratch;
+ bool need_reload_pte;
};
static struct mips_huge_tlb_info
@@ -1076,6 +1077,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
rv.huge_pte = scratch;
rv.restore_scratch = 0;
+ rv.need_reload_pte = false;
if (check_for_high_segbits) {
UASM_i_MFC0(p, tmp, C0_BADVADDR);
@@ -1264,6 +1266,7 @@ static void build_r4000_tlb_refill_handler(void)
} else {
htlb_info.huge_pte = K0;
htlb_info.restore_scratch = 0;
+ htlb_info.need_reload_pte = true;
vmalloc_mode = refill_noscratch;
/*
* create the plain linear handler
@@ -1300,7 +1303,8 @@ static void build_r4000_tlb_refill_handler(void)
}
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
- UASM_i_LW(&p, K0, 0, K1);
+ if (htlb_info.need_reload_pte)
+ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
htlb_info.restore_scratch);
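
The new need_reload_pte flag is what the tlb_huge_update label above consumes: when a scratch register is available the pte is still live on entry to the huge-page path, but in the refill_noscratch layout it must be re-fetched through K1, and the reload is now both conditional and aimed at whichever register actually holds the pte (htlb_info.huge_pte) instead of unconditionally K0. A minimal sketch of the fixed decision, with stand-in names rather than the uasm API (K0 is $26 on MIPS):

    #include <stdio.h>

    struct huge_tlb_info {
        int huge_pte;           /* register number holding the huge pte */
        int need_reload_pte;    /* no scratch register kept it live     */
    };

    static void emit_huge_path(const struct huge_tlb_info *info)
    {
        if (info->need_reload_pte)
            printf("lw $%d, 0($k1)\t# re-fetch pte from page table\n",
                   info->huge_pte);
        printf("# build_huge_update_entries(..., $%d, K1)\n", info->huge_pte);
    }

    int main(void)
    {
        struct huge_tlb_info scratch   = { .huge_pte = 1,  .need_reload_pte = 0 };
        struct huge_tlb_info noscratch = { .huge_pte = 26, .need_reload_pte = 1 };

        emit_huge_path(&scratch);      /* scratch path: skip the reload   */
        emit_huge_path(&noscratch);    /* refill_noscratch: reload via K1 */
        return 0;
    }
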
@@ -1506,14 +1510,14 @@ static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_lld(p, pte, 0, ptr);
else
# endif
UASM_i_LL(p, pte, 0, ptr);
#else
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_ld(p, pte, 0, ptr);
else
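
All of the Kconfig renames in iPTE_LW and iPTE_SW sit inside one idiom: on SMP the pte is fetched with a load-linked (lld when the CPU can do 64-bit accesses, ll otherwise) so the store-conditional emitted by iPTE_SW in the following hunks can fail and retry if another CPU modified the pte in between. As a portable analogy, C11 atomics standing in for LL/SC, with illustrative bit values:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static void pte_set_bits(_Atomic uint32_t *pte, uint32_t mode)
    {
        uint32_t old, new;

        do {
            old = atomic_load(pte);    /* LL  pte, 0(ptr)    */
            new = old | mode;          /* ori pte, pte, mode */
        } while (!atomic_compare_exchange_weak(pte, &old, new));
                    /* SC + branch-if-failed, as iPTE_SW emits */
    }

    int main(void)
    {
        _Atomic uint32_t pte = 0x1000;

        pte_set_bits(&pte, 0x2 | 0x4);    /* e.g. _PAGE_VALID | _PAGE_DIRTY */
        printf("pte = %#x\n", (unsigned)atomic_load(&pte));
        return 0;
    }
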
@@ -1526,13 +1530,13 @@ static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode)
{
-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif
uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_scd(p, pte, 0, ptr);
else
@@ -1544,7 +1548,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
else
uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!cpu_has_64bits) {
/* no uasm_i_nop needed */
uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
@@ -1559,14 +1563,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
uasm_i_nop(p);
# endif
#else
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_sd(p, pte, 0, ptr);
else
# endif
UASM_i_SW(p, pte, 0, ptr);
-# ifdef CONFIG_64BIT_PHYS_ADDR
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!cpu_has_64bits) {
uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_ori(p, pte, pte, hwmode);
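
The hwmode variable exists for the remaining corner: CONFIG_PHYS_ADDR_T_64BIT on a CPU without 64-bit loads/stores, where the 8-byte pte is kept as two 32-bit words and only the hardware-visible bits (_PAGE_VALID | _PAGE_DIRTY) need to be mirrored into the word at offset sizeof(pte_t)/2. A standalone sketch, with illustrative bit values rather than the real layout:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_VALID 0x2u
    #define PAGE_DIRTY 0x4u    /* illustrative, not the real encoding */

    int main(void)
    {
        uint32_t pte[2] = { 0x1000, 0x1000 };   /* two halves of one 64-bit pte */
        uint32_t mode   = PAGE_VALID | PAGE_DIRTY | 0x100;  /* plus a sw bit */
        uint32_t hwmode = mode & (PAGE_VALID | PAGE_DIRTY);

        pte[0] |= mode;      /* ori + sw on the first half          */
        pte[1] |= hwmode;    /* lw/ori/sw at offset sizeof(pte_t)/2 */

        printf("lo=%#x hi=%#x\n", pte[0], pte[1]);
        return 0;
    }
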
@@ -1868,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
uasm_l_smp_pgtable_change(l, *p);
#endif
iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
- if (!m4kc_tlbp_war())
+ if (!m4kc_tlbp_war()) {
build_tlb_probe_entry(p);
+ if (cpu_has_htw) {
+ /* race condition happens, leaving */
+ uasm_i_ehb(p);
+ uasm_i_mfc0(p, wr.r3, C0_INDEX);
+ uasm_il_bltz(p, r, wr.r3, label_leave);
+ uasm_i_nop(p);
+ }
+ }
return wr;
}
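
The new tail guards against the hardware page-table walker: tlbp (emitted by build_tlb_probe_entry) sets bit 31 of C0_Index on a miss, and with an active HTW the faulting entry may have been replaced under the handler's feet, so after the ehb hazard barrier a negative index means abandon the update and return from the exception, letting the access replay. A sketch of just the sign check, standalone with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    /* tlbp reports a miss by setting bit 31 of C0_Index, which is why a
     * single bltz (uasm_il_bltz to label_leave) suffices as the test. */
    static int htw_raced(int32_t c0_index)
    {
        return c0_index < 0;
    }

    int main(void)
    {
        printf("index 0x00000005 -> raced? %d\n", htw_raced(0x00000005));
        printf("index 0x80000000 -> raced? %d\n",
               htw_raced((int32_t)0x80000000u));
        return 0;
    }
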