author	Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	2019-09-18 20:23:28 +0530
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-09-19 21:24:59 +1000
commit	d9101bfa6adc831bda8836c4d774820553c14942 (patch)
tree	6ecb08a936eaf1af85eae2027ea0446407e53e07 /arch/powerpc
parent	7c1bb6bbf75d8ca5ec878627d3170effcaf54f27 (diff)
powerpc/mm/mce: Keep irqs disabled during lockless page table walk
__find_linux_mm_pte() returns a page table entry pointer after walking the page table without holding locks. To make it safe against a THP split and/or collapse, we disable interrupts around the lockless page table walk. However, we need to keep interrupts disabled as long as we use the page table entry pointer that is returned.

Fix addr_to_pfn() to do that.

Fixes: ba41e1e1ccb9 ("powerpc/mce: Hookup derror (load/store) UE errors")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
[mpe: Rearrange code slightly and tweak change log wording]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190918145328.28602-1-aneesh.kumar@linux.ibm.com
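The rule the patch enforces can be sketched on its own: every dereference of the pte pointer returned by the lockless walk must sit inside the local_irq_save()/local_irq_restore() window, because keeping interrupts disabled on this CPU is what holds off a concurrent THP split or collapse. Below is a minimal sketch of a caller following that rule, assuming powerpc's __find_linux_pte() from <asm/pte-walk.h>; lockless_addr_to_pfn() is a hypothetical name used only for illustration, not a kernel function.

```c
#include <linux/mm.h>
#include <asm/pte-walk.h>	/* __find_linux_pte() on powerpc */

/* Hypothetical helper, for illustration only. */
static unsigned long lockless_addr_to_pfn(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pfn = ULONG_MAX;
	unsigned long flags;
	unsigned int shift;
	pte_t *ptep;

	local_irq_save(flags);	/* holds off THP split/collapse on this CPU */
	ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
	if (ptep && !pte_special(*ptep))
		pfn = pte_pfn(*ptep);	/* every *ptep access stays in the irq-off window */
	local_irq_restore(flags);	/* after this, ptep must not be dereferenced */

	return pfn;
}
```

The actual patch below additionally folds the offset within a huge page into the pfn when shift > PAGE_SHIFT, but the irq-off window around all uses of *ptep is the same idea.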
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/mce_power.c	20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 356e7b99f661..1cbf7f1a4e3d 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -29,7 +29,7 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 {
 	pte_t *ptep;
 	unsigned int shift;
-	unsigned long flags;
+	unsigned long pfn, flags;
 	struct mm_struct *mm;
 
 	if (user_mode(regs))
@@ -39,18 +39,22 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 
 	local_irq_save(flags);
 	ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
-	local_irq_restore(flags);
-	if (!ptep || pte_special(*ptep))
-		return ULONG_MAX;
+	if (!ptep || pte_special(*ptep)) {
+		pfn = ULONG_MAX;
+		goto out;
+	}
 
-	if (shift > PAGE_SHIFT) {
+	if (shift <= PAGE_SHIFT)
+		pfn = pte_pfn(*ptep);
+	else {
 		unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
-
-		return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+		pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
 	}
-	return pte_pfn(*ptep);
+out:
+	local_irq_restore(flags);
+	return pfn;
 }
 
 /* flush SLBs and reload */