From bab70a4af737f623de5b034976a311055308ab86 Mon Sep 17 00:00:00 2001
From: Eugene Surovegin
Date: Tue, 28 Mar 2006 10:13:12 -0800
Subject: [PATCH] lock PTE before updating it in 440/BookE page fault handler

Fix the 44x and BookE page fault handler to correctly lock the PTE
before trying to pte_update() it; otherwise this PTE might be swapped
out after the pte_present() check but before the pte_update() call,
resulting in a corrupted PTE.  This can happen with preemption enabled
under low-memory conditions.

Signed-off-by: Eugene Surovegin
Signed-off-by: Paul Mackerras
---
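A condensed before/after sketch of the race and of the locking pattern
the hunks below apply (an illustrative note for reviewers, not part of
the applied diff; it assumes the handler's mm/address/ptep/pmdp locals
and omits the PG_arch_1 dcache/icache bookkeeping for brevity - all
helpers shown are the ones actually used by this patch):

	/* Old, racy pattern: with CONFIG_PREEMPT and memory pressure,
	 * the page can be evicted between the pte_present() check and
	 * the pte_update(), so the update scribbles on a stale PTE.
	 */
	if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
		/* <-- preemption point: PTE may be swapped out here */
		pte_update(ptep, 0, _PAGE_HWEXEC);
		_tlbie(address);
		pte_unmap(ptep);
	}

	/* New pattern: take the PTE lock across both the check and the
	 * update, so the PTE cannot change underneath us.
	 */
	if (get_pteptr(mm, address, &ptep, &pmdp)) {
		spinlock_t *ptl = pte_lockptr(mm, pmdp);

		spin_lock(ptl);
		if (pte_present(*ptep)) {
			pte_update(ptep, 0, _PAGE_HWEXEC);
			_tlbie(address);
		}
		pte_unmap_unlock(ptep, ptl);
	}
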
 arch/powerpc/mm/fault.c      | 30 +++++++++++++++++-------------
 arch/powerpc/mm/pgtable_32.c |  6 ++++--
 arch/ppc/mm/fault.c          | 30 +++++++++++++++++-------------
 arch/ppc/mm/pgtable.c        |  6 ++++--
 include/asm-ppc/pgtable.h    |  3 ++-
 5 files changed, 44 insertions(+), 31 deletions(-)

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ec4adcb4bc2..5aea0909a5e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -267,25 +267,29 @@ good_area:
 #endif
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 		pte_t *ptep;
+		pmd_t *pmdp;
 
 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 	/* a write */
 	} else if (is_write) {
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d296eb6b454..90628601fac 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t	*pgd;
 	pmd_t	*pmd;
@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 		if (pte) {
 			retval = 1;
 			*ptep = pte;
+			if (pmdp)
+				*pmdp = pmd;
 			/* XXX caller needs to do pte_unmap, yuck */
 		}
 	}
@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;
 
 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 0217188ef46..8e08ca32531 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -202,6 +202,7 @@ good_area:
 	/* an exec - 4xx/Book-E allows for per-page execute permission */
 	} else if (TRAP(regs) == 0x400) {
 		pte_t *ptep;
+		pmd_t *pmdp;
 
 #if 0
 		/* It would be nice to actually enforce the VM execute
@@ -215,21 +216,24 @@ good_area:
 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 	/* a read */
 	} else {
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index a1924876cad..706bca8eb14 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t	*pgd;
 	pmd_t	*pmd;
@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 		if (pte) {
 			retval = 1;
 			*ptep = pte;
+			if (pmdp)
+				*pmdp = pmd;
 			/* XXX caller needs to do pte_unmap, yuck */
 		}
 	}
@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;
 
 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index e1c62da12e7..570b355162f 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -837,7 +837,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
  */
 #define pgtable_cache_init()	do { } while (0)
 
-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+		      pmd_t **pmdp);
 
 #include <asm-generic/pgtable.h>