From 97632e6fbea5b996669ffee21d869ed09848e1ec Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Mon, 10 Oct 2011 10:50:37 +0000
Subject: powerpc: hugetlb: fix huge_ptep_set_access_flags return value

There was an unconditional return of "1" in the original code from
David Gibson, and I dropped it because it wasn't needed for FSL BOOKE
32-bit.  However, not all systems (including 64-bit FSL BOOKE) load the
hpte from the fault handler asm; those platforms depend on this function
returning 1, which causes a call to update_mmu_cache() that writes an
entry into the TLB.

Signed-off-by: Becky Bruce
Signed-off-by: David Gibson
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/hugetlb.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 86004930a78..70f9885f5c0 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -124,7 +124,18 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
+#if defined(CONFIG_PPC_MMU_NOHASH) && \
+	!(defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC32))
+	/*
+	 * The "return 1" forces a call of update_mmu_cache, which will write a
+	 * TLB entry.  Without this, platforms that don't do a write of the TLB
+	 * entry in the TLB miss handler asm will fault ad infinitum.
+	 */
+	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	return 1;
+#else
 	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+#endif
 }
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
--
cgit v1.2.3
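
For context, here is a minimal standalone sketch of the fault-path contract
the patch relies on: the caller only reaches update_mmu_cache() (where a
software-managed TLB would get its entry written) when
huge_ptep_set_access_flags() returns nonzero. All types and both functions
below are stand-ins modeled on the kernel identifiers, not actual kernel
source; the simplified caller in main() is hypothetical and only illustrates
why a platform whose TLB miss handler does not write the entry would fault
forever if this helper returned 0.

/* Standalone model; compile with any C compiler. Stand-in types only. */
#include <stdio.h>

typedef unsigned long pte_t;            /* stand-in for the kernel's pte_t */
struct vm_area_struct { int unused; };  /* stand-in VMA */

/* Model of the patched helper on a software-TLB (NOHASH) platform. */
static int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t pte, int dirty)
{
	(void)vma; (void)addr;
	*ptep = pte | (dirty ? 0x1UL : 0x0UL);  /* pretend to update the PTE */
	return 1;  /* nonzero forces the caller into update_mmu_cache() */
}

/* Stand-in for update_mmu_cache(): where the TLB entry would be written. */
static void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	(void)vma; (void)ptep;
	printf("TLB entry written for addr 0x%lx\n", addr);
}

int main(void)
{
	struct vm_area_struct vma = { 0 };
	pte_t pte = 0x1000;
	unsigned long addr = 0x40000000UL;

	/* Simplified fault path: only a nonzero return reaches the TLB write.
	 * A 0 return here means no TLB entry is installed, so a platform that
	 * also skips the write in its TLB miss handler asm re-faults forever. */
	if (huge_ptep_set_access_flags(&vma, addr, &pte, pte, 1))
		update_mmu_cache(&vma, addr, &pte);

	return 0;
}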