author:    Christoffer Dall <christoffer.dall@linaro.org>	2015-07-01 14:08:31 +0200
committer: Catalin Marinas <catalin.marinas@arm.com>	2015-07-01 14:29:28 +0100
commit:    fd28f5d439fca77348c129d5b73043a56f8a0296
tree:      8acfb71906a63563b5a1e6d1abe86c707184f3d7 /arch/arm64
parent:    b265da5a45ce60bd3d7505cc0eaa6cfba50946a1
arm64: Don't report clear pmds and puds as huge
The current pmd_huge() and pud_huge() functions simply check that the table bit is not set and report the entry as huge in that case. This is counter-intuitive, since a clear pmd/pud cannot also be a huge pmd/pud, and it is inconsistent with at least arm and x86. Change the behaviour to prevent others from repeating this mistake when reading code that calls these functions, and to fix a KVM bug on arm64 where the incorrect page reference counting that resulted from this mistake caused memory corruption.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Steve Capper <steve.capper@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Fixes: 084bd29810a5 ("ARM64: mm: HugeTLB support.")
Cc: <stable@vger.kernel.org> # 3.11+
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
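For illustration only, a minimal user-space sketch of why the old check misfires on a clear entry: a zeroed pmd has no bits set, so the table bit is naturally clear and the old test returns true. The table-bit position (bit 1) matches arm64, but pmd_val_t, the helper names and the sample block descriptor below are assumptions for demonstration, not the kernel definitions.

/*
 * Illustrative user-space sketch (not kernel code), assuming a 64-bit
 * descriptor with the table bit at bit 1 as on arm64. The sample block
 * descriptor value is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pmd_val_t;

#define PMD_TABLE_BIT	((pmd_val_t)1 << 1)	/* descriptor bit 1: table vs. block */

/* Old check: a clear (zero) pmd has no table bit set, so it was reported huge. */
static int pmd_huge_old(pmd_val_t pmd)
{
	return !(pmd & PMD_TABLE_BIT);
}

/* New check: a clear pmd is never huge. */
static int pmd_huge_new(pmd_val_t pmd)
{
	return pmd && !(pmd & PMD_TABLE_BIT);
}

int main(void)
{
	pmd_val_t clear_pmd = 0;			/* a pmd_none() entry */
	pmd_val_t block_pmd = 0x0040000000000705ULL;	/* hypothetical block (huge) descriptor */

	/* old=1 is the bug: a clear entry reported as huge; new=0 is correct */
	printf("clear pmd: old=%d new=%d\n",
	       pmd_huge_old(clear_pmd), pmd_huge_new(clear_pmd));
	/* a genuine block mapping is reported huge by both */
	printf("block pmd: old=%d new=%d\n",
	       pmd_huge_old(block_pmd), pmd_huge_new(block_pmd));
	return 0;
}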
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/mm/hugetlbpage.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2de9d2e59d96..0eeb4f0930a0 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -40,13 +40,13 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 
 int pmd_huge(pmd_t pmd)
 {
-	return !(pmd_val(pmd) & PMD_TABLE_BIT);
+	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
 
 int pud_huge(pud_t pud)
 {
 #ifndef __PAGETABLE_PMD_FOLDED
-	return !(pud_val(pud) & PUD_TABLE_BIT);
+	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
 #else
 	return 0;
 #endif