From b0e85701a7766341618d5e924f29f359431f9c91 Mon Sep 17 00:00:00 2001
From: Janosch Frank
Date: Thu, 2 Mar 2017 15:23:42 +0100
Subject: KVM: s390: Fix guest migration for huge guests resulting in panic

commit 2e4d88009f57057df7672fa69a32b5224af54d37 upstream.

While we technically cannot run huge page guests right now, we can
set up a guest with huge pages. Trying to migrate it will trigger a
VM_BUG_ON and, if the kernel is not configured to panic on a BUG, it
will happily try to work on non-existing page table entries.

With this patch, we always return "dirty" if we encounter a large page
when migrating. This at least fixes the immediate problem until we
have proper handling for both kinds of pages.

Fixes: 15f36eb ("KVM: s390: Add proper dirty bitmap support to S390 kvm.")
Cc: # 3.16+
Signed-off-by: Janosch Frank
Acked-by: Christian Borntraeger
Signed-off-by: Martin Schwidefsky
Signed-off-by: Greg Kroah-Hartman
---
 arch/s390/mm/pgtable.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8345ae1f117d..05ae254f84cf 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1237,11 +1237,28 @@ EXPORT_SYMBOL_GPL(s390_reset_cmma);
  */
 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
 {
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
         pte_t *pte;
         spinlock_t *ptl;
         bool dirty = false;
 
-        pte = get_locked_pte(gmap->mm, address, &ptl);
+        pgd = pgd_offset(gmap->mm, address);
+        pud = pud_alloc(gmap->mm, pgd, address);
+        if (!pud)
+                return false;
+        pmd = pmd_alloc(gmap->mm, pud, address);
+        if (!pmd)
+                return false;
+        /* We can't run guests backed by huge pages, but userspace can
+         * still set them up and then try to migrate them without any
+         * migration support.
+         */
+        if (pmd_large(*pmd))
+                return true;
+
+        pte = pte_alloc_map_lock(gmap->mm, pmd, address, &ptl);
         if (unlikely(!pte))
                 return false;
-- 
cgit v1.2.3