author     Janosch Frank <frankja@linux.vnet.ibm.com>         2017-03-02 15:23:42 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-03-18 19:09:58 +0800
commit     b0e85701a7766341618d5e924f29f359431f9c91 (patch)
tree       8f334619f8ac8c55915f63d809e59a594b098b81
parent     61fbad6a28fd3c80567cc615e2de6bd6476ab60c (diff)
KVM: s390: Fix guest migration for huge guests resulting in panic
commit 2e4d88009f57057df7672fa69a32b5224af54d37 upstream.

While we can technically not run huge page guests right now, we can
set up a guest with huge pages. Trying to migrate it will trigger a
VM_BUG_ON and, if the kernel is not configured to panic on a BUG, it
will happily try to work on non-existing page table entries.

With this patch, we always return "dirty" if we encounter a large page
when migrating. This at least fixes the immediate problem until we
have proper handling for both kinds of pages.

Fixes: 15f36eb ("KVM: s390: Add proper dirty bitmap support to S390 kvm.")
Cc: <stable@vger.kernel.org> # 3.16+
Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  arch/s390/mm/pgtable.c | 19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8345ae1f117d..05ae254f84cf 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1237,11 +1237,28 @@ EXPORT_SYMBOL_GPL(s390_reset_cmma);
  */
 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
 {
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 	pte_t *pte;
 	spinlock_t *ptl;
 	bool dirty = false;
 
-	pte = get_locked_pte(gmap->mm, address, &ptl);
+	pgd = pgd_offset(gmap->mm, address);
+	pud = pud_alloc(gmap->mm, pgd, address);
+	if (!pud)
+		return false;
+	pmd = pmd_alloc(gmap->mm, pud, address);
+	if (!pmd)
+		return false;
+	/* We can't run guests backed by huge pages, but userspace can
+	 * still set them up and then try to migrate them without any
+	 * migration support.
+	 */
+	if (pmd_large(*pmd))
+		return true;
+
+	pte = pte_alloc_map_lock(gmap->mm, pmd, address, &ptl);
 	if (unlikely(!pte))
 		return false;
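
For readers who prefer the end result over the hunk, below is a hedged sketch of how the
patched lookup path in gmap_test_and_clear_dirty() reads once the diff is applied. The
dirty-bit test and unlock at the end of the function lie outside the context shown above,
so that tail is only summarised in a comment here and is not the verbatim kernel code.

bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	bool dirty = false;

	/* Walk (allocating if necessary) the upper page-table levels
	 * for this guest address in the host mm.
	 */
	pgd = pgd_offset(gmap->mm, address);
	pud = pud_alloc(gmap->mm, pgd, address);
	if (!pud)
		return false;
	pmd = pmd_alloc(gmap->mm, pud, address);
	if (!pmd)
		return false;

	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support. Report a large page as dirty instead of
	 * descending into a PTE table that does not exist.
	 */
	if (pmd_large(*pmd))
		return true;

	/* Only map and lock the PTE once we know this is a normal
	 * (non-huge) mapping.
	 */
	pte = pte_alloc_map_lock(gmap->mm, pmd, address, &ptl);
	if (unlikely(!pte))
		return false;

	/* The remainder of the function (not part of the shown hunk)
	 * tests and clears the PTE's dirty state under ptl, drops the
	 * lock and returns the result in 'dirty'; it is unchanged by
	 * this patch and elided in this sketch.
	 */
	return dirty;
}

The net effect is that the huge-page case is filtered out before anything attempts to map
a PTE, which is the path the commit message says triggered the VM_BUG_ON: the old
get_locked_pte() call assumed a regular page table below the PMD, and a huge-page mapping
is exactly what breaks that assumption.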