Diffstat (limited to 'mm/huge_memory.c')
 mm/huge_memory.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e187454d82f6..3e29781ee762 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1162,7 +1162,12 @@ static void __split_huge_page_refcount(struct page *page)
/* after clearing PageTail the gup refcount can be released */
smp_mb();
- page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ /*
+ * retain the hwpoison flag of the poisoned tail page: without it,
+ * memory-failure can end up killing the wrong process on a KVM guest.
+ */
+ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
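The added mask above leans on C operator precedence: ~ binds tighter than |, so the right-hand side is (~PAGE_FLAGS_CHECK_AT_PREP) | __PG_HWPOISON, i.e. the prep-check flags are cleared while a previously set hwpoison bit survives. A minimal userspace sketch of that behaviour, using made-up stand-in bit values rather than the real definitions from include/linux/page-flags.h:

    #include <assert.h>
    #include <stdio.h>

    /* Stand-in flag bits; the real values live in include/linux/page-flags.h. */
    #define PG_LOCKED_BIT   (1UL << 0)   /* pretend CHECK_AT_PREP-only flag */
    #define PG_HWPOISON_BIT (1UL << 5)   /* stand-in for __PG_HWPOISON */
    #define CHECK_AT_PREP   (PG_LOCKED_BIT | PG_HWPOISON_BIT)

    int main(void)
    {
            unsigned long tail_flags = PG_LOCKED_BIT | PG_HWPOISON_BIT;

            /* Same expression shape as the patch: ~ applies before |. */
            tail_flags &= ~CHECK_AT_PREP | PG_HWPOISON_BIT;

            assert(!(tail_flags & PG_LOCKED_BIT)); /* prep-check flag cleared */
            assert(tail_flags & PG_HWPOISON_BIT);  /* hwpoison flag retained  */
            printf("tail flags after split: %#lx\n", tail_flags);
            return 0;
    }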
@@ -1806,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm,
/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
goto out;
+ if (is_vma_temporary_stack(vma))
+ goto out;
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
pgd = pgd_offset(mm, address);
@@ -1847,7 +1854,6 @@ static void collapse_huge_page(struct mm_struct *mm,
set_pmd_at(mm, address, pmd, _pmd);
spin_unlock(&mm->page_table_lock);
anon_vma_unlock(vma->anon_vma);
- mem_cgroup_uncharge_page(new_page);
goto out;
}
@@ -1893,6 +1899,7 @@ out_up_write:
return;
out:
+ mem_cgroup_uncharge_page(new_page);
#ifdef CONFIG_NUMA
put_page(new_page);
#endif
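The collapse_huge_page() hunks above move the memcg uncharge from one specific failure branch to the shared out: label, so every bail-out path, including the new is_vma_temporary_stack() check, releases the charge exactly once. A rough userspace sketch of that single-exit cleanup shape; charge()/uncharge() here are placeholders, not the real memcg interfaces:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder helpers standing in for the memcg charge/uncharge calls. */
    static bool charge(void)   { return true; }
    static void uncharge(void) { printf("charge released\n"); }

    static int collapse_sketch(bool have_anon_vma, bool temporary_stack)
    {
            if (!charge())
                    return -1;

            if (!have_anon_vma)
                    goto out;      /* like the !anon_vma / vm_ops / vm_file test */
            if (temporary_stack)
                    goto out;      /* like is_vma_temporary_stack(vma) */

            printf("collapsed\n"); /* the success path keeps the charge */
            return 0;
    out:
            uncharge();            /* one uncharge covers every bail-out */
            return -1;
    }

    int main(void)
    {
            collapse_sketch(true, true);   /* exercises the out: path */
            return 0;
    }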
@@ -2027,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
if ((!(vma->vm_flags & VM_HUGEPAGE) &&
!khugepaged_always()) ||
(vma->vm_flags & VM_NOHUGEPAGE)) {
+ skip:
progress++;
continue;
}
-
/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
- if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
- khugepaged_scan.address = vma->vm_end;
- progress++;
- continue;
- }
+ if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+ goto skip;
+ if (is_vma_temporary_stack(vma))
+ goto skip;
+
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
- if (hstart >= hend) {
- progress++;
- continue;
- }
+ if (hstart >= hend)
+ goto skip;
+ if (khugepaged_scan.address > hend)
+ goto skip;
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
- if (khugepaged_scan.address > hend) {
- khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
- progress++;
- continue;
- }
- BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
while (khugepaged_scan.address < hend) {
int ret;
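The hstart/hend arithmetic in the hunk above rounds vm_start up and vm_end down to a PMD boundary, and the VMA is now skipped when no aligned huge page fits (hstart >= hend) or the scan cursor is already past the range. A small sketch of that rounding, assuming a 2MB PMD size and an arbitrary example VMA:

    #include <stdio.h>

    #define HPAGE_PMD_SIZE (2UL << 20)             /* assumed 2MB PMD size */
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

    int main(void)
    {
            unsigned long vm_start = 0x00601000;   /* example VMA bounds */
            unsigned long vm_end   = 0x00a00000;

            /* Round the start up and the end down to a huge page boundary. */
            unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
            unsigned long hend   = vm_end & HPAGE_PMD_MASK;

            if (hstart >= hend)
                    printf("skip: no aligned huge page fits in this vma\n");
            else
                    printf("scan [%#lx, %#lx)\n", hstart, hend);
            return 0;
    }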
@@ -2081,7 +2083,7 @@ breakouterloop:
breakouterloop_mmap_sem:
spin_lock(&khugepaged_mm_lock);
- BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+ VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
/*
* Release the current mm_slot if this mm is about to die, or
* if we scanned all vmas of this mm.
@@ -2236,9 +2238,9 @@ static int khugepaged(void *none)
for (;;) {
mutex_unlock(&khugepaged_mutex);
- BUG_ON(khugepaged_thread != current);
+ VM_BUG_ON(khugepaged_thread != current);
khugepaged_loop();
- BUG_ON(khugepaged_thread != current);
+ VM_BUG_ON(khugepaged_thread != current);
mutex_lock(&khugepaged_mutex);
if (!khugepaged_enabled())