Diffstat (limited to 'mm')
-rw-r--r--  mm/ksm.c              2
-rw-r--r--  mm/memory-failure.c  18
-rw-r--r--  mm/memory.c           7
-rw-r--r--  mm/mempolicy.c        2
-rw-r--r--  mm/page-writeback.c   6
5 files changed, 17 insertions, 18 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index 68710e80994a..5e706e391a02 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 		else
 			ret = VM_FAULT_WRITE;
 		put_page(page);
-	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
 	/*
 	 * We must loop because handle_mm_fault() may back out if there's
 	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
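
The ksm.c hunk widens the set of fault results that ends break_ksm()'s retry loop. As a rough userspace sketch of that pattern (the FAKE_FAULT_* constants and fake_handle_fault() are invented stand-ins, not the kernel API), the loop keeps retrying until a terminal bit shows up:

#include <stdio.h>

/* Invented stand-ins for the VM_FAULT_* bits used in the hunk above. */
#define FAKE_FAULT_WRITE	0x01
#define FAKE_FAULT_SIGBUS	0x02
#define FAKE_FAULT_SIGSEGV	0x04
#define FAKE_FAULT_OOM		0x08

/* Pretend the handler backs out once, then reports a successful write. */
static unsigned int fake_handle_fault(int attempt)
{
	return attempt == 0 ? 0 : FAKE_FAULT_WRITE;
}

int main(void)
{
	unsigned int ret;
	int attempt = 0;

	/* Retry until either success or a hard error bit is reported. */
	do {
		ret = fake_handle_fault(attempt++);
	} while (!(ret & (FAKE_FAULT_WRITE | FAKE_FAULT_SIGBUS |
			  FAKE_FAULT_SIGSEGV | FAKE_FAULT_OOM)));

	printf("settled after %d attempt(s), ret=0x%x\n", attempt, ret);
	return 0;
}
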
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a98c7fce470a..9502057c3c54 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1149,10 +1149,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	if (!PageHuge(p) && !PageTransTail(p)) {
-		if (!PageLRU(p))
-			shake_page(p, 0);
-		if (!PageLRU(p)) {
+	if (!PageHuge(p)) {
+		if (!PageLRU(hpage))
+			shake_page(hpage, 0);
+		if (!PageLRU(hpage)) {
 			/*
 			 * shake_page could have turned it free.
 			 */
@@ -1645,8 +1645,6 @@ static int __soft_offline_page(struct page *page, int flags)
 		 * setting PG_hwpoison.
 		 */
 		if (!is_free_buddy_page(page))
-			lru_add_drain_all();
-		if (!is_free_buddy_page(page))
 			drain_all_pages();
 		SetPageHWPoison(page);
 		if (!is_free_buddy_page(page))
@@ -1725,12 +1723,12 @@ int soft_offline_page(struct page *page, int flags)
 	} else if (ret == 0) { /* for free pages */
 		if (PageHuge(page)) {
 			set_page_hwpoison_huge_page(hpage);
-			dequeue_hwpoisoned_huge_page(hpage);
-			atomic_long_add(1 << compound_order(hpage),
+			if (!dequeue_hwpoisoned_huge_page(hpage))
+				atomic_long_add(1 << compound_order(hpage),
 					&num_poisoned_pages);
 		} else {
-			SetPageHWPoison(page);
-			atomic_long_inc(&num_poisoned_pages);
+			if (!TestSetPageHWPoison(page))
+				atomic_long_inc(&num_poisoned_pages);
 		}
 	}
 	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
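
The last memory-failure.c hunk bumps num_poisoned_pages only when the poison flag actually transitions (or the huge page is actually dequeued), so repeated or concurrent soft-offline attempts cannot double-count. A small userspace sketch of that test-and-set accounting, using C11 atomics and invented names (fake_page, fake_test_set_hwpoison), not the kernel's page-flag API:

#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
	atomic_uint flags;		/* bit 0 plays the role of "hwpoison" */
};

static atomic_long fake_num_poisoned;

/* Atomically set the poison bit; return nonzero if it was already set. */
static int fake_test_set_hwpoison(struct fake_page *p)
{
	return atomic_fetch_or(&p->flags, 1u) & 1u;
}

int main(void)
{
	struct fake_page page = { .flags = 0 };

	/* Marking the same page three times must count it only once. */
	for (int i = 0; i < 3; i++)
		if (!fake_test_set_hwpoison(&page))
			atomic_fetch_add(&fake_num_poisoned, 1);

	printf("poisoned pages counted: %ld\n", atomic_load(&fake_num_poisoned));
	return 0;
}
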
diff --git a/mm/memory.c b/mm/memory.c
index 8348bccd9323..ea985114a453 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1838,7 +1838,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 						else
 							return -EFAULT;
 					}
-					if (ret & VM_FAULT_SIGBUS)
+					if (ret & (VM_FAULT_SIGBUS |
+						   VM_FAULT_SIGSEGV))
 						return i ? i : -EFAULT;
 					BUG();
 				}
@@ -1948,7 +1949,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			return -ENOMEM;
 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 			return -EHWPOISON;
-		if (ret & VM_FAULT_SIGBUS)
+		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 			return -EFAULT;
 		BUG();
 	}
@@ -3237,7 +3238,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Check if we need to add a guard page to the stack */
 	if (check_stack_guard_page(vma, address) < 0)
-		return VM_FAULT_SIGBUS;
+		return VM_FAULT_SIGSEGV;
 	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
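
In the __get_user_pages() hunk the SIGSEGV-style result takes the same exit as SIGBUS: report the pages already pinned if there are any, otherwise -EFAULT. A rough userspace sketch of that convention (all names invented, not the kernel function):

#include <errno.h>
#include <stdio.h>

/* Invented stand-ins for the two fault bits handled above. */
#define FAKE_FAULT_SIGBUS	0x02
#define FAKE_FAULT_SIGSEGV	0x04

/* Return progress made so far, or -EFAULT if nothing was pinned yet. */
static long fake_gup_result(long pages_done, unsigned int fault_ret)
{
	if (fault_ret & (FAKE_FAULT_SIGBUS | FAKE_FAULT_SIGSEGV))
		return pages_done ? pages_done : -EFAULT;
	return pages_done;
}

int main(void)
{
	printf("%ld\n", fake_gup_result(0, FAKE_FAULT_SIGSEGV));	/* -EFAULT */
	printf("%ld\n", fake_gup_result(3, FAKE_FAULT_SIGSEGV));	/* 3: partial progress */
	return 0;
}
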
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9df45ffa9d63..5e118b413dd7 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2663,7 +2663,7 @@ static void __init check_numabalancing_enable(void)
 	if (numabalancing_override)
 		set_numabalancing_state(numabalancing_override == 1);
-	if (nr_node_ids > 1 && !numabalancing_override) {
+	if (num_online_nodes() > 1 && !numabalancing_override) {
 		pr_info("%s automatic NUMA balancing. "
 			"Configure with numa_balancing= or the "
 			"kernel.numa_balancing sysctl",
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 51d8d15f48d7..656a5490f693 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -601,7 +601,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
 	long x;
 	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
-		      limit - setpoint + 1);
+		      (limit - setpoint) | 1);
 	pos_ratio = x;
 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -828,7 +828,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 	 * scale global setpoint to bdi's:
 	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
 	 */
-	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
+	x = div_u64((u64)bdi_thresh << 16, thresh | 1);
 	bdi_setpoint = setpoint * (u64)x >> 16;
 	/*
 	 * Use span=(8*write_bw) in single bdi case as indicated by
@@ -843,7 +843,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 	if (bdi_dirty < x_intercept - span / 4) {
 		pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
-				      x_intercept - bdi_setpoint + 1);
+				      (x_intercept - bdi_setpoint) | 1);
 	} else
 		pos_ratio /= 4;
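
The page-writeback.c hunks all swap a "+ 1" divisor guard for "| 1". With unsigned arithmetic (or a divisor that gets truncated to a narrower type), the "+ 1" form can still end up as zero, while OR-ing in the low bit guarantees a nonzero divisor and barely perturbs large values. A standalone sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long limit = 1000, setpoint = 1001;

	unsigned long bad  = limit - setpoint + 1;	/* wraps: ULONG_MAX + 1 == 0 */
	unsigned long good = (limit - setpoint) | 1;	/* ULONG_MAX, never zero */

	printf("bad divisor:  %lu\n", bad);
	printf("good divisor: %lu\n", good);

	/* For ordinary inputs the two forms are nearly identical. */
	setpoint = 600;
	printf("%lu vs %lu\n", limit - setpoint + 1, (limit - setpoint) | 1);	/* 401 vs 401 */
	return 0;
}
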