/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted usually due to a multi-bit ECC memory or cache
 * failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of not-yet-corrupted (but suspicious) pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity with the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

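/*
 * Filter by device: only handle errors on pages whose backing inode
 * lives on the configured block device (major/minor).  ~0U in either
 * field means "don't care".
 */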
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

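/*
 * Filter by page flags: only handle pages whose user-visible flags
 * (as reported by stable_page_flags()) match the configured
 * mask/value pair.
 */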
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

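/*
 * Main filter entry point, called by the core handler and the error
 * injectors.  Returns 0 if the page should be handled and -EINVAL if
 * it should be skipped because one of the filters above rejected it.
 */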
int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if error happened in current execution context
 */
static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn, struct page *page, int flags)
{
	struct siginfo si;
	int ret;

	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;

	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
		si.si_code = BUS_MCEERR_AR;
		ret = force_sig_info(SIGBUS, &si, current);
	} else {
		/*
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		si.si_code = BUS_MCEERR_AO;
		ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
	}
	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
	return ret;
}

/*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can
 * handle.
 */
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages(page_zone(p));
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call drop_slab_node() here (which would also shrink
	 * other caches) if access is not potentially fatal.
	 */
	if (access)
		drop_slab_node(page_to_nid(p));
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handle it.
 */

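/*
 * One entry per process that needs to be signalled: the task, the user
 * space address of the poisoned page in that task (if it could be
 * resolved), and the list linkage.
 */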
struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	char addr_valid;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do.  We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill,
		       struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could be also a mremap. Since that's
	 * likely very rare kill anyways just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the list
 * (this is used for clean pages which do not need killing)
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
			  int fail, struct page *page, unsigned long pfn,
			  int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk->tsk, tk->addr, trapno,
					   pfn, page, flags) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t)
		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
			return t;
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill," and otherwise returns NULL.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	struct task_struct *t;
	if (!tsk->mm)
		return NULL;
	if (force_early)
		return tsk;
	t = find_early_kill_thread(tsk);
	if (t)
		return t;
	if (sysctl_memory_failure_early_kill)
		return tsk;
	return NULL;
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page_to_pgoff(page);
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk, force_early);
	else
		collect_procs_file(page, tokill, &tk, force_early);
	kfree(tk);
}

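/*
 * Human readable names for the recovery outcomes and page types used
 * when reporting what the handler did with a page.
 */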
static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_POISONED_HUGE]		= "huge page already hardware poisoned",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_BUDDY_2ND]		= "free buddy page (2nd try)",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(p);

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		page_cache_release(p);
		return 0;
	}
	return -EIO;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
	return MF_FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = MF_FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p))
		return MF_RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime
		 */
		return MF_FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_info("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate.
		 * This fails on dirty pages or anything with private pages.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}

/*
 * Dirty pagecache page
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.,
		 * which check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors:
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped.  If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd in between
		 * and the page is dropped in the meantime, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return MF_DELAYED;
	else
		return MF_FAILED;
}

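/*
 * Clean swap cache page: drop it from the swap cache and the LRU;
 * a later fault will read fresh data back from swap.
 */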
static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return MF_RECOVERED;
	else
		return MF_FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res = 0;
	struct page *hpage = compound_head(p);

	if (!PageHuge(hpage))
		return MF_DELAYED;

	/*
	 * We can safely recover from error on free or reserved (i.e.
	 * not in-use) hugepage by dequeuing it from freelist.
	 * To check whether a hugepage is in-use or not, we can't use
	 * page->lru because it can be used in other hugepage operations,
	 * such as __unmap_hugepage_range() and gather_surplus_pages().
	 * So instead we use page_mapping() and PageAnon().
	 * We assume that this function is called with page lock held,
	 * so there is no race between isolation and mapping/unmapping.
	 */
	if (!(page_mapping(hpage) || PageAnon(hpage))) {
		res = dequeue_hwpoisoned_huge_page(hpage);
		if (!res)
			return MF_RECOVERED;
	}
	return MF_DELAYED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access a page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved

/*
 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
 */
static void action_result(unsigned long pfn, enum mf_action_page_type type,
			  enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	pr_err("MCE %#lx: recovery action for %s: %s\n",
		pfn, action_page_types[type], action_name[result]);
}

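/*
 * Run the handler for the matched page state, then verify that the
 * page is no longer referenced by anybody else (the error handler's
 * own reference is expected).  Returns 0 if the page was recovered or
 * its handling can safely be delayed, -EBUSY otherwise.
 */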
static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
		count--;
	if (count != 0) {
		printk(KERN_ERR
		       "MCE %#lx: %s still referenced by %d users\n",
		       pfn, action_page_types[ps->type], count);
		result = MF_FAILED;
	}
	action_result(pfn, ps->type, result);

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @page:	raw error page (hit by memory error)
 *
 * Return: 0 if it failed to grab the refcount, otherwise a non-zero value.
 */
int get_hwpoison_page(struct page *page)
{
	struct page *head = compound_head(page);

	if (PageHuge(head))
		return get_page_unless_zero(head);

	/*
	 * Thp tail page has special refcounting rule (refcount of tail pages
	 * is stored in ->_mapcount,) so we can't call get_page_unless_zero()
	 * directly for tail pages.
	 */
	if (PageTransHuge(head)) {
		/*
		 * Non anonymous thp exists only in allocation/free time. We
		 * can't handle such a case correctly, so let's give it up.
		 * This should be better than triggering BUG_ON when kernel
		 * tries to touch the "partially handled" page.
		 */
		if (!PageAnon(head)) {
			pr_err("MCE: %#lx: non anonymous thp\n",
				page_to_pfn(page));
			return 0;
		}

		if (get_page_unless_zero(head)) {
			if (PageTail(page))
				get_page(page);
			return 1;
		} else {
			return 0;
		}
	}

	return get_page_unless_zero(page);
}
EXPORT_SYMBOL_GPL(get_hwpoison_page);

/**
 * put_hwpoison_page() - Put refcount for memory error handling
 * @page:	raw error page (hit by memory error)
 */
void put_hwpoison_page(struct page *page)
{
	struct page *head = compound_head(page);

	if (PageHuge(head)) {
		put_page(head);
		return;
	}

	if (PageTransHuge(head))
		if (page != head)
			put_page(head);

	put_page(page);
}
EXPORT_SYMBOL_GPL(put_hwpoison_page);

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno, int flags, struct page **hpagep)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int kill = 1, forcekill;
	struct page *hpage = *hpagep;

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return SWAP_SUCCESS;

	if (PageKsm(p)) {
		pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
		return SWAP_FAIL;
	}

	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form.  This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	ret = try_to_unmap(hpage, ttu);
	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not.  Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed.  When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, trapno,
		      ret != SWAP_SUCCESS, p, pfn, flags);

	return ret;
}

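/*
 * Error containment on hugetlb pages is done per hugepage, so
 * PG_hwpoison is set/cleared on every subpage of the huge page.
 */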
static void set_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		ClearPageHWPoison(hpage + i);
}

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
int memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	struct page *hpage;
	struct page *orig_head;
	int res;
	unsigned int nr_pages;
	unsigned long page_flags;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	orig_head = hpage = compound_head(p);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	/*
	 * Currently errors on hugetlbfs pages are measured in hugepage units,
	 * so nr_pages should be 1 << compound_order.  OTOH when errors are on
	 * transparent hugepages, they are supposed to be split and error
	 * measurement is done in normal page units.  So nr_pages should be one
	 * in this case.
	 */
	if (PageHuge(p))
		nr_pages = 1 << compound_order(hpage);
	else /* normal page or thp */
		nr_pages = 1;
	num_poisoned_pages_add(nr_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's a free hugepage, which is also safe:
	 *    an affected hugepage will be dequeued from hugepage freelist,
	 *    so there's no concern about reusing it ever after.
	 * 3) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 *    In fact it's dangerous to directly bump up page count from 0,
	 *    that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
			return 0;
		} else if (PageHuge(hpage)) {
			/*
			 * Check "filter hit" and "race with other subpage."
			 */
			lock_page(hpage);
			if (PageHWPoison(hpage)) {
				if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
				    || (p != hpage && TestSetPageHWPoison(hpage))) {
					num_poisoned_pages_sub(nr_pages);
					unlock_page(hpage);
					return 0;
				}
			}
			set_page_hwpoison_huge_page(hpage);
			res = dequeue_hwpoisoned_huge_page(hpage);
			action_result(pfn, MF_MSG_FREE_HUGE,
				      res ? MF_IGNORED : MF_DELAYED);
			unlock_page(hpage);
			return res;
		} else {
			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
			return -EBUSY;
		}
	}

	if (!PageHuge(p) && PageTransHuge(hpage)) {
		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
			if (!PageAnon(hpage))
				pr_err("MCE: %#lx: non anonymous thp\n", pfn);
			else
				pr_err("MCE: %#lx: thp split failed\n", pfn);
			if (TestClearPageHWPoison(p))
				num_poisoned_pages_sub(nr_pages);
			put_hwpoison_page(p);
			return -EBUSY;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
		hpage = compound_head(p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageHuge(p)) {
		if (!PageLRU(p))
			shake_page(p, 0);
		if (!PageLRU(p)) {
			/*
			 * shake_page could have turned it free.
			 */
			if (is_free_buddy_page(p)) {
				if (flags & MF_COUNT_INCREASED)
					action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
				else
					action_result(pfn, MF_MSG_BUDDY_2ND,
						      MF_DELAYED);
				return 0;
			}
		}
	}

Jens Axboe7eaceac2011-03-10 08:52:07 +01001199 lock_page(hpage);
Wu Fengguang847ce402009-12-16 12:19:58 +01001200
1201 /*
Andi Kleenf37d4292014-08-06 16:06:49 -07001202 * The page could have changed compound pages during the locking.
1203 * If this happens just bail out.
1204 */
Naoya Horiguchi415c64c2015-06-24 16:56:45 -07001205 if (PageCompound(p) && compound_head(p) != orig_head) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001206 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
Andi Kleenf37d4292014-08-06 16:06:49 -07001207 res = -EBUSY;
1208 goto out;
1209 }
1210
1211 /*
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001212 * We use page flags to determine what action should be taken, but
1213 * the flags can be modified by the error containment action. One
1214 * example is an mlocked page, where PG_mlocked is cleared by
1215 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1216 * correctly, we save a copy of the page flags at this time.
1217 */
James Morsebfbd2442017-06-16 14:02:29 -07001218 if (PageHuge(p))
1219 page_flags = hpage->flags;
1220 else
1221 page_flags = p->flags;
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001222
1223 /*
Wu Fengguang847ce402009-12-16 12:19:58 +01001224 * unpoison always clear PG_hwpoison inside page lock
1225 */
1226 if (!PageHWPoison(p)) {
Wu Fengguangd95ea512009-12-16 12:19:58 +01001227 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
Naoya Horiguchi8e304562015-09-08 15:03:24 -07001228 num_poisoned_pages_sub(nr_pages);
Naoya Horiguchia09233f2015-08-06 15:46:58 -07001229 unlock_page(hpage);
Wanpeng Li665d9da2015-09-08 15:03:21 -07001230 put_hwpoison_page(hpage);
Naoya Horiguchia09233f2015-08-06 15:46:58 -07001231 return 0;
Wu Fengguang847ce402009-12-16 12:19:58 +01001232 }
Wu Fengguang7c116f22009-12-16 12:19:59 +01001233 if (hwpoison_filter(p)) {
1234 if (TestClearPageHWPoison(p))
Naoya Horiguchi8e304562015-09-08 15:03:24 -07001235 num_poisoned_pages_sub(nr_pages);
Naoya Horiguchi7af446a2010-05-28 09:29:17 +09001236 unlock_page(hpage);
Wanpeng Li665d9da2015-09-08 15:03:21 -07001237 put_hwpoison_page(hpage);
Wu Fengguang7c116f22009-12-16 12:19:59 +01001238 return 0;
1239 }
Wu Fengguang847ce402009-12-16 12:19:58 +01001240
Chen Yucong0bc1f8b2014-07-02 15:22:37 -07001241 if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
1242 goto identify_page_state;
1243
Naoya Horiguchi7013feb2010-05-28 09:29:18 +09001244 /*
1245 * For error on the tail page, we should set PG_hwpoison
1246 * on the head page to show that the hugepage is hwpoisoned
1247 */
	if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
		action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED);
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		return 0;
	}
	/*
	 * Set PG_hwpoison on all pages in an error hugepage,
	 * because containment is done in hugepage unit for now.
	 * Since we have done TestSetPageHWPoison() for the head page with
	 * page lock held, we can safely set PG_hwpoison bits on tail pages.
	 */
	if (PageHuge(p))
		set_page_hwpoison_huge_page(hpage);

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
	 *
	 * When the raw error page is a thp tail page, hpage points to the raw
	 * page after thp split.
	 */
	if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
	    != SWAP_SUCCESS) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

identify_page_state:
	res = -EBUSY;
	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	res = page_action(ps, p, pfn);
out:
	unlock_page(hpage);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
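
/*
 * Illustrative sketch (not part of the original source): a synchronous
 * caller, such as an architecture machine-check handler running in a
 * context where sleeping is allowed, may report a corrupted page
 * directly.  The function and variable names below are hypothetical;
 * real callers pass their own trap number and flags.
 *
 *	static void example_report_corruption(u64 phys_addr)
 *	{
 *		unsigned long pfn = phys_addr >> PAGE_SHIFT;
 *
 *		if (pfn_valid(pfn))
 *			memory_failure(pfn, 0, 0);
 *	}
 *
 * Callers that cannot sleep (IRQ/NMI-like context) should use
 * memory_failure_queue() below instead.
 */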

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)
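
/*
 * Sizing note (added for clarity): MEMORY_FAILURE_FIFO_SIZE = 1 << 4 = 16
 * entries per CPU.  Each entry only records a pfn, a trap number and flags,
 * so the per-CPU footprint is tiny while still absorbing a short burst of
 * reports from interrupt context before the workqueue drains them.
 */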

struct memory_failure_entry {
	unsigned long pfn;
	int trapno;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int trapno, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.trapno =	trapno,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
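
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): an error handler running in interrupt context defers the
 * heavyweight recovery work instead of calling memory_failure() directly:
 *
 *	void example_irq_error_handler(unsigned long pfn)
 *	{
 *		memory_failure_queue(pfn, 0, 0);
 *	}
 *
 * The entry is pushed onto this CPU's kfifo, and memory_failure() (or
 * soft_offline_page() when MF_SOFT_OFFLINE is set) runs later from the
 * per-CPU work item in process context.
 */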

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
		else
			memory_failure(entry.pfn, entry.trapno, entry.flags);
	}
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);
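
/*
 * Note (added for clarity): memory_failure_init() only sets up the per-CPU
 * locks, kfifos and work items.  Registering it as a core_initcall runs it
 * early in boot, well before normal device initialization, so the queueing
 * path above is usable as soon as possible.
 */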

#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the page to be unpoisoned
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned int nr_pages;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		unpoison_pr_info("MCE: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("MCE: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("MCE: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("MCE: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	/*
	 * unpoison_memory() can encounter a thp only while the thp is being
	 * handled by memory_failure() and the page lock is not held yet.
	 * In such a case, we yield to memory_failure() and make unpoison fail.
	 */
	if (!PageHuge(page) && PageTransHuge(page)) {
		unpoison_pr_info("MCE: Memory failure is now running on %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	nr_pages = 1 << compound_order(page);

	if (!get_hwpoison_page(p)) {
		/*
		 * Since a HWPoisoned hugepage should have a non-zero refcount,
		 * a race between memory failure and unpoison seems to have
		 * happened. In such a case unpoison fails and memory failure
		 * runs to the end.
		 */
		if (PageHuge(page)) {
			unpoison_pr_info("MCE: Memory failure is now running on free hugepage %#lx\n",
					 pfn, &unpoison_rs);
			return 0;
		}
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unpoison_pr_info("MCE: Software-unpoisoned free page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	lock_page(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of page lock.
	 * That's acceptable because that won't trigger kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		unpoison_pr_info("MCE: Software-unpoisoned page %#lx\n",
				 pfn, &unpoison_rs);
		num_poisoned_pages_sub(nr_pages);
		freeit = 1;
		if (PageHuge(page))
			clear_page_hwpoison_huge_page(page);
	}
	unlock_page(page);

	put_hwpoison_page(page);
	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
		put_hwpoison_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);
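
/*
 * Usage note (added for clarity): unpoison is only meaningful for
 * software-injected poison.  With CONFIG_HWPOISON_INJECT, the debugfs files
 * provided by mm/hwpoison-inject.c (typically
 * /sys/kernel/debug/hwpoison/corrupt-pfn and unpoison-pfn) can be used to
 * inject and then clear poison on a given pfn for testing, e.g.:
 *
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 *	echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 *
 * The paths assume the usual debugfs mount point and may differ on a given
 * system.
 */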

static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);
	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
						   nid);
	else
		return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
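
/*
 * Note (added for clarity): the migration target is allocated on the same
 * node as the failing page (page_to_nid()) so that soft offlining does not
 * silently change the NUMA placement of the data it moves.
 */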

/*
 * Safely get reference count of an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with increased page count, otherwise not.
 */
static int __get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * When the target page is a free hugepage, just remove it
	 * from the free hugepage list.
	 */
	if (!get_hwpoison_page(p)) {
		if (PageHuge(p)) {
			pr_info("%s: %#lx free huge page\n", __func__, pfn);
			ret = 0;
		} else if (is_free_buddy_page(p)) {
			pr_info("%s: %#lx free buddy page\n", __func__, pfn);
			ret = 0;
		} else {
			pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
				__func__, pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	return ret;
}

static int get_any_page(struct page *page, unsigned long pfn, int flags)
{
	int ret = __get_any_page(page, pfn, flags);

	if (ret == 1 && !PageHuge(page) && !PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_hwpoison_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = __get_any_page(page, pfn, 0);
		if (ret == 1 && !PageLRU(page)) {
			/* Drop page reference which is from __get_any_page() */
			put_hwpoison_page(page);
			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
			return -EIO;
		}
	}
	return ret;
}
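
/*
 * Note (added for clarity): the shake_page() call above is a best-effort
 * attempt to flush per-CPU LRU and free-list state so that a page which is
 * about to become free actually reaches the buddy allocator, where the
 * second __get_any_page() can recognize it as free.  See shake_page() for
 * the authoritative behaviour.
 */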

static int soft_offline_huge_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	LIST_HEAD(pagelist);

	/*
	 * This double-check of PageHWPoison is to avoid the race with
	 * memory_failure(). See also comment in __soft_offline_page().
	 */
	lock_page(hpage);
	if (PageHWPoison(hpage)) {
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}
	unlock_page(hpage);

	ret = isolate_huge_page(hpage, &pagelist);
	/*
	 * get_any_page() and isolate_huge_page() each take a refcount,
	 * so we need to drop one here.
	 */
	put_hwpoison_page(hpage);
	if (!ret) {
		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
		return -EBUSY;
	}

	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
				MIGRATE_SYNC, MR_MEMORY_FAILURE);
	if (ret) {
		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
			pfn, ret, page->flags);
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
		if (ret > 0)
			ret = -EIO;
	} else {
		/* overcommit hugetlb page will be freed to buddy */
		if (PageHuge(page)) {
			set_page_hwpoison_huge_page(hpage);
			dequeue_hwpoisoned_huge_page(hpage);
			num_poisoned_pages_add(1 << compound_order(hpage));
		} else {
			SetPageHWPoison(page);
			num_poisoned_pages_inc();
		}
	}
	return ret;
}

static int __soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	/*
	 * Check PageHWPoison again inside page lock because PageHWPoison
	 * is set by memory_failure() outside page lock. Note that
	 * memory_failure() also double-checks PageHWPoison inside page lock,
	 * so there's no race between soft_offline_page() and memory_failure().
	 */
	lock_page(page);
	wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_hwpoison_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}
	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);
	/*
	 * RED-PEN: it would be better to keep the page isolated here, but we
	 * would need to fix the isolation locking first.
	 */
	if (ret == 1) {
		put_hwpoison_page(page);
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		SetPageHWPoison(page);
		num_poisoned_pages_inc();
		return 0;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	/*
	 * Drop the page reference that came from get_any_page();
	 * a successful isolate_lru_page() already took another one.
	 */
	put_hwpoison_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
					MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (ret) {
			if (!list_empty(&pagelist)) {
				list_del(&page->lru);
				dec_zone_page_state(page, NR_ISOLATED_ANON +
						page_is_file_cache(page));
				putback_lru_page(page);
			}

			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
			pfn, ret, page_count(page), page->flags);
	}
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);

	if (PageHWPoison(page)) {
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		if (flags & MF_COUNT_INCREASED)
			put_hwpoison_page(page);
		return -EBUSY;
	}
	if (!PageHuge(page) && PageTransHuge(hpage)) {
		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
			pr_info("soft offline: %#lx: failed to split THP\n",
				pfn);
			if (flags & MF_COUNT_INCREASED)
				put_hwpoison_page(page);
			return -EBUSY;
		}
	}

	get_online_mems();

	ret = get_any_page(page, pfn, flags);
	put_online_mems();
	if (ret > 0) { /* for in-use pages */
		if (PageHuge(page))
			ret = soft_offline_huge_page(page, flags);
		else
			ret = __soft_offline_page(page, flags);
	} else if (ret == 0) { /* for free pages */
		if (PageHuge(page)) {
			set_page_hwpoison_huge_page(hpage);
			if (!dequeue_hwpoisoned_huge_page(hpage))
				num_poisoned_pages_add(1 << compound_order(hpage));
		} else {
			if (!TestSetPageHWPoison(page))
				num_poisoned_pages_inc();
		}
	}
	return ret;
}
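
/*
 * Usage note (added for clarity): the policy on when to soft offline a page
 * lives in user space.  On kernels that expose it, a RAS daemon typically
 * triggers this path through the memory sysfs interface, along the lines of:
 *
 *	echo $phys_addr > /sys/devices/system/memory/soft_offline_page
 *
 * The exact interface depends on the kernel configuration; this is only a
 * sketch of the common setup.
 */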