/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it
 * applies to all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, just as
 * would have happened if the vma had been large enough at page-fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);
static void khugepaged_slab_exit(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};


static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

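	/*
	 * Illustrative arithmetic (not from the original source; the numbers
	 * are only an example): with 4KB base pages pageblock_nr_pages is
	 * typically 512, and a single-node x86-64 box has about 3 populated
	 * zones, so recommended_min = 512*3*2 + 512*3*3*3 = 16896 pages,
	 * which the shift above turns into 67584 kB (roughly 66MB), unless
	 * the 5%-of-lowmem cap applied first.
	 */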
	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu "
				"to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
	return 0;
}

static int start_stop_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	return err;
}

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
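
/*
 * Illustrative summary of the refcounting above (not from the original
 * source): the first get_huge_zero_page() allocates the zero huge page and
 * sets the count to 2: one reference for the caller and one "cached"
 * reference that only the shrinker below may drop. Later gets and puts move
 * the count between 1 and N, and the shrinker frees the page only when the
 * cached reference is the last one left.
 */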

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_stop_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

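/*
 * Illustrative usage (not part of the original file): the group above is
 * exposed as /sys/kernel/mm/transparent_hugepage/, so for example
 * "echo madvise > /sys/kernel/mm/transparent_hugepage/enabled" limits THP
 * to MADV_HUGEPAGE regions, and the "defrag" and "use_zero_page" files in
 * the same directory tune the remaining flags handled above.
 */
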
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
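/*
 * Illustrative example (not from the original source): with 4KB base pages
 * HPAGE_PMD_NR is 512, so the default max_ptes_none of 511 lets khugepaged
 * collapse a 2MB range even if only one pte in it is currently mapped,
 * while setting it to 0 collapses only ranges that are already fully
 * populated and therefore never grows the memory footprint.
 */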
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
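
/*
 * Illustrative note (not part of the original file): this group shows up as
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, so the scan rate can be
 * tuned with e.g. "echo 4096 > pages_to_scan" in that directory, while
 * pages_collapsed and full_scans are read-only progress counters.
 */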

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_slab_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_slab_exit();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
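
/*
 * Illustrative usage (not part of the original file): booting with
 * transparent_hugepage=always, transparent_hugepage=madvise or
 * transparent_hugepage=never on the kernel command line selects the same
 * modes as the sysfs "enabled" file, before userspace is up.
 */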

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;
	entry = mk_pmd(page, prot);
	entry = pmd_mkhuge(entry);
	return entry;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	spinlock_t *ptl;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
		return VM_FAULT_OOM;

	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(page, memcg);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		mem_cgroup_commit_charge(page, memcg, false);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&mm->nr_ptes);
		spin_unlock(ptl);
	}

	return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
			transparent_hugepage_use_zero_page()) {
		spinlock_t *ptl;
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		pgtable = pte_alloc_one(mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = get_huge_zero_page();
		if (unlikely(!zero_page)) {
			pte_free(mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		ptl = pmd_lock(mm, pmd);
		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
				zero_page);
		spin_unlock(ptl);
		if (!set) {
			pte_free(mm, pgtable);
			put_huge_zero_page();
		}
		return 0;
	}
	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	count_vm_event(THP_FAULT_ALLOC);
	return 0;
}
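
/*
 * Illustrative note (not from the original source): VM_FAULT_FALLBACK from
 * the huge pmd fault path above tells the generic fault handler to retry
 * the fault with ordinary base-size ptes, so failing to set up a hugepage
 * is never fatal; it only loses the TLB benefit for this address range.
 */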

static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (pmd_none(*pmd)) {
		entry = pmd_mkhuge(pfn_pmd(pfn, prot));
		if (write) {
			entry = pmd_mkyoung(pmd_mkdirty(entry));
			entry = maybe_pmd_mkwrite(entry, vma);
		}
		set_pmd_at(mm, addr, pmd, entry);
		update_mmu_cache_pmd(vma, addr, pmd);
	}
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}

int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, unsigned long pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;
	if (track_pfn_insert(vma, &pgprot, pfn))
		return VM_FAULT_SIGBUS;
	return insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
}
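
/*
 * Illustrative note (not part of the original file): vmf_insert_pfn_pmd()
 * is intended for drivers and filesystems (DAX is the expected user) that
 * want to map a pmd-sized range of device memory at fault time without any
 * struct page backing it.
 */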

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		bool set;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = get_huge_zero_page();
		set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
		ret = 0;
		goto out_unlock;
	}

	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	atomic_long_inc(&dst_mm->nr_ptes);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

void huge_pmd_set_accessed(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long address,
			   pmd_t *pmd, pmd_t orig_pmd,
			   int dirty)
{
	spinlock_t *ptl;
	pmd_t entry;
	unsigned long haddr;

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	haddr = address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
		update_mmu_cache_pmd(vma, address, pmd);

unlock:
	spin_unlock(ptl);
}

/*
 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
 * during copy_user_huge_page()'s copy_page_rep(): in the case when
 * the source page gets split and a tail freed before copy completes.
 * Called under pmd_lock of checked pmd, so safe from splitting itself.
 */
static void get_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		atomic_add(HPAGE_PMD_NR, &page->_count);
		while (++page < endpage)
			get_huge_page_tail(page);
	} else {
		get_page(page);
	}
}

static void put_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		while (page < endpage)
			put_page(page++);
	} else {
		put_page(page);
	}
}

static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
						   &memcg))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		mem_cgroup_commit_charge(pages[i], memcg, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(ptl);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		mem_cgroup_cancel_charge(pages[i], memcg);
		put_page(pages[i]);
	}
	kfree(pages);
	goto out;
}

int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	spinlock_t *ptl;
	int ret = 0;
	struct page *page = NULL, *new_page;
	struct mem_cgroup *memcg;
	unsigned long haddr;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t huge_gfp;			/* for allocation and charge */

	ptl = pmd_lockptr(mm, pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);
	haddr = address & HPAGE_PMD_MASK;
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(ptl);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache_pmd(vma, address, pmd);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_user_huge_page(page);
	spin_unlock(ptl);
alloc:
	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow()) {
		huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
	} else
		new_page = NULL;

	if (unlikely(!new_page)) {
		if (!page) {
			split_huge_page_pmd(vma, address, pmd);
			ret |= VM_FAULT_FALLBACK;
		} else {
			ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
					pmd, orig_pmd, page, haddr);
			if (ret & VM_FAULT_OOM) {
				split_huge_page(page);
				ret |= VM_FAULT_FALLBACK;
			}
			put_user_huge_page(page);
		}
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
		put_page(new_page);
		if (page) {
			split_huge_page(page);
			put_user_huge_page(page);
		} else
			split_huge_page_pmd(vma, address, pmd);
		ret |= VM_FAULT_FALLBACK;
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	count_vm_event(THP_FAULT_ALLOC);

	if (!page)
		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
	else
		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	spin_lock(ptl);
	if (page)
		put_user_huge_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(new_page, memcg);
		put_page(new_page);
		goto out_mn;
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		pmdp_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		mem_cgroup_commit_charge(new_page, memcg, false);
		lru_cache_add_active_or_unevictable(new_page, vma);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, address, pmd);
		if (!page) {
			add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
			put_huge_zero_page();
		} else {
			VM_BUG_ON_PAGE(!PageHead(page), page);
			page_remove_rmap(page);
			put_page(page);
		}
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(ptl);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return ret;
out_unlock:
	spin_unlock(ptl);
	return ret;
}
1240
David Rientjesb676b292012-10-08 16:34:03 -07001241struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001242 unsigned long addr,
1243 pmd_t *pmd,
1244 unsigned int flags)
1245{
David Rientjesb676b292012-10-08 16:34:03 -07001246 struct mm_struct *mm = vma->vm_mm;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001247 struct page *page = NULL;
1248
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001249 assert_spin_locked(pmd_lockptr(mm, pmd));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001250
1251 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1252 goto out;
1253
Kirill A. Shutemov85facf22013-02-04 14:28:42 -08001254 /* Avoid dumping huge zero page */
1255 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1256 return ERR_PTR(-EFAULT);
1257
Mel Gorman2b4847e2013-12-18 17:08:32 -08001258 /* Full NUMA hinting faults to serialise migration in fault paths */
Mel Gorman8a0516e2015-02-12 14:58:22 -08001259 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
Mel Gorman2b4847e2013-12-18 17:08:32 -08001260 goto out;
1261
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001262 page = pmd_page(*pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08001263 VM_BUG_ON_PAGE(!PageHead(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001264 if (flags & FOLL_TOUCH) {
1265 pmd_t _pmd;
1266 /*
1267 * We should set the dirty bit only for FOLL_WRITE but
1268 * for now the dirty bit in the pmd is meaningless.
1269		 * If the dirty bit ever becomes meaningful and
1270		 * we only set it with FOLL_WRITE, an atomic
1271		 * set_bit will be required on the pmd to set the
1272		 * young bit, instead of the current set_pmd_at.
1273 */
1274 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
Aneesh Kumar K.V8663890a2013-06-06 00:20:34 -07001275 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1276 pmd, _pmd, 1))
1277 update_mmu_cache_pmd(vma, addr, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001278 }
Kirill A. Shutemov84d33df2015-04-14 15:44:37 -07001279 if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
David Rientjesb676b292012-10-08 16:34:03 -07001280 if (page->mapping && trylock_page(page)) {
1281 lru_add_drain();
1282 if (page->mapping)
1283 mlock_vma_page(page);
1284 unlock_page(page);
1285 }
1286 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001287 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
Sasha Levin309381fea2014-01-23 15:52:54 -08001288 VM_BUG_ON_PAGE(!PageCompound(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001289 if (flags & FOLL_GET)
Andrea Arcangeli70b50f92011-11-02 13:36:59 -07001290 get_page_foll(page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001291
1292out:
1293 return page;
1294}
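
/*
 * Minimal caller sketch (implied by the assert_spin_locked() check above):
 * the pmd lock must already be held around the call, roughly
 *
 *	ptl = pmd_lock(mm, pmd);
 *	if (likely(pmd_trans_huge(*pmd)))
 *		page = follow_trans_huge_pmd(vma, address, pmd, flags);
 *	spin_unlock(ptl);
 *
 * which is roughly the shape of the GUP path that calls in here.
 */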
1295
Mel Gormand10e63f2012-10-25 14:16:31 +02001296/* NUMA hinting page fault entry point for trans huge pmds */
Mel Gorman4daae3b2012-11-02 11:33:45 +00001297int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1298 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
Mel Gormand10e63f2012-10-25 14:16:31 +02001299{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001300 spinlock_t *ptl;
Mel Gormanb8916632013-10-07 11:28:44 +01001301 struct anon_vma *anon_vma = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001302 struct page *page;
Mel Gormand10e63f2012-10-25 14:16:31 +02001303 unsigned long haddr = addr & HPAGE_PMD_MASK;
Mel Gorman8191acb2013-10-07 11:28:45 +01001304 int page_nid = -1, this_nid = numa_node_id();
Peter Zijlstra90572892013-10-07 11:29:20 +01001305 int target_nid, last_cpupid = -1;
Mel Gorman8191acb2013-10-07 11:28:45 +01001306 bool page_locked;
1307 bool migrated = false;
Mel Gormanb191f9b2015-03-25 15:55:40 -07001308 bool was_writable;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001309 int flags = 0;
Mel Gormand10e63f2012-10-25 14:16:31 +02001310
Mel Gormanc0e7cad2015-02-12 14:58:41 -08001311 /* A PROT_NONE fault should not end up here */
1312 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1313
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001314 ptl = pmd_lock(mm, pmdp);
Mel Gormand10e63f2012-10-25 14:16:31 +02001315 if (unlikely(!pmd_same(pmd, *pmdp)))
1316 goto out_unlock;
1317
Mel Gormande466bd2013-12-18 17:08:42 -08001318 /*
1319 * If there are potential migrations, wait for completion and retry
1320 * without disrupting NUMA hinting information. Do not relock and
1321 * check_same as the page may no longer be mapped.
1322 */
1323 if (unlikely(pmd_trans_migrating(*pmdp))) {
Mel Gorman5d833062015-02-12 14:58:16 -08001324 page = pmd_page(*pmdp);
Mel Gormande466bd2013-12-18 17:08:42 -08001325 spin_unlock(ptl);
Mel Gorman5d833062015-02-12 14:58:16 -08001326 wait_on_page_locked(page);
Mel Gormande466bd2013-12-18 17:08:42 -08001327 goto out;
1328 }
1329
Mel Gormand10e63f2012-10-25 14:16:31 +02001330 page = pmd_page(pmd);
Mel Gormana1a46182013-10-07 11:28:50 +01001331 BUG_ON(is_huge_zero_page(page));
Mel Gorman8191acb2013-10-07 11:28:45 +01001332 page_nid = page_to_nid(page);
Peter Zijlstra90572892013-10-07 11:29:20 +01001333 last_cpupid = page_cpupid_last(page);
Mel Gorman03c5a6e2012-11-02 14:52:48 +00001334 count_vm_numa_event(NUMA_HINT_FAULTS);
Rik van Riel04bb2f92013-10-07 11:29:36 +01001335 if (page_nid == this_nid) {
Mel Gorman03c5a6e2012-11-02 14:52:48 +00001336 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
Rik van Riel04bb2f92013-10-07 11:29:36 +01001337 flags |= TNF_FAULT_LOCAL;
1338 }
Mel Gorman4daae3b2012-11-02 11:33:45 +00001339
Mel Gormanbea66fb2015-03-25 15:55:37 -07001340 /* See similar comment in do_numa_page for explanation */
1341 if (!(vma->vm_flags & VM_WRITE))
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001342 flags |= TNF_NO_GROUP;
1343
1344 /*
Mel Gormanff9042b2013-10-07 11:28:43 +01001345 * Acquire the page lock to serialise THP migrations but avoid dropping
1346 * page_table_lock if at all possible
1347 */
Mel Gormanb8916632013-10-07 11:28:44 +01001348 page_locked = trylock_page(page);
1349 target_nid = mpol_misplaced(page, vma, haddr);
1350 if (target_nid == -1) {
1351 /* If the page was locked, there are no parallel migrations */
Mel Gormana54a4072013-10-07 11:28:46 +01001352 if (page_locked)
Mel Gormanb8916632013-10-07 11:28:44 +01001353 goto clear_pmdnuma;
Mel Gorman2b4847e2013-12-18 17:08:32 -08001354 }
Mel Gorman4daae3b2012-11-02 11:33:45 +00001355
Mel Gormande466bd2013-12-18 17:08:42 -08001356 /* Migration could have started since the pmd_trans_migrating check */
Mel Gorman2b4847e2013-12-18 17:08:32 -08001357 if (!page_locked) {
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001358 spin_unlock(ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001359 wait_on_page_locked(page);
Mel Gormana54a4072013-10-07 11:28:46 +01001360 page_nid = -1;
Mel Gormanb8916632013-10-07 11:28:44 +01001361 goto out;
1362 }
1363
Mel Gorman2b4847e2013-12-18 17:08:32 -08001364 /*
1365 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1366	 * to serialise splits
1367 */
Mel Gormanb8916632013-10-07 11:28:44 +01001368 get_page(page);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001369 spin_unlock(ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001370 anon_vma = page_lock_anon_vma_read(page);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001371
Peter Zijlstrac69307d2013-10-07 11:28:41 +01001372 /* Confirm the PMD did not change while page_table_lock was released */
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001373 spin_lock(ptl);
Mel Gormanb32967f2012-11-19 12:35:47 +00001374 if (unlikely(!pmd_same(pmd, *pmdp))) {
1375 unlock_page(page);
1376 put_page(page);
Mel Gormana54a4072013-10-07 11:28:46 +01001377 page_nid = -1;
Mel Gormanb32967f2012-11-19 12:35:47 +00001378 goto out_unlock;
1379 }
Mel Gormanff9042b2013-10-07 11:28:43 +01001380
Mel Gormanc3a489c2013-12-18 17:08:38 -08001381 /* Bail if we fail to protect against THP splits for any reason */
1382 if (unlikely(!anon_vma)) {
1383 put_page(page);
1384 page_nid = -1;
1385 goto clear_pmdnuma;
1386 }
1387
Mel Gormana54a4072013-10-07 11:28:46 +01001388 /*
1389 * Migrate the THP to the requested node, returns with page unlocked
Mel Gorman8a0516e2015-02-12 14:58:22 -08001390 * and access rights restored.
Mel Gormana54a4072013-10-07 11:28:46 +01001391 */
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001392 spin_unlock(ptl);
Mel Gormanb32967f2012-11-19 12:35:47 +00001393 migrated = migrate_misplaced_transhuge_page(mm, vma,
Hugh Dickins340ef392013-02-22 16:34:33 -08001394 pmdp, pmd, addr, page, target_nid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001395 if (migrated) {
1396 flags |= TNF_MIGRATED;
Mel Gorman8191acb2013-10-07 11:28:45 +01001397 page_nid = target_nid;
Mel Gorman074c2382015-03-25 15:55:42 -07001398 } else
1399 flags |= TNF_MIGRATE_FAIL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001400
Mel Gorman8191acb2013-10-07 11:28:45 +01001401 goto out;
Mel Gorman4daae3b2012-11-02 11:33:45 +00001402clear_pmdnuma:
Mel Gormana54a4072013-10-07 11:28:46 +01001403 BUG_ON(!PageLocked(page));
Mel Gormanb191f9b2015-03-25 15:55:40 -07001404 was_writable = pmd_write(pmd);
Mel Gorman4d942462015-02-12 14:58:28 -08001405 pmd = pmd_modify(pmd, vma->vm_page_prot);
Mel Gormanb7b04002015-03-25 15:55:45 -07001406 pmd = pmd_mkyoung(pmd);
Mel Gormanb191f9b2015-03-25 15:55:40 -07001407 if (was_writable)
1408 pmd = pmd_mkwrite(pmd);
Mel Gormand10e63f2012-10-25 14:16:31 +02001409 set_pmd_at(mm, haddr, pmdp, pmd);
Mel Gormand10e63f2012-10-25 14:16:31 +02001410 update_mmu_cache_pmd(vma, addr, pmdp);
Mel Gormana54a4072013-10-07 11:28:46 +01001411 unlock_page(page);
Mel Gormand10e63f2012-10-25 14:16:31 +02001412out_unlock:
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001413 spin_unlock(ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001414
1415out:
1416 if (anon_vma)
1417 page_unlock_anon_vma_read(anon_vma);
1418
Mel Gorman8191acb2013-10-07 11:28:45 +01001419 if (page_nid != -1)
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001420 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
Mel Gorman8191acb2013-10-07 11:28:45 +01001421
Mel Gormand10e63f2012-10-25 14:16:31 +02001422 return 0;
1423}
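
/*
 * Summary of the serialisation in do_huge_pmd_numa_page(): the page lock
 * serialises THP migrations, the anon_vma read lock protects against
 * parallel splits, and the pmd is re-checked with pmd_same() every time
 * the page table lock is re-taken, because the huge pmd may have changed
 * while the lock was dropped.
 */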
1424
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001425int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
Shaohua Lif21760b2012-01-12 17:19:16 -08001426 pmd_t *pmd, unsigned long addr)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001427{
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001428 spinlock_t *ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001429 int ret = 0;
1430
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001431 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001432 struct page *page;
1433 pgtable_t pgtable;
David Millerf5c8ad42012-10-08 16:34:26 -07001434 pmd_t orig_pmd;
Aneesh Kumar K.Va6bf2bb2013-06-05 17:14:04 -07001435 /*
1436 * For architectures like ppc64 we look at deposited pgtable
1437 * when calling pmdp_get_and_clear. So do the
1438 * pgtable_trans_huge_withdraw after finishing pmdp related
1439 * operations.
1440 */
Martin Schwidefskyfcbe08d62014-10-24 10:52:29 +02001441 orig_pmd = pmdp_get_and_clear_full(tlb->mm, addr, pmd,
1442 tlb->fullmm);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001443 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
Aneesh Kumar K.Va6bf2bb2013-06-05 17:14:04 -07001444 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
Kirill A. Shutemov479f0ab2012-12-12 13:50:50 -08001445 if (is_huge_zero_pmd(orig_pmd)) {
Kirill A. Shutemove1f56c82013-11-14 14:30:48 -08001446 atomic_long_dec(&tlb->mm->nr_ptes);
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001447 spin_unlock(ptl);
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -08001448 put_huge_zero_page();
Kirill A. Shutemov479f0ab2012-12-12 13:50:50 -08001449 } else {
1450 page = pmd_page(orig_pmd);
1451 page_remove_rmap(page);
Sasha Levin309381fea2014-01-23 15:52:54 -08001452 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
Kirill A. Shutemov479f0ab2012-12-12 13:50:50 -08001453 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
Sasha Levin309381fea2014-01-23 15:52:54 -08001454 VM_BUG_ON_PAGE(!PageHead(page), page);
Kirill A. Shutemove1f56c82013-11-14 14:30:48 -08001455 atomic_long_dec(&tlb->mm->nr_ptes);
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001456 spin_unlock(ptl);
Kirill A. Shutemov479f0ab2012-12-12 13:50:50 -08001457 tlb_remove_page(tlb, page);
1458 }
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001459 pte_free(tlb->mm, pgtable);
1460 ret = 1;
1461 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001462 return ret;
1463}
1464
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001465int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1466 unsigned long old_addr,
1467 unsigned long new_addr, unsigned long old_end,
1468 pmd_t *old_pmd, pmd_t *new_pmd)
1469{
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001470 spinlock_t *old_ptl, *new_ptl;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001471 int ret = 0;
1472 pmd_t pmd;
1473
1474 struct mm_struct *mm = vma->vm_mm;
1475
1476 if ((old_addr & ~HPAGE_PMD_MASK) ||
1477 (new_addr & ~HPAGE_PMD_MASK) ||
1478 old_end - old_addr < HPAGE_PMD_SIZE ||
1479 (new_vma->vm_flags & VM_NOHUGEPAGE))
1480 goto out;
1481
1482 /*
1483 * The destination pmd shouldn't be established, free_pgtables()
1484	 * should have released it.
1485 */
1486 if (WARN_ON(!pmd_none(*new_pmd))) {
1487 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1488 goto out;
1489 }
1490
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001491 /*
1492 * We don't have to worry about the ordering of src and dst
1493 * ptlocks because exclusive mmap_sem prevents deadlock.
1494 */
1495 ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001496 if (ret == 1) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001497 new_ptl = pmd_lockptr(mm, new_pmd);
1498 if (new_ptl != old_ptl)
1499 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001500 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1501 VM_BUG_ON(!pmd_none(*new_pmd));
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001502
Aneesh Kumar K.Vb3084f42014-01-13 11:34:24 +05301503 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1504 pgtable_t pgtable;
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001505 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1506 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001507 }
Aneesh Kumar K.Vb3084f42014-01-13 11:34:24 +05301508 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1509 if (new_ptl != old_ptl)
1510 spin_unlock(new_ptl);
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001511 spin_unlock(old_ptl);
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001512 }
1513out:
1514 return ret;
1515}
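
/*
 * Note on the early bail-outs in move_huge_pmd(): both the old and the new
 * address must be HPAGE_PMD_SIZE aligned, the remaining range must cover a
 * whole huge pmd, and the destination vma must not be VM_NOHUGEPAGE;
 * otherwise 0 is returned and the caller (the mremap() path, which holds
 * mmap_sem exclusively) is expected to fall back to moving ordinary page
 * tables.
 */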
1516
Mel Gormanf123d742013-10-07 11:28:49 +01001517/*
1518 * Returns
1519 * - 0 if PMD could not be locked
1520	 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1521	 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1522 */
Johannes Weinercd7548a2011-01-13 15:47:04 -08001523int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
Mel Gormane944fd62015-02-12 14:58:35 -08001524 unsigned long addr, pgprot_t newprot, int prot_numa)
Johannes Weinercd7548a2011-01-13 15:47:04 -08001525{
1526 struct mm_struct *mm = vma->vm_mm;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001527 spinlock_t *ptl;
Johannes Weinercd7548a2011-01-13 15:47:04 -08001528 int ret = 0;
1529
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001530 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001531 pmd_t entry;
Mel Gormanb191f9b2015-03-25 15:55:40 -07001532 bool preserve_write = prot_numa && pmd_write(*pmd);
Mel Gormanba68bc02015-03-07 15:20:48 +00001533 ret = 1;
Mel Gormane944fd62015-02-12 14:58:35 -08001534
1535 /*
1536 * Avoid trapping faults against the zero page. The read-only
1537 * data is likely to be read-cached on the local CPU and
1538 * local/remote hits to the zero page are not interesting.
1539 */
1540 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1541 spin_unlock(ptl);
Mel Gormanba68bc02015-03-07 15:20:48 +00001542 return ret;
Mel Gormane944fd62015-02-12 14:58:35 -08001543 }
1544
Mel Gorman10c10452015-02-12 14:58:44 -08001545 if (!prot_numa || !pmd_protnone(*pmd)) {
Mel Gorman10c10452015-02-12 14:58:44 -08001546 entry = pmdp_get_and_clear_notify(mm, addr, pmd);
1547 entry = pmd_modify(entry, newprot);
Mel Gormanb191f9b2015-03-25 15:55:40 -07001548 if (preserve_write)
1549 entry = pmd_mkwrite(entry);
Mel Gorman10c10452015-02-12 14:58:44 -08001550 ret = HPAGE_PMD_NR;
1551 set_pmd_at(mm, addr, pmd, entry);
Mel Gormanb191f9b2015-03-25 15:55:40 -07001552 BUG_ON(!preserve_write && pmd_write(entry));
Mel Gorman10c10452015-02-12 14:58:44 -08001553 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001554 spin_unlock(ptl);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001555 }
Johannes Weinercd7548a2011-01-13 15:47:04 -08001556
1557 return ret;
1558}
1559
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001560/*
1561 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1562 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
1563 *
1564 * Note that if it returns 1, this routine returns without unlocking page
1565 * table locks. So callers must unlock them.
1566 */
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001567int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1568 spinlock_t **ptl)
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001569{
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001570 *ptl = pmd_lock(vma->vm_mm, pmd);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001571 if (likely(pmd_trans_huge(*pmd))) {
1572 if (unlikely(pmd_trans_splitting(*pmd))) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001573 spin_unlock(*ptl);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001574 wait_split_huge_page(vma->anon_vma, pmd);
1575 return -1;
1576 } else {
1577 /* Thp mapped by 'pmd' is stable, so we can
1578 * handle it as it is. */
1579 return 1;
1580 }
1581 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001582 spin_unlock(*ptl);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001583 return 0;
1584}
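
/*
 * Minimal usage sketch for __pmd_trans_huge_lock(), matching the callers
 * above (zap_huge_pmd() and change_huge_pmd()):
 *
 *	spinlock_t *ptl;
 *
 *	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 *		... operate on the stable huge pmd ...
 *		spin_unlock(ptl);
 *	}
 *
 * A return of -1 means a split was in progress and has been waited for, so
 * callers can usually fall back to the regular pte path; 0 means the pmd
 * does not map a thp at all.
 */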
1585
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001586/*
1587 * This function checks whether a given @page is mapped at @address
1588 * in the virtual address space of @mm.
1589 *
1590 * If it is, this function returns the pmd while holding the page table lock,
1591 * which is passed back to the caller via @ptl.
1592 * If it is not, it returns NULL without holding the page table lock.
1593 */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001594pmd_t *page_check_address_pmd(struct page *page,
1595 struct mm_struct *mm,
1596 unsigned long address,
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001597 enum page_check_address_pmd_flag flag,
1598 spinlock_t **ptl)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001599{
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001600 pgd_t *pgd;
1601 pud_t *pud;
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001602 pmd_t *pmd;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001603
1604 if (address & ~HPAGE_PMD_MASK)
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001605 return NULL;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001606
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001607 pgd = pgd_offset(mm, address);
1608 if (!pgd_present(*pgd))
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001609 return NULL;
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001610 pud = pud_offset(pgd, address);
1611 if (!pud_present(*pud))
1612 return NULL;
1613 pmd = pmd_offset(pud, address);
1614
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001615 *ptl = pmd_lock(mm, pmd);
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001616 if (!pmd_present(*pmd))
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001617 goto unlock;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001618 if (pmd_page(*pmd) != page)
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001619 goto unlock;
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08001620 /*
1621 * split_vma() may create temporary aliased mappings. There is
1622 * no risk as long as all huge pmd are found and have their
1623 * splitting bit set before __split_huge_page_refcount
1624 * runs. Finding the same huge pmd more than once during the
1625 * same rmap walk is not a problem.
1626 */
1627 if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1628 pmd_trans_splitting(*pmd))
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001629 goto unlock;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001630 if (pmd_trans_huge(*pmd)) {
1631 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1632 !pmd_trans_splitting(*pmd));
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001633 return pmd;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001634 }
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001635unlock:
1636 spin_unlock(*ptl);
1637 return NULL;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001638}
1639
1640static int __split_huge_page_splitting(struct page *page,
1641 struct vm_area_struct *vma,
1642 unsigned long address)
1643{
1644 struct mm_struct *mm = vma->vm_mm;
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001645 spinlock_t *ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001646 pmd_t *pmd;
1647 int ret = 0;
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001648 /* For mmu_notifiers */
1649 const unsigned long mmun_start = address;
1650 const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001651
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001652 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001653 pmd = page_check_address_pmd(page, mm, address,
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001654 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001655 if (pmd) {
1656 /*
1657 * We can't temporarily set the pmd to null in order
1658		 * to split it; the pmd must remain marked huge at all
1659 * times or the VM won't take the pmd_trans_huge paths
Ingo Molnar5a505082012-12-02 19:56:46 +00001660 * and it won't wait on the anon_vma->root->rwsem to
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001661 * serialize against split_huge_page*.
1662 */
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001663 pmdp_splitting_flush(vma, address, pmd);
Joerg Roedel34ee6452014-11-13 13:46:09 +11001664
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001665 ret = 1;
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001666 spin_unlock(ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001667 }
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001668 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001669
1670 return ret;
1671}
1672
Shaohua Li5bc7b8a2013-04-29 15:08:36 -07001673static void __split_huge_page_refcount(struct page *page,
1674 struct list_head *list)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001675{
1676 int i;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001677 struct zone *zone = page_zone(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001678 struct lruvec *lruvec;
Andrea Arcangeli70b50f92011-11-02 13:36:59 -07001679 int tail_count = 0;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001680
1681	/* prevent PageLRU from going away from under us, and freeze lru stats */
1682 spin_lock_irq(&zone->lru_lock);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001683 lruvec = mem_cgroup_page_lruvec(page, zone);
1684
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001685 compound_lock(page);
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08001686 /* complete memcg works before add pages to LRU */
1687 mem_cgroup_split_huge_fixup(page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001688
Shaohua Li45676882012-01-12 17:19:18 -08001689 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001690 struct page *page_tail = page + i;
1691
Andrea Arcangeli70b50f92011-11-02 13:36:59 -07001692 /* tail_page->_mapcount cannot change */
1693 BUG_ON(page_mapcount(page_tail) < 0);
1694 tail_count += page_mapcount(page_tail);
1695 /* check for overflow */
1696 BUG_ON(tail_count < 0);
1697 BUG_ON(atomic_read(&page_tail->_count) != 0);
1698 /*
1699 * tail_page->_count is zero and not changing from
1700 * under us. But get_page_unless_zero() may be running
1701 * from under us on the tail_page. If we used
1702 * atomic_set() below instead of atomic_add(), we
1703 * would then run atomic_set() concurrently with
1704 * get_page_unless_zero(), and atomic_set() is
1705 * implemented in C not using locked ops. spin_unlock
1706 * on x86 sometime uses locked ops because of PPro
1707 * errata 66, 92, so unless somebody can guarantee
1708 * atomic_set() here would be safe on all archs (and
1709 * not only on x86), it's safer to use atomic_add().
1710 */
1711 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1712 &page_tail->_count);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001713
1714 /* after clearing PageTail the gup refcount can be released */
Waiman Long3a79d522014-08-06 16:05:38 -07001715 smp_mb__after_atomic();
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001716
Naoya Horiguchi9de27bd2015-08-06 15:47:08 -07001717 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001718 page_tail->flags |= (page->flags &
1719 ((1L << PG_referenced) |
1720 (1L << PG_swapbacked) |
1721 (1L << PG_mlocked) |
Kirill A. Shutemove180cf82013-07-31 13:53:39 -07001722 (1L << PG_uptodate) |
1723 (1L << PG_active) |
1724 (1L << PG_unevictable)));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001725 page_tail->flags |= (1L << PG_dirty);
1726
Andrea Arcangeli70b50f92011-11-02 13:36:59 -07001727 /* clear PageTail before overwriting first_page */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001728 smp_wmb();
1729
1730 /*
1731 * __split_huge_page_splitting() already set the
1732 * splitting bit in all pmd that could map this
1733 * hugepage, that will ensure no CPU can alter the
1734 * mapcount on the head page. The mapcount is only
1735 * accounted in the head page and it has to be
1736 * transferred to all tail pages in the below code. So
1737 * for this code to be safe, the split the mapcount
1738 * can't change. But that doesn't mean userland can't
1739 * keep changing and reading the page contents while
1740 * we transfer the mapcount, so the pmd splitting
1741 * status is achieved setting a reserved bit in the
1742 * pmd, not by clearing the present bit.
1743 */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001744 page_tail->_mapcount = page->_mapcount;
1745
1746 BUG_ON(page_tail->mapping);
1747 page_tail->mapping = page->mapping;
1748
Shaohua Li45676882012-01-12 17:19:18 -08001749 page_tail->index = page->index + i;
Peter Zijlstra90572892013-10-07 11:29:20 +01001750 page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001751
1752 BUG_ON(!PageAnon(page_tail));
1753 BUG_ON(!PageUptodate(page_tail));
1754 BUG_ON(!PageDirty(page_tail));
1755 BUG_ON(!PageSwapBacked(page_tail));
1756
Shaohua Li5bc7b8a2013-04-29 15:08:36 -07001757 lru_add_page_tail(page, page_tail, lruvec, list);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001758 }
Andrea Arcangeli70b50f92011-11-02 13:36:59 -07001759 atomic_sub(tail_count, &page->_count);
1760 BUG_ON(atomic_read(&page->_count) <= 0);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001761
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001762 __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
Andrea Arcangeli79134172011-01-13 15:46:58 -08001763
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001764 ClearPageCompound(page);
1765 compound_unlock(page);
1766 spin_unlock_irq(&zone->lru_lock);
1767
1768 for (i = 1; i < HPAGE_PMD_NR; i++) {
1769 struct page *page_tail = page + i;
1770 BUG_ON(page_count(page_tail) <= 0);
1771 /*
1772		 * Tail pages may be freed if there wasn't any mapping,
1773		 * for example if add_to_swap() is running on an lru page
1774		 * that had its mapping zapped. And freeing these pages
1775 * requires taking the lru_lock so we do the put_page
1776 * of the tail pages after the split is complete.
1777 */
1778 put_page(page_tail);
1779 }
1780
1781 /*
1782	 * Only the head page (now a regular page) is required
1783 * to be pinned by the caller.
1784 */
1785 BUG_ON(page_count(page) <= 0);
1786}
1787
1788static int __split_huge_page_map(struct page *page,
1789 struct vm_area_struct *vma,
1790 unsigned long address)
1791{
1792 struct mm_struct *mm = vma->vm_mm;
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001793 spinlock_t *ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001794 pmd_t *pmd, _pmd;
1795 int ret = 0, i;
1796 pgtable_t pgtable;
1797 unsigned long haddr;
1798
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001799 pmd = page_check_address_pmd(page, mm, address,
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001800 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001801 if (pmd) {
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07001802 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001803 pmd_populate(mm, &_pmd, pgtable);
Waiman Longf8303c22014-08-06 16:05:36 -07001804 if (pmd_write(*pmd))
1805 BUG_ON(page_mapcount(page) != 1);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001806
Gerald Schaefere3ebcf642012-10-08 16:30:07 -07001807 haddr = address;
1808 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001809 pte_t *pte, entry;
1810 BUG_ON(PageCompound(page+i));
Mel Gormanabc40bd2014-10-02 19:47:42 +01001811 /*
Mel Gorman8a0516e2015-02-12 14:58:22 -08001812 * Note that NUMA hinting access restrictions are not
1813 * transferred to avoid any possibility of altering
1814 * permissions across VMAs.
Mel Gormanabc40bd2014-10-02 19:47:42 +01001815 */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001816 entry = mk_pte(page + i, vma->vm_page_prot);
1817 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1818 if (!pmd_write(*pmd))
1819 entry = pte_wrprotect(entry);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001820 if (!pmd_young(*pmd))
1821 entry = pte_mkold(entry);
1822 pte = pte_offset_map(&_pmd, haddr);
1823 BUG_ON(!pte_none(*pte));
1824 set_pte_at(mm, haddr, pte, entry);
1825 pte_unmap(pte);
1826 }
1827
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001828 smp_wmb(); /* make pte visible before pmd */
1829 /*
1830 * Up to this point the pmd is present and huge and
1831 * userland has the whole access to the hugepage
1832 * during the split (which happens in place). If we
1833 * overwrite the pmd with the not-huge version
1834 * pointing to the pte here (which of course we could
1835 * if all CPUs were bug free), userland could trigger
1836 * a small page size TLB miss on the small sized TLB
1837 * while the hugepage TLB entry is still established
1838		 * in the huge TLB. Some CPUs don't like that. See
1839		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1840		 * Erratum 383 on page 93. Intel should be safe but
1841		 * also warns that it's only safe if the permission
1842		 * and cache attributes of the two entries loaded in
1843		 * the two TLBs are identical (which should be the case
1844 * here). But it is generally safer to never allow
1845 * small and huge TLB entries for the same virtual
1846 * address to be loaded simultaneously. So instead of
1847 * doing "pmd_populate(); flush_tlb_range();" we first
1848 * mark the current pmd notpresent (atomically because
1849 * here the pmd_trans_huge and pmd_trans_splitting
1850 * must remain set at all times on the pmd until the
1851 * split is complete for this pmd), then we flush the
1852 * SMP TLB and finally we write the non-huge version
1853 * of the pmd entry with pmd_populate.
1854 */
Gerald Schaefer46dcde72012-10-08 16:30:09 -07001855 pmdp_invalidate(vma, address, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001856 pmd_populate(mm, pmd, pgtable);
1857 ret = 1;
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001858 spin_unlock(ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001859 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001860
1861 return ret;
1862}
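
/*
 * Taken together, the split of an anonymous transparent hugepage is done
 * in three passes, all driven by __split_huge_page() below:
 *
 *   1. __split_huge_page_splitting() marks every pmd mapping the page as
 *	splitting and flushes the TLB, freezing the huge mappings;
 *   2. __split_huge_page_refcount() distributes the compound page's
 *	refcounts, mapcounts and flags to the tail pages and clears
 *	PageCompound on the head;
 *   3. __split_huge_page_map() replaces each frozen huge pmd with a
 *	regular page table mapping the now-independent small pages.
 */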
1863
Ingo Molnar5a505082012-12-02 19:56:46 +00001864/* must be called with anon_vma->root->rwsem held */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001865static void __split_huge_page(struct page *page,
Shaohua Li5bc7b8a2013-04-29 15:08:36 -07001866 struct anon_vma *anon_vma,
1867 struct list_head *list)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001868{
1869 int mapcount, mapcount2;
Michel Lespinassebf181b92012-10-08 16:31:39 -07001870 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001871 struct anon_vma_chain *avc;
1872
1873 BUG_ON(!PageHead(page));
1874 BUG_ON(PageTail(page));
1875
1876 mapcount = 0;
Michel Lespinassebf181b92012-10-08 16:31:39 -07001877 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001878 struct vm_area_struct *vma = avc->vma;
1879 unsigned long addr = vma_address(page, vma);
1880 BUG_ON(is_vma_temporary_stack(vma));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001881 mapcount += __split_huge_page_splitting(page, vma, addr);
1882 }
Andrea Arcangeli05759d32011-01-13 15:46:53 -08001883 /*
1884 * It is critical that new vmas are added to the tail of the
1885	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
1886 * and establishes a child pmd before
1887 * __split_huge_page_splitting() freezes the parent pmd (so if
1888 * we fail to prevent copy_huge_pmd() from running until the
1889 * whole __split_huge_page() is complete), we will still see
1890 * the newly established pmd of the child later during the
1891 * walk, to be able to set it as pmd_trans_splitting too.
1892 */
Kirill A. Shutemovff9e43e2014-06-04 16:06:57 -07001893 if (mapcount != page_mapcount(page)) {
Andrew Mortonae3a8c12014-06-04 16:06:58 -07001894 pr_err("mapcount %d page_mapcount %d\n",
1895 mapcount, page_mapcount(page));
Kirill A. Shutemovff9e43e2014-06-04 16:06:57 -07001896 BUG();
1897 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001898
Shaohua Li5bc7b8a2013-04-29 15:08:36 -07001899 __split_huge_page_refcount(page, list);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001900
1901 mapcount2 = 0;
Michel Lespinassebf181b92012-10-08 16:31:39 -07001902 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001903 struct vm_area_struct *vma = avc->vma;
1904 unsigned long addr = vma_address(page, vma);
1905 BUG_ON(is_vma_temporary_stack(vma));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001906 mapcount2 += __split_huge_page_map(page, vma, addr);
1907 }
Kirill A. Shutemovff9e43e2014-06-04 16:06:57 -07001908 if (mapcount != mapcount2) {
Andrew Mortonae3a8c12014-06-04 16:06:58 -07001909 pr_err("mapcount %d mapcount2 %d page_mapcount %d\n",
1910 mapcount, mapcount2, page_mapcount(page));
Kirill A. Shutemovff9e43e2014-06-04 16:06:57 -07001911 BUG();
1912 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001913}
1914
Shaohua Li5bc7b8a2013-04-29 15:08:36 -07001915/*
1916 * Split a hugepage into normal pages. This doesn't change the position of head
1917 * page. If @list is null, tail pages will be added to LRU list, otherwise, to
1918 * @list. Both head page and tail pages will inherit mapping, flags, and so on
1919 * from the hugepage.
1920 * Return 0 if the hugepage is split successfully otherwise return 1.
1921 */
1922int split_huge_page_to_list(struct page *page, struct list_head *list)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001923{
1924 struct anon_vma *anon_vma;
1925 int ret = 1;
1926
Kirill A. Shutemov5918d102013-04-29 15:08:44 -07001927 BUG_ON(is_huge_zero_page(page));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001928 BUG_ON(!PageAnon(page));
Mel Gorman062f1af2013-01-11 14:32:02 -08001929
1930 /*
1931 * The caller does not necessarily hold an mmap_sem that would prevent
1932	 * the anon_vma disappearing so we first take a reference to it
1933 * and then lock the anon_vma for write. This is similar to
1934 * page_lock_anon_vma_read except the write lock is taken to serialise
1935 * against parallel split or collapse operations.
1936 */
1937 anon_vma = page_get_anon_vma(page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001938 if (!anon_vma)
1939 goto out;
Mel Gorman062f1af2013-01-11 14:32:02 -08001940 anon_vma_lock_write(anon_vma);
1941
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001942 ret = 0;
1943 if (!PageCompound(page))
1944 goto out_unlock;
1945
1946 BUG_ON(!PageSwapBacked(page));
Shaohua Li5bc7b8a2013-04-29 15:08:36 -07001947 __split_huge_page(page, anon_vma, list);
Andi Kleen81ab4202011-04-14 15:22:06 -07001948 count_vm_event(THP_SPLIT);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001949
1950 BUG_ON(PageCompound(page));
1951out_unlock:
Konstantin Khlebnikov08b52702013-02-22 16:34:40 -08001952 anon_vma_unlock_write(anon_vma);
Mel Gorman062f1af2013-01-11 14:32:02 -08001953 put_anon_vma(anon_vma);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001954out:
1955 return ret;
1956}
1957
Vlastimil Babka9050d7e2014-03-03 15:38:27 -08001958#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001959
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001960int hugepage_madvise(struct vm_area_struct *vma,
1961 unsigned long *vm_flags, int advice)
Andrea Arcangeli0af4e982011-01-13 15:46:55 -08001962{
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001963 switch (advice) {
1964 case MADV_HUGEPAGE:
Alex Thorlton1e1836e2014-04-07 15:37:09 -07001965#ifdef CONFIG_S390
1966 /*
1967 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1968 * can't handle this properly after s390_enable_sie, so we simply
1969 * ignore the madvise to prevent qemu from causing a SIGSEGV.
1970 */
1971 if (mm_has_pgste(vma->vm_mm))
1972 return 0;
1973#endif
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001974 /*
1975 * Be somewhat over-protective like KSM for now!
1976 */
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001977 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001978 return -EINVAL;
1979 *vm_flags &= ~VM_NOHUGEPAGE;
1980 *vm_flags |= VM_HUGEPAGE;
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001981 /*
1982		 * If the vma becomes good for khugepaged to scan,
1983		 * register it here without waiting for a page fault that
1984 * may not happen any time soon.
1985 */
David Rientjes6d50e602014-10-29 14:50:31 -07001986 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001987 return -ENOMEM;
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001988 break;
1989 case MADV_NOHUGEPAGE:
1990 /*
1991 * Be somewhat over-protective like KSM for now!
1992 */
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001993 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001994 return -EINVAL;
1995 *vm_flags &= ~VM_HUGEPAGE;
1996 *vm_flags |= VM_NOHUGEPAGE;
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001997 /*
1998 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1999 * this vma even if we leave the mm registered in khugepaged if
2000 * it got registered before VM_NOHUGEPAGE was set.
2001 */
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08002002 break;
2003 }
Andrea Arcangeli0af4e982011-01-13 15:46:55 -08002004
2005 return 0;
2006}
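
/*
 * For reference, userspace reaches hugepage_madvise() through madvise(2),
 * roughly as in this sketch:
 *
 *	void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, 4UL << 20, MADV_HUGEPAGE);
 *
 * MADV_HUGEPAGE sets VM_HUGEPAGE on the vma (and may register the mm with
 * khugepaged); MADV_NOHUGEPAGE clears it and sets VM_NOHUGEPAGE instead.
 */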
2007
Andrea Arcangeliba761492011-01-13 15:46:58 -08002008static int __init khugepaged_slab_init(void)
2009{
2010 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
2011 sizeof(struct mm_slot),
2012 __alignof__(struct mm_slot), 0, NULL);
2013 if (!mm_slot_cache)
2014 return -ENOMEM;
2015
2016 return 0;
2017}
2018
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -07002019static void __init khugepaged_slab_exit(void)
2020{
2021 kmem_cache_destroy(mm_slot_cache);
2022}
2023
Andrea Arcangeliba761492011-01-13 15:46:58 -08002024static inline struct mm_slot *alloc_mm_slot(void)
2025{
2026 if (!mm_slot_cache) /* initialization failed */
2027 return NULL;
2028 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
2029}
2030
2031static inline void free_mm_slot(struct mm_slot *mm_slot)
2032{
2033 kmem_cache_free(mm_slot_cache, mm_slot);
2034}
2035
Andrea Arcangeliba761492011-01-13 15:46:58 -08002036static struct mm_slot *get_mm_slot(struct mm_struct *mm)
2037{
2038 struct mm_slot *mm_slot;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002039
Sasha Levinb67bfe02013-02-27 17:06:00 -08002040 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002041 if (mm == mm_slot->mm)
2042 return mm_slot;
Sasha Levin43b5fbb2013-02-22 16:32:27 -08002043
Andrea Arcangeliba761492011-01-13 15:46:58 -08002044 return NULL;
2045}
2046
2047static void insert_to_mm_slots_hash(struct mm_struct *mm,
2048 struct mm_slot *mm_slot)
2049{
Andrea Arcangeliba761492011-01-13 15:46:58 -08002050 mm_slot->mm = mm;
Sasha Levin43b5fbb2013-02-22 16:32:27 -08002051 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002052}
2053
2054static inline int khugepaged_test_exit(struct mm_struct *mm)
2055{
2056 return atomic_read(&mm->mm_users) == 0;
2057}
2058
2059int __khugepaged_enter(struct mm_struct *mm)
2060{
2061 struct mm_slot *mm_slot;
2062 int wakeup;
2063
2064 mm_slot = alloc_mm_slot();
2065 if (!mm_slot)
2066 return -ENOMEM;
2067
2068 /* __khugepaged_exit() must not run from under us */
Sasha Levin96dad672014-10-09 15:28:39 -07002069 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002070 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
2071 free_mm_slot(mm_slot);
2072 return 0;
2073 }
2074
2075 spin_lock(&khugepaged_mm_lock);
2076 insert_to_mm_slots_hash(mm, mm_slot);
2077 /*
2078 * Insert just behind the scanning cursor, to let the area settle
2079 * down a little.
2080 */
2081 wakeup = list_empty(&khugepaged_scan.mm_head);
2082 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
2083 spin_unlock(&khugepaged_mm_lock);
2084
2085 atomic_inc(&mm->mm_count);
2086 if (wakeup)
2087 wake_up_interruptible(&khugepaged_wait);
2088
2089 return 0;
2090}
2091
David Rientjes6d50e602014-10-29 14:50:31 -07002092int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
2093 unsigned long vm_flags)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002094{
2095 unsigned long hstart, hend;
2096 if (!vma->anon_vma)
2097 /*
2098 * Not yet faulted in so we will register later in the
2099 * page fault if needed.
2100 */
2101 return 0;
Konstantin Khlebnikov9684dc02016-04-28 16:18:32 -07002102 if (vma->vm_ops || (vm_flags & VM_NO_THP))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002103 /* khugepaged not yet working on file or special mappings */
2104 return 0;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002105 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2106 hend = vma->vm_end & HPAGE_PMD_MASK;
2107 if (hstart < hend)
David Rientjes6d50e602014-10-29 14:50:31 -07002108 return khugepaged_enter(vma, vm_flags);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002109 return 0;
2110}
2111
2112void __khugepaged_exit(struct mm_struct *mm)
2113{
2114 struct mm_slot *mm_slot;
2115 int free = 0;
2116
2117 spin_lock(&khugepaged_mm_lock);
2118 mm_slot = get_mm_slot(mm);
2119 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
Sasha Levin43b5fbb2013-02-22 16:32:27 -08002120 hash_del(&mm_slot->hash);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002121 list_del(&mm_slot->mm_node);
2122 free = 1;
2123 }
Chris Wrightd788e802011-07-25 17:12:14 -07002124 spin_unlock(&khugepaged_mm_lock);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002125
2126 if (free) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08002127 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2128 free_mm_slot(mm_slot);
2129 mmdrop(mm);
2130 } else if (mm_slot) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08002131 /*
2132 * This is required to serialize against
2133 * khugepaged_test_exit() (which is guaranteed to run
2134 * under mmap sem read mode). Stop here (after we
2135 * return all pagetables will be destroyed) until
2136 * khugepaged has finished working on the pagetables
2137 * under the mmap_sem.
2138 */
2139 down_write(&mm->mmap_sem);
2140 up_write(&mm->mmap_sem);
Chris Wrightd788e802011-07-25 17:12:14 -07002141 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002142}
2143
2144static void release_pte_page(struct page *page)
2145{
2146 /* 0 stands for page_is_file_cache(page) == false */
2147 dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
2148 unlock_page(page);
2149 putback_lru_page(page);
2150}
2151
2152static void release_pte_pages(pte_t *pte, pte_t *_pte)
2153{
2154 while (--_pte >= pte) {
2155 pte_t pteval = *_pte;
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002156 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002157 release_pte_page(pte_page(pteval));
2158 }
2159}
2160
Andrea Arcangeliba761492011-01-13 15:46:58 -08002161static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2162 unsigned long address,
2163 pte_t *pte)
2164{
2165 struct page *page;
2166 pte_t *_pte;
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002167 int none_or_zero = 0;
Ebru Akagunduz10359212015-02-11 15:28:28 -08002168 bool referenced = false, writable = false;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002169 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2170 _pte++, address += PAGE_SIZE) {
2171 pte_t pteval = *_pte;
Minchan Kimdc14f052015-10-22 13:32:19 -07002172 if (pte_none(pteval) || (pte_present(pteval) &&
2173 is_zero_pfn(pte_pfn(pteval)))) {
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002174 if (++none_or_zero <= khugepaged_max_ptes_none)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002175 continue;
Bob Liu344aa352012-12-11 16:00:34 -08002176 else
Andrea Arcangeliba761492011-01-13 15:46:58 -08002177 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002178 }
Ebru Akagunduz10359212015-02-11 15:28:28 -08002179 if (!pte_present(pteval))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002180 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002181 page = vm_normal_page(vma, address, pteval);
Bob Liu344aa352012-12-11 16:00:34 -08002182 if (unlikely(!page))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002183 goto out;
Bob Liu344aa352012-12-11 16:00:34 -08002184
Sasha Levin309381fea2014-01-23 15:52:54 -08002185 VM_BUG_ON_PAGE(PageCompound(page), page);
2186 VM_BUG_ON_PAGE(!PageAnon(page), page);
2187 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002188
Andrea Arcangeliba761492011-01-13 15:46:58 -08002189 /*
2190 * We can do it before isolate_lru_page because the
2191 * page can't be freed from under us. NOTE: PG_lock
2192 * is needed to serialize against split_huge_page
2193 * when invoked from the VM.
2194 */
Bob Liu344aa352012-12-11 16:00:34 -08002195 if (!trylock_page(page))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002196 goto out;
Ebru Akagunduz10359212015-02-11 15:28:28 -08002197
2198 /*
2199 * cannot use mapcount: can't collapse if there's a gup pin.
2200 * The page must only be referenced by the scanned process
2201 * and page swap cache.
2202 */
2203 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2204 unlock_page(page);
2205 goto out;
2206 }
2207 if (pte_write(pteval)) {
2208 writable = true;
2209 } else {
2210 if (PageSwapCache(page) && !reuse_swap_page(page)) {
2211 unlock_page(page);
2212 goto out;
2213 }
2214 /*
2215 * Page is not in the swap cache. It can be collapsed
2216 * into a THP.
2217 */
2218 }
2219
Andrea Arcangeliba761492011-01-13 15:46:58 -08002220 /*
2221		 * Isolate the page to avoid collapsing a hugepage
2222 * currently in use by the VM.
2223 */
2224 if (isolate_lru_page(page)) {
2225 unlock_page(page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002226 goto out;
2227 }
2228 /* 0 stands for page_is_file_cache(page) == false */
2229 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
Sasha Levin309381fea2014-01-23 15:52:54 -08002230 VM_BUG_ON_PAGE(!PageLocked(page), page);
2231 VM_BUG_ON_PAGE(PageLRU(page), page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002232
2233		/* If no mapped pte is young, don't collapse the page */
Andrea Arcangeli8ee53822011-01-13 15:47:10 -08002234 if (pte_young(pteval) || PageReferenced(page) ||
2235 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz10359212015-02-11 15:28:28 -08002236 referenced = true;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002237 }
Ebru Akagunduz10359212015-02-11 15:28:28 -08002238 if (likely(referenced && writable))
Bob Liu344aa352012-12-11 16:00:34 -08002239 return 1;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002240out:
Bob Liu344aa352012-12-11 16:00:34 -08002241 release_pte_pages(pte, _pte);
2242 return 0;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002243}
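
/*
 * In short, __collapse_huge_page_isolate() only succeeds if every pte is
 * either none/zero (at most khugepaged_max_ptes_none of those) or maps an
 * anonymous, unpinned page that could be locked and isolated from the LRU,
 * at least one pte is writable, and at least one pte or page shows signs
 * of recent use.
 */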
2244
2245static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2246 struct vm_area_struct *vma,
2247 unsigned long address,
2248 spinlock_t *ptl)
2249{
2250 pte_t *_pte;
2251 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2252 pte_t pteval = *_pte;
2253 struct page *src_page;
2254
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002255 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08002256 clear_user_highpage(page, address);
2257 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002258 if (is_zero_pfn(pte_pfn(pteval))) {
2259 /*
2260 * ptl mostly unnecessary.
2261 */
2262 spin_lock(ptl);
2263 /*
2264 * paravirt calls inside pte_clear here are
2265 * superfluous.
2266 */
2267 pte_clear(vma->vm_mm, address, _pte);
2268 spin_unlock(ptl);
2269 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002270 } else {
2271 src_page = pte_page(pteval);
2272 copy_user_highpage(page, src_page, address, vma);
Sasha Levin309381fea2014-01-23 15:52:54 -08002273 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002274 release_pte_page(src_page);
2275 /*
2276 * ptl mostly unnecessary, but preempt has to
2277 * be disabled to update the per-cpu stats
2278 * inside page_remove_rmap().
2279 */
2280 spin_lock(ptl);
2281 /*
2282 * paravirt calls inside pte_clear here are
2283 * superfluous.
2284 */
2285 pte_clear(vma->vm_mm, address, _pte);
2286 page_remove_rmap(src_page);
2287 spin_unlock(ptl);
2288 free_page_and_swap_cache(src_page);
2289 }
2290
2291 address += PAGE_SIZE;
2292 page++;
2293 }
2294}
2295
Xiao Guangrong26234f32012-10-08 16:29:51 -07002296static void khugepaged_alloc_sleep(void)
2297{
2298 wait_event_freezable_timeout(khugepaged_wait, false,
2299 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2300}
2301
Bob Liu9f1b8682013-11-12 15:07:37 -08002302static int khugepaged_node_load[MAX_NUMNODES];
2303
David Rientjes14a4e212014-08-06 16:07:29 -07002304static bool khugepaged_scan_abort(int nid)
2305{
2306 int i;
2307
2308 /*
2309 * If zone_reclaim_mode is disabled, then no extra effort is made to
2310 * allocate memory locally.
2311 */
2312 if (!zone_reclaim_mode)
2313 return false;
2314
2315 /* If there is a count for this node already, it must be acceptable */
2316 if (khugepaged_node_load[nid])
2317 return false;
2318
2319 for (i = 0; i < MAX_NUMNODES; i++) {
2320 if (!khugepaged_node_load[i])
2321 continue;
2322 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2323 return true;
2324 }
2325 return false;
2326}
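
/*
 * Policy in short: with zone_reclaim_mode enabled, khugepaged gives up on
 * a candidate pmd as soon as the pages under it come from nodes farther
 * apart than RECLAIM_DISTANCE, since the resulting hugepage would end up
 * remote to part of its data anyway; nodes already recorded in
 * khugepaged_node_load[] were accepted earlier in the same scan.
 */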
2327
Xiao Guangrong26234f32012-10-08 16:29:51 -07002328#ifdef CONFIG_NUMA
Bob Liu9f1b8682013-11-12 15:07:37 -08002329static int khugepaged_find_target_node(void)
2330{
2331 static int last_khugepaged_target_node = NUMA_NO_NODE;
2332 int nid, target_node = 0, max_value = 0;
2333
2334 /* find first node with max normal pages hit */
2335 for (nid = 0; nid < MAX_NUMNODES; nid++)
2336 if (khugepaged_node_load[nid] > max_value) {
2337 max_value = khugepaged_node_load[nid];
2338 target_node = nid;
2339 }
2340
2341	/* do some balancing if several nodes have the same hit record */
2342 if (target_node <= last_khugepaged_target_node)
2343 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2344 nid++)
2345 if (max_value == khugepaged_node_load[nid]) {
2346 target_node = nid;
2347 break;
2348 }
2349
2350 last_khugepaged_target_node = target_node;
2351 return target_node;
2352}
2353
Xiao Guangrong26234f32012-10-08 16:29:51 -07002354static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2355{
2356 if (IS_ERR(*hpage)) {
2357 if (!*wait)
2358 return false;
2359
2360 *wait = false;
Xiao Guangronge3b41262012-10-08 16:32:57 -07002361 *hpage = NULL;
Xiao Guangrong26234f32012-10-08 16:29:51 -07002362 khugepaged_alloc_sleep();
2363 } else if (*hpage) {
2364 put_page(*hpage);
2365 *hpage = NULL;
2366 }
2367
2368 return true;
2369}
2370
Michal Hocko3b363692015-04-15 16:13:29 -07002371static struct page *
2372khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
Xiao Guangrong26234f32012-10-08 16:29:51 -07002373 struct vm_area_struct *vma, unsigned long address,
2374 int node)
2375{
Sasha Levin309381fea2014-01-23 15:52:54 -08002376 VM_BUG_ON_PAGE(*hpage, *hpage);
Vlastimil Babka8b164562014-10-09 15:27:00 -07002377
Xiao Guangrong26234f32012-10-08 16:29:51 -07002378 /*
Vlastimil Babka8b164562014-10-09 15:27:00 -07002379 * Before allocating the hugepage, release the mmap_sem read lock.
2380 * The allocation can take potentially a long time if it involves
2381 * sync compaction, and we do not need to hold the mmap_sem during
2382 * that. We will recheck the vma after taking it again in write mode.
Xiao Guangrong26234f32012-10-08 16:29:51 -07002383 */
2384 up_read(&mm->mmap_sem);
Vlastimil Babka8b164562014-10-09 15:27:00 -07002385
Michal Hocko3b363692015-04-15 16:13:29 -07002386 *hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
Xiao Guangrong26234f32012-10-08 16:29:51 -07002387 if (unlikely(!*hpage)) {
2388 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2389 *hpage = ERR_PTR(-ENOMEM);
2390 return NULL;
2391 }
2392
2393 count_vm_event(THP_COLLAPSE_ALLOC);
2394 return *hpage;
2395}
2396#else
Bob Liu9f1b8682013-11-12 15:07:37 -08002397static int khugepaged_find_target_node(void)
2398{
2399 return 0;
2400}
2401
Bob Liu10dc4152013-11-12 15:07:35 -08002402static inline struct page *alloc_hugepage(int defrag)
2403{
2404 return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
2405 HPAGE_PMD_ORDER);
2406}
2407
Xiao Guangrong26234f32012-10-08 16:29:51 -07002408static struct page *khugepaged_alloc_hugepage(bool *wait)
2409{
2410 struct page *hpage;
2411
2412 do {
2413 hpage = alloc_hugepage(khugepaged_defrag());
2414 if (!hpage) {
2415 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2416 if (!*wait)
2417 return NULL;
2418
2419 *wait = false;
2420 khugepaged_alloc_sleep();
2421 } else
2422 count_vm_event(THP_COLLAPSE_ALLOC);
2423 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2424
2425 return hpage;
2426}
2427
2428static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2429{
2430 if (!*hpage)
2431 *hpage = khugepaged_alloc_hugepage(wait);
2432
2433 if (unlikely(!*hpage))
2434 return false;
2435
2436 return true;
2437}
2438
Michal Hocko3b363692015-04-15 16:13:29 -07002439static struct page *
2440khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
Xiao Guangrong26234f32012-10-08 16:29:51 -07002441 struct vm_area_struct *vma, unsigned long address,
2442 int node)
2443{
2444 up_read(&mm->mmap_sem);
2445 VM_BUG_ON(!*hpage);
Michal Hocko3b363692015-04-15 16:13:29 -07002446
Xiao Guangrong26234f32012-10-08 16:29:51 -07002447 return *hpage;
2448}
2449#endif
2450
Bob Liufa475e52012-12-11 16:00:39 -08002451static bool hugepage_vma_check(struct vm_area_struct *vma)
2452{
2453 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2454 (vma->vm_flags & VM_NOHUGEPAGE))
2455 return false;
2456
2457 if (!vma->anon_vma || vma->vm_ops)
2458 return false;
2459 if (is_vma_temporary_stack(vma))
2460 return false;
Konstantin Khlebnikov9684dc02016-04-28 16:18:32 -07002461 return !(vma->vm_flags & VM_NO_THP);
Bob Liufa475e52012-12-11 16:00:39 -08002462}
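
/*
 * To summarise hugepage_vma_check(): khugepaged only considers anonymous
 * vmas that are not temporary stacks, have no vm_ops, carry none of the
 * VM_NO_THP flags, are not VM_NOHUGEPAGE, and are either VM_HUGEPAGE or
 * running with khugepaged in "always" mode.
 */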
2463
Andrea Arcangeliba761492011-01-13 15:46:58 -08002464static void collapse_huge_page(struct mm_struct *mm,
Xiao Guangrong26234f32012-10-08 16:29:51 -07002465 unsigned long address,
2466 struct page **hpage,
2467 struct vm_area_struct *vma,
2468 int node)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002469{
Andrea Arcangeliba761492011-01-13 15:46:58 -08002470 pmd_t *pmd, _pmd;
2471 pte_t *pte;
2472 pgtable_t pgtable;
2473 struct page *new_page;
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002474 spinlock_t *pmd_ptl, *pte_ptl;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002475 int isolated;
2476 unsigned long hstart, hend;
Johannes Weiner00501b52014-08-08 14:19:20 -07002477 struct mem_cgroup *memcg;
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07002478 unsigned long mmun_start; /* For mmu_notifiers */
2479 unsigned long mmun_end; /* For mmu_notifiers */
Michal Hocko3b363692015-04-15 16:13:29 -07002480 gfp_t gfp;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002481
2482 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
Andrea Arcangeli692e0b32011-05-24 17:12:14 -07002483
Michal Hocko3b363692015-04-15 16:13:29 -07002484 /* Only allocate from the target node */
2485 gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
2486 __GFP_THISNODE;
2487
Xiao Guangrong26234f32012-10-08 16:29:51 -07002488 /* release the mmap_sem read lock. */
Michal Hocko3b363692015-04-15 16:13:29 -07002489 new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
Xiao Guangrong26234f32012-10-08 16:29:51 -07002490 if (!new_page)
Andrea Arcangelice83d212011-01-13 15:47:06 -08002491 return;
Andrea Arcangelice83d212011-01-13 15:47:06 -08002492
Johannes Weiner00501b52014-08-08 14:19:20 -07002493 if (unlikely(mem_cgroup_try_charge(new_page, mm,
Michal Hocko3b363692015-04-15 16:13:29 -07002494 gfp, &memcg)))
Andrea Arcangeli692e0b32011-05-24 17:12:14 -07002495 return;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002496
2497 /*
2498	 * Prevent all access to the pagetables: gup_fast is handled
2499	 * later by the ptep_clear_flush below, and everything else in
2500	 * the VM by the anon_vma lock + PG_lock.
2501 */
2502 down_write(&mm->mmap_sem);
2503 if (unlikely(khugepaged_test_exit(mm)))
2504 goto out;
2505
2506 vma = find_vma(mm, address);
Libina8f531eb2013-09-11 14:20:38 -07002507 if (!vma)
2508 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002509 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2510 hend = vma->vm_end & HPAGE_PMD_MASK;
2511 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
2512 goto out;
Bob Liufa475e52012-12-11 16:00:39 -08002513 if (!hugepage_vma_check(vma))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002514 goto out;
Bob Liu62190492012-12-11 16:00:37 -08002515 pmd = mm_find_pmd(mm, address);
2516 if (!pmd)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002517 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002518
Ingo Molnar4fc3f1d2012-12-02 19:56:50 +00002519 anon_vma_lock_write(vma->anon_vma);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002520
2521 pte = pte_offset_map(pmd, address);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002522 pte_ptl = pte_lockptr(mm, pmd);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002523
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07002524 mmun_start = address;
2525 mmun_end = address + HPAGE_PMD_SIZE;
2526 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002527 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
Andrea Arcangeliba761492011-01-13 15:46:58 -08002528 /*
2529	 * After this, gup_fast can't run anymore.  This also flushes
2530	 * any huge TLB entry from the CPU, so we never allow huge and
2531	 * small TLB entries for the same virtual address at the same
2532	 * time, avoiding the risk of CPU bugs in that area.
2533 */
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07002534 _pmd = pmdp_clear_flush(vma, address, pmd);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002535 spin_unlock(pmd_ptl);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07002536 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002537
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002538 spin_lock(pte_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002539 isolated = __collapse_huge_page_isolate(vma, address, pte);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002540 spin_unlock(pte_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002541
2542 if (unlikely(!isolated)) {
Johannes Weiner453c7192011-01-20 14:44:18 -08002543 pte_unmap(pte);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002544 spin_lock(pmd_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002545 BUG_ON(!pmd_none(*pmd));
Aneesh Kumar K.V7c342512013-05-24 15:55:21 -07002546 /*
2547 * We can only use set_pmd_at when establishing
2548 * hugepmds and never for establishing regular pmds that
2549 * points to regular pagetables. Use pmd_populate for that
2550 */
2551 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002552 spin_unlock(pmd_ptl);
Konstantin Khlebnikov08b52702013-02-22 16:34:40 -08002553 anon_vma_unlock_write(vma->anon_vma);
Andrea Arcangelice83d212011-01-13 15:47:06 -08002554 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002555 }
2556
2557 /*
2558 * All pages are isolated and locked so anon_vma rmap
2559 * can't run anymore.
2560 */
Konstantin Khlebnikov08b52702013-02-22 16:34:40 -08002561 anon_vma_unlock_write(vma->anon_vma);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002562
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002563 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
Johannes Weiner453c7192011-01-20 14:44:18 -08002564 pte_unmap(pte);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002565 __SetPageUptodate(new_page);
2566 pgtable = pmd_pgtable(_pmd);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002567
Kirill A. Shutemov31223592013-09-12 15:14:01 -07002568 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2569 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002570
2571 /*
2572	 * The spin_lock() below is not the equivalent of smp_wmb(), so
2573	 * this barrier is needed to prevent the __collapse_huge_page_copy()
2574	 * writes from becoming visible after the set_pmd_at() write.
2575 */
2576 smp_wmb();
2577
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002578 spin_lock(pmd_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002579 BUG_ON(!pmd_none(*pmd));
2580 page_add_new_anon_rmap(new_page, vma, address);
Johannes Weiner00501b52014-08-08 14:19:20 -07002581 mem_cgroup_commit_charge(new_page, memcg, false);
2582 lru_cache_add_active_or_unevictable(new_page, vma);
Aneesh Kumar K.Vfce144b2013-06-05 17:14:06 -07002583 pgtable_trans_huge_deposit(mm, pmd, pgtable);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002584 set_pmd_at(mm, address, pmd, _pmd);
David Millerb113da62012-10-08 16:34:25 -07002585 update_mmu_cache_pmd(vma, address, pmd);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002586 spin_unlock(pmd_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002587
2588 *hpage = NULL;
Xiao Guangrong420256ef2012-10-08 16:29:49 -07002589
Andrea Arcangeliba761492011-01-13 15:46:58 -08002590 khugepaged_pages_collapsed++;
Andrea Arcangelice83d212011-01-13 15:47:06 -08002591out_up_write:
Andrea Arcangeliba761492011-01-13 15:46:58 -08002592 up_write(&mm->mmap_sem);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002593 return;
2594
Andrea Arcangelice83d212011-01-13 15:47:06 -08002595out:
Johannes Weiner00501b52014-08-08 14:19:20 -07002596 mem_cgroup_cancel_charge(new_page, memcg);
Andrea Arcangelice83d212011-01-13 15:47:06 -08002597 goto out_up_write;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002598}
2599
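/*
 * Scan the ptes under one pmd to decide whether collapsing is worthwhile:
 * tolerate up to khugepaged_max_ptes_none none/zero ptes, require every
 * other pte to map a present, unpinned, anonymous LRU page, require at
 * least one writable pte and one young/referenced page, and record each
 * page's node in khugepaged_node_load[].  Returns 1 after calling
 * collapse_huge_page() (which releases mmap_sem), 0 otherwise.
 */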
2600static int khugepaged_scan_pmd(struct mm_struct *mm,
2601 struct vm_area_struct *vma,
2602 unsigned long address,
2603 struct page **hpage)
2604{
Andrea Arcangeliba761492011-01-13 15:46:58 -08002605 pmd_t *pmd;
2606 pte_t *pte, *_pte;
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002607 int ret = 0, none_or_zero = 0;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002608 struct page *page;
2609 unsigned long _address;
2610 spinlock_t *ptl;
David Rientjes00ef2d22013-02-22 16:35:36 -08002611 int node = NUMA_NO_NODE;
Ebru Akagunduz10359212015-02-11 15:28:28 -08002612 bool writable = false, referenced = false;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002613
2614 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2615
Bob Liu62190492012-12-11 16:00:37 -08002616 pmd = mm_find_pmd(mm, address);
2617 if (!pmd)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002618 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002619
Bob Liu9f1b8682013-11-12 15:07:37 -08002620 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
Andrea Arcangeliba761492011-01-13 15:46:58 -08002621 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2622 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2623 _pte++, _address += PAGE_SIZE) {
2624 pte_t pteval = *_pte;
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002625 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2626 if (++none_or_zero <= khugepaged_max_ptes_none)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002627 continue;
2628 else
2629 goto out_unmap;
2630 }
Ebru Akagunduz10359212015-02-11 15:28:28 -08002631 if (!pte_present(pteval))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002632 goto out_unmap;
Ebru Akagunduz10359212015-02-11 15:28:28 -08002633 if (pte_write(pteval))
2634 writable = true;
2635
Andrea Arcangeliba761492011-01-13 15:46:58 -08002636 page = vm_normal_page(vma, _address, pteval);
2637 if (unlikely(!page))
2638 goto out_unmap;
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002639 /*
Bob Liu9f1b8682013-11-12 15:07:37 -08002640		 * Record which node the original page is from and save this
2641		 * information in khugepaged_node_load[].
2642		 * khugepaged will allocate the hugepage from the node with
2643		 * the most recorded hits.
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002644 */
Bob Liu9f1b8682013-11-12 15:07:37 -08002645 node = page_to_nid(page);
David Rientjes14a4e212014-08-06 16:07:29 -07002646 if (khugepaged_scan_abort(node))
2647 goto out_unmap;
Bob Liu9f1b8682013-11-12 15:07:37 -08002648 khugepaged_node_load[node]++;
Sasha Levin309381fea2014-01-23 15:52:54 -08002649 VM_BUG_ON_PAGE(PageCompound(page), page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002650 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2651 goto out_unmap;
Ebru Akagunduz10359212015-02-11 15:28:28 -08002652 /*
2653 * cannot use mapcount: can't collapse if there's a gup pin.
2654 * The page must only be referenced by the scanned process
2655 * and page swap cache.
2656 */
2657 if (page_count(page) != 1 + !!PageSwapCache(page))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002658 goto out_unmap;
Andrea Arcangeli8ee53822011-01-13 15:47:10 -08002659 if (pte_young(pteval) || PageReferenced(page) ||
2660 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz10359212015-02-11 15:28:28 -08002661 referenced = true;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002662 }
Ebru Akagunduz10359212015-02-11 15:28:28 -08002663 if (referenced && writable)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002664 ret = 1;
2665out_unmap:
2666 pte_unmap_unlock(pte, ptl);
Bob Liu9f1b8682013-11-12 15:07:37 -08002667 if (ret) {
2668 node = khugepaged_find_target_node();
Andrea Arcangelice83d212011-01-13 15:47:06 -08002669 /* collapse_huge_page will return with the mmap_sem released */
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002670 collapse_huge_page(mm, address, hpage, vma, node);
Bob Liu9f1b8682013-11-12 15:07:37 -08002671 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002672out:
2673 return ret;
2674}
2675
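/*
 * If the mm backing this mm_slot has exited, unhash the slot, unlink it
 * from the scan list, free it and drop the mm reference khugepaged held.
 */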
2676static void collect_mm_slot(struct mm_slot *mm_slot)
2677{
2678 struct mm_struct *mm = mm_slot->mm;
2679
Hugh Dickinsb9980cd2012-02-08 17:13:40 -08002680 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
Andrea Arcangeliba761492011-01-13 15:46:58 -08002681
2682 if (khugepaged_test_exit(mm)) {
2683 /* free mm_slot */
Sasha Levin43b5fbb2013-02-22 16:32:27 -08002684 hash_del(&mm_slot->hash);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002685 list_del(&mm_slot->mm_node);
2686
2687 /*
2688 * Not strictly needed because the mm exited already.
2689 *
2690 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2691 */
2692
2693 /* khugepaged_mm_lock actually not necessary for the below */
2694 free_mm_slot(mm_slot);
2695 mmdrop(mm);
2696 }
2697}
2698
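/*
 * Scan up to @pages ptes starting from the saved cursor
 * (khugepaged_scan.mm_slot / khugepaged_scan.address), walking vma by vma
 * through the current mm.  khugepaged_mm_lock is dropped while scanning
 * and retaken before returning, and the cursor is advanced (or the slot
 * released) so the next call resumes where this one stopped.  Returns the
 * progress made, roughly the number of ptes scanned.
 */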
2699static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2700 struct page **hpage)
H Hartley Sweeten2f1da642011-10-31 17:09:25 -07002701 __releases(&khugepaged_mm_lock)
2702 __acquires(&khugepaged_mm_lock)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002703{
2704 struct mm_slot *mm_slot;
2705 struct mm_struct *mm;
2706 struct vm_area_struct *vma;
2707 int progress = 0;
2708
2709 VM_BUG_ON(!pages);
Hugh Dickinsb9980cd2012-02-08 17:13:40 -08002710 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
Andrea Arcangeliba761492011-01-13 15:46:58 -08002711
2712 if (khugepaged_scan.mm_slot)
2713 mm_slot = khugepaged_scan.mm_slot;
2714 else {
2715 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2716 struct mm_slot, mm_node);
2717 khugepaged_scan.address = 0;
2718 khugepaged_scan.mm_slot = mm_slot;
2719 }
2720 spin_unlock(&khugepaged_mm_lock);
2721
2722 mm = mm_slot->mm;
2723 down_read(&mm->mmap_sem);
2724 if (unlikely(khugepaged_test_exit(mm)))
2725 vma = NULL;
2726 else
2727 vma = find_vma(mm, khugepaged_scan.address);
2728
2729 progress++;
2730 for (; vma; vma = vma->vm_next) {
2731 unsigned long hstart, hend;
2732
2733 cond_resched();
2734 if (unlikely(khugepaged_test_exit(mm))) {
2735 progress++;
2736 break;
2737 }
Bob Liufa475e52012-12-11 16:00:39 -08002738 if (!hugepage_vma_check(vma)) {
2739skip:
Andrea Arcangeliba761492011-01-13 15:46:58 -08002740 progress++;
2741 continue;
2742 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002743 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2744 hend = vma->vm_end & HPAGE_PMD_MASK;
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002745 if (hstart >= hend)
2746 goto skip;
2747 if (khugepaged_scan.address > hend)
2748 goto skip;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002749 if (khugepaged_scan.address < hstart)
2750 khugepaged_scan.address = hstart;
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002751 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002752
2753 while (khugepaged_scan.address < hend) {
2754 int ret;
2755 cond_resched();
2756 if (unlikely(khugepaged_test_exit(mm)))
2757 goto breakouterloop;
2758
2759 VM_BUG_ON(khugepaged_scan.address < hstart ||
2760 khugepaged_scan.address + HPAGE_PMD_SIZE >
2761 hend);
2762 ret = khugepaged_scan_pmd(mm, vma,
2763 khugepaged_scan.address,
2764 hpage);
2765 /* move to next address */
2766 khugepaged_scan.address += HPAGE_PMD_SIZE;
2767 progress += HPAGE_PMD_NR;
2768 if (ret)
2769 /* we released mmap_sem so break loop */
2770 goto breakouterloop_mmap_sem;
2771 if (progress >= pages)
2772 goto breakouterloop;
2773 }
2774 }
2775breakouterloop:
2776 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2777breakouterloop_mmap_sem:
2778
2779 spin_lock(&khugepaged_mm_lock);
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002780 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002781 /*
2782 * Release the current mm_slot if this mm is about to die, or
2783 * if we scanned all vmas of this mm.
2784 */
2785 if (khugepaged_test_exit(mm) || !vma) {
2786 /*
2787 * Make sure that if mm_users is reaching zero while
2788 * khugepaged runs here, khugepaged_exit will find
2789 * mm_slot not pointing to the exiting mm.
2790 */
2791 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2792 khugepaged_scan.mm_slot = list_entry(
2793 mm_slot->mm_node.next,
2794 struct mm_slot, mm_node);
2795 khugepaged_scan.address = 0;
2796 } else {
2797 khugepaged_scan.mm_slot = NULL;
2798 khugepaged_full_scans++;
2799 }
2800
2801 collect_mm_slot(mm_slot);
2802 }
2803
2804 return progress;
2805}
2806
2807static int khugepaged_has_work(void)
2808{
2809 return !list_empty(&khugepaged_scan.mm_head) &&
2810 khugepaged_enabled();
2811}
2812
2813static int khugepaged_wait_event(void)
2814{
2815 return !list_empty(&khugepaged_scan.mm_head) ||
Xiao Guangrong2017c0b2012-10-08 16:29:44 -07002816 kthread_should_stop();
Andrea Arcangeliba761492011-01-13 15:46:58 -08002817}
2818
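/*
 * One scan pass: make sure a hugepage is available to collapse into
 * (sleeping via khugepaged_alloc_sleep() and eventually bailing out when
 * allocation fails), then walk the mm slots until khugepaged_pages_to_scan
 * ptes have been covered or the thread is asked to stop or freeze.
 */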
Xiao Guangrongd5169042012-10-08 16:29:48 -07002819static void khugepaged_do_scan(void)
2820{
2821 struct page *hpage = NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002822 unsigned int progress = 0, pass_through_head = 0;
2823 unsigned int pages = khugepaged_pages_to_scan;
Xiao Guangrongd5169042012-10-08 16:29:48 -07002824 bool wait = true;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002825
2826	barrier(); /* read khugepaged_pages_to_scan just once, into the local 'pages' */
2827
2828 while (progress < pages) {
Xiao Guangrong26234f32012-10-08 16:29:51 -07002829 if (!khugepaged_prealloc_page(&hpage, &wait))
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002830 break;
Xiao Guangrong26234f32012-10-08 16:29:51 -07002831
Xiao Guangrong420256ef2012-10-08 16:29:49 -07002832 cond_resched();
Andrea Arcangeliba761492011-01-13 15:46:58 -08002833
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002834 if (unlikely(kthread_should_stop() || freezing(current)))
2835 break;
2836
Andrea Arcangeliba761492011-01-13 15:46:58 -08002837 spin_lock(&khugepaged_mm_lock);
2838 if (!khugepaged_scan.mm_slot)
2839 pass_through_head++;
2840 if (khugepaged_has_work() &&
2841 pass_through_head < 2)
2842 progress += khugepaged_scan_mm_slot(pages - progress,
Xiao Guangrongd5169042012-10-08 16:29:48 -07002843 &hpage);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002844 else
2845 progress = pages;
2846 spin_unlock(&khugepaged_mm_lock);
2847 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002848
Xiao Guangrongd5169042012-10-08 16:29:48 -07002849 if (!IS_ERR_OR_NULL(hpage))
2850 put_page(hpage);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002851}
2852
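/*
 * Sleep between scan passes: a freezable timed sleep of
 * khugepaged_scan_sleep_millisecs when more work is queued, otherwise an
 * indefinite freezable wait until work arrives or the thread is stopped.
 */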
Xiao Guangrong2017c0b2012-10-08 16:29:44 -07002853static void khugepaged_wait_work(void)
2854{
2855 try_to_freeze();
2856
2857 if (khugepaged_has_work()) {
2858 if (!khugepaged_scan_sleep_millisecs)
2859 return;
2860
2861 wait_event_freezable_timeout(khugepaged_wait,
2862 kthread_should_stop(),
2863 msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2864 return;
2865 }
2866
2867 if (khugepaged_enabled())
2868 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2869}
2870
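/*
 * Main loop of the khugepaged kernel thread: alternate scanning and
 * sleeping until kthread_should_stop(), then release any mm_slot that is
 * still under scan.
 */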
Andrea Arcangeliba761492011-01-13 15:46:58 -08002871static int khugepaged(void *none)
2872{
2873 struct mm_slot *mm_slot;
2874
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002875 set_freezable();
Dongsheng Yang8698a742014-03-11 18:09:12 +08002876 set_user_nice(current, MAX_NICE);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002877
Xiao Guangrongb7231782012-10-08 16:29:54 -07002878 while (!kthread_should_stop()) {
2879 khugepaged_do_scan();
2880 khugepaged_wait_work();
2881 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002882
2883 spin_lock(&khugepaged_mm_lock);
2884 mm_slot = khugepaged_scan.mm_slot;
2885 khugepaged_scan.mm_slot = NULL;
2886 if (mm_slot)
2887 collect_mm_slot(mm_slot);
2888 spin_unlock(&khugepaged_mm_lock);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002889 return 0;
2890}
2891
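/*
 * Split a huge zero-page pmd: clear and flush the pmd, repopulate it with
 * the deposited pagetable whose ptes all map the 4k zero page as special
 * ptes, and drop the huge zero page reference.
 */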
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002892static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2893 unsigned long haddr, pmd_t *pmd)
2894{
2895 struct mm_struct *mm = vma->vm_mm;
2896 pgtable_t pgtable;
2897 pmd_t _pmd;
2898 int i;
2899
Joerg Roedel34ee6452014-11-13 13:46:09 +11002900 pmdp_clear_flush_notify(vma, haddr, pmd);
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002901	/* leave the pmd empty until the ptes below are filled in */
2902
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07002903 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002904 pmd_populate(mm, &_pmd, pgtable);
2905
2906 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2907 pte_t *pte, entry;
2908 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2909 entry = pte_mkspecial(entry);
2910 pte = pte_offset_map(&_pmd, haddr);
2911 VM_BUG_ON(!pte_none(*pte));
2912 set_pte_at(mm, haddr, pte, entry);
2913 pte_unmap(pte);
2914 }
2915 smp_wmb(); /* make pte visible before pmd */
2916 pmd_populate(mm, pmd, pgtable);
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -08002917 put_huge_zero_page();
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002918}
2919
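/*
 * Split the huge pmd mapping @address.  Under the pmd lock: bail out if
 * the pmd is no longer trans-huge, hand huge zero-page pmds to
 * __split_huge_zero_page_pmd(), otherwise pin the page, drop the lock and
 * split_huge_page() it, retrying if a racing do_huge_pmd_wp_page()
 * installed another huge pmd in the meantime.
 */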
Kirill A. Shutemove1803772012-12-12 13:50:59 -08002920void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2921 pmd_t *pmd)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002922{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002923 spinlock_t *ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002924 struct page *page;
Kirill A. Shutemove1803772012-12-12 13:50:59 -08002925 struct mm_struct *mm = vma->vm_mm;
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002926 unsigned long haddr = address & HPAGE_PMD_MASK;
2927 unsigned long mmun_start; /* For mmu_notifiers */
2928 unsigned long mmun_end; /* For mmu_notifiers */
Kirill A. Shutemove1803772012-12-12 13:50:59 -08002929
2930 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002931
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002932 mmun_start = haddr;
2933 mmun_end = haddr + HPAGE_PMD_SIZE;
Hugh Dickins750e8162013-10-16 13:47:08 -07002934again:
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002935 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002936 ptl = pmd_lock(mm, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002937 if (unlikely(!pmd_trans_huge(*pmd))) {
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002938 spin_unlock(ptl);
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002939 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2940 return;
2941 }
2942 if (is_huge_zero_pmd(*pmd)) {
2943 __split_huge_zero_page_pmd(vma, haddr, pmd);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002944 spin_unlock(ptl);
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002945 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002946 return;
2947 }
2948 page = pmd_page(*pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08002949 VM_BUG_ON_PAGE(!page_count(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002950 get_page(page);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002951 spin_unlock(ptl);
Kirill A. Shutemovc5a647d2012-12-12 13:51:00 -08002952 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002953
2954 split_huge_page(page);
2955
2956 put_page(page);
Hugh Dickins750e8162013-10-16 13:47:08 -07002957
2958 /*
2959 * We don't always have down_write of mmap_sem here: a racing
2960 * do_huge_pmd_wp_page() might have copied-on-write to another
2961 * huge page before our split_huge_page() got the anon_vma lock.
2962 */
2963 if (unlikely(pmd_trans_huge(*pmd)))
2964 goto again;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002965}
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002966
Kirill A. Shutemove1803772012-12-12 13:50:59 -08002967void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
2968 pmd_t *pmd)
2969{
2970 struct vm_area_struct *vma;
2971
2972 vma = find_vma(mm, address);
2973 BUG_ON(vma == NULL);
2974 split_huge_page_pmd(vma, address, pmd);
2975}
2976
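/*
 * Split any huge pmd mapping the (not hpage-aligned) @address: walk the
 * pgd/pud/pmd and, if a pmd is present, let split_huge_page_pmd_mm()
 * deal with it.  Used by __vma_adjust_trans_huge() below when a vma
 * boundary moves into the middle of a range that may hold a hugepage.
 */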
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002977static void split_huge_page_address(struct mm_struct *mm,
2978 unsigned long address)
2979{
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002980 pgd_t *pgd;
2981 pud_t *pud;
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002982 pmd_t *pmd;
2983
2984 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2985
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002986 pgd = pgd_offset(mm, address);
2987 if (!pgd_present(*pgd))
2988 return;
2989
2990 pud = pud_offset(pgd, address);
2991 if (!pud_present(*pud))
2992 return;
2993
2994 pmd = pmd_offset(pud, address);
2995 if (!pmd_present(*pmd))
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002996 return;
2997 /*
2998 * Caller holds the mmap_sem write mode, so a huge pmd cannot
2999 * materialize from under us.
3000 */
Kirill A. Shutemove1803772012-12-12 13:50:59 -08003001 split_huge_page_pmd_mm(mm, address, pmd);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08003002}
3003
3004void __vma_adjust_trans_huge(struct vm_area_struct *vma,
3005 unsigned long start,
3006 unsigned long end,
3007 long adjust_next)
3008{
3009 /*
3010	 * If the new start address isn't hpage aligned and it could
3011	 * previously contain a hugepage: check if we need to split
3012	 * a huge pmd.
3013 */
3014 if (start & ~HPAGE_PMD_MASK &&
3015 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
3016 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3017 split_huge_page_address(vma->vm_mm, start);
3018
3019 /*
3020	 * If the new end address isn't hpage aligned and it could
3021	 * previously contain a hugepage: check if we need to split
3022	 * a huge pmd.
3023 */
3024 if (end & ~HPAGE_PMD_MASK &&
3025 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
3026 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3027 split_huge_page_address(vma->vm_mm, end);
3028
3029 /*
3030	 * If we're also updating vma->vm_next->vm_start, and the new
3031	 * vm_next->vm_start isn't page aligned and could previously
3032	 * contain a hugepage: check if we need to split a huge pmd.
3033 */
3034 if (adjust_next > 0) {
3035 struct vm_area_struct *next = vma->vm_next;
3036 unsigned long nstart = next->vm_start;
3037 nstart += adjust_next << PAGE_SHIFT;
3038 if (nstart & ~HPAGE_PMD_MASK &&
3039 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
3040 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
3041 split_huge_page_address(next->vm_mm, nstart);
3042 }
3043}