/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_NO_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it is
 * used for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* by default, scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse into a hugepage if at least one pte is mapped,
 * just as would have happened if the vma had been large enough at page
 * fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);
static void khugepaged_slab_exit(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

static DEFINE_SPINLOCK(split_queue_lock);
static LIST_HEAD(split_queue);
static unsigned long split_queue_len;
static struct shrinker deferred_split_shrinker;

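/*
 * Raise min_free_kbytes so that enough whole pageblocks stay free for
 * huge page allocations; called whenever khugepaged is (re)started, see
 * start_stop_khugepaged() below.
 */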
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}

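/*
 * Start or stop the khugepaged kernel thread according to the current
 * khugepaged_enabled() state, and refresh the min_free_kbytes hint when
 * it is running.
 */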
static int start_stop_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	return err;
}

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

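/*
 * Return the shared huge zero page, allocating it on first use. Callers
 * take a reference via huge_zero_refcount; the shrinker below frees the
 * page again once only the allocation-time reference remains.
 */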
struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take an additional reference here. It will be put back by the shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

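/*
 * Shrinker that frees the huge zero page under memory pressure, once the
 * only remaining reference is the one taken at allocation time.
 */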
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

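/*
 * sysfs interface. The attributes below end up under
 * /sys/kernel/mm/transparent_hugepage/ (and its khugepaged/ subdirectory),
 * registered by hugepage_init_sysfs() further down.
 */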
static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_stop_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in
 * the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

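/*
 * Subsystem init: register sysfs attributes, slab caches and shrinkers,
 * then start khugepaged unless the machine is too small to benefit from
 * transparent hugepages.
 */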
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_slab_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_slab_exit();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

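/*
 * Parse the transparent_hugepage=always|madvise|never kernel command line
 * parameter; it mirrors the "enabled" sysfs attribute above.
 */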
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;
	entry = mk_pmd(page, prot);
	entry = pmd_mkhuge(entry);
	return entry;
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping + ->index in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in the second tail page
	 * as list_head: assuming THP order >= 2
	 */
	BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

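/*
 * Install a newly allocated huge page at a page fault address: charge it
 * to the memcg, clear it, and map it with a huge PMD, unless userfaultfd
 * asks for the fault to be delivered to userspace instead.
 */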
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					struct page *page, gfp_t gfp,
					unsigned int flags)
{
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	spinlock_t *ptl;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(mm, pgtable);
			ret = handle_userfault(vma, address, flags,
					       VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&mm->nr_ptes);
		spin_unlock(ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

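/*
 * Anonymous huge page fault handler. Falls back to normal pages when a
 * huge mapping is not possible, maps the huge zero page for read faults
 * when enabled, and otherwise allocates and installs a new THP.
 */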
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (vma->vm_flags & VM_LOCKED)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
			transparent_hugepage_use_zero_page()) {
		spinlock_t *ptl;
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		int ret;
		pgtable = pte_alloc_one(mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = get_huge_zero_page();
		if (unlikely(!zero_page)) {
			pte_free(mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		ptl = pmd_lock(mm, pmd);
		ret = 0;
		set = false;
		if (pmd_none(*pmd)) {
			if (userfaultfd_missing(vma)) {
				spin_unlock(ptl);
				ret = handle_userfault(vma, address, flags,
						       VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, mm, vma,
						   haddr, pmd,
						   zero_page);
				spin_unlock(ptl);
				set = true;
			}
		} else
			spin_unlock(ptl);
		if (!set) {
			pte_free(mm, pgtable);
			put_huge_zero_page();
		}
		return ret;
	}
	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
					    flags);
}

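/*
 * Map a raw pfn (e.g. device memory such as DAX) with a huge PMD:
 * vmf_insert_pfn_pmd() sanity-checks the vma before insert_pfn_pmd()
 * installs the entry under the PMD lock.
 */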
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (pmd_none(*pmd)) {
		entry = pmd_mkhuge(pfn_pmd(pfn, prot));
		if (write) {
			entry = pmd_mkyoung(pmd_mkdirty(entry));
			entry = maybe_pmd_mkwrite(entry, vma);
		}
		set_pmd_at(mm, addr, pmd, entry);
		update_mmu_cache_pmd(vma, addr, pmd);
	}
	spin_unlock(ptl);
}

int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, unsigned long pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;
	if (track_pfn_insert(vma, &pgprot, pfn))
		return VM_FAULT_SIGBUS;
	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = get_huge_zero_page();
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page, true);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	atomic_long_inc(&dst_mm->nr_ptes);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

void huge_pmd_set_accessed(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long address,
			   pmd_t *pmd, pmd_t orig_pmd,
			   int dirty)
{
	spinlock_t *ptl;
	pmd_t entry;
	unsigned long haddr;

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	haddr = address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
		update_mmu_cache_pmd(vma, address, pmd);

unlock:
	spin_unlock(ptl);
}

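/*
 * COW fallback used when a new huge page cannot be allocated: copy the
 * data into HPAGE_PMD_NR ordinary pages and remap the range with a
 * regular page table.
 */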
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
						   &memcg, false))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg,
						false);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	pmdp_huge_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vma, haddr, false);
		mem_cgroup_commit_charge(pages[i], memcg, false, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page, true);
	spin_unlock(ptl);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		mem_cgroup_cancel_charge(pages[i], memcg, false);
		put_page(pages[i]);
	}
	kfree(pages);
	goto out;
}

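/*
 * Write-protection fault on a huge PMD. Reuse the page in place when this
 * mapping holds the only reference, otherwise allocate a new huge page
 * and copy; if that fails, split the PMD and fall back to small pages.
 */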
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	spinlock_t *ptl;
	int ret = 0;
	struct page *page = NULL, *new_page;
	struct mem_cgroup *memcg;
	unsigned long haddr;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t huge_gfp;			/* for allocation and charge */

	ptl = pmd_lockptr(mm, pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);
	haddr = address & HPAGE_PMD_MASK;
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(ptl);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	/*
	 * We can only reuse the page if nobody else maps the huge page or its
	 * part. We could check that with page_mapcount() on each sub-page,
	 * but that is expensive.
	 * The cheaper way is to check that page_count() equals 1: every
	 * mapcount takes a page reference, so this guarantees that the PMD
	 * is the only mapping.
	 * This can give a false negative if somebody pinned the page, but
	 * that's fine.
	 */
	if (page_mapcount(page) == 1 && page_count(page) == 1) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001208 pmd_t entry;
1209 entry = pmd_mkyoung(orig_pmd);
1210 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1211 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
David Millerb113da62012-10-08 16:34:25 -07001212 update_mmu_cache_pmd(vma, address, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001213 ret |= VM_FAULT_WRITE;
1214 goto out_unlock;
1215 }
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001216 get_page(page);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001217 spin_unlock(ptl);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001218alloc:
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001219 if (transparent_hugepage_enabled(vma) &&
Aneesh Kumar K.V077fcf12015-02-11 15:27:12 -08001220 !transparent_hugepage_debug_cow()) {
Michal Hocko3b363692015-04-15 16:13:29 -07001221 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
1222 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
Aneesh Kumar K.V077fcf12015-02-11 15:27:12 -08001223 } else
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001224 new_page = NULL;
1225
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08001226 if (likely(new_page)) {
1227 prep_transhuge_page(new_page);
1228 } else {
Hugh Dickinseecc1e42014-01-12 01:25:21 -08001229 if (!page) {
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08001230 split_huge_pmd(vma, pmd, address);
Kirill A. Shutemove9b71ca2014-04-03 14:48:17 -07001231 ret |= VM_FAULT_FALLBACK;
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001232 } else {
1233 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1234 pmd, orig_pmd, page, haddr);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08001235 if (ret & VM_FAULT_OOM) {
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08001236 split_huge_pmd(vma, pmd, address);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08001237 ret |= VM_FAULT_FALLBACK;
1238 }
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001239 put_page(page);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001240 }
David Rientjes17766dd2013-09-12 15:14:06 -07001241 count_vm_event(THP_FAULT_FALLBACK);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001242 goto out;
1243 }
1244
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001245 if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
1246 true))) {
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001247 put_page(new_page);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001248 if (page) {
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08001249 split_huge_pmd(vma, pmd, address);
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001250 put_page(page);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08001251 } else
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08001252 split_huge_pmd(vma, pmd, address);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08001253 ret |= VM_FAULT_FALLBACK;
David Rientjes17766dd2013-09-12 15:14:06 -07001254 count_vm_event(THP_FAULT_FALLBACK);
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001255 goto out;
1256 }
1257
David Rientjes17766dd2013-09-12 15:14:06 -07001258 count_vm_event(THP_FAULT_ALLOC);
1259
Hugh Dickinseecc1e42014-01-12 01:25:21 -08001260 if (!page)
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001261 clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1262 else
1263 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001264 __SetPageUptodate(new_page);
1265
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001266 mmun_start = haddr;
1267 mmun_end = haddr + HPAGE_PMD_SIZE;
1268 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1269
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001270 spin_lock(ptl);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001271 if (page)
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001272 put_page(page);
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001273 if (unlikely(!pmd_same(*pmd, orig_pmd))) {
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001274 spin_unlock(ptl);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001275 mem_cgroup_cancel_charge(new_page, memcg, true);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001276 put_page(new_page);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001277 goto out_mn;
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001278 } else {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001279 pmd_t entry;
Kirill A. Shutemov31223592013-09-12 15:14:01 -07001280 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1281 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07001282 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001283 page_add_new_anon_rmap(new_page, vma, haddr, true);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001284 mem_cgroup_commit_charge(new_page, memcg, false, true);
Johannes Weiner00501b52014-08-08 14:19:20 -07001285 lru_cache_add_active_or_unevictable(new_page, vma);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001286 set_pmd_at(mm, haddr, pmd, entry);
David Millerb113da62012-10-08 16:34:25 -07001287 update_mmu_cache_pmd(vma, address, pmd);
Hugh Dickinseecc1e42014-01-12 01:25:21 -08001288 if (!page) {
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001289 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -08001290 put_huge_zero_page();
1291 } else {
Sasha Levin309381fea2014-01-23 15:52:54 -08001292 VM_BUG_ON_PAGE(!PageHead(page), page);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001293 page_remove_rmap(page, true);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001294 put_page(page);
1295 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001296 ret |= VM_FAULT_WRITE;
1297 }
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001298 spin_unlock(ptl);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001299out_mn:
1300 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1301out:
1302 return ret;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001303out_unlock:
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001304 spin_unlock(ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001305 return ret;
1306}
1307
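/*
 * follow_trans_huge_pmd() serves follow_page()/GUP lookups on a huge pmd:
 * it returns the sub-page of the THP that backs @addr, honouring
 * FOLL_WRITE, FOLL_DUMP, FOLL_NUMA, FOLL_TOUCH, FOLL_MLOCK and FOLL_GET.
 * The caller must already hold the pmd lock (see the assert below).
 */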
David Rientjesb676b292012-10-08 16:34:03 -07001308struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001309 unsigned long addr,
1310 pmd_t *pmd,
1311 unsigned int flags)
1312{
David Rientjesb676b292012-10-08 16:34:03 -07001313 struct mm_struct *mm = vma->vm_mm;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001314 struct page *page = NULL;
1315
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001316 assert_spin_locked(pmd_lockptr(mm, pmd));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001317
1318 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1319 goto out;
1320
Kirill A. Shutemov85facf22013-02-04 14:28:42 -08001321 /* Avoid dumping huge zero page */
1322 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1323 return ERR_PTR(-EFAULT);
1324
Mel Gorman2b4847e2013-12-18 17:08:32 -08001325 /* Full NUMA hinting faults to serialise migration in fault paths */
Mel Gorman8a0516e2015-02-12 14:58:22 -08001326 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
Mel Gorman2b4847e2013-12-18 17:08:32 -08001327 goto out;
1328
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001329 page = pmd_page(*pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08001330 VM_BUG_ON_PAGE(!PageHead(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001331 if (flags & FOLL_TOUCH) {
1332 pmd_t _pmd;
1333 /*
1334 * We should set the dirty bit only for FOLL_WRITE but
1335 * for now the dirty bit in the pmd is meaningless.
1336 * And if the dirty bit ever becomes meaningful and
1337 * we only set it with FOLL_WRITE, an atomic
1338 * set_bit will be required on the pmd to set the
1339 * young bit, instead of the current set_pmd_at.
1340 */
1341 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
Aneesh Kumar K.V8663890a2013-06-06 00:20:34 -07001342 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1343 pmd, _pmd, 1))
1344 update_mmu_cache_pmd(vma, addr, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001345 }
Eric B Munsonde60f5f2015-11-05 18:51:36 -08001346 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
David Rientjesb676b292012-10-08 16:34:03 -07001347 if (page->mapping && trylock_page(page)) {
1348 lru_add_drain();
1349 if (page->mapping)
1350 mlock_vma_page(page);
1351 unlock_page(page);
1352 }
1353 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001354 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
Sasha Levin309381fea2014-01-23 15:52:54 -08001355 VM_BUG_ON_PAGE(!PageCompound(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001356 if (flags & FOLL_GET)
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001357 get_page(page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001358
1359out:
1360 return page;
1361}
1362
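/*
 * The fault below is taken when a task touches a THP whose pmd was made
 * protnone by the NUMA balancing code: the handler looks up the backing
 * page, asks mpol_misplaced() whether it should live on another node, and
 * either migrates the whole THP there or simply restores the vma's
 * protections. The page lock and anon_vma lock serialise this against
 * concurrent migrations and splits.
 */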
Mel Gormand10e63f2012-10-25 14:16:31 +02001363/* NUMA hinting page fault entry point for trans huge pmds */
Mel Gorman4daae3b2012-11-02 11:33:45 +00001364int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1365 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
Mel Gormand10e63f2012-10-25 14:16:31 +02001366{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001367 spinlock_t *ptl;
Mel Gormanb8916632013-10-07 11:28:44 +01001368 struct anon_vma *anon_vma = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001369 struct page *page;
Mel Gormand10e63f2012-10-25 14:16:31 +02001370 unsigned long haddr = addr & HPAGE_PMD_MASK;
Mel Gorman8191acb2013-10-07 11:28:45 +01001371 int page_nid = -1, this_nid = numa_node_id();
Peter Zijlstra90572892013-10-07 11:29:20 +01001372 int target_nid, last_cpupid = -1;
Mel Gorman8191acb2013-10-07 11:28:45 +01001373 bool page_locked;
1374 bool migrated = false;
Mel Gormanb191f9b2015-03-25 15:55:40 -07001375 bool was_writable;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001376 int flags = 0;
Mel Gormand10e63f2012-10-25 14:16:31 +02001377
Mel Gormanc0e7cad2015-02-12 14:58:41 -08001378 /* A PROT_NONE fault should not end up here */
1379 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1380
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001381 ptl = pmd_lock(mm, pmdp);
Mel Gormand10e63f2012-10-25 14:16:31 +02001382 if (unlikely(!pmd_same(pmd, *pmdp)))
1383 goto out_unlock;
1384
Mel Gormande466bd2013-12-18 17:08:42 -08001385 /*
1386 * If there are potential migrations, wait for completion and retry
1387 * without disrupting NUMA hinting information. Do not relock and
1388 * check_same as the page may no longer be mapped.
1389 */
1390 if (unlikely(pmd_trans_migrating(*pmdp))) {
Mel Gorman5d833062015-02-12 14:58:16 -08001391 page = pmd_page(*pmdp);
Mel Gormande466bd2013-12-18 17:08:42 -08001392 spin_unlock(ptl);
Mel Gorman5d833062015-02-12 14:58:16 -08001393 wait_on_page_locked(page);
Mel Gormande466bd2013-12-18 17:08:42 -08001394 goto out;
1395 }
1396
Mel Gormand10e63f2012-10-25 14:16:31 +02001397 page = pmd_page(pmd);
Mel Gormana1a46182013-10-07 11:28:50 +01001398 BUG_ON(is_huge_zero_page(page));
Mel Gorman8191acb2013-10-07 11:28:45 +01001399 page_nid = page_to_nid(page);
Peter Zijlstra90572892013-10-07 11:29:20 +01001400 last_cpupid = page_cpupid_last(page);
Mel Gorman03c5a6e2012-11-02 14:52:48 +00001401 count_vm_numa_event(NUMA_HINT_FAULTS);
Rik van Riel04bb2f92013-10-07 11:29:36 +01001402 if (page_nid == this_nid) {
Mel Gorman03c5a6e2012-11-02 14:52:48 +00001403 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
Rik van Riel04bb2f92013-10-07 11:29:36 +01001404 flags |= TNF_FAULT_LOCAL;
1405 }
Mel Gorman4daae3b2012-11-02 11:33:45 +00001406
Mel Gormanbea66fb2015-03-25 15:55:37 -07001407 /* See similar comment in do_numa_page for explanation */
1408 if (!(vma->vm_flags & VM_WRITE))
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001409 flags |= TNF_NO_GROUP;
1410
1411 /*
Mel Gormanff9042b2013-10-07 11:28:43 +01001412 * Acquire the page lock to serialise THP migrations but avoid dropping
1413 * page_table_lock if at all possible
1414 */
Mel Gormanb8916632013-10-07 11:28:44 +01001415 page_locked = trylock_page(page);
1416 target_nid = mpol_misplaced(page, vma, haddr);
1417 if (target_nid == -1) {
1418 /* If the page was locked, there are no parallel migrations */
Mel Gormana54a4072013-10-07 11:28:46 +01001419 if (page_locked)
Mel Gormanb8916632013-10-07 11:28:44 +01001420 goto clear_pmdnuma;
Mel Gorman2b4847e2013-12-18 17:08:32 -08001421 }
Mel Gorman4daae3b2012-11-02 11:33:45 +00001422
Mel Gormande466bd2013-12-18 17:08:42 -08001423 /* Migration could have started since the pmd_trans_migrating check */
Mel Gorman2b4847e2013-12-18 17:08:32 -08001424 if (!page_locked) {
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001425 spin_unlock(ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001426 wait_on_page_locked(page);
Mel Gormana54a4072013-10-07 11:28:46 +01001427 page_nid = -1;
Mel Gormanb8916632013-10-07 11:28:44 +01001428 goto out;
1429 }
1430
Mel Gorman2b4847e2013-12-18 17:08:32 -08001431 /*
1432 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1433 * to serialise splits
1434 */
Mel Gormanb8916632013-10-07 11:28:44 +01001435 get_page(page);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001436 spin_unlock(ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001437 anon_vma = page_lock_anon_vma_read(page);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001438
Peter Zijlstrac69307d2013-10-07 11:28:41 +01001439 /* Confirm the PMD did not change while page_table_lock was released */
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001440 spin_lock(ptl);
Mel Gormanb32967f2012-11-19 12:35:47 +00001441 if (unlikely(!pmd_same(pmd, *pmdp))) {
1442 unlock_page(page);
1443 put_page(page);
Mel Gormana54a4072013-10-07 11:28:46 +01001444 page_nid = -1;
Mel Gormanb32967f2012-11-19 12:35:47 +00001445 goto out_unlock;
1446 }
Mel Gormanff9042b2013-10-07 11:28:43 +01001447
Mel Gormanc3a489c2013-12-18 17:08:38 -08001448 /* Bail if we fail to protect against THP splits for any reason */
1449 if (unlikely(!anon_vma)) {
1450 put_page(page);
1451 page_nid = -1;
1452 goto clear_pmdnuma;
1453 }
1454
Mel Gormana54a4072013-10-07 11:28:46 +01001455 /*
1456 * Migrate the THP to the requested node; this returns with the page unlocked
Mel Gorman8a0516e2015-02-12 14:58:22 -08001457 * and access rights restored.
Mel Gormana54a4072013-10-07 11:28:46 +01001458 */
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001459 spin_unlock(ptl);
Mel Gormanb32967f2012-11-19 12:35:47 +00001460 migrated = migrate_misplaced_transhuge_page(mm, vma,
Hugh Dickins340ef392013-02-22 16:34:33 -08001461 pmdp, pmd, addr, page, target_nid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001462 if (migrated) {
1463 flags |= TNF_MIGRATED;
Mel Gorman8191acb2013-10-07 11:28:45 +01001464 page_nid = target_nid;
Mel Gorman074c2382015-03-25 15:55:42 -07001465 } else
1466 flags |= TNF_MIGRATE_FAIL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001467
Mel Gorman8191acb2013-10-07 11:28:45 +01001468 goto out;
Mel Gorman4daae3b2012-11-02 11:33:45 +00001469clear_pmdnuma:
Mel Gormana54a4072013-10-07 11:28:46 +01001470 BUG_ON(!PageLocked(page));
Mel Gormanb191f9b2015-03-25 15:55:40 -07001471 was_writable = pmd_write(pmd);
Mel Gorman4d942462015-02-12 14:58:28 -08001472 pmd = pmd_modify(pmd, vma->vm_page_prot);
Mel Gormanb7b04002015-03-25 15:55:45 -07001473 pmd = pmd_mkyoung(pmd);
Mel Gormanb191f9b2015-03-25 15:55:40 -07001474 if (was_writable)
1475 pmd = pmd_mkwrite(pmd);
Mel Gormand10e63f2012-10-25 14:16:31 +02001476 set_pmd_at(mm, haddr, pmdp, pmd);
Mel Gormand10e63f2012-10-25 14:16:31 +02001477 update_mmu_cache_pmd(vma, addr, pmdp);
Mel Gormana54a4072013-10-07 11:28:46 +01001478 unlock_page(page);
Mel Gormand10e63f2012-10-25 14:16:31 +02001479out_unlock:
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001480 spin_unlock(ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001481
1482out:
1483 if (anon_vma)
1484 page_unlock_anon_vma_read(anon_vma);
1485
Mel Gorman8191acb2013-10-07 11:28:45 +01001486 if (page_nid != -1)
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001487 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
Mel Gorman8191acb2013-10-07 11:28:45 +01001488
Mel Gormand10e63f2012-10-25 14:16:31 +02001489 return 0;
1490}
1491
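/*
 * zap_huge_pmd() tears down one huge pmd during unmap: it clears the pmd,
 * queues the TLB flush via the mmu_gather and then releases whatever backed
 * the mapping (the huge zero page, a DAX mapping, or a regular THP, in which
 * case the rmap, the RSS counter and the deposited page table go with it).
 * It returns 1 if a huge pmd was zapped and 0 if the pmd was not huge.
 */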
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001492int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
Shaohua Lif21760b2012-01-12 17:19:16 -08001493 pmd_t *pmd, unsigned long addr)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001494{
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001495 pmd_t orig_pmd;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001496 spinlock_t *ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001497
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001498 if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001499 return 0;
1500 /*
1501 * For architectures like ppc64 we look at deposited pgtable
1502 * when calling pmdp_huge_get_and_clear. So do the
1503 * pgtable_trans_huge_withdraw after finishing pmdp related
1504 * operations.
1505 */
1506 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1507 tlb->fullmm);
1508 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1509 if (vma_is_dax(vma)) {
1510 spin_unlock(ptl);
1511 if (is_huge_zero_pmd(orig_pmd))
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -08001512 put_huge_zero_page();
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001513 } else if (is_huge_zero_pmd(orig_pmd)) {
1514 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1515 atomic_long_dec(&tlb->mm->nr_ptes);
1516 spin_unlock(ptl);
1517 put_huge_zero_page();
1518 } else {
1519 struct page *page = pmd_page(orig_pmd);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001520 page_remove_rmap(page, true);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001521 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1522 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1523 VM_BUG_ON_PAGE(!PageHead(page), page);
1524 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1525 atomic_long_dec(&tlb->mm->nr_ptes);
1526 spin_unlock(ptl);
1527 tlb_remove_page(tlb, page);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001528 }
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001529 return 1;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001530}
1531
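/*
 * move_huge_pmd() relocates a huge pmd for mremap(): when both the old and
 * the new address are HPAGE_PMD_SIZE aligned, it moves the pmd entry (and,
 * where the architecture requires it, the deposited page table) under the
 * pmd locks instead of splitting the huge page. Returns true on success.
 */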
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001532bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001533 unsigned long old_addr,
1534 unsigned long new_addr, unsigned long old_end,
1535 pmd_t *old_pmd, pmd_t *new_pmd)
1536{
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001537 spinlock_t *old_ptl, *new_ptl;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001538 pmd_t pmd;
1539
1540 struct mm_struct *mm = vma->vm_mm;
1541
1542 if ((old_addr & ~HPAGE_PMD_MASK) ||
1543 (new_addr & ~HPAGE_PMD_MASK) ||
1544 old_end - old_addr < HPAGE_PMD_SIZE ||
1545 (new_vma->vm_flags & VM_NOHUGEPAGE))
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001546 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001547
1548 /*
1549 * The destination pmd shouldn't be established, free_pgtables()
1550 * should have released it.
1551 */
1552 if (WARN_ON(!pmd_none(*new_pmd))) {
1553 VM_BUG_ON(pmd_trans_huge(*new_pmd));
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001554 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001555 }
1556
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001557 /*
1558 * We don't have to worry about the ordering of src and dst
1559 * ptlocks because exclusive mmap_sem prevents deadlock.
1560 */
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001561 if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001562 new_ptl = pmd_lockptr(mm, new_pmd);
1563 if (new_ptl != old_ptl)
1564 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07001565 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001566 VM_BUG_ON(!pmd_none(*new_pmd));
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001567
Aneesh Kumar K.Vb3084f42014-01-13 11:34:24 +05301568 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1569 pgtable_t pgtable;
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001570 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1571 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001572 }
Aneesh Kumar K.Vb3084f42014-01-13 11:34:24 +05301573 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1574 if (new_ptl != old_ptl)
1575 spin_unlock(new_ptl);
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001576 spin_unlock(old_ptl);
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001577 return true;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001578 }
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001579 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001580}
1581
Mel Gormanf123d742013-10-07 11:28:49 +01001582/*
1583 * Returns
1584 * - 0 if PMD could not be locked
1585 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1586 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1587 */
Johannes Weinercd7548a2011-01-13 15:47:04 -08001588int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
Mel Gormane944fd62015-02-12 14:58:35 -08001589 unsigned long addr, pgprot_t newprot, int prot_numa)
Johannes Weinercd7548a2011-01-13 15:47:04 -08001590{
1591 struct mm_struct *mm = vma->vm_mm;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001592 spinlock_t *ptl;
Johannes Weinercd7548a2011-01-13 15:47:04 -08001593 int ret = 0;
1594
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001595 if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001596 pmd_t entry;
Mel Gormanb191f9b2015-03-25 15:55:40 -07001597 bool preserve_write = prot_numa && pmd_write(*pmd);
Mel Gormanba68bc02015-03-07 15:20:48 +00001598 ret = 1;
Mel Gormane944fd62015-02-12 14:58:35 -08001599
1600 /*
1601 * Avoid trapping faults against the zero page. The read-only
1602 * data is likely to be read-cached on the local CPU and
1603 * local/remote hits to the zero page are not interesting.
1604 */
1605 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1606 spin_unlock(ptl);
Mel Gormanba68bc02015-03-07 15:20:48 +00001607 return ret;
Mel Gormane944fd62015-02-12 14:58:35 -08001608 }
1609
Mel Gorman10c10452015-02-12 14:58:44 -08001610 if (!prot_numa || !pmd_protnone(*pmd)) {
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07001611 entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
Mel Gorman10c10452015-02-12 14:58:44 -08001612 entry = pmd_modify(entry, newprot);
Mel Gormanb191f9b2015-03-25 15:55:40 -07001613 if (preserve_write)
1614 entry = pmd_mkwrite(entry);
Mel Gorman10c10452015-02-12 14:58:44 -08001615 ret = HPAGE_PMD_NR;
1616 set_pmd_at(mm, addr, pmd, entry);
Mel Gormanb191f9b2015-03-25 15:55:40 -07001617 BUG_ON(!preserve_write && pmd_write(entry));
Mel Gorman10c10452015-02-12 14:58:44 -08001618 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001619 spin_unlock(ptl);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001620 }
Johannes Weinercd7548a2011-01-13 15:47:04 -08001621
1622 return ret;
1623}
1624
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001625/*
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001626 * Returns true if a given pmd maps a thp, false otherwise.
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001627 *
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001628 * Note that if it returns true, this routine returns without unlocking page
1629 * table lock. So callers must unlock it.
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001630 */
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001631bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001632 spinlock_t **ptl)
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001633{
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001634 *ptl = pmd_lock(vma->vm_mm, pmd);
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001635 if (likely(pmd_trans_huge(*pmd)))
1636 return true;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001637 spin_unlock(*ptl);
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001638 return false;
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001639}
1640
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001641/*
1642 * This function returns whether a given @page is mapped onto the @address
1643 * in the virtual space of @mm.
1644 *
1645 * When the page is mapped, this function returns the pmd with the page
1646 * table lock held and passes the lock back to the caller via @ptl.
1647 * Otherwise it returns NULL without holding the page table lock.
1648 */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001649pmd_t *page_check_address_pmd(struct page *page,
1650 struct mm_struct *mm,
1651 unsigned long address,
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001652 spinlock_t **ptl)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001653{
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001654 pgd_t *pgd;
1655 pud_t *pud;
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001656 pmd_t *pmd;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001657
1658 if (address & ~HPAGE_PMD_MASK)
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001659 return NULL;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001660
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001661 pgd = pgd_offset(mm, address);
1662 if (!pgd_present(*pgd))
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001663 return NULL;
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001664 pud = pud_offset(pgd, address);
1665 if (!pud_present(*pud))
1666 return NULL;
1667 pmd = pmd_offset(pud, address);
1668
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001669 *ptl = pmd_lock(mm, pmd);
Kirill A. Shutemovb5a8cad2014-04-18 15:07:25 -07001670 if (!pmd_present(*pmd))
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001671 goto unlock;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001672 if (pmd_page(*pmd) != page)
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001673 goto unlock;
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001674 if (pmd_trans_huge(*pmd))
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001675 return pmd;
Kirill A. Shutemov117b0792013-11-14 14:30:56 -08001676unlock:
1677 spin_unlock(*ptl);
1678 return NULL;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001679}
1680
Vlastimil Babka9050d7e2014-03-03 15:38:27 -08001681#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001682
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001683int hugepage_madvise(struct vm_area_struct *vma,
1684 unsigned long *vm_flags, int advice)
Andrea Arcangeli0af4e982011-01-13 15:46:55 -08001685{
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001686 switch (advice) {
1687 case MADV_HUGEPAGE:
Alex Thorlton1e1836e2014-04-07 15:37:09 -07001688#ifdef CONFIG_S390
1689 /*
1690 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1691 * can't handle this properly after s390_enable_sie, so we simply
1692 * ignore the madvise to prevent qemu from causing a SIGSEGV.
1693 */
1694 if (mm_has_pgste(vma->vm_mm))
1695 return 0;
1696#endif
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001697 /*
1698 * Be somewhat over-protective like KSM for now!
1699 */
Jason J. Herne1a763612015-11-20 15:57:04 -08001700 if (*vm_flags & VM_NO_THP)
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001701 return -EINVAL;
1702 *vm_flags &= ~VM_NOHUGEPAGE;
1703 *vm_flags |= VM_HUGEPAGE;
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001704 /*
1705 * If the vma becomes good for khugepaged to scan,
1706 * register it here without waiting for a page fault that
1707 * may not happen any time soon.
1708 */
David Rientjes6d50e602014-10-29 14:50:31 -07001709 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001710 return -ENOMEM;
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001711 break;
1712 case MADV_NOHUGEPAGE:
1713 /*
1714 * Be somewhat over-protective like KSM for now!
1715 */
Jason J. Herne1a763612015-11-20 15:57:04 -08001716 if (*vm_flags & VM_NO_THP)
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001717 return -EINVAL;
1718 *vm_flags &= ~VM_HUGEPAGE;
1719 *vm_flags |= VM_NOHUGEPAGE;
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001720 /*
1721 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1722 * this vma, even if the mm stays registered in khugepaged because
1723 * it got registered before VM_NOHUGEPAGE was set.
1724 */
Andrea Arcangelia664b2d2011-01-13 15:47:17 -08001725 break;
1726 }
Andrea Arcangeli0af4e982011-01-13 15:46:55 -08001727
1728 return 0;
1729}
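
/*
 * Illustrative only: userspace reaches hugepage_madvise() through the
 * madvise(2) syscall, for example (assuming "buf" is an anonymous mapping
 * of "len" bytes):
 *
 *	madvise(buf, len, MADV_HUGEPAGE);	opt the range in
 *	madvise(buf, len, MADV_NOHUGEPAGE);	opt the range out
 */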
1730
Andrea Arcangeliba761492011-01-13 15:46:58 -08001731static int __init khugepaged_slab_init(void)
1732{
1733 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1734 sizeof(struct mm_slot),
1735 __alignof__(struct mm_slot), 0, NULL);
1736 if (!mm_slot_cache)
1737 return -ENOMEM;
1738
1739 return 0;
1740}
1741
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -07001742static void __init khugepaged_slab_exit(void)
1743{
1744 kmem_cache_destroy(mm_slot_cache);
1745}
1746
Andrea Arcangeliba761492011-01-13 15:46:58 -08001747static inline struct mm_slot *alloc_mm_slot(void)
1748{
1749 if (!mm_slot_cache) /* initialization failed */
1750 return NULL;
1751 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1752}
1753
1754static inline void free_mm_slot(struct mm_slot *mm_slot)
1755{
1756 kmem_cache_free(mm_slot_cache, mm_slot);
1757}
1758
Andrea Arcangeliba761492011-01-13 15:46:58 -08001759static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1760{
1761 struct mm_slot *mm_slot;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001762
Sasha Levinb67bfe02013-02-27 17:06:00 -08001763 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
Andrea Arcangeliba761492011-01-13 15:46:58 -08001764 if (mm == mm_slot->mm)
1765 return mm_slot;
Sasha Levin43b5fbb2013-02-22 16:32:27 -08001766
Andrea Arcangeliba761492011-01-13 15:46:58 -08001767 return NULL;
1768}
1769
1770static void insert_to_mm_slots_hash(struct mm_struct *mm,
1771 struct mm_slot *mm_slot)
1772{
Andrea Arcangeliba761492011-01-13 15:46:58 -08001773 mm_slot->mm = mm;
Sasha Levin43b5fbb2013-02-22 16:32:27 -08001774 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001775}
1776
1777static inline int khugepaged_test_exit(struct mm_struct *mm)
1778{
1779 return atomic_read(&mm->mm_users) == 0;
1780}
1781
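/*
 * __khugepaged_enter() registers an mm with khugepaged: it allocates a
 * mm_slot, hashes it, queues it behind the scanning cursor and takes a
 * mm_count reference so the mm_struct stays around until khugepaged drops
 * it again (via mmdrop() in __khugepaged_exit() or collect_mm_slot()).
 */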
1782int __khugepaged_enter(struct mm_struct *mm)
1783{
1784 struct mm_slot *mm_slot;
1785 int wakeup;
1786
1787 mm_slot = alloc_mm_slot();
1788 if (!mm_slot)
1789 return -ENOMEM;
1790
1791 /* __khugepaged_exit() must not run from under us */
Sasha Levin96dad672014-10-09 15:28:39 -07001792 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001793 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1794 free_mm_slot(mm_slot);
1795 return 0;
1796 }
1797
1798 spin_lock(&khugepaged_mm_lock);
1799 insert_to_mm_slots_hash(mm, mm_slot);
1800 /*
1801 * Insert just behind the scanning cursor, to let the area settle
1802 * down a little.
1803 */
1804 wakeup = list_empty(&khugepaged_scan.mm_head);
1805 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1806 spin_unlock(&khugepaged_mm_lock);
1807
1808 atomic_inc(&mm->mm_count);
1809 if (wakeup)
1810 wake_up_interruptible(&khugepaged_wait);
1811
1812 return 0;
1813}
1814
David Rientjes6d50e602014-10-29 14:50:31 -07001815int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1816 unsigned long vm_flags)
Andrea Arcangeliba761492011-01-13 15:46:58 -08001817{
1818 unsigned long hstart, hend;
1819 if (!vma->anon_vma)
1820 /*
1821 * Not yet faulted in so we will register later in the
1822 * page fault if needed.
1823 */
1824 return 0;
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001825 if (vma->vm_ops)
Andrea Arcangeliba761492011-01-13 15:46:58 -08001826 /* khugepaged not yet working on file or special mappings */
1827 return 0;
David Rientjes6d50e602014-10-29 14:50:31 -07001828 VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001829 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1830 hend = vma->vm_end & HPAGE_PMD_MASK;
1831 if (hstart < hend)
David Rientjes6d50e602014-10-29 14:50:31 -07001832 return khugepaged_enter(vma, vm_flags);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001833 return 0;
1834}
1835
1836void __khugepaged_exit(struct mm_struct *mm)
1837{
1838 struct mm_slot *mm_slot;
1839 int free = 0;
1840
1841 spin_lock(&khugepaged_mm_lock);
1842 mm_slot = get_mm_slot(mm);
1843 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
Sasha Levin43b5fbb2013-02-22 16:32:27 -08001844 hash_del(&mm_slot->hash);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001845 list_del(&mm_slot->mm_node);
1846 free = 1;
1847 }
Chris Wrightd788e802011-07-25 17:12:14 -07001848 spin_unlock(&khugepaged_mm_lock);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001849
1850 if (free) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08001851 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1852 free_mm_slot(mm_slot);
1853 mmdrop(mm);
1854 } else if (mm_slot) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08001855 /*
1856 * This is required to serialize against
1857 * khugepaged_test_exit() (which is guaranteed to run
1858 * under mmap_sem read mode). Stop here (all pagetables
1859 * will be destroyed after we return) until
1860 * khugepaged has finished working on the pagetables
1861 * under the mmap_sem.
1862 */
1863 down_write(&mm->mmap_sem);
1864 up_write(&mm->mmap_sem);
Chris Wrightd788e802011-07-25 17:12:14 -07001865 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08001866}
1867
1868static void release_pte_page(struct page *page)
1869{
1870 /* 0 stands for page_is_file_cache(page) == false */
1871 dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1872 unlock_page(page);
1873 putback_lru_page(page);
1874}
1875
1876static void release_pte_pages(pte_t *pte, pte_t *_pte)
1877{
1878 while (--_pte >= pte) {
1879 pte_t pteval = *_pte;
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07001880 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
Andrea Arcangeliba761492011-01-13 15:46:58 -08001881 release_pte_page(pte_page(pteval));
1882 }
1883}
1884
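/*
 * __collapse_huge_page_isolate() runs with the pte page table lock held and
 * validates the HPAGE_PMD_NR ptes that are about to be collapsed: every
 * present pte must point to an anonymous, unpinned page that can be locked
 * and isolated from the LRU, only a bounded number of none/zero ptes is
 * tolerated, and at least one pte must be writable and one referenced.
 * On success it returns 1 with all pages locked and isolated; on failure it
 * puts everything back and returns 0.
 */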
Andrea Arcangeliba761492011-01-13 15:46:58 -08001885static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1886 unsigned long address,
1887 pte_t *pte)
1888{
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001889 struct page *page = NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001890 pte_t *_pte;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001891 int none_or_zero = 0, result = 0;
Ebru Akagunduz10359212015-02-11 15:28:28 -08001892 bool referenced = false, writable = false;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001893
Andrea Arcangeliba761492011-01-13 15:46:58 -08001894 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1895 _pte++, address += PAGE_SIZE) {
1896 pte_t pteval = *_pte;
Minchan Kim47aee4d2015-10-22 13:32:19 -07001897 if (pte_none(pteval) || (pte_present(pteval) &&
1898 is_zero_pfn(pte_pfn(pteval)))) {
Andrea Arcangelic1294d02015-09-04 15:46:27 -07001899 if (!userfaultfd_armed(vma) &&
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001900 ++none_or_zero <= khugepaged_max_ptes_none) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08001901 continue;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001902 } else {
1903 result = SCAN_EXCEED_NONE_PTE;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001904 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001905 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08001906 }
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001907 if (!pte_present(pteval)) {
1908 result = SCAN_PTE_NON_PRESENT;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001909 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001910 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08001911 page = vm_normal_page(vma, address, pteval);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001912 if (unlikely(!page)) {
1913 result = SCAN_PAGE_NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001914 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001915 }
Bob Liu344aa352012-12-11 16:00:34 -08001916
Sasha Levin309381fea2014-01-23 15:52:54 -08001917 VM_BUG_ON_PAGE(PageCompound(page), page);
1918 VM_BUG_ON_PAGE(!PageAnon(page), page);
1919 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001920
Andrea Arcangeliba761492011-01-13 15:46:58 -08001921 /*
1922 * We can do it before isolate_lru_page because the
1923 * page can't be freed from under us. NOTE: PG_lock
1924 * is needed to serialize against split_huge_page
1925 * when invoked from the VM.
1926 */
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001927 if (!trylock_page(page)) {
1928 result = SCAN_PAGE_LOCK;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001929 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001930 }
Ebru Akagunduz10359212015-02-11 15:28:28 -08001931
1932 /*
1933 * cannot use mapcount: can't collapse if there's a gup pin.
1934 * The page must only be referenced by the scanned process
1935 * and page swap cache.
1936 */
1937 if (page_count(page) != 1 + !!PageSwapCache(page)) {
1938 unlock_page(page);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001939 result = SCAN_PAGE_COUNT;
Ebru Akagunduz10359212015-02-11 15:28:28 -08001940 goto out;
1941 }
1942 if (pte_write(pteval)) {
1943 writable = true;
1944 } else {
1945 if (PageSwapCache(page) && !reuse_swap_page(page)) {
1946 unlock_page(page);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001947 result = SCAN_SWAP_CACHE_PAGE;
Ebru Akagunduz10359212015-02-11 15:28:28 -08001948 goto out;
1949 }
1950 /*
1951 * Page is not in the swap cache. It can be collapsed
1952 * into a THP.
1953 */
1954 }
1955
Andrea Arcangeliba761492011-01-13 15:46:58 -08001956 /*
1957 * Isolate the page to avoid collapsing a hugepage
1958 * currently in use by the VM.
1959 */
1960 if (isolate_lru_page(page)) {
1961 unlock_page(page);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001962 result = SCAN_DEL_PAGE_LRU;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001963 goto out;
1964 }
1965 /* 0 stands for page_is_file_cache(page) == false */
1966 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
Sasha Levin309381fea2014-01-23 15:52:54 -08001967 VM_BUG_ON_PAGE(!PageLocked(page), page);
1968 VM_BUG_ON_PAGE(PageLRU(page), page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001969
1970 /* If there is no mapped pte young don't collapse the page */
Vladimir Davydov33c3fc72015-09-09 15:35:45 -07001971 if (pte_young(pteval) ||
1972 page_is_young(page) || PageReferenced(page) ||
Andrea Arcangeli8ee53822011-01-13 15:47:10 -08001973 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz10359212015-02-11 15:28:28 -08001974 referenced = true;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001975 }
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001976 if (likely(writable)) {
1977 if (likely(referenced)) {
1978 result = SCAN_SUCCEED;
1979 trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
1980 referenced, writable, result);
1981 return 1;
1982 }
1983 } else {
1984 result = SCAN_PAGE_RO;
1985 }
1986
Andrea Arcangeliba761492011-01-13 15:46:58 -08001987out:
Bob Liu344aa352012-12-11 16:00:34 -08001988 release_pte_pages(pte, _pte);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08001989 trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
1990 referenced, writable, result);
Bob Liu344aa352012-12-11 16:00:34 -08001991 return 0;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001992}
1993
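/*
 * __collapse_huge_page_copy() fills the new huge page from the isolated
 * small pages: pte_none/zero-pfn entries become cleared sub-pages, anything
 * else is copied with copy_user_highpage(), after which the old pte is
 * cleared, the rmap of the source page is dropped and the source page is
 * freed back (together with its swap cache entry, if any).
 */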
1994static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1995 struct vm_area_struct *vma,
1996 unsigned long address,
1997 spinlock_t *ptl)
1998{
1999 pte_t *_pte;
2000 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2001 pte_t pteval = *_pte;
2002 struct page *src_page;
2003
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002004 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08002005 clear_user_highpage(page, address);
2006 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002007 if (is_zero_pfn(pte_pfn(pteval))) {
2008 /*
2009 * ptl mostly unnecessary.
2010 */
2011 spin_lock(ptl);
2012 /*
2013 * paravirt calls inside pte_clear here are
2014 * superfluous.
2015 */
2016 pte_clear(vma->vm_mm, address, _pte);
2017 spin_unlock(ptl);
2018 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002019 } else {
2020 src_page = pte_page(pteval);
2021 copy_user_highpage(page, src_page, address, vma);
Sasha Levin309381fea2014-01-23 15:52:54 -08002022 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002023 release_pte_page(src_page);
2024 /*
2025 * ptl mostly unnecessary, but preempt has to
2026 * be disabled to update the per-cpu stats
2027 * inside page_remove_rmap().
2028 */
2029 spin_lock(ptl);
2030 /*
2031 * paravirt calls inside pte_clear here are
2032 * superfluous.
2033 */
2034 pte_clear(vma->vm_mm, address, _pte);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002035 page_remove_rmap(src_page, false);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002036 spin_unlock(ptl);
2037 free_page_and_swap_cache(src_page);
2038 }
2039
2040 address += PAGE_SIZE;
2041 page++;
2042 }
2043}
2044
Xiao Guangrong26234f32012-10-08 16:29:51 -07002045static void khugepaged_alloc_sleep(void)
2046{
Petr Mladekbde43c62015-09-08 15:04:05 -07002047 DEFINE_WAIT(wait);
2048
2049 add_wait_queue(&khugepaged_wait, &wait);
2050 freezable_schedule_timeout_interruptible(
2051 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2052 remove_wait_queue(&khugepaged_wait, &wait);
Xiao Guangrong26234f32012-10-08 16:29:51 -07002053}
2054
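/*
 * khugepaged_node_load[] counts, for the pmd range currently being scanned,
 * how many of the existing small pages live on each node;
 * khugepaged_find_target_node() then picks the node with the highest count
 * as the allocation target for the collapsed hugepage.
 */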
Bob Liu9f1b8682013-11-12 15:07:37 -08002055static int khugepaged_node_load[MAX_NUMNODES];
2056
David Rientjes14a4e212014-08-06 16:07:29 -07002057static bool khugepaged_scan_abort(int nid)
2058{
2059 int i;
2060
2061 /*
2062 * If zone_reclaim_mode is disabled, then no extra effort is made to
2063 * allocate memory locally.
2064 */
2065 if (!zone_reclaim_mode)
2066 return false;
2067
2068 /* If there is a count for this node already, it must be acceptable */
2069 if (khugepaged_node_load[nid])
2070 return false;
2071
2072 for (i = 0; i < MAX_NUMNODES; i++) {
2073 if (!khugepaged_node_load[i])
2074 continue;
2075 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2076 return true;
2077 }
2078 return false;
2079}
2080
Xiao Guangrong26234f32012-10-08 16:29:51 -07002081#ifdef CONFIG_NUMA
Bob Liu9f1b8682013-11-12 15:07:37 -08002082static int khugepaged_find_target_node(void)
2083{
2084 static int last_khugepaged_target_node = NUMA_NO_NODE;
2085 int nid, target_node = 0, max_value = 0;
2086
2087 /* find first node with max normal pages hit */
2088 for (nid = 0; nid < MAX_NUMNODES; nid++)
2089 if (khugepaged_node_load[nid] > max_value) {
2090 max_value = khugepaged_node_load[nid];
2091 target_node = nid;
2092 }
2093
2094 /* do some balancing if several nodes have the same hit record */
2095 if (target_node <= last_khugepaged_target_node)
2096 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2097 nid++)
2098 if (max_value == khugepaged_node_load[nid]) {
2099 target_node = nid;
2100 break;
2101 }
2102
2103 last_khugepaged_target_node = target_node;
2104 return target_node;
2105}
2106
Xiao Guangrong26234f32012-10-08 16:29:51 -07002107static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2108{
2109 if (IS_ERR(*hpage)) {
2110 if (!*wait)
2111 return false;
2112
2113 *wait = false;
Xiao Guangronge3b41262012-10-08 16:32:57 -07002114 *hpage = NULL;
Xiao Guangrong26234f32012-10-08 16:29:51 -07002115 khugepaged_alloc_sleep();
2116 } else if (*hpage) {
2117 put_page(*hpage);
2118 *hpage = NULL;
2119 }
2120
2121 return true;
2122}
2123
Michal Hocko3b363692015-04-15 16:13:29 -07002124static struct page *
2125khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
Aaron Tomlind6669d62015-11-06 16:28:52 -08002126 unsigned long address, int node)
Xiao Guangrong26234f32012-10-08 16:29:51 -07002127{
Sasha Levin309381fea2014-01-23 15:52:54 -08002128 VM_BUG_ON_PAGE(*hpage, *hpage);
Vlastimil Babka8b164562014-10-09 15:27:00 -07002129
Xiao Guangrong26234f32012-10-08 16:29:51 -07002130 /*
Vlastimil Babka8b164562014-10-09 15:27:00 -07002131 * Before allocating the hugepage, release the mmap_sem read lock.
2132 * The allocation can potentially take a long time if it involves
2133 * sync compaction, and we do not need to hold the mmap_sem during
2134 * that. We will recheck the vma after taking it again in write mode.
Xiao Guangrong26234f32012-10-08 16:29:51 -07002135 */
2136 up_read(&mm->mmap_sem);
Vlastimil Babka8b164562014-10-09 15:27:00 -07002137
Vlastimil Babka96db8002015-09-08 15:03:50 -07002138 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
Xiao Guangrong26234f32012-10-08 16:29:51 -07002139 if (unlikely(!*hpage)) {
2140 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2141 *hpage = ERR_PTR(-ENOMEM);
2142 return NULL;
2143 }
2144
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002145 prep_transhuge_page(*hpage);
Xiao Guangrong26234f32012-10-08 16:29:51 -07002146 count_vm_event(THP_COLLAPSE_ALLOC);
2147 return *hpage;
2148}
2149#else
Bob Liu9f1b8682013-11-12 15:07:37 -08002150static int khugepaged_find_target_node(void)
2151{
2152 return 0;
2153}
2154
Bob Liu10dc4152013-11-12 15:07:35 -08002155static inline struct page *alloc_hugepage(int defrag)
2156{
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002157 struct page *page;
2158
2159 page = alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER);
2160 if (page)
2161 prep_transhuge_page(page);
2162 return page;
Bob Liu10dc4152013-11-12 15:07:35 -08002163}
2164
Xiao Guangrong26234f32012-10-08 16:29:51 -07002165static struct page *khugepaged_alloc_hugepage(bool *wait)
2166{
2167 struct page *hpage;
2168
2169 do {
2170 hpage = alloc_hugepage(khugepaged_defrag());
2171 if (!hpage) {
2172 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2173 if (!*wait)
2174 return NULL;
2175
2176 *wait = false;
2177 khugepaged_alloc_sleep();
2178 } else
2179 count_vm_event(THP_COLLAPSE_ALLOC);
2180 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2181
2182 return hpage;
2183}
2184
2185static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2186{
2187 if (!*hpage)
2188 *hpage = khugepaged_alloc_hugepage(wait);
2189
2190 if (unlikely(!*hpage))
2191 return false;
2192
2193 return true;
2194}
2195
Michal Hocko3b363692015-04-15 16:13:29 -07002196static struct page *
2197khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
Aaron Tomlind6669d62015-11-06 16:28:52 -08002198 unsigned long address, int node)
Xiao Guangrong26234f32012-10-08 16:29:51 -07002199{
2200 up_read(&mm->mmap_sem);
2201 VM_BUG_ON(!*hpage);
Michal Hocko3b363692015-04-15 16:13:29 -07002202
Xiao Guangrong26234f32012-10-08 16:29:51 -07002203 return *hpage;
2204}
2205#endif
2206
Bob Liufa475e52012-12-11 16:00:39 -08002207static bool hugepage_vma_check(struct vm_area_struct *vma)
2208{
2209 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2210 (vma->vm_flags & VM_NOHUGEPAGE))
2211 return false;
Kirill A. Shutemov7479df62016-01-15 16:52:35 -08002212 if (vma->vm_flags & VM_LOCKED)
2213 return false;
Bob Liufa475e52012-12-11 16:00:39 -08002214 if (!vma->anon_vma || vma->vm_ops)
2215 return false;
2216 if (is_vma_temporary_stack(vma))
2217 return false;
Sasha Levin81d1b092014-10-09 15:28:10 -07002218 VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
Bob Liufa475e52012-12-11 16:00:39 -08002219 return true;
2220}
2221
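/*
 * collapse_huge_page() performs the actual collapse: allocate a hugepage on
 * the chosen node (dropping the mmap_sem read lock for the allocation),
 * charge it, retake mmap_sem for write, revalidate the vma, clear the pmd
 * with pmdp_collapse_flush() so gup_fast cannot race, isolate and copy the
 * old pages, and finally install the new huge pmd with the old page table
 * deposited for a possible later split.
 */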
Andrea Arcangeliba761492011-01-13 15:46:58 -08002222static void collapse_huge_page(struct mm_struct *mm,
Xiao Guangrong26234f32012-10-08 16:29:51 -07002223 unsigned long address,
2224 struct page **hpage,
2225 struct vm_area_struct *vma,
2226 int node)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002227{
Andrea Arcangeliba761492011-01-13 15:46:58 -08002228 pmd_t *pmd, _pmd;
2229 pte_t *pte;
2230 pgtable_t pgtable;
2231 struct page *new_page;
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002232 spinlock_t *pmd_ptl, *pte_ptl;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002233 int isolated = 0, result = 0;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002234 unsigned long hstart, hend;
Johannes Weiner00501b52014-08-08 14:19:20 -07002235 struct mem_cgroup *memcg;
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07002236 unsigned long mmun_start; /* For mmu_notifiers */
2237 unsigned long mmun_end; /* For mmu_notifiers */
Michal Hocko3b363692015-04-15 16:13:29 -07002238 gfp_t gfp;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002239
2240 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
Andrea Arcangeli692e0b32011-05-24 17:12:14 -07002241
Michal Hocko3b363692015-04-15 16:13:29 -07002242 /* Only allocate from the target node */
2243 gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
2244 __GFP_THISNODE;
2245
Xiao Guangrong26234f32012-10-08 16:29:51 -07002246 /* release the mmap_sem read lock. */
Aaron Tomlind6669d62015-11-06 16:28:52 -08002247 new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002248 if (!new_page) {
2249 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2250 goto out_nolock;
2251 }
Andrea Arcangelice83d212011-01-13 15:47:06 -08002252
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08002253 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002254 result = SCAN_CGROUP_CHARGE_FAIL;
2255 goto out_nolock;
2256 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002257
2258 /*
2259 * Prevent all access to the pagetables, with the exception of
2260 * gup_fast, which is handled later by the ptep_clear_flush, and of
2261 * the VM, which is handled by the anon_vma lock + PG_lock.
2262 */
2263 down_write(&mm->mmap_sem);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002264 if (unlikely(khugepaged_test_exit(mm))) {
2265 result = SCAN_ANY_PROCESS;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002266 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002267 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002268
2269 vma = find_vma(mm, address);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002270 if (!vma) {
2271 result = SCAN_VMA_NULL;
Libina8f531eb2013-09-11 14:20:38 -07002272 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002273 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002274 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2275 hend = vma->vm_end & HPAGE_PMD_MASK;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002276 if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
2277 result = SCAN_ADDRESS_RANGE;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002278 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002279 }
2280 if (!hugepage_vma_check(vma)) {
2281 result = SCAN_VMA_CHECK;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002282 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002283 }
Bob Liu62190492012-12-11 16:00:37 -08002284 pmd = mm_find_pmd(mm, address);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002285 if (!pmd) {
2286 result = SCAN_PMD_NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002287 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002288 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002289
Ingo Molnar4fc3f1d2012-12-02 19:56:50 +00002290 anon_vma_lock_write(vma->anon_vma);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002291
2292 pte = pte_offset_map(pmd, address);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002293 pte_ptl = pte_lockptr(mm, pmd);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002294
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07002295 mmun_start = address;
2296 mmun_end = address + HPAGE_PMD_SIZE;
2297 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002298 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
Andrea Arcangeliba761492011-01-13 15:46:58 -08002299 /*
2300 * After this gup_fast can't run anymore. This also removes
2301 * any huge TLB entry from the CPU so we won't allow
2302 * huge and small TLB entries for the same virtual address
2303 * huge and small TLB entries for the same virtual address,
2304 * avoiding the risk of CPU bugs in that area.
Aneesh Kumar K.V15a25b22015-06-24 16:57:39 -07002305 _pmd = pmdp_collapse_flush(vma, address, pmd);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002306 spin_unlock(pmd_ptl);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07002307 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002308
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002309 spin_lock(pte_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002310 isolated = __collapse_huge_page_isolate(vma, address, pte);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002311 spin_unlock(pte_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002312
2313 if (unlikely(!isolated)) {
Johannes Weiner453c7192011-01-20 14:44:18 -08002314 pte_unmap(pte);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002315 spin_lock(pmd_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002316 BUG_ON(!pmd_none(*pmd));
Aneesh Kumar K.V7c342512013-05-24 15:55:21 -07002317 /*
2318 * We can only use set_pmd_at when establishing
2319 * hugepmds and never for establishing regular pmds that
2320 * point to regular pagetables. Use pmd_populate for that
2321 */
2322 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002323 spin_unlock(pmd_ptl);
Konstantin Khlebnikov08b52702013-02-22 16:34:40 -08002324 anon_vma_unlock_write(vma->anon_vma);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002325 result = SCAN_FAIL;
Andrea Arcangelice83d212011-01-13 15:47:06 -08002326 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002327 }
2328
2329 /*
2330 * All pages are isolated and locked so anon_vma rmap
2331 * can't run anymore.
2332 */
Konstantin Khlebnikov08b52702013-02-22 16:34:40 -08002333 anon_vma_unlock_write(vma->anon_vma);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002334
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002335 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
Johannes Weiner453c7192011-01-20 14:44:18 -08002336 pte_unmap(pte);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002337 __SetPageUptodate(new_page);
2338 pgtable = pmd_pgtable(_pmd);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002339
Kirill A. Shutemov31223592013-09-12 15:14:01 -07002340 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2341 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002342
2343 /*
2344 * spin_lock() below is not the equivalent of smp_wmb(), so
2345 * this smp_wmb() is needed to prevent the copy_huge_page writes
2346 * from becoming visible after the set_pmd_at() write.
2347 */
2348 smp_wmb();
2349
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002350 spin_lock(pmd_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002351 BUG_ON(!pmd_none(*pmd));
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08002352 page_add_new_anon_rmap(new_page, vma, address, true);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08002353 mem_cgroup_commit_charge(new_page, memcg, false, true);
Johannes Weiner00501b52014-08-08 14:19:20 -07002354 lru_cache_add_active_or_unevictable(new_page, vma);
Aneesh Kumar K.Vfce144b2013-06-05 17:14:06 -07002355 pgtable_trans_huge_deposit(mm, pmd, pgtable);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002356 set_pmd_at(mm, address, pmd, _pmd);
David Millerb113da62012-10-08 16:34:25 -07002357 update_mmu_cache_pmd(vma, address, pmd);
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08002358 spin_unlock(pmd_ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002359
2360 *hpage = NULL;
Xiao Guangrong420256ef2012-10-08 16:29:49 -07002361
Andrea Arcangeliba761492011-01-13 15:46:58 -08002362 khugepaged_pages_collapsed++;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002363 result = SCAN_SUCCEED;
Andrea Arcangelice83d212011-01-13 15:47:06 -08002364out_up_write:
Andrea Arcangeliba761492011-01-13 15:46:58 -08002365 up_write(&mm->mmap_sem);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002366 trace_mm_collapse_huge_page(mm, isolated, result);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002367 return;
2368
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002369out_nolock:
2370 trace_mm_collapse_huge_page(mm, isolated, result);
2371 return;
Andrea Arcangelice83d212011-01-13 15:47:06 -08002372out:
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08002373 mem_cgroup_cancel_charge(new_page, memcg, true);
Andrea Arcangelice83d212011-01-13 15:47:06 -08002374 goto out_up_write;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002375}
2376
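/*
 * khugepaged_scan_pmd() examines one pmd-sized range of a vma and decides
 * whether it is worth collapsing: the ptes must map anonymous, non-compound,
 * unpinned LRU pages, at least one of them writable and referenced, with
 * only a limited number of empty ptes. Node hits are recorded in
 * khugepaged_node_load[]; when the range qualifies, collapse_huge_page() is
 * called (which releases the mmap_sem) and 1 is returned.
 */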
2377static int khugepaged_scan_pmd(struct mm_struct *mm,
2378 struct vm_area_struct *vma,
2379 unsigned long address,
2380 struct page **hpage)
2381{
Andrea Arcangeliba761492011-01-13 15:46:58 -08002382 pmd_t *pmd;
2383 pte_t *pte, *_pte;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002384 int ret = 0, none_or_zero = 0, result = 0;
2385 struct page *page = NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002386 unsigned long _address;
2387 spinlock_t *ptl;
David Rientjes00ef2d22013-02-22 16:35:36 -08002388 int node = NUMA_NO_NODE;
Ebru Akagunduz10359212015-02-11 15:28:28 -08002389 bool writable = false, referenced = false;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002390
2391 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2392
Bob Liu62190492012-12-11 16:00:37 -08002393 pmd = mm_find_pmd(mm, address);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002394 if (!pmd) {
2395 result = SCAN_PMD_NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002396 goto out;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002397 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002398
Bob Liu9f1b8682013-11-12 15:07:37 -08002399 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
Andrea Arcangeliba761492011-01-13 15:46:58 -08002400 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2401 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2402 _pte++, _address += PAGE_SIZE) {
2403 pte_t pteval = *_pte;
Ebru Akagunduzca0984c2015-04-14 15:45:24 -07002404 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
Andrea Arcangelic1294d02015-09-04 15:46:27 -07002405 if (!userfaultfd_armed(vma) &&
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002406 ++none_or_zero <= khugepaged_max_ptes_none) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08002407 continue;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002408 } else {
2409 result = SCAN_EXCEED_NONE_PTE;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002410 goto out_unmap;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002411 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002412 }
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002413 if (!pte_present(pteval)) {
2414 result = SCAN_PTE_NON_PRESENT;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002415 goto out_unmap;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002416 }
Ebru Akagunduz10359212015-02-11 15:28:28 -08002417 if (pte_write(pteval))
2418 writable = true;
2419
Andrea Arcangeliba761492011-01-13 15:46:58 -08002420 page = vm_normal_page(vma, _address, pteval);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002421 if (unlikely(!page)) {
2422 result = SCAN_PAGE_NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002423 goto out_unmap;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002424 }
Kirill A. Shutemovb1caa952016-01-15 16:52:39 -08002425
2426 /* TODO: teach khugepaged to collapse THP mapped with pte */
2427 if (PageCompound(page)) {
2428 result = SCAN_PAGE_COMPOUND;
2429 goto out_unmap;
2430 }
2431
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002432 /*
Bob Liu9f1b8682013-11-12 15:07:37 -08002433 * Record which node the original page is from and save this
2434 * information to khugepaged_node_load[].
2435 * Khugepaged will allocate the hugepage from the node that has
2436 * the max hit record.
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002437 */
Bob Liu9f1b8682013-11-12 15:07:37 -08002438 node = page_to_nid(page);
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002439 if (khugepaged_scan_abort(node)) {
2440 result = SCAN_SCAN_ABORT;
David Rientjes14a4e212014-08-06 16:07:29 -07002441 goto out_unmap;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002442 }
Bob Liu9f1b8682013-11-12 15:07:37 -08002443 khugepaged_node_load[node]++;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002444 if (!PageLRU(page)) {
2445 result = SCAN_PAGE_LRU;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002446 goto out_unmap;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002447 }
2448 if (PageLocked(page)) {
2449 result = SCAN_PAGE_LOCK;
2450 goto out_unmap;
2451 }
2452 if (!PageAnon(page)) {
2453 result = SCAN_PAGE_ANON;
2454 goto out_unmap;
2455 }
2456
Ebru Akagunduz10359212015-02-11 15:28:28 -08002457 /*
2458 * cannot use mapcount: can't collapse if there's a gup pin.
2459 * The page must only be referenced by the scanned process
2460 * and page swap cache.
2461 */
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002462 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2463 result = SCAN_PAGE_COUNT;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002464 goto out_unmap;
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002465 }
Vladimir Davydov33c3fc72015-09-09 15:35:45 -07002466 if (pte_young(pteval) ||
2467 page_is_young(page) || PageReferenced(page) ||
Andrea Arcangeli8ee53822011-01-13 15:47:10 -08002468 mmu_notifier_test_young(vma->vm_mm, address))
Ebru Akagunduz10359212015-02-11 15:28:28 -08002469 referenced = true;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002470 }
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002471 if (writable) {
2472 if (referenced) {
2473 result = SCAN_SUCCEED;
2474 ret = 1;
2475 } else {
2476 result = SCAN_NO_REFERENCED_PAGE;
2477 }
2478 } else {
2479 result = SCAN_PAGE_RO;
2480 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002481out_unmap:
2482 pte_unmap_unlock(pte, ptl);
Bob Liu9f1b8682013-11-12 15:07:37 -08002483 if (ret) {
2484 node = khugepaged_find_target_node();
Andrea Arcangelice83d212011-01-13 15:47:06 -08002485 /* collapse_huge_page will return with the mmap_sem released */
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002486 collapse_huge_page(mm, address, hpage, vma, node);
Bob Liu9f1b8682013-11-12 15:07:37 -08002487 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002488out:
Ebru Akagunduz7d2eba02016-01-14 15:22:19 -08002489 trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
2490 none_or_zero, result);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002491 return ret;
2492}
2493
2494static void collect_mm_slot(struct mm_slot *mm_slot)
2495{
2496 struct mm_struct *mm = mm_slot->mm;
2497
Hugh Dickinsb9980cd2012-02-08 17:13:40 -08002498 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
Andrea Arcangeliba761492011-01-13 15:46:58 -08002499
2500 if (khugepaged_test_exit(mm)) {
2501 /* free mm_slot */
Sasha Levin43b5fbb2013-02-22 16:32:27 -08002502 hash_del(&mm_slot->hash);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002503 list_del(&mm_slot->mm_node);
2504
2505 /*
2506 * Not strictly needed because the mm exited already.
2507 *
2508 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2509 */
2510
2511 /* khugepaged_mm_lock actually not necessary for the below */
2512 free_mm_slot(mm_slot);
2513 mmdrop(mm);
2514 }
2515}
2516
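/*
 * Scan up to @pages ptes starting from the saved cursor
 * (khugepaged_scan.mm_slot / khugepaged_scan.address), walking vmas and
 * mm_slots as needed. The cursor is left at the next address to look at,
 * so the following call resumes where this one stopped. Returns the
 * progress made, measured in ptes scanned.
 */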
2517static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2518 struct page **hpage)
H Hartley Sweeten2f1da642011-10-31 17:09:25 -07002519 __releases(&khugepaged_mm_lock)
2520 __acquires(&khugepaged_mm_lock)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002521{
2522 struct mm_slot *mm_slot;
2523 struct mm_struct *mm;
2524 struct vm_area_struct *vma;
2525 int progress = 0;
2526
2527 VM_BUG_ON(!pages);
Hugh Dickinsb9980cd2012-02-08 17:13:40 -08002528 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
Andrea Arcangeliba761492011-01-13 15:46:58 -08002529
2530 if (khugepaged_scan.mm_slot)
2531 mm_slot = khugepaged_scan.mm_slot;
2532 else {
2533 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2534 struct mm_slot, mm_node);
2535 khugepaged_scan.address = 0;
2536 khugepaged_scan.mm_slot = mm_slot;
2537 }
2538 spin_unlock(&khugepaged_mm_lock);
2539
2540 mm = mm_slot->mm;
2541 down_read(&mm->mmap_sem);
2542 if (unlikely(khugepaged_test_exit(mm)))
2543 vma = NULL;
2544 else
2545 vma = find_vma(mm, khugepaged_scan.address);
2546
2547 progress++;
2548 for (; vma; vma = vma->vm_next) {
2549 unsigned long hstart, hend;
2550
2551 cond_resched();
2552 if (unlikely(khugepaged_test_exit(mm))) {
2553 progress++;
2554 break;
2555 }
Bob Liufa475e52012-12-11 16:00:39 -08002556 if (!hugepage_vma_check(vma)) {
2557skip:
Andrea Arcangeliba761492011-01-13 15:46:58 -08002558 progress++;
2559 continue;
2560 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002561 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2562 hend = vma->vm_end & HPAGE_PMD_MASK;
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002563 if (hstart >= hend)
2564 goto skip;
2565 if (khugepaged_scan.address > hend)
2566 goto skip;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002567 if (khugepaged_scan.address < hstart)
2568 khugepaged_scan.address = hstart;
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002569 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002570
2571 while (khugepaged_scan.address < hend) {
2572 int ret;
2573 cond_resched();
2574 if (unlikely(khugepaged_test_exit(mm)))
2575 goto breakouterloop;
2576
2577 VM_BUG_ON(khugepaged_scan.address < hstart ||
2578 khugepaged_scan.address + HPAGE_PMD_SIZE >
2579 hend);
2580 ret = khugepaged_scan_pmd(mm, vma,
2581 khugepaged_scan.address,
2582 hpage);
2583 /* move to next address */
2584 khugepaged_scan.address += HPAGE_PMD_SIZE;
2585 progress += HPAGE_PMD_NR;
2586 if (ret)
2587 /* we released mmap_sem so break loop */
2588 goto breakouterloop_mmap_sem;
2589 if (progress >= pages)
2590 goto breakouterloop;
2591 }
2592 }
2593breakouterloop:
2594 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2595breakouterloop_mmap_sem:
2596
2597 spin_lock(&khugepaged_mm_lock);
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002598 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002599 /*
2600 * Release the current mm_slot if this mm is about to die, or
2601 * if we scanned all vmas of this mm.
2602 */
2603 if (khugepaged_test_exit(mm) || !vma) {
2604 /*
2605 * Make sure that if mm_users is reaching zero while
2606 * khugepaged runs here, khugepaged_exit will find
2607 * mm_slot not pointing to the exiting mm.
2608 */
2609 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2610 khugepaged_scan.mm_slot = list_entry(
2611 mm_slot->mm_node.next,
2612 struct mm_slot, mm_node);
2613 khugepaged_scan.address = 0;
2614 } else {
2615 khugepaged_scan.mm_slot = NULL;
2616 khugepaged_full_scans++;
2617 }
2618
2619 collect_mm_slot(mm_slot);
2620 }
2621
2622 return progress;
2623}
2624
2625static int khugepaged_has_work(void)
2626{
2627 return !list_empty(&khugepaged_scan.mm_head) &&
2628 khugepaged_enabled();
2629}
2630
2631static int khugepaged_wait_event(void)
2632{
2633 return !list_empty(&khugepaged_scan.mm_head) ||
Xiao Guangrong2017c0b2012-10-08 16:29:44 -07002634 kthread_should_stop();
Andrea Arcangeliba761492011-01-13 15:46:58 -08002635}
2636
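/*
 * One scan pass of the khugepaged thread: keep calling
 * khugepaged_scan_mm_slot() until the khugepaged_pages_to_scan budget for
 * this wakeup is used up, a huge page cannot be preallocated, or the
 * thread is asked to stop or freeze.
 */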
Xiao Guangrongd5169042012-10-08 16:29:48 -07002637static void khugepaged_do_scan(void)
2638{
2639 struct page *hpage = NULL;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002640 unsigned int progress = 0, pass_through_head = 0;
2641 unsigned int pages = khugepaged_pages_to_scan;
Xiao Guangrongd5169042012-10-08 16:29:48 -07002642 bool wait = true;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002643
2644 barrier(); /* write khugepaged_pages_to_scan to local stack */
2645
2646 while (progress < pages) {
Xiao Guangrong26234f32012-10-08 16:29:51 -07002647 if (!khugepaged_prealloc_page(&hpage, &wait))
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002648 break;
Xiao Guangrong26234f32012-10-08 16:29:51 -07002649
Xiao Guangrong420256ef2012-10-08 16:29:49 -07002650 cond_resched();
Andrea Arcangeliba761492011-01-13 15:46:58 -08002651
Jiri Kosinacd092412015-06-24 16:56:07 -07002652 if (unlikely(kthread_should_stop() || try_to_freeze()))
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002653 break;
2654
Andrea Arcangeliba761492011-01-13 15:46:58 -08002655 spin_lock(&khugepaged_mm_lock);
2656 if (!khugepaged_scan.mm_slot)
2657 pass_through_head++;
2658 if (khugepaged_has_work() &&
2659 pass_through_head < 2)
2660 progress += khugepaged_scan_mm_slot(pages - progress,
Xiao Guangrongd5169042012-10-08 16:29:48 -07002661 &hpage);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002662 else
2663 progress = pages;
2664 spin_unlock(&khugepaged_mm_lock);
2665 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002666
Xiao Guangrongd5169042012-10-08 16:29:48 -07002667 if (!IS_ERR_OR_NULL(hpage))
2668 put_page(hpage);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002669}
2670
Xiao Guangrong2017c0b2012-10-08 16:29:44 -07002671static void khugepaged_wait_work(void)
2672{
Xiao Guangrong2017c0b2012-10-08 16:29:44 -07002673 if (khugepaged_has_work()) {
2674 if (!khugepaged_scan_sleep_millisecs)
2675 return;
2676
2677 wait_event_freezable_timeout(khugepaged_wait,
2678 kthread_should_stop(),
2679 msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2680 return;
2681 }
2682
2683 if (khugepaged_enabled())
2684 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2685}
2686
Andrea Arcangeliba761492011-01-13 15:46:58 -08002687static int khugepaged(void *none)
2688{
2689 struct mm_slot *mm_slot;
2690
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002691 set_freezable();
Dongsheng Yang8698a742014-03-11 18:09:12 +08002692 set_user_nice(current, MAX_NICE);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002693
Xiao Guangrongb7231782012-10-08 16:29:54 -07002694 while (!kthread_should_stop()) {
2695 khugepaged_do_scan();
2696 khugepaged_wait_work();
2697 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08002698
2699 spin_lock(&khugepaged_mm_lock);
2700 mm_slot = khugepaged_scan.mm_slot;
2701 khugepaged_scan.mm_slot = NULL;
2702 if (mm_slot)
2703 collect_mm_slot(mm_slot);
2704 spin_unlock(&khugepaged_mm_lock);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002705 return 0;
2706}
2707
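/*
 * Replace a pmd that maps the huge zero page with a page table whose ptes
 * all point at the small zero page, then drop the huge zero page
 * reference the pmd used to hold.
 */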
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002708static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2709 unsigned long haddr, pmd_t *pmd)
2710{
2711 struct mm_struct *mm = vma->vm_mm;
2712 pgtable_t pgtable;
2713 pmd_t _pmd;
2714 int i;
2715
2716 /* leave pmd empty until pte is filled */
2717 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2718
2719 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2720 pmd_populate(mm, &_pmd, pgtable);
2721
2722 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2723 pte_t *pte, entry;
2724 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2725 entry = pte_mkspecial(entry);
2726 pte = pte_offset_map(&_pmd, haddr);
2727 VM_BUG_ON(!pte_none(*pte));
2728 set_pte_at(mm, haddr, pte, entry);
2729 pte_unmap(pte);
2730 }
2731 smp_wmb(); /* make pte visible before pmd */
2732 pmd_populate(mm, pmd, pgtable);
2733 put_huge_zero_page();
2734}
2735
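/*
 * Split one huge pmd into a page table of ptes, with the pmd lock held by
 * the caller. With freeze == false the new ptes map the subpages
 * directly; with freeze == true they are installed as migration entries,
 * which is how freeze_page_vma() keeps the page unreachable while
 * split_huge_page() rearranges the counters.
 */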
2736static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002737 unsigned long haddr, bool freeze)
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002738{
2739 struct mm_struct *mm = vma->vm_mm;
2740 struct page *page;
2741 pgtable_t pgtable;
2742 pmd_t _pmd;
2743 bool young, write;
2744 int i;
2745
2746 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2747 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2748 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2749 VM_BUG_ON(!pmd_trans_huge(*pmd));
2750
2751 count_vm_event(THP_SPLIT_PMD);
2752
2753 if (vma_is_dax(vma)) {
2754 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2755 if (is_huge_zero_pmd(_pmd))
2756 put_huge_zero_page();
2757 return;
2758 } else if (is_huge_zero_pmd(*pmd)) {
2759 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2760 }
2761
2762 page = pmd_page(*pmd);
2763 VM_BUG_ON_PAGE(!page_count(page), page);
2764 atomic_add(HPAGE_PMD_NR - 1, &page->_count);
2765 write = pmd_write(*pmd);
2766 young = pmd_young(*pmd);
2767
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002768 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2769 pmd_populate(mm, &_pmd, pgtable);
2770
2771 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2772 pte_t entry, *pte;
2773 /*
2774 * Note that NUMA hinting access restrictions are not
2775 * transferred to avoid any possibility of altering
2776 * permissions across VMAs.
2777 */
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002778 if (freeze) {
2779 swp_entry_t swp_entry;
2780 swp_entry = make_migration_entry(page + i, write);
2781 entry = swp_entry_to_pte(swp_entry);
2782 } else {
2783 entry = mk_pte(page + i, vma->vm_page_prot);
2784 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2785 if (!write)
2786 entry = pte_wrprotect(entry);
2787 if (!young)
2788 entry = pte_mkold(entry);
2789 }
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002790 pte = pte_offset_map(&_pmd, haddr);
2791 BUG_ON(!pte_none(*pte));
2792 set_pte_at(mm, haddr, pte, entry);
2793 atomic_inc(&page[i]._mapcount);
2794 pte_unmap(pte);
2795 }
2796
2797 /*
2798 * Set PG_double_map before dropping compound_mapcount to avoid
2799 * false-negative page_mapped().
2800 */
2801 if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2802 for (i = 0; i < HPAGE_PMD_NR; i++)
2803 atomic_inc(&page[i]._mapcount);
2804 }
2805
2806 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2807 /* Last compound_mapcount is gone. */
2808 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
2809 if (TestClearPageDoubleMap(page)) {
2810 /* No need in mapcount reference anymore */
2811 for (i = 0; i < HPAGE_PMD_NR; i++)
2812 atomic_dec(&page[i]._mapcount);
2813 }
2814 }
2815
2816 smp_wmb(); /* make pte visible before pmd */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002817 /*
2818 * Up to this point the pmd is present and huge and userland has the
2819 * whole access to the hugepage during the split (which happens in
2820 * place). If we overwrite the pmd with the not-huge version pointing
2821 * to the pte here (which of course we could if all CPUs were bug
2822 * free), userland could trigger a small page size TLB miss on the
2823 * small sized TLB while the hugepage TLB entry is still established in
2824 * the huge TLB. Some CPU doesn't like that.
2825 * the huge TLB. Some CPUs don't like that.
2826 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
2827 * 383 on page 93. Intel should be safe, but it also warns that it's
2828 * only safe if the permission and cache attributes of the two entries
2829 * loaded in the two TLBs are identical (which should be the case here).
2830 * for the same virtual address to be loaded simultaneously. So instead
2831 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2832 * current pmd notpresent (atomically because here the pmd_trans_huge
2833 * and pmd_trans_splitting must remain set at all times on the pmd
2834 * until the split is complete for this pmd), then we flush the SMP TLB
2835 * and finally we write the non-huge version of the pmd entry with
2836 * pmd_populate.
2837 */
2838 pmdp_invalidate(vma, haddr, pmd);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002839 pmd_populate(mm, pmd, pgtable);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002840
2841 if (freeze) {
2842 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2843 page_remove_rmap(page + i, false);
2844 put_page(page + i);
2845 }
2846 }
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002847}
2848
2849void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2850 unsigned long address)
2851{
2852 spinlock_t *ptl;
2853 struct mm_struct *mm = vma->vm_mm;
2854 unsigned long haddr = address & HPAGE_PMD_MASK;
2855
2856 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2857 ptl = pmd_lock(mm, pmd);
2858 if (likely(pmd_trans_huge(*pmd)))
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002859 __split_huge_pmd_locked(vma, pmd, haddr, false);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002860 spin_unlock(ptl);
2861 mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
2862}
2863
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002864static void split_huge_pmd_address(struct vm_area_struct *vma,
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002865 unsigned long address)
2866{
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002867 pgd_t *pgd;
2868 pud_t *pud;
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002869 pmd_t *pmd;
2870
2871 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2872
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002873 pgd = pgd_offset(vma->vm_mm, address);
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002874 if (!pgd_present(*pgd))
2875 return;
2876
2877 pud = pud_offset(pgd, address);
2878 if (!pud_present(*pud))
2879 return;
2880
2881 pmd = pmd_offset(pud, address);
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002882 if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002883 return;
2884 /*
2885 * Caller holds the mmap_sem write mode, so a huge pmd cannot
2886 * materialize from under us.
2887 */
Kirill A. Shutemovad0bed22016-01-15 16:52:53 -08002888 split_huge_pmd(vma, pmd, address);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002889}
2890
Kirill A. Shutemove1b99962015-09-08 14:58:37 -07002891void vma_adjust_trans_huge(struct vm_area_struct *vma,
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002892 unsigned long start,
2893 unsigned long end,
2894 long adjust_next)
2895{
2896 /*
2897 * If the new start address isn't hpage aligned and it could
2898 * previously contain a hugepage: check if we need to split
2899 * a huge pmd.
2900 */
2901 if (start & ~HPAGE_PMD_MASK &&
2902 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2903 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002904 split_huge_pmd_address(vma, start);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002905
2906 /*
2907 * If the new end address isn't hpage aligned and it could
2908 * previously contain a hugepage: check if we need to split
2909 * a huge pmd.
2910 */
2911 if (end & ~HPAGE_PMD_MASK &&
2912 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2913 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002914 split_huge_pmd_address(vma, end);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002915
2916 /*
2917 * If we're also updating vma->vm_next->vm_start, and the new
2918 * vm_next->vm_start isn't page aligned and it could previously
2919 * contain a hugepage: check if we need to split a huge pmd.
2920 */
2921 if (adjust_next > 0) {
2922 struct vm_area_struct *next = vma->vm_next;
2923 unsigned long nstart = next->vm_start;
2924 nstart += adjust_next << PAGE_SHIFT;
2925 if (nstart & ~HPAGE_PMD_MASK &&
2926 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2927 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002928 split_huge_pmd_address(next, nstart);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002929 }
2930}
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002931
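/*
 * "Freezing" a page for split_huge_page(): every pte (or huge pmd) that
 * maps the compound page in this vma is replaced by a migration entry, so
 * no new reference can be taken through the page tables while refcounts
 * and mapcounts are rearranged. unfreeze_page_vma() below undoes this.
 */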
2932static void freeze_page_vma(struct vm_area_struct *vma, struct page *page,
2933 unsigned long address)
2934{
2935 spinlock_t *ptl;
2936 pgd_t *pgd;
2937 pud_t *pud;
2938 pmd_t *pmd;
2939 pte_t *pte;
2940 int i, nr = HPAGE_PMD_NR;
2941
2942 /* Skip pages which don't belong to the VMA */
2943 if (address < vma->vm_start) {
2944 int off = (vma->vm_start - address) >> PAGE_SHIFT;
2945 page += off;
2946 nr -= off;
2947 address = vma->vm_start;
2948 }
2949
2950 pgd = pgd_offset(vma->vm_mm, address);
2951 if (!pgd_present(*pgd))
2952 return;
2953 pud = pud_offset(pgd, address);
2954 if (!pud_present(*pud))
2955 return;
2956 pmd = pmd_offset(pud, address);
2957 ptl = pmd_lock(vma->vm_mm, pmd);
2958 if (!pmd_present(*pmd)) {
2959 spin_unlock(ptl);
2960 return;
2961 }
2962 if (pmd_trans_huge(*pmd)) {
2963 if (page == pmd_page(*pmd))
2964 __split_huge_pmd_locked(vma, pmd, address, true);
2965 spin_unlock(ptl);
2966 return;
2967 }
2968 spin_unlock(ptl);
2969
2970 pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
2971 for (i = 0; i < nr; i++, address += PAGE_SIZE, page++) {
2972 pte_t entry, swp_pte;
2973 swp_entry_t swp_entry;
2974
2975 if (!pte_present(pte[i]))
2976 continue;
2977 if (page_to_pfn(page) != pte_pfn(pte[i]))
2978 continue;
2979 flush_cache_page(vma, address, page_to_pfn(page));
2980 entry = ptep_clear_flush(vma, address, pte + i);
2981 swp_entry = make_migration_entry(page, pte_write(entry));
2982 swp_pte = swp_entry_to_pte(swp_entry);
2983 if (pte_soft_dirty(entry))
2984 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2985 set_pte_at(vma->vm_mm, address, pte + i, swp_pte);
2986 page_remove_rmap(page, false);
2987 put_page(page);
2988 }
2989 pte_unmap_unlock(pte, ptl);
2990}
2991
2992static void freeze_page(struct anon_vma *anon_vma, struct page *page)
2993{
2994 struct anon_vma_chain *avc;
2995 pgoff_t pgoff = page_to_pgoff(page);
2996
2997 VM_BUG_ON_PAGE(!PageHead(page), page);
2998
2999 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff,
3000 pgoff + HPAGE_PMD_NR - 1) {
3001 unsigned long haddr;
3002
3003 haddr = __vma_address(page, avc->vma) & HPAGE_PMD_MASK;
3004 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3005 haddr, haddr + HPAGE_PMD_SIZE);
3006 freeze_page_vma(avc->vma, page, haddr);
3007 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3008 haddr, haddr + HPAGE_PMD_SIZE);
3009 }
3010}
3011
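/*
 * Undo freeze_page_vma(): turn the migration entries pointing at this
 * page range back into present ptes and re-establish the anon rmap.
 */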
3012static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page,
3013 unsigned long address)
3014{
3015 spinlock_t *ptl;
3016 pmd_t *pmd;
3017 pte_t *pte, entry;
3018 swp_entry_t swp_entry;
3019 int i, nr = HPAGE_PMD_NR;
3020
3021 /* Skip pages which don't belong to the VMA */
3022 if (address < vma->vm_start) {
3023 int off = (vma->vm_start - address) >> PAGE_SHIFT;
3024 page += off;
3025 nr -= off;
3026 address = vma->vm_start;
3027 }
3028
3029 pmd = mm_find_pmd(vma->vm_mm, address);
3030 if (!pmd)
3031 return;
3032 pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3033 for (i = 0; i < nr; i++, address += PAGE_SIZE, page++) {
3034 if (!is_swap_pte(pte[i]))
3035 continue;
3036
3037 swp_entry = pte_to_swp_entry(pte[i]);
3038 if (!is_migration_entry(swp_entry))
3039 continue;
3040 if (migration_entry_to_page(swp_entry) != page)
3041 continue;
3042
3043 get_page(page);
3044 page_add_anon_rmap(page, vma, address, false);
3045
3046 entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
3047 entry = pte_mkdirty(entry);
3048 if (is_write_migration_entry(swp_entry))
3049 entry = maybe_mkwrite(entry, vma);
3050
3051 flush_dcache_page(page);
3052 set_pte_at(vma->vm_mm, address, pte + i, entry);
3053
3054 /* No need to invalidate - it was non-present before */
3055 update_mmu_cache(vma, address, pte + i);
3056 }
3057 pte_unmap_unlock(pte, ptl);
3058}
3059
3060static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
3061{
3062 struct anon_vma_chain *avc;
3063 pgoff_t pgoff = page_to_pgoff(page);
3064
3065 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
3066 pgoff, pgoff + HPAGE_PMD_NR - 1) {
3067 unsigned long address = __vma_address(page, avc->vma);
3068
3069 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3070 address, address + HPAGE_PMD_SIZE);
3071 unfreeze_page_vma(avc->vma, page, address);
3072 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3073 address, address + HPAGE_PMD_SIZE);
3074 }
3075}
3076
3077static int total_mapcount(struct page *page)
3078{
3079 int i, ret;
3080
3081 ret = compound_mapcount(page);
3082 for (i = 0; i < HPAGE_PMD_NR; i++)
3083 ret += atomic_read(&page[i]._mapcount) + 1;
3084
3085 if (PageDoubleMap(page))
3086 ret -= HPAGE_PMD_NR;
3087
3088 return ret;
3089}
3090
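/*
 * Finish one tail page of the former compound page: give it its own
 * refcount (its pte mapcount plus one extra reference that
 * __split_huge_page() later drops, or leaves to the caller for @page),
 * copy the relevant flags over from the head and put it on the LRU.
 * Returns the tail's mapcount so the caller can subtract the total from
 * the head's refcount.
 */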
3091static int __split_huge_page_tail(struct page *head, int tail,
3092 struct lruvec *lruvec, struct list_head *list)
3093{
3094 int mapcount;
3095 struct page *page_tail = head + tail;
3096
3097 mapcount = atomic_read(&page_tail->_mapcount) + 1;
3098 VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
3099
3100 /*
3101 * tail_page->_count is zero and not changing from under us. But
3102 * get_page_unless_zero() may be running from under us on the
3103 * tail_page. If we used atomic_set() below instead of atomic_add(), we
3104 * would then run atomic_set() concurrently with
3105 * get_page_unless_zero(), and atomic_set() is implemented in C not
3106 * using locked ops. spin_unlock on x86 sometimes uses locked ops
3107 * because of PPro errata 66, 92, so unless somebody can guarantee
3108 * atomic_set() here would be safe on all archs (and not only on x86),
3109 * it's safer to use atomic_add().
3110 */
3111 atomic_add(mapcount + 1, &page_tail->_count);
3112
3113
3114 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3115 page_tail->flags |= (head->flags &
3116 ((1L << PG_referenced) |
3117 (1L << PG_swapbacked) |
3118 (1L << PG_mlocked) |
3119 (1L << PG_uptodate) |
3120 (1L << PG_active) |
3121 (1L << PG_locked) |
3122 (1L << PG_unevictable)));
3123 page_tail->flags |= (1L << PG_dirty);
3124
3125 /*
3126 * After clearing PageTail the gup refcount can be released.
3127 * Page flags also must be visible before we make the page non-compound.
3128 */
3129 smp_wmb();
3130
3131 clear_compound_head(page_tail);
3132
3133 if (page_is_young(head))
3134 set_page_young(page_tail);
3135 if (page_is_idle(head))
3136 set_page_idle(page_tail);
3137
3138 /* ->mapping in first tail page is compound_mapcount */
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08003139 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08003140 page_tail);
3141 page_tail->mapping = head->mapping;
3142
3143 page_tail->index = head->index + tail;
3144 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
3145 lru_add_page_tail(head, page_tail, lruvec, list);
3146
3147 return mapcount;
3148}
3149
3150static void __split_huge_page(struct page *page, struct list_head *list)
3151{
3152 struct page *head = compound_head(page);
3153 struct zone *zone = page_zone(head);
3154 struct lruvec *lruvec;
3155 int i, tail_mapcount;
3156
3157 /* prevent PageLRU from going away from under us, and freeze lru stats */
3158 spin_lock_irq(&zone->lru_lock);
3159 lruvec = mem_cgroup_page_lruvec(head, zone);
3160
3161 /* complete memcg works before add pages to LRU */
3162 mem_cgroup_split_huge_fixup(head);
3163
3164 tail_mapcount = 0;
3165 for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
3166 tail_mapcount += __split_huge_page_tail(head, i, lruvec, list);
3167 atomic_sub(tail_mapcount, &head->_count);
3168
3169 ClearPageCompound(head);
3170 spin_unlock_irq(&zone->lru_lock);
3171
3172 unfreeze_page(page_anon_vma(head), head);
3173
3174 for (i = 0; i < HPAGE_PMD_NR; i++) {
3175 struct page *subpage = head + i;
3176 if (subpage == page)
3177 continue;
3178 unlock_page(subpage);
3179
3180 /*
3181 * Subpages may be freed if there wasn't any mapping,
3182 * e.g. if add_to_swap() is running on an lru page that
3183 * had its mapping zapped. Freeing these pages requires
3184 * taking the lru_lock, so we do the put_page of the
3185 * tail pages after the split is complete.
3186 */
3187 put_page(subpage);
3188 }
3189}
3190
3191/*
3192 * This function splits a huge page into normal pages. @page can point to any
3193 * subpage of the huge page to split. Splitting doesn't change the position of @page.
3194 *
3195 * Only the caller may hold a pin on the @page, otherwise the split fails with -EBUSY.
3196 * The huge page must be locked.
3197 *
3198 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
3199 *
3200 * Both head page and tail pages will inherit mapping, flags, and so on from
3201 * the hugepage.
3202 *
3203 * The GUP pin and PG_locked are transferred to @page. The rest of the
3204 * subpages can be freed if they are not mapped.
3205 *
3206 * Returns 0 if the hugepage is split successfully.
3207 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
3208 * us.
3209 */
3210int split_huge_page_to_list(struct page *page, struct list_head *list)
3211{
3212 struct page *head = compound_head(page);
3213 struct anon_vma *anon_vma;
3214 int count, mapcount, ret;
3215
3216 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
3217 VM_BUG_ON_PAGE(!PageAnon(page), page);
3218 VM_BUG_ON_PAGE(!PageLocked(page), page);
3219 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
3220 VM_BUG_ON_PAGE(!PageCompound(page), page);
3221
3222 /*
3223 * The caller does not necessarily hold an mmap_sem that would prevent
3224 * the anon_vma disappearing, so we first take a reference to it
3225 * and then lock the anon_vma for write. This is similar to
3226 * page_lock_anon_vma_read except the write lock is taken to serialise
3227 * against parallel split or collapse operations.
3228 */
3229 anon_vma = page_get_anon_vma(head);
3230 if (!anon_vma) {
3231 ret = -EBUSY;
3232 goto out;
3233 }
3234 anon_vma_lock_write(anon_vma);
3235
3236 /*
3237 * Racy check if we can split the page, before freeze_page() will
3238 * split PMDs
3239 */
3240 if (total_mapcount(head) != page_count(head) - 1) {
3241 ret = -EBUSY;
3242 goto out_unlock;
3243 }
3244
3245 freeze_page(anon_vma, head);
3246 VM_BUG_ON_PAGE(compound_mapcount(head), head);
3247
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08003248 /* Prevent deferred_split_scan() touching ->_count */
3249 spin_lock(&split_queue_lock);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08003250 count = page_count(head);
3251 mapcount = total_mapcount(head);
3252 if (mapcount == count - 1) {
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08003253 if (!list_empty(page_deferred_list(head))) {
3254 split_queue_len--;
3255 list_del(page_deferred_list(head));
3256 }
3257 spin_unlock(&split_queue_lock);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08003258 __split_huge_page(page, list);
3259 ret = 0;
3260 } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount > count - 1) {
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08003261 spin_unlock(&split_queue_lock);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08003262 pr_alert("total_mapcount: %u, page_count(): %u\n",
3263 mapcount, count);
3264 if (PageTail(page))
3265 dump_page(head, NULL);
3266 dump_page(page, "total_mapcount(head) > page_count(head) - 1");
3267 BUG();
3268 } else {
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08003269 spin_unlock(&split_queue_lock);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08003270 unfreeze_page(anon_vma, head);
3271 ret = -EBUSY;
3272 }
3273
3274out_unlock:
3275 anon_vma_unlock_write(anon_vma);
3276 put_anon_vma(anon_vma);
3277out:
3278 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3279 return ret;
3280}
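
/*
 * Callers that do not care where the tail pages end up normally go
 * through the split_huge_page() helper, which (as assumed here, see its
 * definition in the huge_mm header) just passes a NULL list:
 *
 *	static inline int split_huge_page(struct page *page)
 *	{
 *		return split_huge_page_to_list(page, NULL);
 *	}
 *
 * deferred_split_scan() below uses it for the queued pages.
 */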
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08003281
3282void free_transhuge_page(struct page *page)
3283{
3284 unsigned long flags;
3285
3286 spin_lock_irqsave(&split_queue_lock, flags);
3287 if (!list_empty(page_deferred_list(page))) {
3288 split_queue_len--;
3289 list_del(page_deferred_list(page));
3290 }
3291 spin_unlock_irqrestore(&split_queue_lock, flags);
3292 free_compound_page(page);
3293}
3294
3295void deferred_split_huge_page(struct page *page)
3296{
3297 unsigned long flags;
3298
3299 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3300
3301 spin_lock_irqsave(&split_queue_lock, flags);
3302 if (list_empty(page_deferred_list(page))) {
3303 list_add_tail(page_deferred_list(page), &split_queue);
3304 split_queue_len++;
3305 }
3306 spin_unlock_irqrestore(&split_queue_lock, flags);
3307}
3308
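/*
 * The deferred-split shrinker: huge pages queued by
 * deferred_split_huge_page() (typically once they have become partially
 * unmapped) are split lazily when the shrinker runs under memory
 * pressure. The shrinker itself is assumed to be registered elsewhere in
 * this file with register_shrinker(&deferred_split_shrinker).
 */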
3309static unsigned long deferred_split_count(struct shrinker *shrink,
3310 struct shrink_control *sc)
3311{
3312 /*
3313 * Splitting a page from split_queue will free up at least one page,
3314 * at most HPAGE_PMD_NR - 1. We don't track the exact number, so
3315 * let's use HPAGE_PMD_NR / 2 as a ballpark.
3316 */
3317 return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
3318}
3319
3320static unsigned long deferred_split_scan(struct shrinker *shrink,
3321 struct shrink_control *sc)
3322{
3323 unsigned long flags;
3324 LIST_HEAD(list), *pos, *next;
3325 struct page *page;
3326 int split = 0;
3327
3328 spin_lock_irqsave(&split_queue_lock, flags);
3329 list_splice_init(&split_queue, &list);
3330
3331 /* Take pin on all head pages to avoid freeing them under us */
3332 list_for_each_safe(pos, next, &list) {
3333 page = list_entry((void *)pos, struct page, mapping);
3334 page = compound_head(page);
3335 /* race with put_compound_page() */
3336 if (!get_page_unless_zero(page)) {
3337 list_del_init(page_deferred_list(page));
3338 split_queue_len--;
3339 }
3340 }
3341 spin_unlock_irqrestore(&split_queue_lock, flags);
3342
3343 list_for_each_safe(pos, next, &list) {
3344 page = list_entry((void *)pos, struct page, mapping);
3345 lock_page(page);
3346 /* split_huge_page() removes page from list on success */
3347 if (!split_huge_page(page))
3348 split++;
3349 unlock_page(page);
3350 put_page(page);
3351 }
3352
3353 spin_lock_irqsave(&split_queue_lock, flags);
3354 list_splice_tail(&list, &split_queue);
3355 spin_unlock_irqrestore(&split_queue_lock, flags);
3356
3357 return split * HPAGE_PMD_NR / 2;
3358}
3359
3360static struct shrinker deferred_split_shrinker = {
3361 .count_objects = deferred_split_count,
3362 .scan_objects = deferred_split_scan,
3363 .seeks = DEFAULT_SEEKS,
3364};