/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/locallock.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);

static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never put on any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

/**
 * Two special cases here: we could avoid taking compound_lock_irqsave
 * and could skip the tail refcounting (in _mapcount).
 *
 * 1. Hugetlbfs page:
 *
 *    PageHeadHuge will remain true until the compound page
 *    is released and enters the buddy allocator, and it could
 *    not be split by __split_huge_page_refcount().
 *
 *    So if we see PageHeadHuge set, and we have the tail page pin,
 *    then we can safely put the head page.
 *
 * 2. Slab THP page:
 *
 *    PG_slab is cleared before the slab frees the head page, and
 *    tail pin cannot be the last reference left on the head page,
 *    because the slab code is free to reuse the compound page
 *    after a kfree/kmem_cache_free without having to check if
 *    there's any tail pin left.  In turn all tail pins must always be
 *    released while the head is still pinned by the slab code
 *    and so we know PG_slab will still be set too.
 *
 *    So if we see PageSlab set, and we have the tail page pin,
 *    then we can safely put the head page.
 */
static __always_inline
void put_unrefcounted_compound_page(struct page *page_head, struct page *page)
{
	/*
	 * If @page is a THP tail, we must read the tail page
	 * flags after the head page flags. The
	 * __split_huge_page_refcount side enforces write memory barriers
	 * between clearing PageTail and before the head page
	 * can be freed and reallocated.
	 */
	smp_rmb();
	if (likely(PageTail(page))) {
		/*
		 * __split_huge_page_refcount cannot race
		 * here, see the comment above this function.
		 */
		VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
		VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
		if (put_page_testzero(page_head)) {
			/*
			 * If this is the tail of a slab THP page,
			 * the tail pin must not be the last reference
			 * held on the page, because PG_slab cannot
			 * be cleared before all tail pins (which skip
			 * the _mapcount tail refcounting) have been
			 * released.
			 *
			 * If this is the tail of a hugetlbfs page,
			 * the tail pin may be the last reference on
			 * the page instead, because PageHeadHuge will
			 * not go away until the compound page enters
			 * the buddy allocator.
			 */
			VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
			__put_compound_page(page_head);
		}
	} else
		/*
		 * __split_huge_page_refcount ran before us,
		 * @page was a THP tail. The split @page_head
		 * has been freed and reallocated as a slab or
		 * hugetlbfs page of smaller order (only
		 * possible if reallocated as slab on x86).
		 */
		if (put_page_testzero(page))
			__put_single_page(page);
}

static __always_inline
void put_refcounted_compound_page(struct page *page_head, struct page *page)
{
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		unsigned long flags;

		/*
		 * @page_head wasn't a dangling pointer but it may not
		 * be a head page anymore by the time we obtain the
		 * lock. That is ok as long as it can't be freed from
		 * under us.
		 */
		flags = compound_lock_irqsave(page_head);
		if (unlikely(!PageTail(page))) {
			/* __split_huge_page_refcount ran before us */
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				/*
				 * The @page_head may have been freed
				 * and reallocated as a compound page
				 * of smaller order and then freed
				 * again.  All we know is that it
				 * cannot have become: a THP page, a
				 * compound page of higher order, a
				 * tail page.  That is because we
				 * still hold the refcount of the
				 * split THP tail and page_head was
				 * the THP head before the split.
				 */
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
out_put_single:
			if (put_page_testzero(page))
				__put_single_page(page);
			return;
		}
		VM_BUG_ON_PAGE(page_head != page->first_page, page);
		/*
		 * We can release the refcount taken by
		 * get_page_unless_zero() now that
		 * __split_huge_page_refcount() is blocked on the
		 * compound_lock.
		 */
		if (put_page_testzero(page_head))
			VM_BUG_ON_PAGE(1, page_head);
		/* __split_huge_page_refcount will wait now */
		VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
		atomic_dec(&page->_mapcount);
		VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
		compound_unlock_irqrestore(page_head, flags);

		if (put_page_testzero(page_head)) {
			if (PageHead(page_head))
				__put_compound_page(page_head);
			else
				__put_single_page(page_head);
		}
	} else {
		/* @page_head is a dangling pointer */
		VM_BUG_ON_PAGE(PageTail(page), page);
		goto out_put_single;
	}
}

static void put_compound_page(struct page *page)
{
	struct page *page_head;

	/*
	 * We see PageCompound set and PageTail not set, so @page may be:
	 * 1. hugetlbfs head page, or
	 * 2. THP head page.
	 */
	if (likely(!PageTail(page))) {
		if (put_page_testzero(page)) {
			/*
			 * By the time all refcounts have been released
			 * split_huge_page cannot run anymore from under us.
			 */
			if (PageHead(page))
				__put_compound_page(page);
			else
				__put_single_page(page);
		}
		return;
	}

	/*
	 * We see PageCompound set and PageTail set, so @page may be:
	 * 1. a tail hugetlbfs page, or
	 * 2. a tail THP page, or
	 * 3. a split THP page.
	 *
	 * Case 3 is possible, as we may race with
	 * __split_huge_page_refcount tearing down a THP page.
	 */
	page_head = compound_head_by_tail(page);
	if (!__compound_tail_refcounted(page_head))
		put_unrefcounted_compound_page(page_head, page);
	else
		put_refcounted_compound_page(page_head, page);
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
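
/*
 * Illustrative usage sketch (not part of the original source; the names
 * below are hypothetical): the usual reference pairing around put_page().
 * A caller that must keep a page alive across a region takes its own
 * reference and drops it afterwards.
 *
 *	get_page(page);		pin the page for the duration
 *	...use the page...
 *	put_page(page);		the last put frees it (or the compound head)
 */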

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got;
	struct page *page_head = compound_head(page);

	/* See the put_compound_page() comment. */
	if (!__compound_tail_refcounted(page_head)) {
		smp_rmb();
		if (likely(PageTail(page))) {
			/*
			 * This is a hugetlbfs page or a slab
			 * page. __split_huge_page_refcount
			 * cannot race here.
			 */
			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
			__get_page_tail_foll(page, true);
			return true;
		} else {
			/*
			 * __split_huge_page_refcount ran
			 * before us, "page" was a THP
			 * tail. The split page_head has been
			 * freed and reallocated as a slab or
			 * hugetlbfs page of smaller order
			 * (only possible if reallocated as
			 * slab on x86).
			 */
			return false;
		}
	}

	got = false;
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
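
/*
 * Illustrative usage sketch (not part of the original source): callers
 * build a private list threaded through page->lru and hand it to
 * put_pages_list() to drop one reference per page, roughly what the
 * read_cache_pages() error path does with pages it could not insert:
 *
 *	LIST_HEAD(pages);
 *	...
 *	list_add(&page->lru, &pages);
 *	...
 *	put_pages_list(&pages);		the list is empty afterwards
 */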

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must be at least nr_segs long.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
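
/*
 * Illustrative usage sketch (not part of the original source; the buffer
 * name is hypothetical): pinning two page-sized kernel buffers described
 * by kvecs and releasing them again with put_page().
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf,             .iov_len = PAGE_SIZE },
 *		{ .iov_base = buf + PAGE_SIZE, .iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int i, n = get_kernel_pages(kiov, 2, 0, pages);
 *
 *	...use pages[0..n-1]...
 *	for (i = 0; i < n; i++)
 *		put_page(pages[i]);
 */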

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_lock_irqsave(rotate_lock, flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_unlock_irqrestore(rotate_lock, flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_locked_var(swapvec_lock,
						       activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_locked_var(swapvec_lock, activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_locked_var(swapvec_lock, lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
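
/*
 * Illustrative usage sketch (not part of the original source): a page
 * cache hit typically reports the access so that a second touch promotes
 * the page to the active list, roughly:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		mark_page_accessed(page);
 *		...
 *		page_cache_release(page);
 *	}
 */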

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);

	page_cache_get(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_locked_var(swapvec_lock, lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_stat because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (spinlock), which implies preemption disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and is dirty/under writeback, the page
 * can be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by the flusher threads, which is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could race with end_page_writeback,
		 * which can make readahead confusing.  But the race
		 * window is _really_ small and it's a non-critical
		 * problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback has already finished, so move
		 * the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(rotate_lock, flags);
		pagevec_move_tail(pvec);
		local_unlock_irqrestore(rotate_lock, flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as mprotect),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_locked_var(swapvec_lock,
						       lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
	}
}
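
/*
 * Illustrative usage sketch (not part of the original source): roughly the
 * pattern used by invalidate_mapping_pages() - when a page cannot be
 * invalidated (for instance it is dirty or under writeback), deactivate it
 * so that reclaim finds it sooner:
 *
 *	ret = invalidate_inode_page(page);
 *	if (!ret)
 *		deactivate_file_page(page);
 */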

void lru_add_drain(void)
{
	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
	local_unlock_cpu(swapvec_lock);
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}

/**
 * release_pages - batched page_cache_release()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same zone. The lock is held only if zone != NULL.
		 */
		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = NULL;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				lock_batch = 0;
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
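
/*
 * Illustrative usage sketch (not part of the original source; the array
 * size and fill step are hypothetical): dropping a batch of references
 * gathered in an array, instead of calling page_cache_release() once per
 * page.
 *
 *	struct page *pages[16];
 *	int nr;
 *	...fill pages[0..nr-1]...
 *	release_pages(pages, nr, false);
 */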

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_pages,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_pages,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
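
/*
 * Illustrative usage sketch (not part of the original source): the pattern
 * used by truncate/invalidate style loops - look up pages and shadow
 * entries, handle the exceptional entries via @indices, then prune them so
 * the remaining pagevec only holds real pages:
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup_entries(&pvec, mapping, index,
 *				      PAGEVEC_SIZE, indices)) {
 *		...inspect shadow entries using indices[i]...
 *		pagevec_remove_exceptionals(&pvec);
 *		...operate on the remaining pages...
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */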

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
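
/*
 * Illustrative usage sketch (not part of the original source): walking all
 * pages of a mapping in batches, advancing the index past the last page
 * seen in each batch:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *	int i;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *			index = page->index + 1;
 *			...examine page...
 *		}
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */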

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++)
		spin_lock_init(&swapper_spaces[i].tree_lock);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}