/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;

			/*
			 * THP can not break up slab pages so avoid taking
			 * compound_lock(). Slab performs non-atomic bit ops
			 * on page->flags for better performance. In particular
			 * slab_unlock() in slub used to be a hot path. It is
			 * still hot on arches that do not support
			 * this_cpu_cmpxchg_double().
			 */
			if (PageSlab(page_head)) {
				if (PageTail(page)) {
					if (put_page_testzero(page_head))
						VM_BUG_ON(1);

					atomic_dec(&page->_mapcount);
					goto skip_lock_tail;
				} else
					goto skip_lock;
			}
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
skip_lock:
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

skip_lock_tail:
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
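
/*
 * Illustrative sketch, not part of this file's logic: a typical caller
 * brackets temporary use of a page with get_page()/put_page().  The helper
 * name below is hypothetical.
 *
 *	static void use_page_briefly(struct page *page)
 *	{
 *		get_page(page);		take an extra reference
 *		...touch the page while the reference is held...
 *		put_page(page);		may free it if this was the last ref
 *	}
 */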

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {

		/* See the comment in put_compound_page(). */
		if (PageSlab(page_head)) {
			if (likely(PageTail(page))) {
				__get_page_tail_foll(page, false);
				return true;
			} else {
				put_page(page_head);
				return false;
			}
		}

		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
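
/*
 * Illustrative sketch (hypothetical caller): read_cache_pages()-style code
 * threads pages on a local list via page->lru and hands any leftovers to
 * put_pages_list() on the error path, roughly:
 *
 *	LIST_HEAD(pages);
 *	...list_add(&page->lru, &pages) for each page still holding a ref...
 *	put_pages_list(&pages);
 */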

static void pagevec_lru_move_fn(struct pagevec *pvec,
				void (*move_fn)(struct page *page, void *arg),
				void *arg)
{
	int i;
	struct zone *zone = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		(*move_fn)(page, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
						   page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat;

	reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
	if (!reclaim_stat)
		reclaim_stat = &zone->lruvec.reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
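
/*
 * Illustrative sketch (hypothetical caller): two touches of an inactive,
 * unreferenced page walk it through the transitions documented above:
 *
 *	mark_page_accessed(page);	inactive,unreferenced -> inactive,referenced
 *	mark_page_accessed(page);	inactive,referenced   -> active,unreferenced
 */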

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to an LRU list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}
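
/*
 * Illustrative sketch (assumed caller, simplified): code that has just
 * allocated a page and inserted it into the page cache or anon rmap would
 * typically push it onto the LRU with something like:
 *
 *	lru_cache_add_lru(page, page_is_file_cache(page) ?
 *				LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
 */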

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped but is dirty or under writeback, it can be
 * reclaimed ASAP by setting PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because the
 * VM expects the flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);

	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim can race with end_page_writeback(),
		 * which can confuse readahead.  But the race window is
		 * really small and the problem is not critical.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback ended while it sat in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages, such as one making heavy
	 * use of mprotect, deactivating unevictable pages to accelerate
	 * reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}
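
/*
 * Illustrative sketch (assumed caller, simplified): invalidate_mapping_pages()
 * style code hints the VM about pages it failed to drop, roughly:
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_page(page);
 */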

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
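
/*
 * Illustrative sketch (assumed callers): paths that need every page to be
 * visible on an LRU list (page migration, memory offlining, mlock) typically
 * flush the per-CPU pagevecs first, roughly:
 *
 *	lru_add_drain_all();
 *	...now isolate_lru_page() can find pages that were sitting in pagevecs...
 */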

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru_list(zone, page, page_off_lru(page));
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
		       struct page *page, struct page *page_tail)
{
	int uninitialized_var(active);
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(zone, page_tail, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	add_page_to_lru_list(zone, page, lru);
	update_page_reclaim_stat(zone, page, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
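
/*
 * Illustrative sketch (hypothetical caller): mappings are usually walked in
 * PAGEVEC_SIZE batches, dropping the gang references with pagevec_release()
 * after each batch, roughly:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	int i;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			index = pvec.pages[i]->index + 1;
 *		pagevec_release(&pvec);
 *	}
 */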

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811}