#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static unsigned long total_usage;

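/*
 * On PREEMPT_RT kernels (CONFIG_PREEMPT_RT_BASE) each page_cgroup carries
 * a real spinlock (pcg_lock) in place of the bit spinlock, so every entry
 * of a freshly allocated table must have its lock initialized. On
 * non-RT kernels this is a no-op.
 */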
static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	for (; nr_pages; nr_pages--, pc++)
		spin_lock_init(&pc->pcg_lock);
#endif
}

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

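/*
 * With flat/discontig memory models the page_cgroup table is a single
 * per-node array; index it by the pfn's offset from the node's first pfn.
 */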
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

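/*
 * Allocate one struct page_cgroup for every page spanned by the node,
 * from memblock, preferably on the node itself.
 */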
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	page_cgroup_lock_init(base, nr_pages);
	return 0;
}

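/*
 * Boot-time initialization for !SPARSEMEM: allocate the per-node
 * page_cgroup tables for all online nodes, or panic.
 */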
void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
			 " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else	/* !CONFIG_SPARSEMEM */

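/*
 * With SPARSEMEM the table is per memory section; section->page_cgroup
 * is biased by the section's first pfn, so it can be indexed by pfn
 * directly.
 */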
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_cgroup)
		return NULL;
#endif
	return section->page_cgroup + pfn;
}

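/*
 * Try a contiguous node-local allocation first; if that fails, fall
 * back to vmalloc space.
 */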
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

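/*
 * Allocate and install the page_cgroup table for the section containing
 * @pfn, unless it is already present.
 */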
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	page_cgroup_lock_init(base, PAGES_PER_SECTION);

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
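/*
 * Undo alloc_page_cgroup(): the table may live in vmalloc space or in
 * directly mapped pages, so pick the matching free routine.
 */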
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

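/*
 * Tear down the page_cgroup table of the section starting at @pfn and
 * clear the section's reference to it.
 */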
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

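/*
 * Memory hotplug: allocate tables for every present section in the
 * range being onlined; on failure, roll back the sections done so far.
 */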
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

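/*
 * Memory hotplug: free the tables covering the range being offlined.
 */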
int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

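/*
 * Hotplug notifier: allocate tables before a range goes online, and free
 * them again when onlining is cancelled or the range goes offline.
 */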
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

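/*
 * Boot-time initialization for SPARSEMEM: walk every node's pfn range
 * section by section and allocate the tables, then register the memory
 * hotplug notifier.
 */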
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap.
			 * We know some arches can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif	/* !CONFIG_SPARSEMEM */

#ifdef CONFIG_MEMCG_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t	lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short		id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from swap code.
 *
 * This means:
 *  - there is no race in "exchange" when we are accessed via SwapCache,
 *    because the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no other user of the entry,
 *    hence no race.
 * Therefore, no lock is needed around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

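/*
 * Map a swap entry to its swap_cgroup record: pick the control structure
 * of the entry's swap type, then index the right page of the map by the
 * swap offset. Optionally hand the control structure back via @ctrlp.
 */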
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup id to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}

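/*
 * Called at swapon: size the map so that one swap_cgroup record exists
 * for each of @max_pages swap slots, then populate it page by page.
 */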
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

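/*
 * Called at swapoff: detach the map under the mutex, then free its
 * pages and the map array itself.
 */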
void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];

			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif	/* CONFIG_MEMCG_SWAP */