#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static unsigned long total_usage;

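/*
 * On PREEMPT_RT the page_cgroup lock is a real spinlock (pcg_lock) and
 * must be initialized for every entry; on non-RT kernels this is a no-op.
 */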
static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	for (; nr_pages; nr_pages--, pc++)
		spin_lock_init(&pc->pcg_lock);
#endif
}

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

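/*
 * With flat memory, each node carries a single page_cgroup array covering
 * all of its spanned pages, so a lookup is just an offset from the node's
 * start pfn.
 */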
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

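/* Allocate one node's page_cgroup array from boot memory. */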
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	page_cgroup_lock_init(base, nr_pages);
	return 0;
}

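/*
 * Set up page_cgroup arrays for all online nodes during early boot.
 * Failure here is fatal: the memory controller cannot run without them.
 */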
void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
			 " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

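/*
 * With sparse memory, each mem_section carries its own page_cgroup table,
 * stored biased by the section's start pfn so it can be indexed by pfn
 * directly.
 */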
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_cgroup)
		return NULL;
#endif
	return section->page_cgroup + pfn;
}

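/*
 * Try a node-local contiguous allocation first; if that fails (e.g. due
 * to fragmentation), fall back to vmalloc space.
 */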
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}

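/* Allocate and install the page_cgroup table for the section holding @pfn. */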
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	page_cgroup_lock_init(base, PAGES_PER_SECTION);

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
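/* Free a section's page_cgroup table, whichever allocator backed it. */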
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

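/* Tear down the page_cgroup table of the section holding @pfn, if any. */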
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

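/*
 * Called when memory is being onlined: populate page_cgroup tables for
 * every present section in the range, rolling everything back on failure.
 */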
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

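/* Called on MEM_OFFLINE and MEM_CANCEL_ONLINE: drop the tables in the range. */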
int __meminit offline_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

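/* Memory-hotplug notifier: keep the page_cgroup tables in sync. */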
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

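/*
 * Boot-time initialization for sparse memory: walk every node with memory
 * and allocate a page_cgroup table for each section it owns.
 */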
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap. Some architectures
			 * have an interleaved node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_MEMCG_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from swap.
 *
 * This means,
 *  - there is no race in "exchange" when accessed via SwapCache, because
 *    SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of this entry and thus
 *    no race.
 * Updates of the id are nevertheless serialized by ctrl->lock.
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

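/*
 * Map a swap entry to its swap_cgroup record. The per-swapfile map is an
 * array of pages of SC_PER_PAGE records each; the entry's offset selects
 * the page and the slot within it.
 */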
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id on success, 0 on failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns ID of mem_cgroup on success, 0 on failure. (0 is an invalid ID)
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}

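/*
 * Called at swapon: allocate the swap_cgroup map for a swap area, one
 * record per swap slot, with the page-pointer array sized from max_pages.
 */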
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

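/* Called at swapoff: tear down the swap area's swap_cgroup map. */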
void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];

			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif