/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

#else
#define do_swap_account		0
#endif

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"writeback",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct reclaim_iter	iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded */
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set,
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter memsw;
	struct page_counter kmem;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/* css_online() has been completed */
	int initialized;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;
	atomic_t	oom_wakeups;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list, but per-memcg;
	 * protected by memcg_slab_mutex */
	struct list_head memcg_slab_caches;
	/* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

#ifdef CONFIG_MEMCG_KMEM
static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return memcg->kmemcg_id >= 0;
}
#endif

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page (including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

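/*
 * Worked example (illustrative, using only the definitions above):
 * MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL) packs type 2 and attribute 0
 * into 0x20000; MEMFILE_TYPE(0x20000) then recovers _OOM_TYPE and
 * MEMFILE_ATTR(0x20000) recovers OOM_CONTROL.
 */
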
/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg->css.id;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id, &memory_cgrp_subsys);
	return mem_cgroup_from_css(css);
}

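/*
 * Illustrative usage sketch, not part of the original file: the id is
 * stable for the css lifetime, so it can be stashed in a compact record
 * (e.g. a swap entry) and resolved back later. The tryget below is an
 * assumption about how a caller would pin the result:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && css_tryget_online(&memcg->css))
 *		... use memcg, then css_put(&memcg->css) ...
 *	rcu_read_unlock();
 */
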
/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) &&
		    css_tryget_online(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size. It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

static void memcg_free_cache_id(int id);

static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		memcg_free_cache_id(memcg->kmemcg_id);
	}
	/*
	 * This check can't live in kmem destruction function,
	 * since the charges will outlive the cgroup
	 */
	WARN_ON(page_counter_read(&memcg->kmem));
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

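/*
 * Worked example (illustrative): with usage at 300 pages and a soft
 * limit of 200 pages, soft_limit_excess() returns 100; once usage drops
 * to or below the soft limit it returns 0, which
 * __mem_cgroup_insert_exceeded() treats as "do not put on the tree".
 */
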
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * the cost of a read and the precision of the value. We may then have a
 * chance to implement a periodic synchronization of the counter in memcg's
 * counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because he accounts memory. Even if we provided a quick-and-fuzzy read, we
 * would always have to visit all online cpus and make the sum. So, for now,
 * unnecessary synchronization is not implemented. (It is just implemented
 * for cpu hotplug.)
 *
 * If there are kernel internal actions which can make use of a not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

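/*
 * Worked example (illustrative): charging one transparent huge page
 * passes nr_pages == HPAGE_PMD_NR (512 with 4K pages and 2M THP), so
 * RSS and RSS_HUGE each grow by 512 pages while PGPGIN is bumped only
 * once; uncharging passes a negative nr_pages and bumps PGPGOUT instead.
 */
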
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

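/*
 * Illustrative example: a caller interested only in file-backed pages
 * on a node would pass lru_mask = BIT(LRU_INACTIVE_FILE) |
 * BIT(LRU_ACTIVE_FILE); the sum then covers every zone of that node.
 */
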
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

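/*
 * Worked example (illustrative): the unsigned counters are compared via
 * signed subtraction so that wraparound behaves like time_after(). With
 * val == ULONG_MAX - 10 and a stale next == ULONG_MAX - 100,
 * (long)next - (long)val == -90 < 0, so the target fires and is
 * re-armed at val + THRESHOLDS_EVENTS_TARGET, which may itself wrap
 * without breaking later comparisons.
 */
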
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		do {
			pos = ACCESS_ONCE(iter->position);
			/*
			 * A racing update may change the position and
			 * put the last reference, hence css_tryget(),
			 * or retry to see the updated position.
			 */
		} while (pos && !css_tryget(&pos->css));
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css)) {
			/*
			 * Make sure the memcg is initialized:
			 * mem_cgroup_css_online() orders the
			 * initialization against setting the flag.
			 */
1127 if (smp_load_acquire(&memcg->initialized))
1128 break;
1129
1130 css_put(css);
1131 }
1132
1133 memcg = NULL;
1134 }
1135
1136 if (reclaim) {
1137 if (cmpxchg(&iter->position, pos, memcg) == pos) {
1138 if (memcg)
1139 css_get(&memcg->css);
1140 if (pos)
1141 css_put(&pos->css);
1142 }
1143
1144 /*
1145 * pairs with css_tryget when dereferencing iter->position
1146 * above.
1147 */
1148 if (pos)
1149 css_put(&pos->css);
1150
1151 if (!memcg)
1152 iter->generation++;
1153 else if (!prev)
1154 reclaim->generation = iter->generation;
1155 }
1156
Michal Hocko542f85f2013-04-29 15:07:15 -07001157out_unlock:
1158 rcu_read_unlock();
Johannes Weiner5ac8fb32014-12-10 15:42:39 -08001159out:
Michal Hockoc40046f2013-04-29 15:07:14 -07001160 if (prev && prev != root)
1161 css_put(&prev->css);
1162
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001163 return memcg;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001164}
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001165
Johannes Weiner56600482012-01-12 17:17:59 -08001166/**
1167 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1168 * @root: hierarchy root
1169 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1170 */
1171void mem_cgroup_iter_break(struct mem_cgroup *root,
1172 struct mem_cgroup *prev)
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001173{
1174 if (!root)
1175 root = root_mem_cgroup;
1176 if (prev && prev != root)
1177 css_put(&prev->css);
1178}
1179
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001180/*
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001181 * Iteration constructs for visiting all cgroups (under a tree). If
1182 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1183 * be used for reference counting.
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001184 */
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001185#define for_each_mem_cgroup_tree(iter, root) \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001186 for (iter = mem_cgroup_iter(root, NULL, NULL); \
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001187 iter != NULL; \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001188 iter = mem_cgroup_iter(root, iter, NULL))
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001189
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001190#define for_each_mem_cgroup(iter) \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001191 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001192 iter != NULL; \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001193 iter = mem_cgroup_iter(NULL, iter, NULL))
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001194
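/*
 * Illustrative sketch (not part of the original file): walking a subtree
 * with the constructs above. The stop condition is arbitrary; the point
 * is that an early exit must go through mem_cgroup_iter_break() so the
 * reference held on @iter is dropped.
 */
static void example_tree_walk(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (atomic_read(&iter->under_oom)) {
			/* break out early: release the iterator reference */
			mem_cgroup_iter_break(root, iter);
			break;
		}
	}
}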
David Rientjes68ae5642012-12-12 13:51:57 -08001195void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
Ying Han456f9982011-05-26 16:25:38 -07001196{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001197 struct mem_cgroup *memcg;
Ying Han456f9982011-05-26 16:25:38 -07001198
Ying Han456f9982011-05-26 16:25:38 -07001199 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001200 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1201 if (unlikely(!memcg))
Ying Han456f9982011-05-26 16:25:38 -07001202 goto out;
1203
1204 switch (idx) {
Ying Han456f9982011-05-26 16:25:38 -07001205 case PGFAULT:
Johannes Weiner0e574a92012-01-12 17:18:35 -08001206 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1207 break;
1208 case PGMAJFAULT:
1209 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
Ying Han456f9982011-05-26 16:25:38 -07001210 break;
1211 default:
1212 BUG();
1213 }
1214out:
1215 rcu_read_unlock();
1216}
David Rientjes68ae5642012-12-12 13:51:57 -08001217EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
Ying Han456f9982011-05-26 16:25:38 -07001218
Johannes Weiner925b7672012-01-12 17:18:15 -08001219/**
1220 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1221 * @zone: zone of the wanted lruvec
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001222 * @memcg: memcg of the wanted lruvec
Johannes Weiner925b7672012-01-12 17:18:15 -08001223 *
1224 * Returns the lru list vector holding pages for the given @zone and
1225 * @memcg. This can be the global zone lruvec, if the memory controller
1226 * is disabled.
1227 */
1228struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1229 struct mem_cgroup *memcg)
1230{
1231 struct mem_cgroup_per_zone *mz;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001232 struct lruvec *lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001233
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001234 if (mem_cgroup_disabled()) {
1235 lruvec = &zone->lruvec;
1236 goto out;
1237 }
Johannes Weiner925b7672012-01-12 17:18:15 -08001238
Jianyu Zhane2318752014-06-06 14:38:20 -07001239 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001240 lruvec = &mz->lruvec;
1241out:
1242 /*
1243 * Since a node can be onlined after the mem_cgroup was created,
1244 * we have to be prepared to initialize lruvec->zone here;
1245 * and if it is offlined and then onlined again, we need to reinitialize it.
1246 */
1247 if (unlikely(lruvec->zone != zone))
1248 lruvec->zone = zone;
1249 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001250}
1251
Johannes Weiner925b7672012-01-12 17:18:15 -08001252/**
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001253 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
Johannes Weiner925b7672012-01-12 17:18:15 -08001254 * @page: the page
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001255 * @zone: zone of the page
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001256 *
1257 * This function is only safe when following the LRU page isolation
1258 * and putback protocol: the LRU lock must be held, and the page must
1259 * either be PageLRU() or the caller must have isolated/allocated it.
Minchan Kim3f58a822011-03-22 16:32:53 -07001260 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001261struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
Minchan Kim3f58a822011-03-22 16:32:53 -07001262{
1263 struct mem_cgroup_per_zone *mz;
Johannes Weiner925b7672012-01-12 17:18:15 -08001264 struct mem_cgroup *memcg;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001265 struct lruvec *lruvec;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001266
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001267 if (mem_cgroup_disabled()) {
1268 lruvec = &zone->lruvec;
1269 goto out;
1270 }
Christoph Lameterb69408e2008-10-18 20:26:14 -07001271
Johannes Weiner1306a852014-12-10 15:44:52 -08001272 memcg = page->mem_cgroup;
Hugh Dickins75121022012-03-05 14:59:18 -08001273 /*
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001274 * Swapcache readahead pages are added to the LRU - and
Johannes Weiner29833312014-12-10 15:44:02 -08001275 * possibly migrated - before they are charged.
Hugh Dickins75121022012-03-05 14:59:18 -08001276 */
Johannes Weiner29833312014-12-10 15:44:02 -08001277 if (!memcg)
1278 memcg = root_mem_cgroup;
Hugh Dickins75121022012-03-05 14:59:18 -08001279
Jianyu Zhane2318752014-06-06 14:38:20 -07001280 mz = mem_cgroup_page_zoneinfo(memcg, page);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001281 lruvec = &mz->lruvec;
1282out:
1283 /*
1284 * Since a node can be onlined after the mem_cgroup was created,
1285 * we have to be prepared to initialize lruvec->zone here;
1286 * and if it is offlined and then onlined again, we need to reinitialize it.
1287 */
1288 if (unlikely(lruvec->zone != zone))
1289 lruvec->zone = zone;
1290 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001291}
1292
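/*
 * Hedged usage sketch (not in the original): the isolation protocol
 * described above, with the caller holding the LRU lock while the page
 * is still PageLRU:
 *
 *	spin_lock_irq(&zone->lru_lock);
 *	lruvec = mem_cgroup_page_lruvec(page, zone);
 *	if (PageLRU(page)) {
 *		del_page_from_lru_list(page, lruvec, page_lru(page));
 *		...
 *	}
 *	spin_unlock_irq(&zone->lru_lock);
 */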
1293/**
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001294 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1295 * @lruvec: mem_cgroup per zone lru vector
1296 * @lru: index of lru list the page is sitting on
1297 * @nr_pages: positive when adding or negative when removing
Johannes Weiner925b7672012-01-12 17:18:15 -08001298 *
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001299 * This function must be called when a page is added to or removed from an
1300 * lru list.
Johannes Weiner925b7672012-01-12 17:18:15 -08001301 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001302void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1303 int nr_pages)
Johannes Weiner925b7672012-01-12 17:18:15 -08001304{
1305 struct mem_cgroup_per_zone *mz;
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001306 unsigned long *lru_size;
Johannes Weiner925b7672012-01-12 17:18:15 -08001307
1308 if (mem_cgroup_disabled())
1309 return;
1310
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001311 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1312 lru_size = mz->lru_size + lru;
1313 *lru_size += nr_pages;
1314 VM_BUG_ON((long)(*lru_size) < 0);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001315}
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08001316
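/*
 * Illustrative note (an assumption about the callers): the mm_inline.h
 * helpers add_page_to_lru_list()/del_page_from_lru_list() are the usual
 * entry points, e.g. removing one base page amounts to
 * mem_cgroup_update_lru_size(lruvec, lru, -1).
 */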
Johannes Weiner2314b422014-12-10 15:44:33 -08001317bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root)
Michal Hocko3e920412011-07-26 16:08:29 -07001318{
Johannes Weiner2314b422014-12-10 15:44:33 -08001319 if (root == memcg)
Johannes Weiner91c637342012-05-29 15:06:24 -07001320 return true;
Johannes Weiner2314b422014-12-10 15:44:33 -08001321 if (!root->use_hierarchy)
Johannes Weiner91c637342012-05-29 15:06:24 -07001322 return false;
Johannes Weiner2314b422014-12-10 15:44:33 -08001323 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001324}
1325
Johannes Weiner2314b422014-12-10 15:44:33 -08001326bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001327{
Johannes Weiner2314b422014-12-10 15:44:33 -08001328 struct mem_cgroup *task_memcg;
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001329 struct task_struct *p;
David Rientjesffbdccf2013-07-03 15:01:23 -07001330 bool ret;
David Rientjes4c4a2212008-02-07 00:14:06 -08001331
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001332 p = find_lock_task_mm(task);
David Rientjesde077d22012-01-12 17:18:52 -08001333 if (p) {
Johannes Weiner2314b422014-12-10 15:44:33 -08001334 task_memcg = get_mem_cgroup_from_mm(p->mm);
David Rientjesde077d22012-01-12 17:18:52 -08001335 task_unlock(p);
1336 } else {
1337 /*
1338 * All threads may have already detached their mm's, but the oom
1339 * killer still needs to detect if they have already been oom
1340 * killed to prevent needlessly killing additional tasks.
1341 */
David Rientjesffbdccf2013-07-03 15:01:23 -07001342 rcu_read_lock();
Johannes Weiner2314b422014-12-10 15:44:33 -08001343 task_memcg = mem_cgroup_from_task(task);
1344 css_get(&task_memcg->css);
David Rientjesffbdccf2013-07-03 15:01:23 -07001345 rcu_read_unlock();
David Rientjesde077d22012-01-12 17:18:52 -08001346 }
Johannes Weiner2314b422014-12-10 15:44:33 -08001347 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1348 css_put(&task_memcg->css);
David Rientjes4c4a2212008-02-07 00:14:06 -08001349 return ret;
1350}
1351
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001352int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001353{
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001354 unsigned long inactive_ratio;
Johannes Weiner9b272972011-11-02 13:38:23 -07001355 unsigned long inactive;
1356 unsigned long active;
1357 unsigned long gb;
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001358
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001359 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1360 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001361
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001362 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1363 if (gb)
1364 inactive_ratio = int_sqrt(10 * gb);
1365 else
1366 inactive_ratio = 1;
1367
Johannes Weiner9b272972011-11-02 13:38:23 -07001368 return inactive * inactive_ratio < active;
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001369}
1370
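/*
 * Worked example (illustrative): with ~4GB of anon pages, gb = 4 and
 * inactive_ratio = int_sqrt(10 * 4) = 6, so the inactive anon list is
 * considered low once inactive * 6 < active.
 */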
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001371#define mem_cgroup_from_counter(counter, member) \
Balbir Singh6d61ef42009-01-07 18:08:06 -08001372 container_of(counter, struct mem_cgroup, member)
1373
Johannes Weiner19942822011-02-01 15:52:43 -08001374/**
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001375 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
Wanpeng Lidad75572012-06-20 12:53:01 -07001376 * @memcg: the memory cgroup
Johannes Weiner19942822011-02-01 15:52:43 -08001377 *
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001378 * Returns the maximum amount of memory @mem can be charged with, in
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001379 * pages.
Johannes Weiner19942822011-02-01 15:52:43 -08001380 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001381static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
Johannes Weiner19942822011-02-01 15:52:43 -08001382{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001383 unsigned long margin = 0;
1384 unsigned long count;
1385 unsigned long limit;
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001386
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001387 count = page_counter_read(&memcg->memory);
1388 limit = ACCESS_ONCE(memcg->memory.limit);
1389 if (count < limit)
1390 margin = limit - count;
1391
1392 if (do_swap_account) {
1393 count = page_counter_read(&memcg->memsw);
1394 limit = ACCESS_ONCE(memcg->memsw.limit);
1395 if (count <= limit)
1396 margin = min(margin, limit - count);
1397 }
1398
1399 return margin;
Johannes Weiner19942822011-02-01 15:52:43 -08001400}
1401
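/*
 * Worked example (illustrative numbers): with memory.limit = 1000 pages
 * and a usage of 900, the memory margin is 100 pages. With swap
 * accounting on, memsw.limit = 1050 and memsw usage = 1000 leave a
 * headroom of 50, so mem_cgroup_margin() returns min(100, 50) = 50.
 */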
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001402int mem_cgroup_swappiness(struct mem_cgroup *memcg)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001403{
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001404 /* root ? */
Linus Torvalds14208b02014-06-09 15:03:33 -07001405 if (mem_cgroup_disabled() || !memcg->css.parent)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001406 return vm_swappiness;
1407
Johannes Weinerbf1ff262011-03-23 16:42:32 -07001408 return memcg->swappiness;
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001409}
1410
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001411/*
Qiang Huangbdcbb652014-06-04 16:08:21 -07001412 * A routine for checking whether "mem" is under move_account() or not.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001413 *
Qiang Huangbdcbb652014-06-04 16:08:21 -07001414 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
1415 * moving cgroups. This is for waiting out the high memory pressure
1416 * caused by a "move".
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001417 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001418static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001419{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001420 struct mem_cgroup *from;
1421 struct mem_cgroup *to;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001422 bool ret = false;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001423 /*
1424	 * Unlike the task_move routines, we access mc.to and mc.from without
1425	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1426 */
1427 spin_lock(&mc.lock);
1428 from = mc.from;
1429 to = mc.to;
1430 if (!from)
1431 goto unlock;
Michal Hocko3e920412011-07-26 16:08:29 -07001432
Johannes Weiner2314b422014-12-10 15:44:33 -08001433 ret = mem_cgroup_is_descendant(from, memcg) ||
1434 mem_cgroup_is_descendant(to, memcg);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001435unlock:
1436 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001437 return ret;
1438}
1439
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001440static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001441{
1442 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001443 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001444 DEFINE_WAIT(wait);
1445 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1446 /* moving charge context might have finished. */
1447 if (mc.moving_task)
1448 schedule();
1449 finish_wait(&mc.waitq, &wait);
1450 return true;
1451 }
1452 }
1453 return false;
1454}
1455
Sha Zhengju58cf1882013-02-22 16:32:05 -08001456#define K(x) ((x) << (PAGE_SHIFT-10))
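/* K() converts pages to kilobytes, e.g. K(1) == 4 with 4KB pages. */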
Balbir Singhe2224322009-04-02 16:57:39 -07001457/**
Sha Zhengju58cf1882013-02-22 16:32:05 -08001458 * mem_cgroup_print_oom_info: Print OOM information relevant to the memory controller.
Balbir Singhe2224322009-04-02 16:57:39 -07001459 * @memcg: The memory cgroup that went over limit
1460 * @p: Task that is going to be killed
1461 *
1462 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1463 * enabled
1464 */
1465void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1466{
Tejun Heoe61734c2014-02-12 09:29:50 -05001467 /* oom_info_lock ensures that parallel ooms do not interleave */
Michal Hocko08088cb2014-02-25 15:01:44 -08001468 static DEFINE_MUTEX(oom_info_lock);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001469 struct mem_cgroup *iter;
1470 unsigned int i;
Balbir Singhe2224322009-04-02 16:57:39 -07001471
Sha Zhengju58cf1882013-02-22 16:32:05 -08001472 if (!p)
Balbir Singhe2224322009-04-02 16:57:39 -07001473 return;
1474
Michal Hocko08088cb2014-02-25 15:01:44 -08001475 mutex_lock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001476 rcu_read_lock();
1477
Tejun Heoe61734c2014-02-12 09:29:50 -05001478 pr_info("Task in ");
1479 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
Greg Thelen0346dad2015-01-26 12:58:38 -08001480 pr_cont(" killed as a result of limit of ");
Tejun Heoe61734c2014-02-12 09:29:50 -05001481 pr_cont_cgroup_path(memcg->css.cgroup);
Greg Thelen0346dad2015-01-26 12:58:38 -08001482 pr_cont("\n");
Balbir Singhe2224322009-04-02 16:57:39 -07001483
Balbir Singhe2224322009-04-02 16:57:39 -07001484 rcu_read_unlock();
1485
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001486 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1487 K((u64)page_counter_read(&memcg->memory)),
1488 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1489 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1490 K((u64)page_counter_read(&memcg->memsw)),
1491 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1492 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1493 K((u64)page_counter_read(&memcg->kmem)),
1494 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001495
1496 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heoe61734c2014-02-12 09:29:50 -05001497 pr_info("Memory cgroup stats for ");
1498 pr_cont_cgroup_path(iter->css.cgroup);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001499 pr_cont(":");
1500
1501 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1502 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1503 continue;
1504 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1505 K(mem_cgroup_read_stat(iter, i)));
1506 }
1507
1508 for (i = 0; i < NR_LRU_LISTS; i++)
1509 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1510 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1511
1512 pr_cont("\n");
1513 }
Michal Hocko08088cb2014-02-25 15:01:44 -08001514 mutex_unlock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001515}
1516
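/*
 * Illustrative shape of the dump (hypothetical cgroup path and numbers),
 * assembled from the pr_info()/pr_cont() format strings above:
 *
 *	Task in /test killed as a result of limit of /test
 *	memory: usage 524288kB, limit 524288kB, failcnt 1024
 *	memory+swap: usage 0kB, limit 9007199254740988kB, failcnt 0
 *	kmem: usage 0kB, limit 9007199254740988kB, failcnt 0
 *	Memory cgroup stats for /test: ...
 */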
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001517/*
1518 * This function returns the number of memcgs under the hierarchy tree.
1519 * Returns 1 (self count) if there are no children.
1520 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001521static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001522{
1523 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001524 struct mem_cgroup *iter;
1525
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001526 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001527 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001528 return num;
1529}
1530
Balbir Singh6d61ef42009-01-07 18:08:06 -08001531/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001532 * Return the memory (and swap, if configured) limit for a memcg.
1533 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001534static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001535{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001536 unsigned long limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001537
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001538 limit = memcg->memory.limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001539 if (mem_cgroup_swappiness(memcg)) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001540 unsigned long memsw_limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001541
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001542 memsw_limit = memcg->memsw.limit;
1543 limit = min(limit + total_swap_pages, memsw_limit);
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001544 }
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001545 return limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001546}
1547
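/*
 * Worked example (illustrative): with memory.limit = 262144 pages (1GB),
 * total_swap_pages = 131072 (512MB) and memsw.limit = 327680, the
 * effective limit is min(262144 + 131072, 327680) = 327680 pages.
 */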
David Rientjes19965462012-12-11 16:00:26 -08001548static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1549 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001550{
1551 struct mem_cgroup *iter;
1552 unsigned long chosen_points = 0;
1553 unsigned long totalpages;
1554 unsigned int points = 0;
1555 struct task_struct *chosen = NULL;
1556
David Rientjes876aafb2012-07-31 16:43:48 -07001557 /*
David Rientjes465adcf2013-04-29 15:08:45 -07001558 * If current has a pending SIGKILL or is exiting, then automatically
1559 * select it. The goal is to allow it to allocate so that it may
1560 * quickly exit and free its memory.
David Rientjes876aafb2012-07-31 16:43:48 -07001561 */
Oleg Nesterovd003f372014-12-12 16:56:24 -08001562 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
David Rientjes876aafb2012-07-31 16:43:48 -07001563 set_thread_flag(TIF_MEMDIE);
1564 return;
1565 }
1566
1567 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001568 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001569 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heo72ec7022013-08-08 20:11:26 -04001570 struct css_task_iter it;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001571 struct task_struct *task;
1572
Tejun Heo72ec7022013-08-08 20:11:26 -04001573 css_task_iter_start(&iter->css, &it);
1574 while ((task = css_task_iter_next(&it))) {
David Rientjes9cbb78b2012-07-31 16:43:44 -07001575 switch (oom_scan_process_thread(task, totalpages, NULL,
1576 false)) {
1577 case OOM_SCAN_SELECT:
1578 if (chosen)
1579 put_task_struct(chosen);
1580 chosen = task;
1581 chosen_points = ULONG_MAX;
1582 get_task_struct(chosen);
1583 /* fall through */
1584 case OOM_SCAN_CONTINUE:
1585 continue;
1586 case OOM_SCAN_ABORT:
Tejun Heo72ec7022013-08-08 20:11:26 -04001587 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001588 mem_cgroup_iter_break(memcg, iter);
1589 if (chosen)
1590 put_task_struct(chosen);
1591 return;
1592 case OOM_SCAN_OK:
1593 break;
1594 };
1595 points = oom_badness(task, memcg, NULL, totalpages);
David Rientjesd49ad932014-01-23 15:53:34 -08001596 if (!points || points < chosen_points)
1597 continue;
1598 /* Prefer thread group leaders for display purposes */
1599 if (points == chosen_points &&
1600 thread_group_leader(chosen))
1601 continue;
1602
1603 if (chosen)
1604 put_task_struct(chosen);
1605 chosen = task;
1606 chosen_points = points;
1607 get_task_struct(chosen);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001608 }
Tejun Heo72ec7022013-08-08 20:11:26 -04001609 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001610 }
1611
1612 if (!chosen)
1613 return;
1614 points = chosen_points * 1000 / totalpages;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001615 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1616 NULL, "Memory cgroup out of memory");
David Rientjes9cbb78b2012-07-31 16:43:44 -07001617}
1618
Michele Curtiae6e71d2014-12-12 16:56:35 -08001619#if MAX_NUMNODES > 1
1620
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001621/**
1622 * test_mem_cgroup_node_reclaimable
Wanpeng Lidad75572012-06-20 12:53:01 -07001623 * @memcg: the target memcg
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001624 * @nid: the node ID to be checked.
1625 * @noswap: specify true here if the user wants file-only information.
1626 *
1627 * This function returns whether the specified memcg contains any
1628 * reclaimable pages on a node. Returns true if there are any reclaimable
1629 * pages in the node.
1630 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001631static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001632 int nid, bool noswap)
1633{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001634 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001635 return true;
1636 if (noswap || !total_swap_pages)
1637 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001638 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001639 return true;
1640 return false;
1641
1642}
Ying Han889976d2011-05-26 16:25:33 -07001643
1644/*
1645 * Always updating the nodemask is not very good - even if we have an empty
1646 * list or the wrong list here, we can start from some node and traverse all
1647 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1648 *
1649 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001650static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001651{
1652 int nid;
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001653 /*
1654	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1655	 * pagein/pageout changes since the last update.
1656 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001657 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001658 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001659 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001660 return;
1661
Ying Han889976d2011-05-26 16:25:33 -07001662 /* make a nodemask where this memcg uses memory from */
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001663 memcg->scan_nodes = node_states[N_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001664
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001665 for_each_node_mask(nid, node_states[N_MEMORY]) {
Ying Han889976d2011-05-26 16:25:33 -07001666
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001667 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1668 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001669 }
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001670
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001671 atomic_set(&memcg->numainfo_events, 0);
1672 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001673}
1674
1675/*
1676 * Selecting a node to start reclaim from. Because all we need is to reduce
1677 * the usage counter, starting from anywhere is OK. Reclaiming from
1678 * the current node has both pros and cons.
1679 *
1680 * Freeing memory from the current node means freeing memory from a node
1681 * which we'll use or have used, so it may degrade that node's LRU. And if
1682 * several threads hit their limits, they will contend on one node. But
1683 * freeing from a remote node costs more because of memory latency.
1684 *
1685 * For now, we use round-robin. A better algorithm is welcome.
1686 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001687int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001688{
1689 int node;
1690
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001691 mem_cgroup_may_update_nodemask(memcg);
1692 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001693
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001694 node = next_node(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001695 if (node == MAX_NUMNODES)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001696 node = first_node(memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001697 /*
1698	 * We call this when we hit the limit, not when pages are added to the LRU.
1699	 * No LRU may hold any pages because all pages are UNEVICTABLE, or
1700	 * the memcg is too small and no pages are on the LRU. In that case,
1701	 * we use the current node.
1702 */
1703 if (unlikely(node == MAX_NUMNODES))
1704 node = numa_node_id();
1705
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001706 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001707 return node;
1708}
Ying Han889976d2011-05-26 16:25:33 -07001709#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001710int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001711{
1712 return 0;
1713}
1714#endif
1715
Andrew Morton0608f432013-09-24 15:27:41 -07001716static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1717 struct zone *zone,
1718 gfp_t gfp_mask,
1719 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001720{
Andrew Morton0608f432013-09-24 15:27:41 -07001721 struct mem_cgroup *victim = NULL;
1722 int total = 0;
1723 int loop = 0;
1724 unsigned long excess;
1725 unsigned long nr_scanned;
1726 struct mem_cgroup_reclaim_cookie reclaim = {
1727 .zone = zone,
1728 .priority = 0,
1729 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001730
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001731 excess = soft_limit_excess(root_memcg);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001732
Andrew Morton0608f432013-09-24 15:27:41 -07001733 while (1) {
1734 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1735 if (!victim) {
1736 loop++;
1737 if (loop >= 2) {
1738 /*
1739				 * If we have not been able to reclaim
1740				 * anything, it might be because there are
1741				 * no reclaimable pages under this hierarchy.
1742 */
1743 if (!total)
1744 break;
1745 /*
1746 * We want to do more targeted reclaim.
1747			 * excess >> 2 is neither so large that we
1748			 * reclaim too much, nor so small that we keep
1749			 * coming back to reclaim from this cgroup.
1750 */
1751 if (total >= (excess >> 2) ||
1752 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1753 break;
1754 }
1755 continue;
1756 }
Andrew Morton0608f432013-09-24 15:27:41 -07001757 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1758 zone, &nr_scanned);
1759 *total_scanned += nr_scanned;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001760 if (!soft_limit_excess(root_memcg))
Andrew Morton0608f432013-09-24 15:27:41 -07001761 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001762 }
Andrew Morton0608f432013-09-24 15:27:41 -07001763 mem_cgroup_iter_break(root_memcg, victim);
1764 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001765}
1766
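/*
 * Illustrative note (not in the original): with a soft-limit excess of,
 * say, 400 pages, the walk above keeps picking victims until at least
 * excess >> 2 = 100 pages have been reclaimed, two full rounds have
 * reclaimed nothing, or MEM_CGROUP_MAX_RECLAIM_LOOPS is exceeded.
 */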
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001767#ifdef CONFIG_LOCKDEP
1768static struct lockdep_map memcg_oom_lock_dep_map = {
1769 .name = "memcg_oom_lock",
1770};
1771#endif
1772
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001773static DEFINE_SPINLOCK(memcg_oom_lock);
1774
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001775/*
1776 * Check whether the OOM killer is already running under our hierarchy.
1777 * If someone is running, return false.
1778 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001779static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001780{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001781 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001782
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001783 spin_lock(&memcg_oom_lock);
1784
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001785 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001786 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001787 /*
1788			 * this subtree of our hierarchy is already locked,
1789			 * so we cannot take the lock.
1790 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001791 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001792 mem_cgroup_iter_break(memcg, iter);
1793 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001794 } else
1795 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001796 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001797
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001798 if (failed) {
1799 /*
1800		 * OK, we failed to lock the whole subtree, so we have
1801		 * to clean up what we set up, up to the failing subtree.
1802 */
1803 for_each_mem_cgroup_tree(iter, memcg) {
1804 if (iter == failed) {
1805 mem_cgroup_iter_break(memcg, iter);
1806 break;
1807 }
1808 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001809 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001810 } else
1811 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001812
1813 spin_unlock(&memcg_oom_lock);
1814
1815 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001816}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001817
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001818static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001819{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001820 struct mem_cgroup *iter;
1821
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001822 spin_lock(&memcg_oom_lock);
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001823 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001824 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001825 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001826 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001827}
1828
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001829static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001830{
1831 struct mem_cgroup *iter;
1832
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001833 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001834 atomic_inc(&iter->under_oom);
1835}
1836
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001837static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001838{
1839 struct mem_cgroup *iter;
1840
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001841 /*
1842 * When a new child is created while the hierarchy is under oom,
1843	 * mem_cgroup_oom_lock() may not have been called. We have to use
1844 * atomic_add_unless() here.
1845 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001846 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001847 atomic_add_unless(&iter->under_oom, -1, 0);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001848}
1849
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001850static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1851
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001852struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001853 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001854 wait_queue_t wait;
1855};
1856
1857static int memcg_oom_wake_function(wait_queue_t *wait,
1858 unsigned mode, int sync, void *arg)
1859{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001860 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1861 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001862 struct oom_wait_info *oom_wait_info;
1863
1864 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001865 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001866
Johannes Weiner2314b422014-12-10 15:44:33 -08001867 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1868 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001869 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001870 return autoremove_wake_function(wait, mode, sync, arg);
1871}
1872
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001873static void memcg_wakeup_oom(struct mem_cgroup *memcg)
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001874{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001875 atomic_inc(&memcg->oom_wakeups);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001876 /* for filtering, pass "memcg" as argument. */
1877 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001878}
1879
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001880static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001881{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001882 if (memcg && atomic_read(&memcg->under_oom))
1883 memcg_wakeup_oom(memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001884}
1885
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001886static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001887{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001888 if (!current->memcg_oom.may_oom)
1889 return;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001890 /*
Johannes Weiner49426422013-10-16 13:46:59 -07001891 * We are in the middle of the charge context here, so we
1892 * don't want to block when potentially sitting on a callstack
1893 * that holds all kinds of filesystem and mm locks.
1894 *
1895 * Also, the caller may handle a failed allocation gracefully
1896 * (like optional page cache readahead) and so an OOM killer
1897 * invocation might not even be necessary.
1898 *
1899 * That's why we don't do anything here except remember the
1900 * OOM context and then deal with it at the end of the page
1901 * fault when the stack is unwound, the locks are released,
1902 * and when we know whether the fault was overall successful.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001903 */
Johannes Weiner49426422013-10-16 13:46:59 -07001904 css_get(&memcg->css);
1905 current->memcg_oom.memcg = memcg;
1906 current->memcg_oom.gfp_mask = mask;
1907 current->memcg_oom.order = order;
1908}
1909
1910/**
1911 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1912 * @handle: actually kill/wait or just clean up the OOM state
1913 *
1914 * This has to be called at the end of a page fault if the memcg OOM
1915 * handler was enabled.
1916 *
1917 * Memcg supports userspace OOM handling where failed allocations must
1918 * sleep on a waitqueue until the userspace task resolves the
1919 * situation. Sleeping directly in the charge context with all kinds
1920 * of locks held is not a good idea, instead we remember an OOM state
1921 * in the task and mem_cgroup_oom_synchronize() has to be called at
1922 * the end of the page fault to complete the OOM handling.
1923 *
1924 * Returns %true if an ongoing memcg OOM situation was detected and
1925 * completed, %false otherwise.
1926 */
1927bool mem_cgroup_oom_synchronize(bool handle)
1928{
1929 struct mem_cgroup *memcg = current->memcg_oom.memcg;
1930 struct oom_wait_info owait;
1931 bool locked;
1932
1933 /* OOM is global, do not handle */
1934 if (!memcg)
1935 return false;
1936
1937 if (!handle)
1938 goto cleanup;
1939
1940 owait.memcg = memcg;
1941 owait.wait.flags = 0;
1942 owait.wait.func = memcg_oom_wake_function;
1943 owait.wait.private = current;
1944 INIT_LIST_HEAD(&owait.wait.task_list);
1945
1946 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001947 mem_cgroup_mark_under_oom(memcg);
1948
1949 locked = mem_cgroup_oom_trylock(memcg);
1950
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001951 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001952 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001953
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001954 if (locked && !memcg->oom_kill_disable) {
1955 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07001956 finish_wait(&memcg_oom_waitq, &owait.wait);
1957 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
1958 current->memcg_oom.order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001959 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001960 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07001961 mem_cgroup_unmark_under_oom(memcg);
1962 finish_wait(&memcg_oom_waitq, &owait.wait);
1963 }
1964
1965 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001966 mem_cgroup_oom_unlock(memcg);
1967 /*
1968 * There is no guarantee that an OOM-lock contender
1969 * sees the wakeups triggered by the OOM kill
1970		 * uncharges. Wake any sleepers explicitly.
1971 */
1972 memcg_oom_recover(memcg);
1973 }
Johannes Weiner49426422013-10-16 13:46:59 -07001974cleanup:
1975 current->memcg_oom.memcg = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001976 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001977 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001978}
1979
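/*
 * Hedged caller sketch (an assumption about the fault path, cf.
 * pagefault_out_of_memory()): once the page fault has unwound, the
 * handler would do something like
 *
 *	if (!mem_cgroup_oom_synchronize(true))
 *		handle_global_oom();	<- hypothetical fallback
 *
 * so a pending memcg OOM is resolved before a global OOM is declared.
 */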
Johannes Weinerd7365e72014-10-29 14:50:48 -07001980/**
1981 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
1982 * @page: page that is going to change accounted state
1983 * @locked: &memcg->move_lock slowpath was taken
1984 * @flags: IRQ-state flags for &memcg->move_lock
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001985 *
Johannes Weinerd7365e72014-10-29 14:50:48 -07001986 * This function must mark the beginning of an accounted page state
1987 * change to prevent double accounting when the page is concurrently
1988 * being moved to another memcg:
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001989 *
Johannes Weinerd7365e72014-10-29 14:50:48 -07001990 * memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
1991 * if (TestClearPageState(page))
1992 * mem_cgroup_update_page_stat(memcg, state, -1);
1993 * mem_cgroup_end_page_stat(memcg, locked, flags);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001994 *
Johannes Weinerd7365e72014-10-29 14:50:48 -07001995 * The RCU lock is held throughout the transaction. The fast path can
1996 * get away without acquiring the memcg->move_lock (@locked is false)
1997 * because page moving starts with an RCU grace period.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001998 *
Johannes Weinerd7365e72014-10-29 14:50:48 -07001999 * The RCU lock also protects the memcg from being freed when the page
2000 * state that is going to change is the only thing preventing the page
2001 * from being uncharged. E.g. end-writeback clearing PageWriteback(),
2002 * which allows migration to go ahead and uncharge the page before the
2003 * account transaction might be complete.
Balbir Singhd69b0422009-06-17 16:26:34 -07002004 */
Johannes Weinerd7365e72014-10-29 14:50:48 -07002005struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
2006 bool *locked,
2007 unsigned long *flags)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002008{
2009 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002010
Johannes Weinerd7365e72014-10-29 14:50:48 -07002011 rcu_read_lock();
2012
2013 if (mem_cgroup_disabled())
2014 return NULL;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002015again:
Johannes Weiner1306a852014-12-10 15:44:52 -08002016 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08002017 if (unlikely(!memcg))
Johannes Weinerd7365e72014-10-29 14:50:48 -07002018 return NULL;
2019
2020 *locked = false;
Qiang Huangbdcbb652014-06-04 16:08:21 -07002021 if (atomic_read(&memcg->moving_account) <= 0)
Johannes Weinerd7365e72014-10-29 14:50:48 -07002022 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002023
Johannes Weiner354a4782014-12-10 15:44:05 -08002024 spin_lock_irqsave(&memcg->move_lock, *flags);
Johannes Weiner1306a852014-12-10 15:44:52 -08002025 if (memcg != page->mem_cgroup) {
Johannes Weiner354a4782014-12-10 15:44:05 -08002026 spin_unlock_irqrestore(&memcg->move_lock, *flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002027 goto again;
2028 }
2029 *locked = true;
Johannes Weinerd7365e72014-10-29 14:50:48 -07002030
2031 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002032}
2033
Johannes Weinerd7365e72014-10-29 14:50:48 -07002034/**
2035 * mem_cgroup_end_page_stat - finish a page state statistics transaction
2036 * @memcg: the memcg that was accounted against
2037 * @locked: value received from mem_cgroup_begin_page_stat()
2038 * @flags: value received from mem_cgroup_begin_page_stat()
2039 */
Michal Hockoe4bd6a02014-12-10 15:44:39 -08002040void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
2041 unsigned long *flags)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002042{
Michal Hockoe4bd6a02014-12-10 15:44:39 -08002043 if (memcg && *locked)
2044 spin_unlock_irqrestore(&memcg->move_lock, *flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002045
Johannes Weinerd7365e72014-10-29 14:50:48 -07002046 rcu_read_unlock();
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002047}
2048
Johannes Weinerd7365e72014-10-29 14:50:48 -07002049/**
2050 * mem_cgroup_update_page_stat - update page state statistics
2051 * @memcg: memcg to account against
2052 * @idx: page state item to account
2053 * @val: number of pages (positive or negative)
2054 *
2055 * See mem_cgroup_begin_page_stat() for locking requirements.
2056 */
2057void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
Sha Zhengju68b48762013-09-12 15:13:50 -07002058 enum mem_cgroup_stat_index idx, int val)
Balbir Singhd69b0422009-06-17 16:26:34 -07002059{
Sha Zhengju658b72c2013-09-12 15:13:52 -07002060 VM_BUG_ON(!rcu_read_lock_held());
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002061
Johannes Weinerd7365e72014-10-29 14:50:48 -07002062 if (memcg)
2063 this_cpu_add(memcg->stat->count[idx], val);
Balbir Singhd69b0422009-06-17 16:26:34 -07002064}
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002065
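/*
 * Hedged, self-contained sketch of the transaction pattern documented
 * above mem_cgroup_begin_page_stat() (modelled on the end-writeback
 * path; not part of the original file):
 */
static void example_end_writeback_stat(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
	if (TestClearPageWriteback(page))
		mem_cgroup_update_page_stat(memcg,
					    MEM_CGROUP_STAT_WRITEBACK, -1);
	mem_cgroup_end_page_stat(memcg, &locked, &flags);
}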
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002066/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002067 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
2068 * TODO: it may be necessary to use bigger numbers on big iron.
2069 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002070#define CHARGE_BATCH 32U
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002071struct memcg_stock_pcp {
2072	struct mem_cgroup *cached; /* this is never the root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002073 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002074 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002075 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07002076#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002077};
2078static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002079static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002080
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002081/**
2082 * consume_stock: Try to consume stocked charge on this cpu.
2083 * @memcg: memcg to consume from.
2084 * @nr_pages: how many pages to charge.
2085 *
2086 * The charges will only happen if @memcg matches the current cpu's memcg
2087 * stock, and at least @nr_pages are available in that stock. Failure to
2088 * service an allocation will refill the stock.
2089 *
2090 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002091 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002092static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002093{
2094 struct memcg_stock_pcp *stock;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002095 bool ret = false;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002096
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002097 if (nr_pages > CHARGE_BATCH)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002098 return ret;
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002099
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002100 stock = &get_cpu_var(memcg_stock);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002101 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002102 stock->nr_pages -= nr_pages;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002103 ret = true;
2104 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002105 put_cpu_var(memcg_stock);
2106 return ret;
2107}
2108
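/*
 * Illustrative note (not in the original): if this cpu's stock caches
 * the same memcg with stock->nr_pages = 32, consume_stock(memcg, 8)
 * succeeds and leaves 24 pages stocked; any request larger than
 * CHARGE_BATCH always falls through to the slow path.
 */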
2109/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002110 * Return stocked charges to the counters and reset the cached information.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002111 */
2112static void drain_stock(struct memcg_stock_pcp *stock)
2113{
2114 struct mem_cgroup *old = stock->cached;
2115
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002116 if (stock->nr_pages) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002117 page_counter_uncharge(&old->memory, stock->nr_pages);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002118 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002119 page_counter_uncharge(&old->memsw, stock->nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002120 css_put_many(&old->css, stock->nr_pages);
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002121 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002122 }
2123 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002124}
2125
2126/*
2127 * This must be called under preempt disabled or must be called by
2128 * a thread which is pinned to local cpu.
2129 */
2130static void drain_local_stock(struct work_struct *dummy)
2131{
Christoph Lameter7c8e0182014-06-04 16:07:56 -07002132 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002133 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002134 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002135}
2136
Michal Hockoe4777492013-02-22 16:35:40 -08002137static void __init memcg_stock_init(void)
2138{
2139 int cpu;
2140
2141 for_each_possible_cpu(cpu) {
2142 struct memcg_stock_pcp *stock =
2143 &per_cpu(memcg_stock, cpu);
2144 INIT_WORK(&stock->work, drain_local_stock);
2145 }
2146}
2147
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002148/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002149 * Cache charges (nr_pages) in the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01002150 * They will be consumed by the consume_stock() function later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002151 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002152static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002153{
2154 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2155
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002156 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002157 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002158 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002159 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002160 stock->nr_pages += nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002161 put_cpu_var(memcg_stock);
2162}
2163
2164/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002165 * Drains all per-CPU charge caches for the given root_memcg and the
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002166 * subtree of the hierarchy under it.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002167 */
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002168static void drain_all_stock(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002169{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002170 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07002171
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002172	/* If someone's already draining, avoid running more workers. */
2173 if (!mutex_trylock(&percpu_charge_mutex))
2174 return;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002175 /* Notify other cpus that system-wide "drain" is running */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002176 get_online_cpus();
Johannes Weiner5af12d02011-08-25 15:59:07 -07002177 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002178 for_each_online_cpu(cpu) {
2179 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002180 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002181
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002182 memcg = stock->cached;
2183 if (!memcg || !stock->nr_pages)
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002184 continue;
Johannes Weiner2314b422014-12-10 15:44:33 -08002185 if (!mem_cgroup_is_descendant(memcg, root_memcg))
Michal Hocko3e920412011-07-26 16:08:29 -07002186 continue;
Michal Hockod1a05b62011-07-26 16:08:27 -07002187 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2188 if (cpu == curcpu)
2189 drain_local_stock(&stock->work);
2190 else
2191 schedule_work_on(cpu, &stock->work);
2192 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002193 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07002194 put_cpu();
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002195 put_online_cpus();
Michal Hocko9f50fad2011-08-09 11:56:26 +02002196 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002197}
2198
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002199/*
2200 * This function drains the percpu counter values from a DEAD cpu and
2201 * moves them to the local cpu. Note that this function can be preempted.
2202 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002203static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002204{
2205 int i;
2206
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002207 spin_lock(&memcg->pcp_counter_lock);
Johannes Weiner61046212012-05-29 15:07:05 -07002208 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002209 long x = per_cpu(memcg->stat->count[i], cpu);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002210
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002211 per_cpu(memcg->stat->count[i], cpu) = 0;
2212 memcg->nocpu_base.count[i] += x;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002213 }
Johannes Weinere9f89742011-03-23 16:42:37 -07002214 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002215 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
Johannes Weinere9f89742011-03-23 16:42:37 -07002216
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002217 per_cpu(memcg->stat->events[i], cpu) = 0;
2218 memcg->nocpu_base.events[i] += x;
Johannes Weinere9f89742011-03-23 16:42:37 -07002219 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002220 spin_unlock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002221}
2222
Paul Gortmaker0db06282013-06-19 14:53:51 -04002223static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002224 unsigned long action,
2225 void *hcpu)
2226{
2227 int cpu = (unsigned long)hcpu;
2228 struct memcg_stock_pcp *stock;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002229 struct mem_cgroup *iter;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002230
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07002231 if (action == CPU_ONLINE)
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002232 return NOTIFY_OK;
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002233
Kirill A. Shutemovd8330492012-04-12 12:49:11 -07002234 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002235 return NOTIFY_OK;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002236
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08002237 for_each_mem_cgroup(iter)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002238 mem_cgroup_drain_pcp_counter(iter, cpu);
2239
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002240 stock = &per_cpu(memcg_stock, cpu);
2241 drain_stock(stock);
2242 return NOTIFY_OK;
2243}
2244
Johannes Weiner00501b52014-08-08 14:19:20 -07002245static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2246 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002247{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002248 unsigned int batch = max(CHARGE_BATCH, nr_pages);
Johannes Weiner9b130612014-08-06 16:05:51 -07002249 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002250 struct mem_cgroup *mem_over_limit;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002251 struct page_counter *counter;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002252 unsigned long nr_reclaimed;
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002253 bool may_swap = true;
2254 bool drained = false;
Johannes Weiner05b84302014-08-06 16:05:59 -07002255 int ret = 0;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002256
Johannes Weinerce00a962014-09-05 08:43:57 -04002257 if (mem_cgroup_is_root(memcg))
2258 goto done;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002259retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07002260 if (consume_stock(memcg, nr_pages))
2261 goto done;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002262
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002263 if (!do_swap_account ||
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002264 !page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2265 if (!page_counter_try_charge(&memcg->memory, batch, &counter))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002266 goto done_restock;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002267 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002268 page_counter_uncharge(&memcg->memsw, batch);
2269 mem_over_limit = mem_cgroup_from_counter(counter, memory);
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002270 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002271 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002272 may_swap = false;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002273 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002274
Johannes Weiner6539cc02014-08-06 16:05:42 -07002275 if (batch > nr_pages) {
2276 batch = nr_pages;
2277 goto retry;
2278 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002279
Johannes Weiner06b078f2014-08-06 16:05:44 -07002280 /*
2281 * Unlike in global OOM situations, memcg is not in a physical
2282 * memory shortage. Allow dying and OOM-killed tasks to
2283 * bypass the last charges so that they can exit quickly and
2284 * free their memory.
2285 */
2286 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2287 fatal_signal_pending(current) ||
2288 current->flags & PF_EXITING))
2289 goto bypass;
2290
2291 if (unlikely(task_in_memcg_oom(current)))
2292 goto nomem;
2293
Johannes Weiner6539cc02014-08-06 16:05:42 -07002294 if (!(gfp_mask & __GFP_WAIT))
2295 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002296
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002297 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2298 gfp_mask, may_swap);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002299
Johannes Weiner61e02c72014-08-06 16:08:16 -07002300 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner6539cc02014-08-06 16:05:42 -07002301 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07002302
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002303 if (!drained) {
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002304 drain_all_stock(mem_over_limit);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002305 drained = true;
2306 goto retry;
2307 }
2308
Johannes Weiner28c34c22014-08-06 16:05:47 -07002309 if (gfp_mask & __GFP_NORETRY)
2310 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002311 /*
2312 * Even though the limit is exceeded at this point, reclaim
2313 * may have been able to free some pages. Retry the charge
2314 * before killing the task.
2315 *
2316 * Only for regular pages, though: huge pages are rather
2317 * unlikely to succeed so close to the limit, and we fall back
2318 * to regular pages anyway in case of failure.
2319 */
Johannes Weiner61e02c72014-08-06 16:08:16 -07002320 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002321 goto retry;
2322 /*
2323	 * During task move, charge accounts can be doubly counted, so it's
2324	 * better to wait until the end of the task move if one is in progress.
2325 */
2326 if (mem_cgroup_wait_acct_move(mem_over_limit))
2327 goto retry;
2328
Johannes Weiner9b130612014-08-06 16:05:51 -07002329 if (nr_retries--)
2330 goto retry;
2331
Johannes Weiner06b078f2014-08-06 16:05:44 -07002332 if (gfp_mask & __GFP_NOFAIL)
2333 goto bypass;
2334
Johannes Weiner6539cc02014-08-06 16:05:42 -07002335 if (fatal_signal_pending(current))
2336 goto bypass;
2337
Johannes Weiner61e02c72014-08-06 16:08:16 -07002338 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002339nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002340 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002341 return -ENOMEM;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002342bypass:
Johannes Weinerce00a962014-09-05 08:43:57 -04002343 return -EINTR;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002344
2345done_restock:
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002346 css_get_many(&memcg->css, batch);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002347 if (batch > nr_pages)
2348 refill_stock(memcg, batch - nr_pages);
2349done:
Johannes Weiner05b84302014-08-06 16:05:59 -07002350 return ret;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002351}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002352
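/*
 * Caller sketch (illustrative): -EINTR from try_charge() means the
 * charge was bypassed for a dying/OOM-killed task and nothing was
 * accounted; callers in this file then fall back to root_mem_cgroup.
 * example_try_charge is a made-up name for this sketch.
 */
static int example_try_charge(struct mem_cgroup **memcgp, gfp_t gfp_mask,
			      unsigned int nr_pages)
{
	int ret;

	ret = try_charge(*memcgp, gfp_mask, nr_pages);
	if (ret == -EINTR) {
		*memcgp = root_mem_cgroup;	/* account to root instead */
		ret = 0;
	}
	return ret;			/* 0 on success, else -ENOMEM */
}
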
Johannes Weiner00501b52014-08-08 14:19:20 -07002353static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002354{
Johannes Weinerce00a962014-09-05 08:43:57 -04002355 if (mem_cgroup_is_root(memcg))
2356 return;
2357
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002358 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner05b84302014-08-06 16:05:59 -07002359 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002360 page_counter_uncharge(&memcg->memsw, nr_pages);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002361
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002362 css_put_many(&memcg->css, nr_pages);
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002363}
2364
2365/*
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002366 * A helper function to get mem_cgroup from ID. Must be called under
Tejun Heoec903c02014-05-13 12:11:01 -04002367 * rcu_read_lock(). The caller is responsible for calling
2368 * css_tryget_online() if the mem_cgroup is used for charging. (The
2369 * refcnt drop from swap can happen against an already removed memcg.)
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002370 */
2371static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2372{
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002373 /* ID 0 is unused ID */
2374 if (!id)
2375 return NULL;
Li Zefan34c00c32013-09-23 16:56:01 +08002376 return mem_cgroup_from_id(id);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002377}
2378
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002379/*
2380 * try_get_mem_cgroup_from_page - look up page's memcg association
2381 * @page: the page
2382 *
2383 * Look up, get a css reference, and return the memcg that owns @page.
2384 *
2385 * The page must be locked to prevent racing with swap-in and page
2386 * cache charges. If coming from an unlocked page table, the caller
2387 * must ensure the page is on the LRU or this can race with charging.
2388 */
Wu Fengguange42d9d52009-12-16 12:19:59 +01002389struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002390{
Johannes Weiner29833312014-12-10 15:44:02 -08002391 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002392 unsigned short id;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002393 swp_entry_t ent;
2394
Sasha Levin309381fea2014-01-23 15:52:54 -08002395 VM_BUG_ON_PAGE(!PageLocked(page), page);
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002396
Johannes Weiner1306a852014-12-10 15:44:52 -08002397 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08002398 if (memcg) {
2399 if (!css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002400 memcg = NULL;
Wu Fengguange42d9d52009-12-16 12:19:59 +01002401 } else if (PageSwapCache(page)) {
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002402 ent.val = page_private(page);
Bob Liu9fb4b7c2012-01-12 17:18:48 -08002403 id = lookup_swap_cgroup_id(ent);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002404 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002405 memcg = mem_cgroup_lookup(id);
Tejun Heoec903c02014-05-13 12:11:01 -04002406 if (memcg && !css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002407 memcg = NULL;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002408 rcu_read_unlock();
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002409 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002410 return memcg;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002411}
2412
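/*
 * Usage sketch (illustrative only): the lookup above returns a css
 * reference that the caller must drop, and locking the page is the
 * caller's responsibility per the comment above. The function name
 * example_inspect_page_memcg is hypothetical.
 */
static void example_inspect_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;

	lock_page(page);
	memcg = try_get_mem_cgroup_from_page(page);
	unlock_page(page);
	if (!memcg)
		return;
	/* ... read-only use of memcg ... */
	css_put(&memcg->css);
}
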
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002413static void lock_page_lru(struct page *page, int *isolated)
2414{
2415 struct zone *zone = page_zone(page);
2416
2417 spin_lock_irq(&zone->lru_lock);
2418 if (PageLRU(page)) {
2419 struct lruvec *lruvec;
2420
2421 lruvec = mem_cgroup_page_lruvec(page, zone);
2422 ClearPageLRU(page);
2423 del_page_from_lru_list(page, lruvec, page_lru(page));
2424 *isolated = 1;
2425 } else
2426 *isolated = 0;
2427}
2428
2429static void unlock_page_lru(struct page *page, int isolated)
2430{
2431 struct zone *zone = page_zone(page);
2432
2433 if (isolated) {
2434 struct lruvec *lruvec;
2435
2436 lruvec = mem_cgroup_page_lruvec(page, zone);
2437 VM_BUG_ON_PAGE(PageLRU(page), page);
2438 SetPageLRU(page);
2439 add_page_to_lru_list(page, lruvec, page_lru(page));
2440 }
2441 spin_unlock_irq(&zone->lru_lock);
2442}
2443
Johannes Weiner00501b52014-08-08 14:19:20 -07002444static void commit_charge(struct page *page, struct mem_cgroup *memcg,
Johannes Weiner6abb5a82014-08-08 14:19:33 -07002445 bool lrucare)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002446{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002447 int isolated;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002448
Johannes Weiner1306a852014-12-10 15:44:52 -08002449 VM_BUG_ON_PAGE(page->mem_cgroup, page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002450
2451 /*
2452	 * In some cases (e.g. SwapCache and FUSE's splice_buf->radixtree), the
2453	 * page may already be on some other mem_cgroup's LRU. Take care of it.
2454 */
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002455 if (lrucare)
2456 lock_page_lru(page, &isolated);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002457
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002458 /*
2459 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08002460 * page->mem_cgroup at this point:
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002461 *
2462 * - the page is uncharged
2463 *
2464 * - the page is off-LRU
2465 *
2466 * - an anonymous fault has exclusive page access, except for
2467 * a locked page table
2468 *
2469 * - a page cache insertion, a swapin fault, or a migration
2470 * have the page locked
2471 */
Johannes Weiner1306a852014-12-10 15:44:52 -08002472 page->mem_cgroup = memcg;
Hugh Dickins3be91272008-02-07 00:14:19 -08002473
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002474 if (lrucare)
2475 unlock_page_lru(page, isolated);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002476}
2477
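/*
 * Two-phase charging sketch (illustrative, not tree code): try_charge()
 * reserves pages against the counters, commit_charge() binds the page
 * to the memcg, and cancel_charge() releases a reservation that was
 * never committed. example_charge_page is a made-up name.
 */
static int example_charge_page(struct page *page, struct mem_cgroup *memcg,
			       gfp_t gfp_mask, bool lrucare)
{
	int ret;

	ret = try_charge(memcg, gfp_mask, 1);
	if (ret)
		return ret;		/* nothing was charged, nothing to undo */
	/*
	 * If the caller's own setup fails between these two steps, it
	 * must call cancel_charge(memcg, 1) instead of committing.
	 */
	commit_charge(page, memcg, lrucare);
	return 0;
}
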
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002478#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydovbd673142014-06-04 16:07:40 -07002479/*
2480 * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or
2481 * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists.
2482 */
2483static DEFINE_MUTEX(memcg_slab_mutex);
2484
Glauber Costa1f458cb2012-12-18 14:22:50 -08002485/*
2486 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2487 * in the memcg_cache_params struct.
2488 */
2489static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2490{
2491 struct kmem_cache *cachep;
2492
2493 VM_BUG_ON(p->is_root_cache);
2494 cachep = p->root_cache;
Qiang Huang7a67d7a2013-11-12 15:08:24 -08002495 return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
Glauber Costa1f458cb2012-12-18 14:22:50 -08002496}
2497
Vladimir Davydovdbf22eb2015-02-10 14:11:41 -08002498int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
2499 unsigned long nr_pages)
Glauber Costa749c5412012-12-18 14:23:01 -08002500{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002501 struct page_counter *counter;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002502 int ret = 0;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002503
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002504 ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
2505 if (ret < 0)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002506 return ret;
2507
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002508 ret = try_charge(memcg, gfp, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002509 if (ret == -EINTR) {
2510 /*
Johannes Weiner00501b52014-08-08 14:19:20 -07002511 * try_charge() chose to bypass to root due to OOM kill or
2512 * fatal signal. Since our only options are to either fail
2513 * the allocation or charge it to this cgroup, do it as a
2514 * temporary condition. But we can't fail. From a kmem/slab
2515 * perspective, the cache has already been selected, by
2516 * mem_cgroup_kmem_get_cache(), so it is too late to change
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002517 * our minds.
2518 *
2519 * This condition will only trigger if the task entered
Johannes Weiner00501b52014-08-08 14:19:20 -07002520 * memcg_charge_kmem in a sane state, but was OOM-killed
2521 * during try_charge() above. Tasks that were already dying
2522 * when the allocation triggers should have been already
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002523 * directed to the root cgroup in memcontrol.h
2524 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002525 page_counter_charge(&memcg->memory, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002526 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002527 page_counter_charge(&memcg->memsw, nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002528 css_get_many(&memcg->css, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002529 ret = 0;
2530 } else if (ret)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002531 page_counter_uncharge(&memcg->kmem, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002532
2533 return ret;
2534}
2535
Vladimir Davydovdbf22eb2015-02-10 14:11:41 -08002536void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002537{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002538 page_counter_uncharge(&memcg->memory, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002539 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002540 page_counter_uncharge(&memcg->memsw, nr_pages);
Glauber Costa7de37682012-12-18 14:22:07 -08002541
Johannes Weiner64f21992014-12-10 15:42:45 -08002542 page_counter_uncharge(&memcg->kmem, nr_pages);
Glauber Costa7de37682012-12-18 14:22:07 -08002543
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002544 css_put_many(&memcg->css, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002545}
2546
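/*
 * Pairing sketch (illustrative): every successful memcg_charge_kmem()
 * must eventually be matched by memcg_uncharge_kmem() with the same
 * page count, or the kmem counter leaks. example_kmem_span is made up.
 */
static int example_kmem_span(struct mem_cgroup *memcg, gfp_t gfp,
			     unsigned long nr_pages)
{
	int ret;

	ret = memcg_charge_kmem(memcg, gfp, nr_pages);
	if (ret)
		return ret;
	/* ... the kmem allocation lives here ... */
	memcg_uncharge_kmem(memcg, nr_pages);
	return 0;
}
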
Glauber Costa2633d7a2012-12-18 14:22:34 -08002547/*
2548 * Helper for accessing a memcg's index. It will be used as an index in the
2549 * child cache array in kmem_cache, and also to derive its name. This function
2550 * will return -1 when this is not a kmem-limited memcg.
2551 */
2552int memcg_cache_id(struct mem_cgroup *memcg)
2553{
2554 return memcg ? memcg->kmemcg_id : -1;
2555}
2556
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002557static int memcg_alloc_cache_id(void)
Glauber Costa55007d82012-12-18 14:22:38 -08002558{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002559 int id, size;
2560 int err;
Glauber Costa55007d82012-12-18 14:22:38 -08002561
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002562 id = ida_simple_get(&kmem_limited_groups,
2563 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2564 if (id < 0)
2565 return id;
2566
2567 if (id < memcg_limited_groups_array_size)
2568 return id;
2569
2570 /*
2571 * There's no space for the new id in memcg_caches arrays,
2572 * so we have to grow them.
2573 */
2574
2575 size = 2 * (id + 1);
Glauber Costa55007d82012-12-18 14:22:38 -08002576 if (size < MEMCG_CACHES_MIN_SIZE)
2577 size = MEMCG_CACHES_MIN_SIZE;
2578 else if (size > MEMCG_CACHES_MAX_SIZE)
2579 size = MEMCG_CACHES_MAX_SIZE;
2580
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002581 mutex_lock(&memcg_slab_mutex);
2582 err = memcg_update_all_caches(size);
2583 mutex_unlock(&memcg_slab_mutex);
2584
2585 if (err) {
2586 ida_simple_remove(&kmem_limited_groups, id);
2587 return err;
2588 }
2589 return id;
2590}
2591
2592static void memcg_free_cache_id(int id)
2593{
2594 ida_simple_remove(&kmem_limited_groups, id);
Glauber Costa55007d82012-12-18 14:22:38 -08002595}
2596
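/*
 * Id lifecycle sketch (illustrative): an id is taken at kmem activation
 * (see memcg_activate_kmem() below) and handed back via
 * memcg_free_cache_id() only on error paths. example_take_cache_id is
 * a made-up name.
 */
static int example_take_cache_id(struct mem_cgroup *memcg)
{
	int id = memcg_alloc_cache_id();

	if (id < 0)
		return id;	/* ida error, or a failed array grow */
	memcg->kmemcg_id = id;
	return 0;
}
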
2597/*
2598 * We should update the current array size iff all cache updates succeed. This
2599 * can only be done from the slab side. The slab mutex needs to be held when
2600 * calling this.
2601 */
2602void memcg_update_array_size(int num)
2603{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002604 memcg_limited_groups_array_size = num;
Glauber Costa55007d82012-12-18 14:22:38 -08002605}
2606
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002607static void memcg_register_cache(struct mem_cgroup *memcg,
2608 struct kmem_cache *root_cache)
Glauber Costa2633d7a2012-12-18 14:22:34 -08002609{
Vladimir Davydov93f39ee2014-06-04 16:08:24 -07002610 static char memcg_name_buf[NAME_MAX + 1]; /* protected by
2611 memcg_slab_mutex */
Vladimir Davydovbd673142014-06-04 16:07:40 -07002612 struct kmem_cache *cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002613 int id;
2614
Vladimir Davydovbd673142014-06-04 16:07:40 -07002615 lockdep_assert_held(&memcg_slab_mutex);
2616
2617 id = memcg_cache_id(memcg);
Glauber Costad7f25f82012-12-18 14:22:40 -08002618
Vladimir Davydov2edefe12014-01-23 15:53:02 -08002619 /*
Vladimir Davydovbd673142014-06-04 16:07:40 -07002620 * Since per-memcg caches are created asynchronously on first
2621 * allocation (see memcg_kmem_get_cache()), several threads can try to
2622 * create the same cache, but only one of them may succeed.
Vladimir Davydov2edefe12014-01-23 15:53:02 -08002623 */
Vladimir Davydovbd673142014-06-04 16:07:40 -07002624 if (cache_from_memcg_idx(root_cache, id))
2625 return;
Vladimir Davydov2edefe12014-01-23 15:53:02 -08002626
Vladimir Davydov073ee1c2014-06-04 16:08:23 -07002627 cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1);
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002628 cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002629 /*
2630 * If we could not create a memcg cache, do not complain, because
2631 * that's not critical at all as we can always proceed with the root
2632 * cache.
2633 */
2634 if (!cachep)
2635 return;
2636
2637 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
Vladimir Davydov1aa13252014-01-23 15:52:58 -08002638
Vladimir Davydov1aa13252014-01-23 15:52:58 -08002639 /*
Vladimir Davydov959c8962014-01-23 15:52:59 -08002640 * Since readers won't lock (see cache_from_memcg_idx()), we need a
2641 * barrier here to ensure nobody will see the kmem_cache partially
2642 * initialized.
Vladimir Davydov1aa13252014-01-23 15:52:58 -08002643 */
Vladimir Davydov959c8962014-01-23 15:52:59 -08002644 smp_wmb();
2645
Vladimir Davydovbd673142014-06-04 16:07:40 -07002646 BUG_ON(root_cache->memcg_params->memcg_caches[id]);
2647 root_cache->memcg_params->memcg_caches[id] = cachep;
Vladimir Davydov1aa13252014-01-23 15:52:58 -08002648}
2649
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002650static void memcg_unregister_cache(struct kmem_cache *cachep)
Vladimir Davydov1aa13252014-01-23 15:52:58 -08002651{
Vladimir Davydovbd673142014-06-04 16:07:40 -07002652 struct kmem_cache *root_cache;
Vladimir Davydov1aa13252014-01-23 15:52:58 -08002653 struct mem_cgroup *memcg;
2654 int id;
2655
Vladimir Davydovbd673142014-06-04 16:07:40 -07002656 lockdep_assert_held(&memcg_slab_mutex);
Glauber Costad7f25f82012-12-18 14:22:40 -08002657
Vladimir Davydovbd673142014-06-04 16:07:40 -07002658 BUG_ON(is_root_cache(cachep));
Vladimir Davydov2edefe12014-01-23 15:53:02 -08002659
Vladimir Davydovbd673142014-06-04 16:07:40 -07002660 root_cache = cachep->memcg_params->root_cache;
2661 memcg = cachep->memcg_params->memcg;
Vladimir Davydov96403da2014-01-23 15:53:01 -08002662 id = memcg_cache_id(memcg);
Glauber Costad7f25f82012-12-18 14:22:40 -08002663
Vladimir Davydovbd673142014-06-04 16:07:40 -07002664 BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep);
2665 root_cache->memcg_params->memcg_caches[id] = NULL;
Glauber Costad7f25f82012-12-18 14:22:40 -08002666
Vladimir Davydovbd673142014-06-04 16:07:40 -07002667 list_del(&cachep->memcg_params->list);
2668
2669 kmem_cache_destroy(cachep);
Glauber Costa2633d7a2012-12-18 14:22:34 -08002670}
2671
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002672int __memcg_cleanup_cache_params(struct kmem_cache *s)
Glauber Costa7cf27982012-12-18 14:22:55 -08002673{
2674 struct kmem_cache *c;
Vladimir Davydovb8529902014-04-07 15:39:28 -07002675 int i, failed = 0;
Glauber Costa7cf27982012-12-18 14:22:55 -08002676
Vladimir Davydovbd673142014-06-04 16:07:40 -07002677 mutex_lock(&memcg_slab_mutex);
Qiang Huang7a67d7a2013-11-12 15:08:24 -08002678 for_each_memcg_cache_index(i) {
2679 c = cache_from_memcg_idx(s, i);
Glauber Costa7cf27982012-12-18 14:22:55 -08002680 if (!c)
2681 continue;
2682
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002683 memcg_unregister_cache(c);
Vladimir Davydovb8529902014-04-07 15:39:28 -07002684
2685 if (cache_from_memcg_idx(s, i))
2686 failed++;
Glauber Costa7cf27982012-12-18 14:22:55 -08002687 }
Vladimir Davydovbd673142014-06-04 16:07:40 -07002688 mutex_unlock(&memcg_slab_mutex);
Vladimir Davydovb8529902014-04-07 15:39:28 -07002689 return failed;
Glauber Costa7cf27982012-12-18 14:22:55 -08002690}
2691
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002692static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
Glauber Costa1f458cb2012-12-18 14:22:50 -08002693{
2694 struct kmem_cache *cachep;
Vladimir Davydovbd673142014-06-04 16:07:40 -07002695 struct memcg_cache_params *params, *tmp;
Glauber Costa1f458cb2012-12-18 14:22:50 -08002696
2697 if (!memcg_kmem_is_active(memcg))
2698 return;
2699
Vladimir Davydovbd673142014-06-04 16:07:40 -07002700 mutex_lock(&memcg_slab_mutex);
2701 list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
Glauber Costa1f458cb2012-12-18 14:22:50 -08002702 cachep = memcg_params_to_cache(params);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002703 memcg_unregister_cache(cachep);
Glauber Costa1f458cb2012-12-18 14:22:50 -08002704 }
Vladimir Davydovbd673142014-06-04 16:07:40 -07002705 mutex_unlock(&memcg_slab_mutex);
Glauber Costa1f458cb2012-12-18 14:22:50 -08002706}
2707
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002708struct memcg_register_cache_work {
Vladimir Davydov5722d092014-04-07 15:39:24 -07002709 struct mem_cgroup *memcg;
2710 struct kmem_cache *cachep;
2711 struct work_struct work;
2712};
2713
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002714static void memcg_register_cache_func(struct work_struct *w)
Glauber Costad7f25f82012-12-18 14:22:40 -08002715{
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002716 struct memcg_register_cache_work *cw =
2717 container_of(w, struct memcg_register_cache_work, work);
Vladimir Davydov5722d092014-04-07 15:39:24 -07002718 struct mem_cgroup *memcg = cw->memcg;
2719 struct kmem_cache *cachep = cw->cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002720
Vladimir Davydovbd673142014-06-04 16:07:40 -07002721 mutex_lock(&memcg_slab_mutex);
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002722 memcg_register_cache(memcg, cachep);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002723 mutex_unlock(&memcg_slab_mutex);
2724
Vladimir Davydov5722d092014-04-07 15:39:24 -07002725 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002726 kfree(cw);
2727}
2728
2729/*
2730 * Enqueue the creation of a per-memcg kmem_cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002731 */
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002732static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
2733 struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002734{
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002735 struct memcg_register_cache_work *cw;
Glauber Costad7f25f82012-12-18 14:22:40 -08002736
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002737 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002738 if (!cw)
Glauber Costad7f25f82012-12-18 14:22:40 -08002739 return;
Vladimir Davydov8135be52014-12-12 16:56:38 -08002740
2741 css_get(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002742
2743 cw->memcg = memcg;
2744 cw->cachep = cachep;
2745
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002746 INIT_WORK(&cw->work, memcg_register_cache_func);
Glauber Costad7f25f82012-12-18 14:22:40 -08002747 schedule_work(&cw->work);
2748}
2749
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002750static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
2751 struct kmem_cache *cachep)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002752{
2753 /*
2754 * We need to stop accounting when we kmalloc, because if the
2755 * corresponding kmalloc cache is not yet created, the first allocation
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002756 * in __memcg_schedule_register_cache will recurse.
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002757 *
2758 * However, it is better to enclose the whole function. Depending on
2759 * the debugging options enabled, INIT_WORK(), for instance, can
2760	 * trigger an allocation. This, too, will make us recurse. Because at
2761 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2762 * the safest choice is to do it like this, wrapping the whole function.
2763 */
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002764 current->memcg_kmem_skip_account = 1;
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002765 __memcg_schedule_register_cache(memcg, cachep);
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002766 current->memcg_kmem_skip_account = 0;
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002767}
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002768
Glauber Costad7f25f82012-12-18 14:22:40 -08002769/*
2770 * Return the kmem_cache we're supposed to use for a slab allocation.
2771 * We try to use the current memcg's version of the cache.
2772 *
2773 * If the cache does not exist yet and we are the first user of it,
2774 * we either create it immediately, if possible, or create it asynchronously
2775 * in a workqueue.
2776 * In the latter case, we will let the current allocation go through with
2777 * the original cache.
2778 *
2779 * Can't be called in interrupt context or from kernel threads.
2780 * This function needs to be called with rcu_read_lock() held.
2781 */
Zhang Zhen056b7cc2014-12-12 16:55:38 -08002782struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002783{
2784 struct mem_cgroup *memcg;
Vladimir Davydov959c8962014-01-23 15:52:59 -08002785 struct kmem_cache *memcg_cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002786
2787 VM_BUG_ON(!cachep->memcg_params);
2788 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
2789
Vladimir Davydov9d100c52014-12-12 16:54:53 -08002790 if (current->memcg_kmem_skip_account)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002791 return cachep;
2792
Vladimir Davydov8135be52014-12-12 16:56:38 -08002793 memcg = get_mem_cgroup_from_mm(current->mm);
Vladimir Davydovcf2b8fb2014-10-09 15:28:59 -07002794 if (!memcg_kmem_is_active(memcg))
Li Zefanca0dde92013-04-29 15:08:57 -07002795 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08002796
Vladimir Davydov959c8962014-01-23 15:52:59 -08002797 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
Vladimir Davydov8135be52014-12-12 16:56:38 -08002798 if (likely(memcg_cachep))
2799 return memcg_cachep;
Li Zefanca0dde92013-04-29 15:08:57 -07002800
2801 /*
2802 * If we are in a safe context (can wait, and not in interrupt
2803	 * context), we could be predictable and return right away.
2804 * This would guarantee that the allocation being performed
2805 * already belongs in the new cache.
2806 *
2807 * However, there are some clashes that can arrive from locking.
2808 * For instance, because we acquire the slab_mutex while doing
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002809 * memcg_create_kmem_cache, this means no further allocation
2810 * could happen with the slab_mutex held. So it's better to
2811 * defer everything.
Li Zefanca0dde92013-04-29 15:08:57 -07002812 */
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002813 memcg_schedule_register_cache(memcg, cachep);
Li Zefanca0dde92013-04-29 15:08:57 -07002814out:
Vladimir Davydov8135be52014-12-12 16:56:38 -08002815 css_put(&memcg->css);
Li Zefanca0dde92013-04-29 15:08:57 -07002816 return cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002817}
Glauber Costad7f25f82012-12-18 14:22:40 -08002818
Vladimir Davydov8135be52014-12-12 16:56:38 -08002819void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2820{
2821 if (!is_root_cache(cachep))
2822 css_put(&cachep->memcg_params->memcg->css);
2823}
2824
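/*
 * Consumer sketch (illustrative; the real entry points are the
 * memcg_kmem_get_cache()/memcg_kmem_put_cache() wrappers in
 * memcontrol.h): the slab hot path swaps in the per-memcg cache for
 * the allocation and drops the css reference afterwards.
 * example_slab_alloc is a made-up name.
 */
static void *example_slab_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	void *obj;

	cachep = __memcg_kmem_get_cache(cachep);
	obj = kmem_cache_alloc(cachep, gfp);
	__memcg_kmem_put_cache(cachep);
	return obj;
}
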
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002825/*
2826 * We need to verify if the allocation against current->mm->owner's memcg is
2827 * possible for the given order. But the page is not allocated yet, so we'll
2828 * need a further commit step to do the final arrangements.
2829 *
2830 * It is possible for the task to switch cgroups in the meantime, so at
2831 * commit time, we can't rely on task conversion any longer. We'll then use
2832 * the handle argument to return to the caller which cgroup we should commit
2833 * against. We could also return the memcg directly and avoid the pointer
2834 * passing, but a boolean return value gives better semantics considering
2835 * the compiled-out case as well.
2836 *
2837 * Returning true means the allocation is possible.
2838 */
2839bool
2840__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
2841{
2842 struct mem_cgroup *memcg;
2843 int ret;
2844
2845 *_memcg = NULL;
Glauber Costa6d42c232013-07-08 16:00:00 -07002846
Johannes Weinerdf381972014-04-07 15:37:43 -07002847 memcg = get_mem_cgroup_from_mm(current->mm);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002848
Vladimir Davydovcf2b8fb2014-10-09 15:28:59 -07002849 if (!memcg_kmem_is_active(memcg)) {
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002850 css_put(&memcg->css);
2851 return true;
2852 }
2853
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002854 ret = memcg_charge_kmem(memcg, gfp, 1 << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002855 if (!ret)
2856 *_memcg = memcg;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002857
2858 css_put(&memcg->css);
2859 return (ret == 0);
2860}
2861
2862void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
2863 int order)
2864{
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002865 VM_BUG_ON(mem_cgroup_is_root(memcg));
2866
2867 /* The page allocation failed. Revert */
2868 if (!page) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002869 memcg_uncharge_kmem(memcg, 1 << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002870 return;
2871 }
Johannes Weiner1306a852014-12-10 15:44:52 -08002872 page->mem_cgroup = memcg;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002873}
2874
2875void __memcg_kmem_uncharge_pages(struct page *page, int order)
2876{
Johannes Weiner1306a852014-12-10 15:44:52 -08002877 struct mem_cgroup *memcg = page->mem_cgroup;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002878
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002879 if (!memcg)
2880 return;
2881
Sasha Levin309381fea2014-01-23 15:52:54 -08002882 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Johannes Weiner29833312014-12-10 15:44:02 -08002883
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002884 memcg_uncharge_kmem(memcg, 1 << order);
Johannes Weiner1306a852014-12-10 15:44:52 -08002885 page->mem_cgroup = NULL;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002886}
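
/*
 * Page-allocator sequence sketch (illustrative; the real glue is the
 * memcg_kmem_newpage_charge()/memcg_kmem_commit_charge() wrappers in
 * memcontrol.h): charge first, allocate, then commit, letting the
 * commit step revert on allocation failure. example_kmem_alloc_pages
 * is a made-up name.
 */
static struct page *example_kmem_alloc_pages(gfp_t gfp, int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page *page;

	if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;			/* charge denied */
	page = alloc_pages(gfp, order);
	if (memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
	/* on free: __memcg_kmem_uncharge_pages(page, order) */
	return page;
}
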
2887#endif /* CONFIG_MEMCG_KMEM */
2888
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002889#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2890
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002891/*
2892 * Because tail pages are not marked as "used", set them up here.
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002893 * We're under zone->lru_lock, 'splitting on pmd' and compound_lock.
2894 * charge/uncharge will never happen and move_account() is done under
2895 * compound_lock(), so we don't have to take care of races.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002896 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002897void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002898{
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002899 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002900
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002901 if (mem_cgroup_disabled())
2902 return;
David Rientjesb070e652013-05-07 16:18:09 -07002903
Johannes Weiner29833312014-12-10 15:44:02 -08002904 for (i = 1; i < HPAGE_PMD_NR; i++)
Johannes Weiner1306a852014-12-10 15:44:52 -08002905 head[i].mem_cgroup = head->mem_cgroup;
Michal Hockob9982f82014-12-10 15:43:51 -08002906
Johannes Weiner1306a852014-12-10 15:44:52 -08002907 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
David Rientjesb070e652013-05-07 16:18:09 -07002908 HPAGE_PMD_NR);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002909}
Hugh Dickins12d27102012-01-12 17:19:52 -08002910#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002911
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002912/**
Johannes Weinerde3638d2011-03-23 16:42:28 -07002913 * mem_cgroup_move_account - move account of the page
Johannes Weiner5564e882011-03-23 16:42:29 -07002914 * @page: the page
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002915 * @nr_pages: number of regular pages (>1 for huge pages)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002916 * @from: mem_cgroup which the page is moved from.
2917 * @to: mem_cgroup which the page is moved to. @from != @to.
2918 *
2919 * The caller must confirm the following.
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002920 * - page is not on LRU (isolate_page() is useful.)
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002921 * - compound_lock is held when nr_pages > 1
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002922 *
KAMEZAWA Hiroyuki2f3479b2012-05-29 15:07:04 -07002923 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
2924 * from old cgroup.
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002925 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002926static int mem_cgroup_move_account(struct page *page,
2927 unsigned int nr_pages,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002928 struct mem_cgroup *from,
KAMEZAWA Hiroyuki2f3479b2012-05-29 15:07:04 -07002929 struct mem_cgroup *to)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002930{
Johannes Weinerde3638d2011-03-23 16:42:28 -07002931 unsigned long flags;
2932 int ret;
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002933
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002934 VM_BUG_ON(from == to);
Sasha Levin309381fea2014-01-23 15:52:54 -08002935 VM_BUG_ON_PAGE(PageLRU(page), page);
Johannes Weinerde3638d2011-03-23 16:42:28 -07002936 /*
2937	 * The page is isolated from the LRU, so the collapse function
2938	 * will not handle this page. But page splitting can happen, so
2939	 * do this check under compound_page_lock(), which the caller
2940	 * should hold.
2941 */
2942 ret = -EBUSY;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002943 if (nr_pages > 1 && !PageTransHuge(page))
Johannes Weinerde3638d2011-03-23 16:42:28 -07002944 goto out;
2945
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002946 /*
Johannes Weiner1306a852014-12-10 15:44:52 -08002947 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002948 * of its source page while we change it: page migration takes
2949 * both pages off the LRU, but page cache replacement doesn't.
2950 */
2951 if (!trylock_page(page))
2952 goto out;
Johannes Weinerde3638d2011-03-23 16:42:28 -07002953
2954 ret = -EINVAL;
Johannes Weiner1306a852014-12-10 15:44:52 -08002955 if (page->mem_cgroup != from)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002956 goto out_unlock;
Johannes Weinerde3638d2011-03-23 16:42:28 -07002957
Johannes Weiner354a4782014-12-10 15:44:05 -08002958 spin_lock_irqsave(&from->move_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002959
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002960 if (!PageAnon(page) && page_mapped(page)) {
Johannes Weiner59d1d252014-04-07 15:37:40 -07002961 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
2962 nr_pages);
2963 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
2964 nr_pages);
2965 }
Sha Zhengju3ea67d02013-09-12 15:13:53 -07002966
Johannes Weiner59d1d252014-04-07 15:37:40 -07002967 if (PageWriteback(page)) {
2968 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
2969 nr_pages);
2970 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
2971 nr_pages);
2972 }
Sha Zhengju3ea67d02013-09-12 15:13:53 -07002973
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002974 /*
Johannes Weiner1306a852014-12-10 15:44:52 -08002975 * It is safe to change page->mem_cgroup here because the page
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002976 * is referenced, charged, and isolated - we can't race with
2977 * uncharging, charging, migration, or LRU putback.
2978 */
Balbir Singhd69b0422009-06-17 16:26:34 -07002979
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002980 /* caller should have done css_get */
Johannes Weiner1306a852014-12-10 15:44:52 -08002981 page->mem_cgroup = to;
Johannes Weiner354a4782014-12-10 15:44:05 -08002982 spin_unlock_irqrestore(&from->move_lock, flags);
2983
Johannes Weinerde3638d2011-03-23 16:42:28 -07002984 ret = 0;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002985
2986 local_irq_disable();
2987 mem_cgroup_charge_statistics(to, page, nr_pages);
Johannes Weiner5564e882011-03-23 16:42:29 -07002988 memcg_check_events(to, page);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002989 mem_cgroup_charge_statistics(from, page, -nr_pages);
Johannes Weiner5564e882011-03-23 16:42:29 -07002990 memcg_check_events(from, page);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002991 local_irq_enable();
2992out_unlock:
2993 unlock_page(page);
Johannes Weinerde3638d2011-03-23 16:42:28 -07002994out:
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002995 return ret;
2996}
2997
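/*
 * Caller sketch (illustrative): move_account() wants the page off the
 * LRU, per the kernel-doc above; the move-charge path does roughly
 * this for a single regular page. example_move_page is a made-up name.
 */
static int example_move_page(struct page *page, struct mem_cgroup *from,
			     struct mem_cgroup *to)
{
	int ret;

	if (isolate_lru_page(page))
		return -EBUSY;
	ret = mem_cgroup_move_account(page, 1, from, to);
	putback_lru_page(page);
	return ret;
}
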
Andrew Mortonc255a452012-07-31 16:43:02 -07002998#ifdef CONFIG_MEMCG_SWAP
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002999static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
3000 bool charge)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003001{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07003002 int val = (charge) ? 1 : -1;
3003 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003004}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003005
3006/**
3007 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3008 * @entry: swap entry to be moved
3009 * @from: mem_cgroup which the entry is moved from
3010 * @to: mem_cgroup which the entry is moved to
3011 *
3012 * It succeeds only when the swap_cgroup's record for this entry is the same
3013 * as the mem_cgroup's id of @from.
3014 *
3015 * Returns 0 on success, -EINVAL on failure.
3016 *
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003017 * The caller must have charged to @to, IOW, called page_counter_charge()
Daisuke Nishimura02491442010-03-10 15:22:17 -08003018 * for both memory and memsw, and called css_get().
3019 */
3020static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07003021 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08003022{
3023 unsigned short old_id, new_id;
3024
Li Zefan34c00c32013-09-23 16:56:01 +08003025 old_id = mem_cgroup_id(from);
3026 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08003027
3028 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08003029 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08003030 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura02491442010-03-10 15:22:17 -08003031 return 0;
3032 }
3033 return -EINVAL;
3034}
3035#else
3036static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07003037 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08003038{
3039 return -EINVAL;
3040}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003041#endif
3042
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003043static DEFINE_MUTEX(memcg_limit_mutex);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07003044
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08003045static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003046 unsigned long limit)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003047{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003048 unsigned long curusage;
3049 unsigned long oldusage;
3050 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003051 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003052 int ret;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003053
3054 /*
3055	 * To keep hierarchical_reclaim simple, how long we should retry
3056	 * depends on the caller. We set our retry-count to be a function
3057	 * of the number of children which we should visit in this loop.
3058 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003059 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
3060 mem_cgroup_count_children(memcg);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003061
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003062 oldusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003063
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003064 do {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003065 if (signal_pending(current)) {
3066 ret = -EINTR;
3067 break;
3068 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003069
3070 mutex_lock(&memcg_limit_mutex);
3071 if (limit > memcg->memsw.limit) {
3072 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003073 ret = -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003074 break;
3075 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003076 if (limit > memcg->memory.limit)
3077 enlarge = true;
3078 ret = page_counter_limit(&memcg->memory, limit);
3079 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003080
3081 if (!ret)
3082 break;
3083
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003084 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
3085
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003086 curusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003087 /* Usage is reduced ? */
Andrew Mortonf894ffa2013-09-12 15:13:35 -07003088 if (curusage >= oldusage)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003089 retry_count--;
3090 else
3091 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003092 } while (retry_count);
3093
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003094 if (!ret && enlarge)
3095 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08003096
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003097 return ret;
3098}
3099
Li Zefan338c8432009-06-17 16:27:15 -07003100static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003101 unsigned long limit)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003102{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003103 unsigned long curusage;
3104 unsigned long oldusage;
3105 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003106 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003107 int ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003108
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003109	/* see mem_cgroup_resize_limit */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003110 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
3111 mem_cgroup_count_children(memcg);
3112
3113 oldusage = page_counter_read(&memcg->memsw);
3114
3115 do {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003116 if (signal_pending(current)) {
3117 ret = -EINTR;
3118 break;
3119 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003120
3121 mutex_lock(&memcg_limit_mutex);
3122 if (limit < memcg->memory.limit) {
3123 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003124 ret = -EINVAL;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003125 break;
3126 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003127 if (limit > memcg->memsw.limit)
3128 enlarge = true;
3129 ret = page_counter_limit(&memcg->memsw, limit);
3130 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003131
3132 if (!ret)
3133 break;
3134
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003135 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
3136
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003137 curusage = page_counter_read(&memcg->memsw);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003138 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003139 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003140 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003141 else
3142 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003143 } while (retry_count);
3144
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003145 if (!ret && enlarge)
3146 memcg_oom_recover(memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003147
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003148 return ret;
3149}
3150
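/*
 * Ordering sketch (illustrative): the checks above keep
 * memory.limit <= memsw.limit at all times, so raising both limits
 * (assuming new_limit is at least the current memory limit) means
 * growing memsw first. example_raise_both_limits is a made-up name.
 */
static int example_raise_both_limits(struct mem_cgroup *memcg,
				     unsigned long new_limit)
{
	int ret;

	ret = mem_cgroup_resize_memsw_limit(memcg, new_limit);
	if (!ret)
		ret = mem_cgroup_resize_limit(memcg, new_limit);
	return ret;
}
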
Andrew Morton0608f432013-09-24 15:27:41 -07003151unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3152 gfp_t gfp_mask,
3153 unsigned long *total_scanned)
3154{
3155 unsigned long nr_reclaimed = 0;
3156 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3157 unsigned long reclaimed;
3158 int loop = 0;
3159 struct mem_cgroup_tree_per_zone *mctz;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003160 unsigned long excess;
Andrew Morton0608f432013-09-24 15:27:41 -07003161 unsigned long nr_scanned;
3162
3163 if (order > 0)
3164 return 0;
3165
3166 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3167 /*
3168	 * This loop can run for a while, especially if mem_cgroups
3169	 * continuously keep exceeding their soft limit and putting the
3170	 * system under pressure.
3171 */
3172 do {
3173 if (next_mz)
3174 mz = next_mz;
3175 else
3176 mz = mem_cgroup_largest_soft_limit_node(mctz);
3177 if (!mz)
3178 break;
3179
3180 nr_scanned = 0;
3181 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
3182 gfp_mask, &nr_scanned);
3183 nr_reclaimed += reclaimed;
3184 *total_scanned += nr_scanned;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07003185 spin_lock_irq(&mctz->lock);
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08003186 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07003187
3188 /*
3189 * If we failed to reclaim anything from this memory cgroup
3190 * it is time to move on to the next cgroup
3191 */
3192 next_mz = NULL;
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08003193 if (!reclaimed)
3194 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3195
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003196 excess = soft_limit_excess(mz->memcg);
Andrew Morton0608f432013-09-24 15:27:41 -07003197 /*
3198 * One school of thought says that we should not add
3199 * back the node to the tree if reclaim returns 0.
3200	 * But our reclaim could return 0 simply because, due
3201	 * to priority, we are exposing a smaller subset of
3202	 * memory to reclaim from. Consider this a longer-term
3203	 * TODO.
3204 */
3205 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07003206 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07003207 spin_unlock_irq(&mctz->lock);
Andrew Morton0608f432013-09-24 15:27:41 -07003208 css_put(&mz->memcg->css);
3209 loop++;
3210 /*
3211 * Could not reclaim anything and there are no more
3212 * mem cgroups to try or we seem to be looping without
3213 * reclaiming anything.
3214 */
3215 if (!nr_reclaimed &&
3216 (next_mz == NULL ||
3217 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3218 break;
3219 } while (!nr_reclaimed);
3220 if (next_mz)
3221 css_put(&next_mz->memcg->css);
3222 return nr_reclaimed;
3223}
3224
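/*
 * Caller sketch (illustrative; the real call site lives in vmscan.c):
 * global reclaim consults the soft-limit tree before doing generic
 * reclaim work. example_soft_reclaim is a made-up name.
 */
static unsigned long example_soft_reclaim(struct zone *zone, gfp_t gfp_mask)
{
	unsigned long nr_soft_scanned = 0;

	return mem_cgroup_soft_limit_reclaim(zone, 0, gfp_mask,
					     &nr_soft_scanned);
}
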
Tejun Heoea280e72014-05-16 13:22:48 -04003225/*
3226 * Test whether @memcg has children, dead or alive. Note that this
3227 * function doesn't care whether @memcg has use_hierarchy enabled and
3228 * returns %true if there are child csses according to the cgroup
3229 * hierarchy. Testing use_hierarchy is the caller's responsibility.
3230 */
Glauber Costab5f99b52013-02-22 16:34:53 -08003231static inline bool memcg_has_children(struct mem_cgroup *memcg)
3232{
Tejun Heoea280e72014-05-16 13:22:48 -04003233 bool ret;
3234
Johannes Weiner696ac172013-10-31 16:34:15 -07003235 /*
Tejun Heoea280e72014-05-16 13:22:48 -04003236 * The lock does not prevent addition or deletion of children, but
3237 * it prevents a new child from being initialized based on this
3238 * parent in css_online(), so it's enough to decide whether
3239 * hierarchically inherited attributes can still be changed or not.
Johannes Weiner696ac172013-10-31 16:34:15 -07003240 */
Tejun Heoea280e72014-05-16 13:22:48 -04003241 lockdep_assert_held(&memcg_create_mutex);
3242
3243 rcu_read_lock();
3244 ret = css_next_child(NULL, &memcg->css);
3245 rcu_read_unlock();
3246 return ret;
Glauber Costab5f99b52013-02-22 16:34:53 -08003247}
3248
3249/*
Michal Hockoc26251f2012-10-26 13:37:28 +02003250 * Reclaims as many pages from the given memcg as possible and moves
3251 * the rest to the parent.
3252 *
3253 * Caller is responsible for holding css reference for memcg.
3254 */
3255static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3256{
3257 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02003258
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003259	/* we call try-to-free pages to make this cgroup empty */
3260 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003261 /* try to free all pages in this cgroup */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003262 while (nr_retries && page_counter_read(&memcg->memory)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003263 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003264
Michal Hockoc26251f2012-10-26 13:37:28 +02003265 if (signal_pending(current))
3266 return -EINTR;
3267
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003268 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3269 GFP_KERNEL, true);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003270 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003271 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003272 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02003273 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003274 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003275
3276 }
Michal Hockoab5196c2012-10-26 13:37:32 +02003277
3278 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003279}
3280
Tejun Heo6770c642014-05-13 12:16:21 -04003281static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3282 char *buf, size_t nbytes,
3283 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003284{
Tejun Heo6770c642014-05-13 12:16:21 -04003285 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02003286
Michal Hockod8423012012-10-26 13:37:29 +02003287 if (mem_cgroup_is_root(memcg))
3288 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04003289 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003290}
3291
Tejun Heo182446d2013-08-08 20:11:24 -04003292static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3293 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003294{
Tejun Heo182446d2013-08-08 20:11:24 -04003295 return mem_cgroup_from_css(css)->use_hierarchy;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003296}
3297
Tejun Heo182446d2013-08-08 20:11:24 -04003298static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3299 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003300{
3301 int retval = 0;
Tejun Heo182446d2013-08-08 20:11:24 -04003302 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04003303 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003304
Glauber Costa09998212013-02-22 16:34:55 -08003305 mutex_lock(&memcg_create_mutex);
Glauber Costa567fb432012-07-31 16:43:07 -07003306
3307 if (memcg->use_hierarchy == val)
3308 goto out;
3309
Balbir Singh18f59ea2009-01-07 18:08:07 -08003310 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003311 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08003312 * in the child subtrees. If it is unset, then the change can
3313 * occur, provided the current cgroup has no children.
3314 *
3315	 * For the root cgroup, parent_memcg is NULL; we allow the value to
3316	 * be set if there are no children.
3317 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003318 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08003319 (val == 1 || val == 0)) {
Tejun Heoea280e72014-05-16 13:22:48 -04003320 if (!memcg_has_children(memcg))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003321 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003322 else
3323 retval = -EBUSY;
3324 } else
3325 retval = -EINVAL;
Glauber Costa567fb432012-07-31 16:43:07 -07003326
3327out:
Glauber Costa09998212013-02-22 16:34:55 -08003328 mutex_unlock(&memcg_create_mutex);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003329
3330 return retval;
3331}
3332
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003333static unsigned long tree_stat(struct mem_cgroup *memcg,
3334 enum mem_cgroup_stat_index idx)
Johannes Weinerce00a962014-09-05 08:43:57 -04003335{
3336 struct mem_cgroup *iter;
3337 long val = 0;
3338
3339 /* Per-cpu values can be negative, use a signed accumulator */
3340 for_each_mem_cgroup_tree(iter, memcg)
3341 val += mem_cgroup_read_stat(iter, idx);
3342
3343 if (val < 0) /* race ? */
3344 val = 0;
3345 return val;
3346}
3347
3348static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3349{
3350 u64 val;
3351
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003352 if (mem_cgroup_is_root(memcg)) {
3353 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
3354 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
3355 if (swap)
3356 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
3357 } else {
Johannes Weinerce00a962014-09-05 08:43:57 -04003358 if (!swap)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003359 val = page_counter_read(&memcg->memory);
Johannes Weinerce00a962014-09-05 08:43:57 -04003360 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003361 val = page_counter_read(&memcg->memsw);
Johannes Weinerce00a962014-09-05 08:43:57 -04003362 }
Johannes Weinerce00a962014-09-05 08:43:57 -04003363 return val << PAGE_SHIFT;
3364}
3365
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003366enum {
3367 RES_USAGE,
3368 RES_LIMIT,
3369 RES_MAX_USAGE,
3370 RES_FAILCNT,
3371 RES_SOFT_LIMIT,
3372};
Johannes Weinerce00a962014-09-05 08:43:57 -04003373
Tejun Heo791badb2013-12-05 12:28:02 -05003374static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07003375 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003376{
Tejun Heo182446d2013-08-08 20:11:24 -04003377 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003378 struct page_counter *counter;
Tejun Heoaf36f902012-04-01 12:09:55 -07003379
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003380 switch (MEMFILE_TYPE(cft->private)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003381 case _MEM:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003382 counter = &memcg->memory;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003383 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003384 case _MEMSWAP:
3385 counter = &memcg->memsw;
3386 break;
3387 case _KMEM:
3388 counter = &memcg->kmem;
3389 break;
3390 default:
3391 BUG();
3392 }
3393
3394 switch (MEMFILE_ATTR(cft->private)) {
3395 case RES_USAGE:
3396 if (counter == &memcg->memory)
3397 return mem_cgroup_usage(memcg, false);
3398 if (counter == &memcg->memsw)
3399 return mem_cgroup_usage(memcg, true);
3400 return (u64)page_counter_read(counter) * PAGE_SIZE;
3401 case RES_LIMIT:
3402 return (u64)counter->limit * PAGE_SIZE;
3403 case RES_MAX_USAGE:
3404 return (u64)counter->watermark * PAGE_SIZE;
3405 case RES_FAILCNT:
3406 return counter->failcnt;
3407 case RES_SOFT_LIMIT:
3408 return (u64)memcg->soft_limit * PAGE_SIZE;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003409 default:
3410 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003411 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003412}
Glauber Costa510fc4e2012-12-18 14:21:47 -08003413
Glauber Costa510fc4e2012-12-18 14:21:47 -08003414#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003415static int memcg_activate_kmem(struct mem_cgroup *memcg,
3416 unsigned long nr_pages)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003417{
3418 int err = 0;
3419 int memcg_id;
3420
3421 if (memcg_kmem_is_active(memcg))
3422 return 0;
3423
3424 /*
Glauber Costa510fc4e2012-12-18 14:21:47 -08003425 * For simplicity, we won't allow this to be disabled. It also can't
3426	 * be changed if the cgroup has children already, or if tasks have
3427 * already joined.
3428 *
3429 * If tasks join before we set the limit, a person looking at
3430	 * kmem.usage_in_bytes will have no way to determine when accounting
3431	 * started, which makes the value quite meaningless.
3432 *
3433	 * After the limit is first set, changes in its value are
3434 * of course permitted.
Glauber Costa510fc4e2012-12-18 14:21:47 -08003435 */
Glauber Costa09998212013-02-22 16:34:55 -08003436 mutex_lock(&memcg_create_mutex);
Tejun Heoea280e72014-05-16 13:22:48 -04003437 if (cgroup_has_tasks(memcg->css.cgroup) ||
3438 (memcg->use_hierarchy && memcg_has_children(memcg)))
Vladimir Davydovd6441632014-01-23 15:53:09 -08003439 err = -EBUSY;
Glauber Costa09998212013-02-22 16:34:55 -08003440 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003441 if (err)
3442 goto out;
3443
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003444 memcg_id = memcg_alloc_cache_id();
Vladimir Davydovd6441632014-01-23 15:53:09 -08003445 if (memcg_id < 0) {
3446 err = memcg_id;
3447 goto out;
3448 }
3449
Vladimir Davydovd6441632014-01-23 15:53:09 -08003450 /*
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003451	 * We couldn't have accounted to this cgroup, because it hasn't been
3452 * activated yet, so this should succeed.
Vladimir Davydovd6441632014-01-23 15:53:09 -08003453 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003454 err = page_counter_limit(&memcg->kmem, nr_pages);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003455 VM_BUG_ON(err);
3456
3457 static_key_slow_inc(&memcg_kmem_enabled_key);
3458 /*
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003459	 * A memory cgroup is considered kmem-active as soon as it gets a
3460 * kmemcg_id. Setting the id after enabling static branching will
Vladimir Davydovd6441632014-01-23 15:53:09 -08003461 * guarantee no one starts accounting before all call sites are
3462 * patched.
3463 */
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003464 memcg->kmemcg_id = memcg_id;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003465out:
Vladimir Davydovd6441632014-01-23 15:53:09 -08003466 return err;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003467}
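
/*
 * Minimal userspace sketch of how memcg_activate_kmem() gets invoked
 * (illustrative; the cgroup path is hypothetical): the first write to
 * memory.kmem.limit_in_bytes both sets the limit and turns accounting
 * on, and fails with EBUSY if the group already has tasks or children.
 */
#if 0	/* example only, not part of this file */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int activate_kmem(void)
{
	const char *limit = "67108864";	/* 64M, in bytes */
	int fd = open("/sys/fs/cgroup/memory/demo/memory.kmem.limit_in_bytes",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, limit, strlen(limit)) < 0) {	/* EBUSY, see above */
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif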
3468
Vladimir Davydovd6441632014-01-23 15:53:09 -08003469static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003470 unsigned long limit)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003471{
3472 int ret;
3473
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003474 mutex_lock(&memcg_limit_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003475 if (!memcg_kmem_is_active(memcg))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003476 ret = memcg_activate_kmem(memcg, limit);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003477 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003478 ret = page_counter_limit(&memcg->kmem, limit);
3479 mutex_unlock(&memcg_limit_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003480 return ret;
3481}
3482
Glauber Costa55007d82012-12-18 14:22:38 -08003483static int memcg_propagate_kmem(struct mem_cgroup *memcg)
Glauber Costa510fc4e2012-12-18 14:21:47 -08003484{
Glauber Costa55007d82012-12-18 14:22:38 -08003485 int ret = 0;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003486 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003487
Glauber Costa510fc4e2012-12-18 14:21:47 -08003488 if (!parent)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003489 return 0;
Glauber Costa55007d82012-12-18 14:22:38 -08003490
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003491 mutex_lock(&memcg_limit_mutex);
Glauber Costaa8964b92012-12-18 14:22:09 -08003492 /*
Vladimir Davydovd6441632014-01-23 15:53:09 -08003493 * If the parent cgroup is not kmem-active now, it cannot be activated
3494 * after this point, because it has at least one child already.
Glauber Costaa8964b92012-12-18 14:22:09 -08003495 */
Vladimir Davydovd6441632014-01-23 15:53:09 -08003496 if (memcg_kmem_is_active(parent))
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003497 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
3498 mutex_unlock(&memcg_limit_mutex);
Glauber Costa55007d82012-12-18 14:22:38 -08003499 return ret;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003500}
Vladimir Davydovd6441632014-01-23 15:53:09 -08003501#else
3502static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003503 unsigned long limit)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003504{
3505 return -EINVAL;
3506}
Hugh Dickins6d0439902013-02-22 16:35:50 -08003507#endif /* CONFIG_MEMCG_KMEM */
Glauber Costa510fc4e2012-12-18 14:21:47 -08003508
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003509/*
3510 * This handles writes to the limit files:
3511 * RES_LIMIT and RES_SOFT_LIMIT.
3512 */
Tejun Heo451af502014-05-13 12:16:21 -04003513static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3514 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003515{
Tejun Heo451af502014-05-13 12:16:21 -04003516 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003517 unsigned long nr_pages;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003518 int ret;
3519
Tejun Heo451af502014-05-13 12:16:21 -04003520 buf = strstrip(buf);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003521 ret = page_counter_memparse(buf, &nr_pages);
3522 if (ret)
3523 return ret;
Tejun Heoaf36f902012-04-01 12:09:55 -07003524
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003525 switch (MEMFILE_ATTR(of_cft(of)->private)) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003526 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07003527 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3528 ret = -EINVAL;
3529 break;
3530 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003531 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3532 case _MEM:
3533 ret = mem_cgroup_resize_limit(memcg, nr_pages);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003534 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003535 case _MEMSWAP:
3536 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3537 break;
3538 case _KMEM:
3539 ret = memcg_update_kmem_limit(memcg, nr_pages);
3540 break;
3541 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003542 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07003543 case RES_SOFT_LIMIT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003544 memcg->soft_limit = nr_pages;
3545 ret = 0;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003546 break;
3547 }
Tejun Heo451af502014-05-13 12:16:21 -04003548 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003549}
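
/*
 * Worked example for the write path above (assuming 4K pages and the
 * two-argument page_counter_memparse() called here): writing "256M" to
 * memory.limit_in_bytes parses to nr_pages = 268435456 / 4096 = 65536,
 * while the literal string "-1" selects PAGE_COUNTER_MAX, i.e. no
 * limit.
 */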
3550
Tejun Heo6770c642014-05-13 12:16:21 -04003551static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3552 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003553{
Tejun Heo6770c642014-05-13 12:16:21 -04003554 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003555 struct page_counter *counter;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003556
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003557 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3558 case _MEM:
3559 counter = &memcg->memory;
3560 break;
3561 case _MEMSWAP:
3562 counter = &memcg->memsw;
3563 break;
3564 case _KMEM:
3565 counter = &memcg->kmem;
3566 break;
3567 default:
3568 BUG();
3569 }
Tejun Heoaf36f902012-04-01 12:09:55 -07003570
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003571 switch (MEMFILE_ATTR(of_cft(of)->private)) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003572 case RES_MAX_USAGE:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003573 page_counter_reset_watermark(counter);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003574 break;
3575 case RES_FAILCNT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003576 counter->failcnt = 0;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003577 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003578 default:
3579 BUG();
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003580 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003581
Tejun Heo6770c642014-05-13 12:16:21 -04003582 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003583}
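
/*
 * Usage sketch: the reset handler above never looks at the written
 * value, so the conventional "echo 0" into memory.max_usage_in_bytes
 * or memory.failcnt simply rewinds the watermark or clears the
 * failure counter.
 */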
3584
Tejun Heo182446d2013-08-08 20:11:24 -04003585static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003586 struct cftype *cft)
3587{
Tejun Heo182446d2013-08-08 20:11:24 -04003588 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003589}
3590
Daisuke Nishimura02491442010-03-10 15:22:17 -08003591#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04003592static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003593 struct cftype *cft, u64 val)
3594{
Tejun Heo182446d2013-08-08 20:11:24 -04003595 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003596
3597 if (val >= (1 << NR_MOVE_TYPE))
3598 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003599
Glauber Costaee5e8472013-02-22 16:34:50 -08003600 /*
3601	 * No locking is needed here, because ->can_attach() will
3602	 * check this value once at the beginning of the process, and then carry
3603 * on with stale data. This means that changes to this value will only
3604 * affect task migrations starting after the change.
3605 */
3606 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003607 return 0;
3608}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003609#else
Tejun Heo182446d2013-08-08 20:11:24 -04003610static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08003611 struct cftype *cft, u64 val)
3612{
3613 return -ENOSYS;
3614}
3615#endif
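
/*
 * Illustrative values (assuming the MOVE_CHARGE_TYPE_* bits defined
 * earlier in this file: bit 0 = anonymous pages, bit 1 = file pages):
 * writing 1 to move_charge_at_immigrate moves anon charges along with
 * a migrating task, 3 moves anon and file, and any value >=
 * (1 << NR_MOVE_TYPE) is rejected above with -EINVAL.
 */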
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003616
Ying Han406eb0c2011-05-26 16:25:37 -07003617#ifdef CONFIG_NUMA
Tejun Heo2da8ca82013-12-05 12:28:04 -05003618static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003619{
Greg Thelen25485de2013-11-12 15:07:40 -08003620 struct numa_stat {
3621 const char *name;
3622 unsigned int lru_mask;
3623 };
3624
3625 static const struct numa_stat stats[] = {
3626 { "total", LRU_ALL },
3627 { "file", LRU_ALL_FILE },
3628 { "anon", LRU_ALL_ANON },
3629 { "unevictable", BIT(LRU_UNEVICTABLE) },
3630 };
3631 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07003632 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08003633 unsigned long nr;
Tejun Heo2da8ca82013-12-05 12:28:04 -05003634 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Ying Han406eb0c2011-05-26 16:25:37 -07003635
Greg Thelen25485de2013-11-12 15:07:40 -08003636 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3637 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3638 seq_printf(m, "%s=%lu", stat->name, nr);
3639 for_each_node_state(nid, N_MEMORY) {
3640 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3641 stat->lru_mask);
3642 seq_printf(m, " N%d=%lu", nid, nr);
3643 }
3644 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003645 }
Ying Han406eb0c2011-05-26 16:25:37 -07003646
Ying Han071aee12013-11-12 15:07:41 -08003647 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3648 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07003649
Ying Han071aee12013-11-12 15:07:41 -08003650 nr = 0;
3651 for_each_mem_cgroup_tree(iter, memcg)
3652 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3653 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3654 for_each_node_state(nid, N_MEMORY) {
3655 nr = 0;
3656 for_each_mem_cgroup_tree(iter, memcg)
3657 nr += mem_cgroup_node_nr_lru_pages(
3658 iter, nid, stat->lru_mask);
3659 seq_printf(m, " N%d=%lu", nid, nr);
3660 }
3661 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003662 }
Ying Han406eb0c2011-05-26 16:25:37 -07003663
Ying Han406eb0c2011-05-26 16:25:37 -07003664 return 0;
3665}
3666#endif /* CONFIG_NUMA */
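
/*
 * Sample memory.numa_stat output on a hypothetical two-node machine
 * (numbers invented for illustration; counts are in pages):
 *
 *	total=5243 N0=4096 N1=1147
 *	file=3210 N0=3000 N1=210
 *	anon=2033 N0=1096 N1=937
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=5243 N0=4096 N1=1147
 *	... (the hierarchical_* lines repeat the same layout, summed
 *	over the subtree)
 */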
3667
Tejun Heo2da8ca82013-12-05 12:28:04 -05003668static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003669{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003670 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003671 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003672 struct mem_cgroup *mi;
3673 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003674
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08003675 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3676
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003677 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07003678 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003679 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003680 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
3681 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003682 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003683
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003684 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3685 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3686 mem_cgroup_read_events(memcg, i));
3687
3688 for (i = 0; i < NR_LRU_LISTS; i++)
3689 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3690 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3691
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003692 /* Hierarchical information */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003693 memory = memsw = PAGE_COUNTER_MAX;
3694 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3695 memory = min(memory, mi->memory.limit);
3696 memsw = min(memsw, mi->memsw.limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003697 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003698 seq_printf(m, "hierarchical_memory_limit %llu\n",
3699 (u64)memory * PAGE_SIZE);
3700 if (do_swap_account)
3701 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3702 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003703
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003704 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3705 long long val = 0;
3706
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07003707 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003708 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003709 for_each_mem_cgroup_tree(mi, memcg)
3710 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3711 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
3712 }
3713
3714 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3715 unsigned long long val = 0;
3716
3717 for_each_mem_cgroup_tree(mi, memcg)
3718 val += mem_cgroup_read_events(mi, i);
3719 seq_printf(m, "total_%s %llu\n",
3720 mem_cgroup_events_names[i], val);
3721 }
3722
3723 for (i = 0; i < NR_LRU_LISTS; i++) {
3724 unsigned long long val = 0;
3725
3726 for_each_mem_cgroup_tree(mi, memcg)
3727 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3728 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003729 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003730
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003731#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003732 {
3733 int nid, zid;
3734 struct mem_cgroup_per_zone *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07003735 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003736 unsigned long recent_rotated[2] = {0, 0};
3737 unsigned long recent_scanned[2] = {0, 0};
3738
3739 for_each_online_node(nid)
3740 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
Jianyu Zhane2318752014-06-06 14:38:20 -07003741 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
Hugh Dickins89abfab2012-05-29 15:06:53 -07003742 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003743
Hugh Dickins89abfab2012-05-29 15:06:53 -07003744 recent_rotated[0] += rstat->recent_rotated[0];
3745 recent_rotated[1] += rstat->recent_rotated[1];
3746 recent_scanned[0] += rstat->recent_scanned[0];
3747 recent_scanned[1] += rstat->recent_scanned[1];
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003748 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07003749 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3750 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3751 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3752 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003753 }
3754#endif
3755
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003756 return 0;
3757}
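
/*
 * Worked example for the hierarchical limit lines above (invented
 * numbers, 4K pages): with a 65536-page (256M) limit on an ancestor
 * and a 131072-page (512M) limit on the group itself, the min() walk
 * yields memory = 65536 pages and the file reports
 * "hierarchical_memory_limit 268435456".
 */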
3758
Tejun Heo182446d2013-08-08 20:11:24 -04003759static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3760 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003761{
Tejun Heo182446d2013-08-08 20:11:24 -04003762 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003763
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07003764 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003765}
3766
Tejun Heo182446d2013-08-08 20:11:24 -04003767static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3768 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003769{
Tejun Heo182446d2013-08-08 20:11:24 -04003770 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08003771
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003772 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003773 return -EINVAL;
3774
Linus Torvalds14208b02014-06-09 15:03:33 -07003775 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003776 memcg->swappiness = val;
3777 else
3778 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08003779
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003780 return 0;
3781}
3782
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003783static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3784{
3785 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003786 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003787 int i;
3788
3789 rcu_read_lock();
3790 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003791 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003792 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003793 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003794
3795 if (!t)
3796 goto unlock;
3797
Johannes Weinerce00a962014-09-05 08:43:57 -04003798 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003799
3800 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07003801	 * current_threshold points to the threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003802	 * If that no longer holds, a threshold was crossed after the last
3803 * call of __mem_cgroup_threshold().
3804 */
Phil Carmody5407a562010-05-26 14:42:42 -07003805 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003806
3807 /*
3808 * Iterate backward over array of thresholds starting from
3809 * current_threshold and check if a threshold is crossed.
3810	 * If none of the thresholds below usage is crossed, we read
3811 * only one element of the array here.
3812 */
3813 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3814 eventfd_signal(t->entries[i].eventfd, 1);
3815
3816 /* i = current_threshold + 1 */
3817 i++;
3818
3819 /*
3820 * Iterate forward over array of thresholds starting from
3821 * current_threshold+1 and check if a threshold is crossed.
3822	 * If none of the thresholds above usage is crossed, we read
3823 * only one element of the array here.
3824 */
3825 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3826 eventfd_signal(t->entries[i].eventfd, 1);
3827
3828 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003829 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003830unlock:
3831 rcu_read_unlock();
3832}
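
/*
 * Worked example (invented thresholds): with a sorted array {4M, 8M,
 * 16M} and current_threshold = 1 (the 8M slot), a drop in usage to 3M
 * makes the backward loop signal the 8M and 4M eventfds, the forward
 * loop exit immediately, and current_threshold end up at -1.  Starting
 * instead at current_threshold = 0 (4M), a rise to 10M signals only
 * the 8M slot via the forward loop and leaves current_threshold = 1.
 */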
3833
3834static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3835{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003836 while (memcg) {
3837 __mem_cgroup_threshold(memcg, false);
3838 if (do_swap_account)
3839 __mem_cgroup_threshold(memcg, true);
3840
3841 memcg = parent_mem_cgroup(memcg);
3842 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003843}
3844
3845static int compare_thresholds(const void *a, const void *b)
3846{
3847 const struct mem_cgroup_threshold *_a = a;
3848 const struct mem_cgroup_threshold *_b = b;
3849
Greg Thelen2bff24a2013-09-11 14:23:08 -07003850 if (_a->threshold > _b->threshold)
3851 return 1;
3852
3853 if (_a->threshold < _b->threshold)
3854 return -1;
3855
3856 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003857}
3858
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003859static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003860{
3861 struct mem_cgroup_eventfd_list *ev;
3862
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003863 spin_lock(&memcg_oom_lock);
3864
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003865 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003866 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003867
3868 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003869 return 0;
3870}
3871
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003872static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003873{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003874 struct mem_cgroup *iter;
3875
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003876 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003877 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003878}
3879
Tejun Heo59b6f872013-11-22 18:20:43 -05003880static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003881 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003882{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003883 struct mem_cgroup_thresholds *thresholds;
3884 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003885 unsigned long threshold;
3886 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003887 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003888
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003889 ret = page_counter_memparse(args, &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003890 if (ret)
3891 return ret;
3892
3893 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003894
Johannes Weiner05b84302014-08-06 16:05:59 -07003895 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003896 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003897 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003898 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003899 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003900 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003901 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003902 BUG();
3903
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003904	/* Check if a threshold was crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003905 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003906 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3907
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003908 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003909
3910	/* Allocate memory for a new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003911 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003912 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003913 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003914 ret = -ENOMEM;
3915 goto unlock;
3916 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003917 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003918
3919 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003920 if (thresholds->primary) {
3921 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003922 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003923 }
3924
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003925 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003926 new->entries[size - 1].eventfd = eventfd;
3927 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003928
3929	/* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003930 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003931 compare_thresholds, NULL);
3932
3933 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003934 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003935 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07003936 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003937 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003938 * new->current_threshold will not be used until
3939 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003940 * it here.
3941 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003942 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07003943 } else
3944 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003945 }
3946
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003947 /* Free old spare buffer and save old primary buffer as spare */
3948 kfree(thresholds->spare);
3949 thresholds->spare = thresholds->primary;
3950
3951 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003952
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003953	/* Make sure nobody still uses the old thresholds array */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003954 synchronize_rcu();
3955
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003956unlock:
3957 mutex_unlock(&memcg->thresholds_lock);
3958
3959 return ret;
3960}
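
/*
 * Note on the pattern above (descriptive only): thresholds->primary
 * and thresholds->spare form an RCU double-buffer.  The writer builds
 * a complete new array, publishes it with rcu_assign_pointer(), and
 * parks the old primary as the spare; the synchronize_rcu() at the end
 * guarantees no reader still holds the old array by the time a later
 * update recycles it, so __mem_cgroup_threshold() never sees a
 * half-built array.
 */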
3961
Tejun Heo59b6f872013-11-22 18:20:43 -05003962static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003963 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003964{
Tejun Heo59b6f872013-11-22 18:20:43 -05003965 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003966}
3967
Tejun Heo59b6f872013-11-22 18:20:43 -05003968static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003969 struct eventfd_ctx *eventfd, const char *args)
3970{
Tejun Heo59b6f872013-11-22 18:20:43 -05003971 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003972}
3973
Tejun Heo59b6f872013-11-22 18:20:43 -05003974static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003975 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003976{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003977 struct mem_cgroup_thresholds *thresholds;
3978 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003979 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003980 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003981
3982 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07003983
3984 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003985 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003986 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003987 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003988 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003989 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003990 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003991 BUG();
3992
Anton Vorontsov371528c2012-02-24 05:14:46 +04003993 if (!thresholds->primary)
3994 goto unlock;
3995
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003996	/* Check if a threshold was crossed before removing */
3997 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3998
3999	/* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004000 size = 0;
4001 for (i = 0; i < thresholds->primary->size; i++) {
4002 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004003 size++;
4004 }
4005
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004006 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004007
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004008	/* Set the thresholds array to NULL if we don't have thresholds */
4009 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004010 kfree(new);
4011 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004012 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004013 }
4014
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004015 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004016
4017 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004018 new->current_threshold = -1;
4019 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4020 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004021 continue;
4022
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004023 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07004024 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004025 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004026 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004027 * until rcu_assign_pointer(), so it's safe to increment
4028 * it here.
4029 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004030 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004031 }
4032 j++;
4033 }
4034
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004035swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004036	/* Swap the primary and spare arrays */
4037 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07004038 /* If all events are unregistered, free the spare array */
4039 if (!new) {
4040 kfree(thresholds->spare);
4041 thresholds->spare = NULL;
4042 }
4043
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004044 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004045
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004046	/* Make sure nobody still uses the old thresholds array */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004047 synchronize_rcu();
Anton Vorontsov371528c2012-02-24 05:14:46 +04004048unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004049 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004050}
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004051
Tejun Heo59b6f872013-11-22 18:20:43 -05004052static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004053 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004054{
Tejun Heo59b6f872013-11-22 18:20:43 -05004055 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05004056}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004057
Tejun Heo59b6f872013-11-22 18:20:43 -05004058static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004059 struct eventfd_ctx *eventfd)
4060{
Tejun Heo59b6f872013-11-22 18:20:43 -05004061 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05004062}
4063
Tejun Heo59b6f872013-11-22 18:20:43 -05004064static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004065 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004066{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004067 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004068
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004069 event = kmalloc(sizeof(*event), GFP_KERNEL);
4070 if (!event)
4071 return -ENOMEM;
4072
Michal Hocko1af8efe2011-07-26 16:08:24 -07004073 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004074
4075 event->eventfd = eventfd;
4076 list_add(&event->list, &memcg->oom_notify);
4077
4078	/* already in OOM? */
Michal Hocko79dfdac2011-07-26 16:08:23 -07004079 if (atomic_read(&memcg->under_oom))
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004080 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07004081 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004082
4083 return 0;
4084}
4085
Tejun Heo59b6f872013-11-22 18:20:43 -05004086static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004087 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004088{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004089 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004090
Michal Hocko1af8efe2011-07-26 16:08:24 -07004091 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004092
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004093 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004094 if (ev->eventfd == eventfd) {
4095 list_del(&ev->list);
4096 kfree(ev);
4097 }
4098 }
4099
Michal Hocko1af8efe2011-07-26 16:08:24 -07004100 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004101}
4102
Tejun Heo2da8ca82013-12-05 12:28:04 -05004103static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004104{
Tejun Heo2da8ca82013-12-05 12:28:04 -05004105 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004106
Tejun Heo791badb2013-12-05 12:28:02 -05004107 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4108 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004109 return 0;
4110}
4111
Tejun Heo182446d2013-08-08 20:11:24 -04004112static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004113 struct cftype *cft, u64 val)
4114{
Tejun Heo182446d2013-08-08 20:11:24 -04004115 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004116
4117	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07004118 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004119 return -EINVAL;
4120
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004121 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07004122 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004123 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07004124
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004125 return 0;
4126}
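
/*
 * Usage sketch: writing 1 to memory.oom_control pauses the OOM killer
 * for this memcg (tasks block under OOM instead of being killed, and
 * listeners are notified via the oom_control eventfd); writing 0
 * re-enables it, and memcg_oom_recover() above wakes any waiters.
 * Reading the file reports both pieces of state, e.g.
 *
 *	oom_kill_disable 1
 *	under_oom 0
 */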
4127
Andrew Mortonc255a452012-07-31 16:43:02 -07004128#ifdef CONFIG_MEMCG_KMEM
Glauber Costacbe128e32012-04-09 19:36:34 -03004129static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00004130{
Glauber Costa55007d82012-12-18 14:22:38 -08004131 int ret;
4132
Glauber Costa55007d82012-12-18 14:22:38 -08004133 ret = memcg_propagate_kmem(memcg);
4134 if (ret)
4135 return ret;
Glauber Costa2633d7a2012-12-18 14:22:34 -08004136
Glauber Costa1d62e432012-04-09 19:36:33 -03004137 return mem_cgroup_sockets_init(memcg, ss);
Michel Lespinasse573b4002013-04-29 15:08:13 -07004138}
Glauber Costae5671df2011-12-11 21:47:01 +00004139
Li Zefan10d5ebf2013-07-08 16:00:33 -07004140static void memcg_destroy_kmem(struct mem_cgroup *memcg)
Glauber Costad1a4c0b2011-12-11 21:47:04 +00004141{
Vladimir Davydov8135be52014-12-12 16:56:38 -08004142 memcg_unregister_all_caches(memcg);
Glauber Costa1d62e432012-04-09 19:36:33 -03004143 mem_cgroup_sockets_destroy(memcg);
Li Zefan10d5ebf2013-07-08 16:00:33 -07004144}
Glauber Costae5671df2011-12-11 21:47:01 +00004145#else
Glauber Costacbe128e32012-04-09 19:36:34 -03004146static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00004147{
4148 return 0;
4149}
Glauber Costad1a4c0b2011-12-11 21:47:04 +00004150
Li Zefan10d5ebf2013-07-08 16:00:33 -07004151static void memcg_destroy_kmem(struct mem_cgroup *memcg)
4152{
4153}
Glauber Costae5671df2011-12-11 21:47:01 +00004154#endif
4155
Tejun Heo79bd9812013-11-22 18:20:42 -05004156/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004157 * DO NOT USE IN NEW FILES.
4158 *
4159 * "cgroup.event_control" implementation.
4160 *
4161 * This is way over-engineered. It tries to support fully configurable
4162 * events for each user.  Such a level of flexibility is completely
4163 * unnecessary, especially in light of the planned unified hierarchy.
4164 *
4165 * Please deprecate this and replace with something simpler if at all
4166 * possible.
4167 */
4168
4169/*
Tejun Heo79bd9812013-11-22 18:20:42 -05004170 * Unregister event and free resources.
4171 *
4172 * Gets called from a workqueue.
4173 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004174static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05004175{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004176 struct mem_cgroup_event *event =
4177 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05004178 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004179
4180 remove_wait_queue(event->wqh, &event->wait);
4181
Tejun Heo59b6f872013-11-22 18:20:43 -05004182 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05004183
4184 /* Notify userspace the event is going away. */
4185 eventfd_signal(event->eventfd, 1);
4186
4187 eventfd_ctx_put(event->eventfd);
4188 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05004189 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004190}
4191
4192/*
4193 * Gets called on POLLHUP on the eventfd when the user closes it.
4194 *
4195 * Called with wqh->lock held and interrupts disabled.
4196 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004197static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
4198 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05004199{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004200 struct mem_cgroup_event *event =
4201 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05004202 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004203 unsigned long flags = (unsigned long)key;
4204
4205 if (flags & POLLHUP) {
4206 /*
4207 * If the event has been detached at cgroup removal, we
4208 * can simply return knowing the other side will cleanup
4209 * for us.
4210 *
4211 * We can't race against event freeing since the other
4212 * side will require wqh->lock via remove_wait_queue(),
4213 * which we hold.
4214 */
Tejun Heofba94802013-11-22 18:20:43 -05004215 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004216 if (!list_empty(&event->list)) {
4217 list_del_init(&event->list);
4218 /*
4219			 * We are in atomic context, but memcg_event_remove()
4220			 * may sleep, so we have to call it from a workqueue.
4221 */
4222 schedule_work(&event->remove);
4223 }
Tejun Heofba94802013-11-22 18:20:43 -05004224 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004225 }
4226
4227 return 0;
4228}
4229
Tejun Heo3bc942f2013-11-22 18:20:44 -05004230static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05004231 wait_queue_head_t *wqh, poll_table *pt)
4232{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004233 struct mem_cgroup_event *event =
4234 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05004235
4236 event->wqh = wqh;
4237 add_wait_queue(wqh, &event->wait);
4238}
4239
4240/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004241 * DO NOT USE IN NEW FILES.
4242 *
Tejun Heo79bd9812013-11-22 18:20:42 -05004243 * Parse input and register new cgroup event handler.
4244 *
4245 * Input must be in the format '<event_fd> <control_fd> <args>'.
4246 * Interpretation of args is defined by control file implementation.
4247 */
Tejun Heo451af502014-05-13 12:16:21 -04004248static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4249 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05004250{
Tejun Heo451af502014-05-13 12:16:21 -04004251 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05004252 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004253 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05004254 struct cgroup_subsys_state *cfile_css;
4255 unsigned int efd, cfd;
4256 struct fd efile;
4257 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05004258 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05004259 char *endp;
4260 int ret;
4261
Tejun Heo451af502014-05-13 12:16:21 -04004262 buf = strstrip(buf);
4263
4264 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004265 if (*endp != ' ')
4266 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004267 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004268
Tejun Heo451af502014-05-13 12:16:21 -04004269 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004270 if ((*endp != ' ') && (*endp != '\0'))
4271 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004272 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004273
4274 event = kzalloc(sizeof(*event), GFP_KERNEL);
4275 if (!event)
4276 return -ENOMEM;
4277
Tejun Heo59b6f872013-11-22 18:20:43 -05004278 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004279 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004280 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4281 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4282 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05004283
4284 efile = fdget(efd);
4285 if (!efile.file) {
4286 ret = -EBADF;
4287 goto out_kfree;
4288 }
4289
4290 event->eventfd = eventfd_ctx_fileget(efile.file);
4291 if (IS_ERR(event->eventfd)) {
4292 ret = PTR_ERR(event->eventfd);
4293 goto out_put_efile;
4294 }
4295
4296 cfile = fdget(cfd);
4297 if (!cfile.file) {
4298 ret = -EBADF;
4299 goto out_put_eventfd;
4300 }
4301
4302	/* the process needs read permission on the control file */
4303 /* AV: shouldn't we check that it's been opened for read instead? */
4304 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4305 if (ret < 0)
4306 goto out_put_cfile;
4307
Tejun Heo79bd9812013-11-22 18:20:42 -05004308 /*
Tejun Heofba94802013-11-22 18:20:43 -05004309 * Determine the event callbacks and set them in @event. This used
4310 * to be done via struct cftype but cgroup core no longer knows
4311 * about these events. The following is crude but the whole thing
4312 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05004313 *
4314 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05004315 */
Al Virob5830432014-10-31 01:22:04 -04004316 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05004317
4318 if (!strcmp(name, "memory.usage_in_bytes")) {
4319 event->register_event = mem_cgroup_usage_register_event;
4320 event->unregister_event = mem_cgroup_usage_unregister_event;
4321 } else if (!strcmp(name, "memory.oom_control")) {
4322 event->register_event = mem_cgroup_oom_register_event;
4323 event->unregister_event = mem_cgroup_oom_unregister_event;
4324 } else if (!strcmp(name, "memory.pressure_level")) {
4325 event->register_event = vmpressure_register_event;
4326 event->unregister_event = vmpressure_unregister_event;
4327 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05004328 event->register_event = memsw_cgroup_usage_register_event;
4329 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05004330 } else {
4331 ret = -EINVAL;
4332 goto out_put_cfile;
4333 }
4334
4335 /*
Tejun Heob5557c42013-11-22 18:20:42 -05004336 * Verify @cfile should belong to @css. Also, remaining events are
4337 * automatically removed on cgroup destruction but the removal is
4338 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05004339 */
Al Virob5830432014-10-31 01:22:04 -04004340 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04004341 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05004342 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05004343 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05004344 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05004345 if (cfile_css != css) {
4346 css_put(cfile_css);
4347 goto out_put_cfile;
4348 }
Tejun Heo79bd9812013-11-22 18:20:42 -05004349
Tejun Heo451af502014-05-13 12:16:21 -04004350 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05004351 if (ret)
4352 goto out_put_css;
4353
4354 efile.file->f_op->poll(efile.file, &event->pt);
4355
Tejun Heofba94802013-11-22 18:20:43 -05004356 spin_lock(&memcg->event_list_lock);
4357 list_add(&event->list, &memcg->event_list);
4358 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004359
4360 fdput(cfile);
4361 fdput(efile);
4362
Tejun Heo451af502014-05-13 12:16:21 -04004363 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05004364
4365out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05004366 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004367out_put_cfile:
4368 fdput(cfile);
4369out_put_eventfd:
4370 eventfd_ctx_put(event->eventfd);
4371out_put_efile:
4372 fdput(efile);
4373out_kfree:
4374 kfree(event);
4375
4376 return ret;
4377}
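
/*
 * Userspace sketch of the legacy interface parsed above (illustrative;
 * the cgroup path and the 64M threshold are hypothetical): register a
 * usage threshold through cgroup.event_control and block on the
 * eventfd until the threshold is crossed.
 */
#if 0	/* example only, not part of this file */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	char line[64];
	uint64_t hits;
	int efd = eventfd(0, 0);
	int cfd = open("/sys/fs/cgroup/memory/demo/memory.usage_in_bytes",
		       O_RDONLY);
	int ctl = open("/sys/fs/cgroup/memory/demo/cgroup.event_control",
		       O_WRONLY);

	if (efd < 0 || cfd < 0 || ctl < 0)
		return 1;
	/* "<event_fd> <control_fd> <args>"; args here is the threshold */
	snprintf(line, sizeof(line), "%d %d %llu", efd, cfd, 64ULL << 20);
	if (write(ctl, line, strlen(line)) < 0)
		return 1;
	if (read(efd, &hits, sizeof(hits)) == sizeof(hits))
		printf("threshold crossed %llu time(s)\n",
		       (unsigned long long)hits);
	return 0;
}
#endif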
4378
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004379static struct cftype mem_cgroup_files[] = {
4380 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004381 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004382 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004383 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004384 },
4385 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004386 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004387 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004388 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004389 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004390 },
4391 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004392 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004393 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004394 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004395 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004396 },
4397 {
Balbir Singh296c81d2009-09-23 15:56:36 -07004398 .name = "soft_limit_in_bytes",
4399 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004400 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004401 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07004402 },
4403 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004404 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004405 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004406 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004407 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004408 },
Balbir Singh8697d332008-02-07 00:13:59 -08004409 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004410 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004411 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004412 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004413 {
4414 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04004415 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004416 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08004417 {
4418 .name = "use_hierarchy",
4419 .write_u64 = mem_cgroup_hierarchy_write,
4420 .read_u64 = mem_cgroup_hierarchy_read,
4421 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004422 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05004423 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04004424 .write = memcg_write_event_control,
Tejun Heo79bd9812013-11-22 18:20:42 -05004425 .flags = CFTYPE_NO_PREFIX,
4426 .mode = S_IWUGO,
4427 },
4428 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004429 .name = "swappiness",
4430 .read_u64 = mem_cgroup_swappiness_read,
4431 .write_u64 = mem_cgroup_swappiness_write,
4432 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004433 {
4434 .name = "move_charge_at_immigrate",
4435 .read_u64 = mem_cgroup_move_charge_read,
4436 .write_u64 = mem_cgroup_move_charge_write,
4437 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004438 {
4439 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004440 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004441 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004442 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4443 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004444 {
4445 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004446 },
Ying Han406eb0c2011-05-26 16:25:37 -07004447#ifdef CONFIG_NUMA
4448 {
4449 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004450 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07004451 },
4452#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004453#ifdef CONFIG_MEMCG_KMEM
4454 {
4455 .name = "kmem.limit_in_bytes",
4456 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004457 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004458 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004459 },
4460 {
4461 .name = "kmem.usage_in_bytes",
4462 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004463 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004464 },
4465 {
4466 .name = "kmem.failcnt",
4467 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004468 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004469 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004470 },
4471 {
4472 .name = "kmem.max_usage_in_bytes",
4473 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004474 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004475 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004476 },
Glauber Costa749c5412012-12-18 14:23:01 -08004477#ifdef CONFIG_SLABINFO
4478 {
4479 .name = "kmem.slabinfo",
Vladimir Davydovb0475012014-12-10 15:44:19 -08004480 .seq_start = slab_start,
4481 .seq_next = slab_next,
4482 .seq_stop = slab_stop,
4483 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08004484 },
4485#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004486#endif
Tejun Heo6bc10342012-04-01 12:09:55 -07004487 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07004488};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004489
Michal Hocko2d110852013-02-22 16:34:43 -08004490#ifdef CONFIG_MEMCG_SWAP
4491static struct cftype memsw_cgroup_files[] = {
4492 {
4493 .name = "memsw.usage_in_bytes",
4494 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004495 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08004496 },
4497 {
4498 .name = "memsw.max_usage_in_bytes",
4499 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004500 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004501 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08004502 },
4503 {
4504 .name = "memsw.limit_in_bytes",
4505 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004506 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004507 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08004508 },
4509 {
4510 .name = "memsw.failcnt",
4511 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004512 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004513 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08004514 },
4515 { }, /* terminate */
4516};
4517#endif
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004518static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004519{
4520 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004521 struct mem_cgroup_per_zone *mz;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004522 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004523 /*
4524	 * This routine is called for all possible nodes.
4525	 * But it's a BUG to call kmalloc() against an offline node.
4526 *
4527	 * TODO: this routine can waste a lot of memory for nodes which will
4528 * never be onlined. It's better to use memory hotplug callback
4529 * function.
4530 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004531 if (!node_state(node, N_NORMAL_MEMORY))
4532 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004533 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004534 if (!pn)
4535 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004536
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004537 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4538 mz = &pn->zoneinfo[zone];
Hugh Dickinsbea8c152012-11-16 14:14:54 -08004539 lruvec_init(&mz->lruvec);
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -07004540 mz->usage_in_excess = 0;
4541 mz->on_tree = false;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004542 mz->memcg = memcg;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004543 }
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004544 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004545 return 0;
4546}
4547
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004548static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004549{
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004550 kfree(memcg->nodeinfo[node]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004551}
4552
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004553static struct mem_cgroup *mem_cgroup_alloc(void)
4554{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004555 struct mem_cgroup *memcg;
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004556 size_t size;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004557
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004558 size = sizeof(struct mem_cgroup);
4559 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004560
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004561 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004562 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004563 return NULL;
4564
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004565 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4566 if (!memcg->stat)
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004567 goto out_free;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004568 spin_lock_init(&memcg->pcp_counter_lock);
4569 return memcg;
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004570
4571out_free:
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004572 kfree(memcg);
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004573 return NULL;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004574}
4575
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004576/*
Glauber Costac8b2a362012-12-18 14:22:13 -08004577 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4578	 * (scanning them all at force_empty would be too costly...)
4579	 *
4580	 * Instead of clearing all references at force_empty, we remember
4581	 * the number of references from swap_cgroup and free the mem_cgroup
4582	 * when it goes down to 0.
4583	 *
4584	 * Removal of the cgroup itself succeeds regardless of refs from swap.
Hugh Dickins59927fb2012-03-15 15:17:07 -07004585 */
Glauber Costac8b2a362012-12-18 14:22:13 -08004586
4587static void __mem_cgroup_free(struct mem_cgroup *memcg)
Hugh Dickins59927fb2012-03-15 15:17:07 -07004588{
Glauber Costac8b2a362012-12-18 14:22:13 -08004589 int node;
Hugh Dickins59927fb2012-03-15 15:17:07 -07004590
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -07004591 mem_cgroup_remove_from_trees(memcg);
Glauber Costac8b2a362012-12-18 14:22:13 -08004592
4593 for_each_node(node)
4594 free_mem_cgroup_per_zone_info(memcg, node);
4595
4596 free_percpu(memcg->stat);
4597
Glauber Costaa8964b92012-12-18 14:22:09 -08004598 disarm_static_keys(memcg);
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004599 kfree(memcg);
Hugh Dickins59927fb2012-03-15 15:17:07 -07004600}
Glauber Costa3afe36b2012-05-29 15:07:10 -07004601
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004602/*
4603	 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
4604 */
Glauber Costae1aab162011-12-11 21:47:03 +00004605struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004606{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004607 if (!memcg->memory.parent)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004608 return NULL;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004609 return mem_cgroup_from_counter(memcg->memory.parent, memory);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004610}
Glauber Costae1aab162011-12-11 21:47:03 +00004611EXPORT_SYMBOL(parent_mem_cgroup);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004612
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -07004613static void __init mem_cgroup_soft_limit_tree_init(void)
4614{
4615 struct mem_cgroup_tree_per_node *rtpn;
4616 struct mem_cgroup_tree_per_zone *rtpz;
4617 int tmp, node, zone;
4618
4619 for_each_node(node) {
4620 tmp = node;
4621 if (!node_state(node, N_NORMAL_MEMORY))
4622 tmp = -1;
4623 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4624 BUG_ON(!rtpn);
4625
4626 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4627
4628 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4629 rtpz = &rtpn->rb_tree_per_zone[zone];
4630 rtpz->rb_root = RB_ROOT;
4631 spin_lock_init(&rtpz->lock);
4632 }
4633 }
4634}
4635
Li Zefan0eb253e2009-01-15 13:51:25 -08004636static struct cgroup_subsys_state * __ref
Tejun Heoeb954192013-08-08 20:11:23 -04004637mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004638{
Glauber Costad142e3e2013-02-22 16:34:52 -08004639 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004640 long error = -ENOMEM;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004641 int node;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004642
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004643 memcg = mem_cgroup_alloc();
4644 if (!memcg)
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004645 return ERR_PTR(error);
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004646
Bob Liu3ed28fa2012-01-12 17:19:04 -08004647 for_each_node(node)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004648 if (alloc_mem_cgroup_per_zone_info(memcg, node))
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004649 goto free_out;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004650
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004651 /* root ? */
Tejun Heoeb954192013-08-08 20:11:23 -04004652 if (parent_css == NULL) {
Hillf Dantona41c58a2011-12-19 17:11:57 -08004653 root_mem_cgroup = memcg;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004654 page_counter_init(&memcg->memory, NULL);
Johannes Weiner24d404d2015-01-08 14:32:35 -08004655 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004656 page_counter_init(&memcg->memsw, NULL);
4657 page_counter_init(&memcg->kmem, NULL);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004658 }
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004659
Glauber Costad142e3e2013-02-22 16:34:52 -08004660 memcg->last_scanned_node = MAX_NUMNODES;
4661 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08004662 memcg->move_charge_at_immigrate = 0;
4663 mutex_init(&memcg->thresholds_lock);
4664 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004665 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05004666 INIT_LIST_HEAD(&memcg->event_list);
4667 spin_lock_init(&memcg->event_list_lock);
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004668#ifdef CONFIG_MEMCG_KMEM
4669 memcg->kmemcg_id = -1;
4670 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
4671#endif
Glauber Costad142e3e2013-02-22 16:34:52 -08004672
4673 return &memcg->css;
4674
4675free_out:
4676 __mem_cgroup_free(memcg);
4677 return ERR_PTR(error);
4678}
4679
4680static int
Tejun Heoeb954192013-08-08 20:11:23 -04004681mem_cgroup_css_online(struct cgroup_subsys_state *css)
Glauber Costad142e3e2013-02-22 16:34:52 -08004682{
Tejun Heoeb954192013-08-08 20:11:23 -04004683 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04004684 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004685 int ret;
Glauber Costad142e3e2013-02-22 16:34:52 -08004686
Tejun Heo15a4c832014-05-04 15:09:14 -04004687 if (css->id > MEM_CGROUP_ID_MAX)
Li Zefan4219b2d2013-09-23 16:56:29 +08004688 return -ENOSPC;
4689
Tejun Heo63876982013-08-08 20:11:23 -04004690 if (!parent)
Glauber Costad142e3e2013-02-22 16:34:52 -08004691 return 0;
4692
Glauber Costa09998212013-02-22 16:34:55 -08004693 mutex_lock(&memcg_create_mutex);
Glauber Costad142e3e2013-02-22 16:34:52 -08004694
4695 memcg->use_hierarchy = parent->use_hierarchy;
4696 memcg->oom_kill_disable = parent->oom_kill_disable;
4697 memcg->swappiness = mem_cgroup_swappiness(parent);
4698
4699 if (parent->use_hierarchy) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004700 page_counter_init(&memcg->memory, &parent->memory);
Johannes Weiner24d404d2015-01-08 14:32:35 -08004701 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004702 page_counter_init(&memcg->memsw, &parent->memsw);
4703 page_counter_init(&memcg->kmem, &parent->kmem);
Glauber Costa55007d82012-12-18 14:22:38 -08004704
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004705 /*
Li Zefan8d76a972013-07-08 16:00:36 -07004706 * No need to take a reference to the parent because cgroup
4707 * core guarantees its existence.
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004708 */
Balbir Singh18f59ea2009-01-07 18:08:07 -08004709 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004710 page_counter_init(&memcg->memory, NULL);
Johannes Weiner24d404d2015-01-08 14:32:35 -08004711 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004712 page_counter_init(&memcg->memsw, NULL);
4713 page_counter_init(&memcg->kmem, NULL);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07004714 /*
4715	 * A deeper hierarchy with use_hierarchy == false doesn't make
4716	 * much sense, so let the cgroup subsystem know about this
4717	 * unfortunate state in our controller.
4718 */
Glauber Costad142e3e2013-02-22 16:34:52 -08004719 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05004720 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004721 }
Glauber Costa09998212013-02-22 16:34:55 -08004722 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004723
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004724 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
4725 if (ret)
4726 return ret;
4727
4728 /*
4729 * Make sure the memcg is initialized: mem_cgroup_iter()
4730 * orders reading memcg->initialized against its callers
4731 * reading the memcg members.
4732 */
4733 smp_store_release(&memcg->initialized, 1);
4734
4735 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004736}
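/*
 * A minimal sketch of what the page_counter parenting above implies
 * (illustrative outline only, assuming the page_counter API as used in
 * this file): with use_hierarchy, each counter is chained to its
 * parent's, so a charge walks child -> parent -> ... -> root and fails
 * at the first counter over its limit.
 *
 *	struct page_counter *fail;
 *
 *	page_counter_init(&memcg->memory, &parent->memory);
 *	if (page_counter_try_charge(&memcg->memory, nr_pages, &fail))
 *		mem_cgroup_from_counter(fail, memory); // ancestor at limit
 */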
4737
Tejun Heoeb954192013-08-08 20:11:23 -04004738static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004739{
Tejun Heoeb954192013-08-08 20:11:23 -04004740 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004741 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05004742
4743 /*
4744 * Unregister events and notify userspace.
4745	 * Notify userspace about cgroup removal only after rmdir of the cgroup
4746	 * directory, to avoid a race between userspace and kernel space.
4747 */
Tejun Heofba94802013-11-22 18:20:43 -05004748 spin_lock(&memcg->event_list_lock);
4749 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004750 list_del_init(&event->list);
4751 schedule_work(&event->remove);
4752 }
Tejun Heofba94802013-11-22 18:20:43 -05004753 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004754
Michal Hocko33cb8762013-07-31 13:53:51 -07004755 vmpressure_cleanup(&memcg->vmpressure);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004756}
4757
Tejun Heoeb954192013-08-08 20:11:23 -04004758static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004759{
Tejun Heoeb954192013-08-08 20:11:23 -04004760 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004761
Li Zefan10d5ebf2013-07-08 16:00:33 -07004762 memcg_destroy_kmem(memcg);
Li Zefan465939a2013-07-08 16:00:38 -07004763 __mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004764}
4765
Tejun Heo1ced9532014-07-08 18:02:57 -04004766/**
4767 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4768 * @css: the target css
4769 *
4770 * Reset the states of the mem_cgroup associated with @css. This is
4771 * invoked when the userland requests disabling on the default hierarchy
4772 * but the memcg is pinned through dependency. The memcg should stop
4773 * applying policies and should revert to the vanilla state as it may be
4774 * made visible again.
4775 *
4776 * The current implementation only resets the essential configurations.
4777 * This needs to be expanded to cover all the visible parts.
4778 */
4779static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4780{
4781 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4782
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004783 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4784 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4785 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
Johannes Weiner24d404d2015-01-08 14:32:35 -08004786 memcg->soft_limit = PAGE_COUNTER_MAX;
Tejun Heo1ced9532014-07-08 18:02:57 -04004787}
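/*
 * Illustrative trigger (an assumption based on the default-hierarchy
 * interface, not code in this file): userland disables the controller
 * in the parent's cgroup.subtree_control, e.g.
 *
 *	echo "-memory" > parent/cgroup.subtree_control
 *
 * and a child memcg still pinned by references is reset as above.
 */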
4788
Daisuke Nishimura02491442010-03-10 15:22:17 -08004789#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004790/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004791static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004792{
Johannes Weiner05b84302014-08-06 16:05:59 -07004793 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07004794
4795 /* Try a single bulk charge without reclaim first */
Johannes Weiner00501b52014-08-08 14:19:20 -07004796 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07004797 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004798 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004799 return ret;
4800 }
Johannes Weiner692e7c42014-08-06 16:05:57 -07004801 if (ret == -EINTR) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004802 cancel_charge(root_mem_cgroup, count);
Johannes Weiner692e7c42014-08-06 16:05:57 -07004803 return ret;
4804 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004805
4806 /* Try charges one by one with reclaim */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004807 while (count--) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004808 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
Johannes Weiner9476db92014-08-06 16:05:55 -07004809 /*
4810 * In case of failure, any residual charges against
4811 * mc.to will be dropped by mem_cgroup_clear_mc()
Johannes Weiner692e7c42014-08-06 16:05:57 -07004812 * later on. However, cancel any charges that are
4813 * bypassed to root right away or they'll be lost.
Johannes Weiner9476db92014-08-06 16:05:55 -07004814 */
Johannes Weiner692e7c42014-08-06 16:05:57 -07004815 if (ret == -EINTR)
Johannes Weiner00501b52014-08-08 14:19:20 -07004816 cancel_charge(root_mem_cgroup, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004817 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004818 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004819 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07004820 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004821 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004822 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004823}
4824
4825/**
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004826 * get_mctgt_type - get target type of moving charge
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004827 * @vma: the vma to which the pte to be checked belongs
4828	 * @addr: the address corresponding to the pte to be checked
4829	 * @ptent: the pte to be checked
Daisuke Nishimura02491442010-03-10 15:22:17 -08004830 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004831 *
4832	 * Returns
4833	 * 0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4834	 * 1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4835	 * move charge. If @target is not NULL, the page is stored in target->page
4836	 * with an extra refcount taken (callers should handle it).
Daisuke Nishimura02491442010-03-10 15:22:17 -08004837 * 2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4838	 * target for charge migration. If @target is not NULL, the entry is stored
4839	 * in target->ent.
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004840 *
4841 * Called with pte lock held.
4842 */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004843union mc_target {
4844 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004845 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004846};
4847
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004848enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004849 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004850 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004851 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004852};
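/*
 * Condensed pseudocode sketch of how the pte walkers below consume
 * get_mctgt_type() (illustrative only; see
 * mem_cgroup_move_charge_pte_range() for the real thing):
 *
 *	union mc_target target;
 *
 *	switch (get_mctgt_type(vma, addr, ptent, &target)) {
 *	case MC_TARGET_PAGE:
 *		move the page charge, then put_page(target.page);
 *		break;
 *	case MC_TARGET_SWAP:
 *		move the swap charge recorded in target.ent;
 *		break;
 *	case MC_TARGET_NONE:
 *		break;
 *	}
 */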
4853
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004854static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4855 unsigned long addr, pte_t ptent)
4856{
4857 struct page *page = vm_normal_page(vma, addr, ptent);
4858
4859 if (!page || !page_mapped(page))
4860 return NULL;
4861 if (PageAnon(page)) {
4862 /* we don't move shared anon */
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004863 if (!move_anon())
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004864 return NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004865 } else if (!move_file())
4866 /* we ignore mapcount for file pages */
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004867 return NULL;
4868 if (!get_page_unless_zero(page))
4869 return NULL;
4870
4871 return page;
4872}
4873
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004874#ifdef CONFIG_SWAP
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004875static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4876 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4877{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004878 struct page *page = NULL;
4879 swp_entry_t ent = pte_to_swp_entry(ptent);
4880
4881 if (!move_anon() || non_swap_entry(ent))
4882 return NULL;
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004883 /*
4884	 * Because lookup_swap_cache() updates some statistics counters,
4885 * we call find_get_page() with swapper_space directly.
4886 */
Shaohua Li33806f02013-02-22 16:34:37 -08004887 page = find_get_page(swap_address_space(ent), ent.val);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004888 if (do_swap_account)
4889 entry->val = ent.val;
4890
4891 return page;
4892}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004893#else
4894static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4895 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4896{
4897 return NULL;
4898}
4899#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004900
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004901static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4902 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4903{
4904 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004905 struct address_space *mapping;
4906 pgoff_t pgoff;
4907
4908 if (!vma->vm_file) /* anonymous vma */
4909 return NULL;
4910 if (!move_file())
4911 return NULL;
4912
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004913 mapping = vma->vm_file->f_mapping;
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004914 pgoff = linear_page_index(vma, addr);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004915
4916	 /* the page is moved even if it's not RSS of this task (page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004917#ifdef CONFIG_SWAP
4918 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07004919 if (shmem_mapping(mapping)) {
4920 page = find_get_entry(mapping, pgoff);
4921 if (radix_tree_exceptional_entry(page)) {
4922 swp_entry_t swp = radix_to_swp_entry(page);
4923 if (do_swap_account)
4924 *entry = swp;
4925 page = find_get_page(swap_address_space(swp), swp.val);
4926 }
4927 } else
4928 page = find_get_page(mapping, pgoff);
4929#else
4930 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004931#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004932 return page;
4933}
4934
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004935static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004936 unsigned long addr, pte_t ptent, union mc_target *target)
4937{
Daisuke Nishimura02491442010-03-10 15:22:17 -08004938 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004939 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004940 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004941
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004942 if (pte_present(ptent))
4943 page = mc_handle_present_pte(vma, addr, ptent);
4944 else if (is_swap_pte(ptent))
4945 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004946 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004947 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004948
4949 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004950 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004951 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004952 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004953 * Do only a loose check here, without serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08004954 * mem_cgroup_move_account() checks whether the page is
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004955 * valid under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08004956 */
Johannes Weiner1306a852014-12-10 15:44:52 -08004957 if (page->mem_cgroup == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004958 ret = MC_TARGET_PAGE;
4959 if (target)
4960 target->page = page;
4961 }
4962 if (!ret || !target)
4963 put_page(page);
4964 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004965 /* There is a swap entry and a page doesn't exist or isn't charged */
4966 if (ent.val && !ret &&
Li Zefan34c00c32013-09-23 16:56:01 +08004967 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07004968 ret = MC_TARGET_SWAP;
4969 if (target)
4970 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004971 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004972 return ret;
4973}
4974
Naoya Horiguchi12724852012-03-21 16:34:28 -07004975#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4976/*
4977 * We don't consider swapping or file mapped pages because THP does not
4978 * support them for now.
4979 * Caller should make sure that pmd_trans_huge(pmd) is true.
4980 */
4981static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4982 unsigned long addr, pmd_t pmd, union mc_target *target)
4983{
4984 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004985 enum mc_target_type ret = MC_TARGET_NONE;
4986
4987 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08004988 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Naoya Horiguchi12724852012-03-21 16:34:28 -07004989 if (!move_anon())
4990 return ret;
Johannes Weiner1306a852014-12-10 15:44:52 -08004991 if (page->mem_cgroup == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004992 ret = MC_TARGET_PAGE;
4993 if (target) {
4994 get_page(page);
4995 target->page = page;
4996 }
4997 }
4998 return ret;
4999}
5000#else
5001static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5002 unsigned long addr, pmd_t pmd, union mc_target *target)
5003{
5004 return MC_TARGET_NONE;
5005}
5006#endif
5007
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005008static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5009 unsigned long addr, unsigned long end,
5010 struct mm_walk *walk)
5011{
5012 struct vm_area_struct *vma = walk->private;
5013 pte_t *pte;
5014 spinlock_t *ptl;
5015
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005016 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005017 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5018 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005019 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005020 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005021 }
Dave Hansen03319322011-03-22 16:32:56 -07005022
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005023 if (pmd_trans_unstable(pmd))
5024 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005025 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5026 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005027 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005028 mc.precharge++; /* increment precharge temporarily */
5029 pte_unmap_unlock(pte - 1, ptl);
5030 cond_resched();
5031
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005032 return 0;
5033}
5034
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005035static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5036{
5037 unsigned long precharge;
5038 struct vm_area_struct *vma;
5039
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005040 down_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005041 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5042 struct mm_walk mem_cgroup_count_precharge_walk = {
5043 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5044 .mm = mm,
5045 .private = vma,
5046 };
5047 if (is_vm_hugetlb_page(vma))
5048 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005049 walk_page_range(vma->vm_start, vma->vm_end,
5050 &mem_cgroup_count_precharge_walk);
5051 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005052 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005053
5054 precharge = mc.precharge;
5055 mc.precharge = 0;
5056
5057 return precharge;
5058}
5059
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005060static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5061{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005062 unsigned long precharge = mem_cgroup_count_precharge(mm);
5063
5064 VM_BUG_ON(mc.moving_task);
5065 mc.moving_task = current;
5066 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005067}
5068
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005069/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5070static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005071{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005072 struct mem_cgroup *from = mc.from;
5073 struct mem_cgroup *to = mc.to;
5074
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005075 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005076 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005077 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005078 mc.precharge = 0;
5079 }
5080 /*
5081 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5082 * we must uncharge here.
5083 */
5084 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005085 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005086 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005087 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005088 /* we must fixup refcnts and charges */
5089 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005090 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04005091 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005092 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005093
Johannes Weiner05b84302014-08-06 16:05:59 -07005094 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005095 * we charged both to->memory and to->memsw, so we
5096 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07005097 */
Johannes Weinerce00a962014-09-05 08:43:57 -04005098 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005099 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005100
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005101 css_put_many(&mc.from->css, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005102
Li Zefan40503772013-07-08 16:00:34 -07005103 /* we've already done css_get(mc.to) */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005104 mc.moved_swap = 0;
5105 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005106 memcg_oom_recover(from);
5107 memcg_oom_recover(to);
5108 wake_up_all(&mc.waitq);
5109}
5110
5111static void mem_cgroup_clear_mc(void)
5112{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005113 /*
5114 * we must clear moving_task before waking up waiters at the end of
5115 * task migration.
5116 */
5117 mc.moving_task = NULL;
5118 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005119 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005120 mc.from = NULL;
5121 mc.to = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005122 spin_unlock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005123}
5124
Tejun Heoeb954192013-08-08 20:11:23 -04005125static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005126 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005127{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005128 struct task_struct *p = cgroup_taskset_first(tset);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005129 int ret = 0;
Tejun Heoeb954192013-08-08 20:11:23 -04005130 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Glauber Costaee5e8472013-02-22 16:34:50 -08005131 unsigned long move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005132
Glauber Costaee5e8472013-02-22 16:34:50 -08005133 /*
5134 * We are now commited to this value whatever it is. Changes in this
5135 * tunable will only affect upcoming migrations, not the current one.
5136 * So we need to save it, and keep it going.
5137 */
5138 move_charge_at_immigrate = memcg->move_charge_at_immigrate;
5139 if (move_charge_at_immigrate) {
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005140 struct mm_struct *mm;
5141 struct mem_cgroup *from = mem_cgroup_from_task(p);
5142
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005143 VM_BUG_ON(from == memcg);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005144
5145 mm = get_task_mm(p);
5146 if (!mm)
5147 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005148 /* We move charges only when we move an owner of the mm */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005149 if (mm->owner == p) {
5150 VM_BUG_ON(mc.from);
5151 VM_BUG_ON(mc.to);
5152 VM_BUG_ON(mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005153 VM_BUG_ON(mc.moved_charge);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005154 VM_BUG_ON(mc.moved_swap);
Johannes Weiner247b1442014-12-10 15:44:11 -08005155
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005156 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005157 mc.from = from;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005158 mc.to = memcg;
Glauber Costaee5e8472013-02-22 16:34:50 -08005159 mc.immigrate_flags = move_charge_at_immigrate;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005160 spin_unlock(&mc.lock);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005161 /* We set mc.moving_task later */
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005162
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005163 ret = mem_cgroup_precharge_mc(mm);
5164 if (ret)
5165 mem_cgroup_clear_mc();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005166 }
5167 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005168 }
5169 return ret;
5170}
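/*
 * Illustrative setup for the above (the documented legacy interface,
 * not code in this file): charge moving must be enabled on the
 * destination cgroup before migrating a task, e.g.
 *
 *	echo 3 > /sys/fs/cgroup/memory/dst/memory.move_charge_at_immigrate
 *
 * bit 0 selects anonymous pages, bit 1 file pages (cf. move_anon()
 * and move_file()).
 */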
5171
Tejun Heoeb954192013-08-08 20:11:23 -04005172static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005173 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005174{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08005175 if (mc.to)
5176 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005177}
5178
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005179static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5180 unsigned long addr, unsigned long end,
5181 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005182{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005183 int ret = 0;
5184 struct vm_area_struct *vma = walk->private;
5185 pte_t *pte;
5186 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005187 enum mc_target_type target_type;
5188 union mc_target target;
5189 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005190
Naoya Horiguchi12724852012-03-21 16:34:28 -07005191 /*
5192	 * We don't take compound_lock() here, but no race with a splitting
5193	 * thp happens because:
5194	 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
5195	 * being split, which means there's no concurrent thp split,
5196	 * - if another thread runs into split_huge_page() just after we
5197	 * entered this if-block, the thread must wait for the page table
5198	 * lock to be unlocked in __split_huge_page_splitting(), where the
5199	 * main part of the thp split has not been executed yet.
5200 */
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005201 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Hugh Dickins62ade862012-05-18 11:28:34 -07005202 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005203 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07005204 return 0;
5205 }
5206 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5207 if (target_type == MC_TARGET_PAGE) {
5208 page = target.page;
5209 if (!isolate_lru_page(page)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005210 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
Johannes Weiner1306a852014-12-10 15:44:52 -08005211 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005212 mc.precharge -= HPAGE_PMD_NR;
5213 mc.moved_charge += HPAGE_PMD_NR;
5214 }
5215 putback_lru_page(page);
5216 }
5217 put_page(page);
5218 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005219 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005220 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005221 }
5222
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005223 if (pmd_trans_unstable(pmd))
5224 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005225retry:
5226 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5227 for (; addr != end; addr += PAGE_SIZE) {
5228 pte_t ptent = *(pte++);
Daisuke Nishimura02491442010-03-10 15:22:17 -08005229 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005230
5231 if (!mc.precharge)
5232 break;
5233
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005234 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005235 case MC_TARGET_PAGE:
5236 page = target.page;
5237 if (isolate_lru_page(page))
5238 goto put;
Johannes Weiner1306a852014-12-10 15:44:52 -08005239 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005240 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005241 /* we uncharge from mc.from later. */
5242 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005243 }
5244 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005245put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005246 put_page(page);
5247 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005248 case MC_TARGET_SWAP:
5249 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07005250 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005251 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005252 /* we fixup refcnts and charges later. */
5253 mc.moved_swap++;
5254 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08005255 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005256 default:
5257 break;
5258 }
5259 }
5260 pte_unmap_unlock(pte - 1, ptl);
5261 cond_resched();
5262
5263 if (addr != end) {
5264 /*
5265 * We have consumed all precharges we got in can_attach().
5266 * We try charge one by one, but don't do any additional
5267 * charges to mc.to if we have failed in charge once in attach()
5268 * phase.
5269 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005270 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005271 if (!ret)
5272 goto retry;
5273 }
5274
5275 return ret;
5276}
5277
5278static void mem_cgroup_move_charge(struct mm_struct *mm)
5279{
5280 struct vm_area_struct *vma;
5281
5282 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08005283 /*
5284 * Signal mem_cgroup_begin_page_stat() to take the memcg's
5285 * move_lock while we're moving its pages to another memcg.
5286 * Then wait for already started RCU-only updates to finish.
5287 */
5288 atomic_inc(&mc.from->moving_account);
5289 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005290retry:
5291 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5292 /*
5293	 * Someone who is holding the mmap_sem might be waiting in
5294 * waitq. So we cancel all extra charges, wake up all waiters,
5295 * and retry. Because we cancel precharges, we might not be able
5296 * to move enough charges, but moving charge is a best-effort
5297 * feature anyway, so it wouldn't be a big problem.
5298 */
5299 __mem_cgroup_clear_mc();
5300 cond_resched();
5301 goto retry;
5302 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005303 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5304 int ret;
5305 struct mm_walk mem_cgroup_move_charge_walk = {
5306 .pmd_entry = mem_cgroup_move_charge_pte_range,
5307 .mm = mm,
5308 .private = vma,
5309 };
5310 if (is_vm_hugetlb_page(vma))
5311 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005312 ret = walk_page_range(vma->vm_start, vma->vm_end,
5313 &mem_cgroup_move_charge_walk);
5314 if (ret)
5315 /*
5316	 * this means we have consumed all precharges and failed
5317	 * to do an additional charge. Just abandon here.
5318 */
5319 break;
5320 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005321 up_read(&mm->mmap_sem);
Johannes Weiner312722c2014-12-10 15:44:25 -08005322 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005323}
5324
Tejun Heoeb954192013-08-08 20:11:23 -04005325static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005326 struct cgroup_taskset *tset)
Balbir Singh67e465a2008-02-07 00:13:54 -08005327{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005328 struct task_struct *p = cgroup_taskset_first(tset);
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005329 struct mm_struct *mm = get_task_mm(p);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005330
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005331 if (mm) {
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005332 if (mc.to)
5333 mem_cgroup_move_charge(mm);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005334 mmput(mm);
5335 }
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005336 if (mc.to)
5337 mem_cgroup_clear_mc();
Balbir Singh67e465a2008-02-07 00:13:54 -08005338}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005339#else /* !CONFIG_MMU */
Tejun Heoeb954192013-08-08 20:11:23 -04005340static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005341 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005342{
5343 return 0;
5344}
Tejun Heoeb954192013-08-08 20:11:23 -04005345static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005346 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005347{
5348}
Tejun Heoeb954192013-08-08 20:11:23 -04005349static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005350 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005351{
5352}
5353#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08005354
Tejun Heof00baae2013-04-15 13:41:15 -07005355/*
5356	 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
Tejun Heoaa6ec292014-07-09 10:08:08 -04005357 * to verify whether we're attached to the default hierarchy on each mount
5358 * attempt.
Tejun Heof00baae2013-04-15 13:41:15 -07005359 */
Tejun Heoeb954192013-08-08 20:11:23 -04005360static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
Tejun Heof00baae2013-04-15 13:41:15 -07005361{
5362 /*
Tejun Heoaa6ec292014-07-09 10:08:08 -04005363 * use_hierarchy is forced on the default hierarchy. cgroup core
Tejun Heof00baae2013-04-15 13:41:15 -07005364 * guarantees that @root doesn't have any children, so turning it
5365 * on for the root memcg is enough.
5366 */
Tejun Heoaa6ec292014-07-09 10:08:08 -04005367 if (cgroup_on_dfl(root_css->cgroup))
Tejun Heoeb954192013-08-08 20:11:23 -04005368 mem_cgroup_from_css(root_css)->use_hierarchy = true;
Tejun Heof00baae2013-04-15 13:41:15 -07005369}
5370
Tejun Heo073219e2014-02-08 10:36:58 -05005371struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08005372 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08005373 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08005374 .css_offline = mem_cgroup_css_offline,
5375 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04005376 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005377 .can_attach = mem_cgroup_can_attach,
5378 .cancel_attach = mem_cgroup_cancel_attach,
Balbir Singh67e465a2008-02-07 00:13:54 -08005379 .attach = mem_cgroup_move_task,
Tejun Heof00baae2013-04-15 13:41:15 -07005380 .bind = mem_cgroup_bind,
Tejun Heo55779642014-07-15 11:05:09 -04005381 .legacy_cftypes = mem_cgroup_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005382 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005383};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005384
Andrew Mortonc255a452012-07-31 16:43:02 -07005385#ifdef CONFIG_MEMCG_SWAP
Michal Hockoa42c3902010-11-24 12:57:08 -08005386static int __init enable_swap_account(char *s)
5387{
Michal Hockoa2c89902011-05-24 17:12:50 -07005388 if (!strcmp(s, "1"))
Michal Hockoa42c3902010-11-24 12:57:08 -08005389 really_do_swap_account = 1;
Michal Hockoa2c89902011-05-24 17:12:50 -07005390 else if (!strcmp(s, "0"))
Michal Hockoa42c3902010-11-24 12:57:08 -08005391 really_do_swap_account = 0;
5392 return 1;
5393}
Michal Hockoa2c89902011-05-24 17:12:50 -07005394__setup("swapaccount=", enable_swap_account);
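/*
 * Boot-time usage of the above (a documented kernel parameter): swap
 * accounting can be toggled on the command line, e.g. "swapaccount=0"
 * keeps the memsw files from being registered at all.
 */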
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005395
Michal Hocko2d110852013-02-22 16:34:43 -08005396static void __init memsw_file_init(void)
5397{
Tejun Heo2cf669a2014-07-15 11:05:09 -04005398 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5399 memsw_cgroup_files));
Michal Hocko2d110852013-02-22 16:34:43 -08005400}
Michal Hocko6acc8b02013-02-22 16:34:45 -08005401
5402static void __init enable_swap_cgroup(void)
5403{
5404 if (!mem_cgroup_disabled() && really_do_swap_account) {
5405 do_swap_account = 1;
5406 memsw_file_init();
5407 }
5408}
5409
Michal Hocko2d110852013-02-22 16:34:43 -08005410#else
Michal Hocko6acc8b02013-02-22 16:34:45 -08005411static void __init enable_swap_cgroup(void)
Michal Hocko2d110852013-02-22 16:34:43 -08005412{
5413}
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005414#endif
Michal Hocko2d110852013-02-22 16:34:43 -08005415
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005416#ifdef CONFIG_MEMCG_SWAP
5417/**
5418 * mem_cgroup_swapout - transfer a memsw charge to swap
5419 * @page: page whose memsw charge to transfer
5420 * @entry: swap entry to move the charge to
5421 *
5422 * Transfer the memsw charge of @page to @entry.
5423 */
5424void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5425{
Johannes Weiner7bdd143c2014-12-10 15:43:54 -08005426 struct mem_cgroup *memcg;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005427 unsigned short oldid;
5428
5429 VM_BUG_ON_PAGE(PageLRU(page), page);
5430 VM_BUG_ON_PAGE(page_count(page), page);
5431
5432 if (!do_swap_account)
5433 return;
5434
Johannes Weiner1306a852014-12-10 15:44:52 -08005435 memcg = page->mem_cgroup;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005436
5437 /* Readahead page, never charged */
Johannes Weiner29833312014-12-10 15:44:02 -08005438 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005439 return;
5440
Johannes Weiner7bdd143c2014-12-10 15:43:54 -08005441 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005442 VM_BUG_ON_PAGE(oldid, page);
Johannes Weiner7bdd143c2014-12-10 15:43:54 -08005443 mem_cgroup_swap_statistics(memcg, true);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005444
Johannes Weiner1306a852014-12-10 15:44:52 -08005445 page->mem_cgroup = NULL;
Johannes Weiner7bdd143c2014-12-10 15:43:54 -08005446
5447 if (!mem_cgroup_is_root(memcg))
5448 page_counter_uncharge(&memcg->memory, 1);
5449
5450 /* XXX: caller holds IRQ-safe mapping->tree_lock */
5451 VM_BUG_ON(!irqs_disabled());
5452
5453 mem_cgroup_charge_statistics(memcg, page, -1);
5454 memcg_check_events(memcg, page);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005455}
5456
5457/**
5458 * mem_cgroup_uncharge_swap - uncharge a swap entry
5459 * @entry: swap entry to uncharge
5460 *
5461 * Drop the memsw charge associated with @entry.
5462 */
5463void mem_cgroup_uncharge_swap(swp_entry_t entry)
5464{
5465 struct mem_cgroup *memcg;
5466 unsigned short id;
5467
5468 if (!do_swap_account)
5469 return;
5470
5471 id = swap_cgroup_record(entry, 0);
5472 rcu_read_lock();
5473 memcg = mem_cgroup_lookup(id);
5474 if (memcg) {
Johannes Weinerce00a962014-09-05 08:43:57 -04005475 if (!mem_cgroup_is_root(memcg))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005476 page_counter_uncharge(&memcg->memsw, 1);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005477 mem_cgroup_swap_statistics(memcg, false);
5478 css_put(&memcg->css);
5479 }
5480 rcu_read_unlock();
5481}
5482#endif
5483
Johannes Weiner00501b52014-08-08 14:19:20 -07005484/**
5485 * mem_cgroup_try_charge - try charging a page
5486 * @page: page to charge
5487 * @mm: mm context of the victim
5488 * @gfp_mask: reclaim mode
5489 * @memcgp: charged memcg return
5490 *
5491 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5492 * pages according to @gfp_mask if necessary.
5493 *
5494 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5495 * Otherwise, an error code is returned.
5496 *
5497 * After page->mapping has been set up, the caller must finalize the
5498 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5499 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5500 */
5501int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5502 gfp_t gfp_mask, struct mem_cgroup **memcgp)
5503{
5504 struct mem_cgroup *memcg = NULL;
5505 unsigned int nr_pages = 1;
5506 int ret = 0;
5507
5508 if (mem_cgroup_disabled())
5509 goto out;
5510
5511 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005512 /*
5513 * Every swap fault against a single page tries to charge the
5514 * page, bail as early as possible. shmem_unuse() encounters
5515 * already charged pages, too. The USED bit is protected by
5516 * the page lock, which serializes swap cache removal, which
5517 * in turn serializes uncharging.
5518 */
Johannes Weiner1306a852014-12-10 15:44:52 -08005519 if (page->mem_cgroup)
Johannes Weiner00501b52014-08-08 14:19:20 -07005520 goto out;
5521 }
5522
5523 if (PageTransHuge(page)) {
5524 nr_pages <<= compound_order(page);
5525 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5526 }
5527
5528 if (do_swap_account && PageSwapCache(page))
5529 memcg = try_get_mem_cgroup_from_page(page);
5530 if (!memcg)
5531 memcg = get_mem_cgroup_from_mm(mm);
5532
5533 ret = try_charge(memcg, gfp_mask, nr_pages);
5534
5535 css_put(&memcg->css);
5536
5537 if (ret == -EINTR) {
5538 memcg = root_mem_cgroup;
5539 ret = 0;
5540 }
5541out:
5542 *memcgp = memcg;
5543 return ret;
5544}
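/*
 * A minimal caller sketch of the try/commit/cancel protocol described
 * above (illustrative; the fault-handler details and
 * install_page_mapping() are assumptions, not code from this file):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		return VM_FAULT_OOM;
 *	if (install_page_mapping(page)) {	// hypothetical failure
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return VM_FAULT_OOM;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 */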
5545
5546/**
5547 * mem_cgroup_commit_charge - commit a page charge
5548 * @page: page to charge
5549 * @memcg: memcg to charge the page to
5550 * @lrucare: page might be on LRU already
5551 *
5552 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5553 * after page->mapping has been set up. This must happen atomically
5554 * as part of the page instantiation, i.e. under the page table lock
5555 * for anonymous pages, under the page lock for page and swap cache.
5556 *
5557 * In addition, the page must not be on the LRU during the commit, to
5558 * prevent racing with task migration. If it might be, use @lrucare.
5559 *
5560 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5561 */
5562void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5563 bool lrucare)
5564{
5565 unsigned int nr_pages = 1;
5566
5567 VM_BUG_ON_PAGE(!page->mapping, page);
5568 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5569
5570 if (mem_cgroup_disabled())
5571 return;
5572 /*
5573 * Swap faults will attempt to charge the same page multiple
5574 * times. But reuse_swap_page() might have removed the page
5575 * from swapcache already, so we can't check PageSwapCache().
5576 */
5577 if (!memcg)
5578 return;
5579
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005580 commit_charge(page, memcg, lrucare);
5581
Johannes Weiner00501b52014-08-08 14:19:20 -07005582 if (PageTransHuge(page)) {
5583 nr_pages <<= compound_order(page);
5584 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5585 }
5586
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005587 local_irq_disable();
5588 mem_cgroup_charge_statistics(memcg, page, nr_pages);
5589 memcg_check_events(memcg, page);
5590 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07005591
5592 if (do_swap_account && PageSwapCache(page)) {
5593 swp_entry_t entry = { .val = page_private(page) };
5594 /*
5595 * The swap entry might not get freed for a long time,
5596 * let's not wait for it. The page already received a
5597 * memory+swap charge, drop the swap entry duplicate.
5598 */
5599 mem_cgroup_uncharge_swap(entry);
5600 }
5601}
5602
5603/**
5604 * mem_cgroup_cancel_charge - cancel a page charge
5605 * @page: page to charge
5606 * @memcg: memcg to charge the page to
5607 *
5608 * Cancel a charge transaction started by mem_cgroup_try_charge().
5609 */
5610void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
5611{
5612 unsigned int nr_pages = 1;
5613
5614 if (mem_cgroup_disabled())
5615 return;
5616 /*
5617 * Swap faults will attempt to charge the same page multiple
5618 * times. But reuse_swap_page() might have removed the page
5619 * from swapcache already, so we can't check PageSwapCache().
5620 */
5621 if (!memcg)
5622 return;
5623
5624 if (PageTransHuge(page)) {
5625 nr_pages <<= compound_order(page);
5626 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5627 }
5628
5629 cancel_charge(memcg, nr_pages);
5630}
5631
Johannes Weiner747db952014-08-08 14:19:24 -07005632static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
Johannes Weiner747db952014-08-08 14:19:24 -07005633 unsigned long nr_anon, unsigned long nr_file,
5634 unsigned long nr_huge, struct page *dummy_page)
5635{
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005636 unsigned long nr_pages = nr_anon + nr_file;
Johannes Weiner747db952014-08-08 14:19:24 -07005637 unsigned long flags;
5638
Johannes Weinerce00a962014-09-05 08:43:57 -04005639 if (!mem_cgroup_is_root(memcg)) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005640 page_counter_uncharge(&memcg->memory, nr_pages);
5641 if (do_swap_account)
5642 page_counter_uncharge(&memcg->memsw, nr_pages);
Johannes Weinerce00a962014-09-05 08:43:57 -04005643 memcg_oom_recover(memcg);
5644 }
Johannes Weiner747db952014-08-08 14:19:24 -07005645
5646 local_irq_save(flags);
5647 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5648 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5649 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5650 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005651 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005652 memcg_check_events(memcg, dummy_page);
5653 local_irq_restore(flags);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005654
5655 if (!mem_cgroup_is_root(memcg))
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005656 css_put_many(&memcg->css, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005657}
5658
5659static void uncharge_list(struct list_head *page_list)
5660{
5661 struct mem_cgroup *memcg = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005662 unsigned long nr_anon = 0;
5663 unsigned long nr_file = 0;
5664 unsigned long nr_huge = 0;
5665 unsigned long pgpgout = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005666 struct list_head *next;
5667 struct page *page;
5668
5669 next = page_list->next;
5670 do {
5671 unsigned int nr_pages = 1;
Johannes Weiner747db952014-08-08 14:19:24 -07005672
5673 page = list_entry(next, struct page, lru);
5674 next = page->lru.next;
5675
5676 VM_BUG_ON_PAGE(PageLRU(page), page);
5677 VM_BUG_ON_PAGE(page_count(page), page);
5678
Johannes Weiner1306a852014-12-10 15:44:52 -08005679 if (!page->mem_cgroup)
Johannes Weiner747db952014-08-08 14:19:24 -07005680 continue;
5681
5682 /*
5683 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08005684 * page->mem_cgroup at this point; we have fully
Johannes Weiner29833312014-12-10 15:44:02 -08005685 * exclusive access to the page.
Johannes Weiner747db952014-08-08 14:19:24 -07005686 */
5687
Johannes Weiner1306a852014-12-10 15:44:52 -08005688 if (memcg != page->mem_cgroup) {
Johannes Weiner747db952014-08-08 14:19:24 -07005689 if (memcg) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005690 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5691 nr_huge, page);
5692 pgpgout = nr_anon = nr_file = nr_huge = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005693 }
Johannes Weiner1306a852014-12-10 15:44:52 -08005694 memcg = page->mem_cgroup;
Johannes Weiner747db952014-08-08 14:19:24 -07005695 }
5696
5697 if (PageTransHuge(page)) {
5698 nr_pages <<= compound_order(page);
5699 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5700 nr_huge += nr_pages;
5701 }
5702
5703 if (PageAnon(page))
5704 nr_anon += nr_pages;
5705 else
5706 nr_file += nr_pages;
5707
Johannes Weiner1306a852014-12-10 15:44:52 -08005708 page->mem_cgroup = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005709
5710 pgpgout++;
5711 } while (next != page_list);
5712
5713 if (memcg)
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005714 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5715 nr_huge, page);
Johannes Weiner747db952014-08-08 14:19:24 -07005716}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}
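
/*
 * A minimal usage sketch (hypothetical helper, not part of this file;
 * free_hot_cold_page() is the allocator's single-page free path): the
 * charge is dropped once the last reference to the page is gone and
 * just before the page goes back to the allocator.
 *
 *	static void example_free_page(struct page *page)
 *	{
 *		mem_cgroup_uncharge(page);
 *		free_hot_cold_page(page, false);
 *	}
 */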

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}
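
/*
 * A minimal usage sketch (hypothetical helper, not part of this file;
 * free_hot_cold_page_list() is the allocator's batch free path): pages
 * gathered on a private list are uncharged in one pass, letting
 * uncharge_list() batch pages that share a memcg.
 *
 *	static void example_free_page_list(struct list_head *pages)
 *	{
 *		mem_cgroup_uncharge_list(pages);
 *		free_hot_cold_page_list(pages, false);
 *	}
 */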

/**
 * mem_cgroup_migrate - migrate a charge to another page
 * @oldpage: currently charged page
 * @newpage: page to transfer the charge to
 * @lrucare: either or both pages might be on the LRU already
 *
 * Migrate the charge from @oldpage to @newpage.
 *
 * Both pages must be locked, and @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare)
{
	struct mem_cgroup *memcg;
	int isolated;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
	VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/*
	 * Swapcache readahead pages can get migrated before being
	 * charged, and migration from compaction can happen to an
	 * uncharged page when the PFN walker finds a page that
	 * reclaim just put back on the LRU but has not released yet.
	 */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	if (lrucare)
		lock_page_lru(oldpage, &isolated);

	oldpage->mem_cgroup = NULL;

	if (lrucare)
		unlock_page_lru(oldpage, isolated);

	commit_charge(newpage, memcg, lrucare);
}
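
/*
 * A minimal call-order sketch for a hypothetical migration site
 * (illustration only, error handling elided): both pages are locked
 * and @newpage's mapping is set up before the charge moves over.
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	newpage->mapping = oldpage->mapping;
 *	newpage->index = oldpage->index;
 *	mem_cgroup_migrate(oldpage, newpage, false);
 */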

/*
 * subsys_initcall() for the memory controller.
 *
 * Some parts, like hotcpu_notifier(), have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug), but
 * basically everything that doesn't depend on a specific mem_cgroup
 * structure should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	mem_cgroup_soft_limit_tree_init();
	memcg_stock_init();
	return 0;
}
subsys_initcall(mem_cgroup_init);