/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

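/*
 * Illustrative note (not part of the original comments): with the targets
 * above, mem_cgroup_event_ratelimit() lets the threshold check in
 * memcg_check_events() fire roughly once per 128 page events on a given
 * CPU, while the soft-limit tree update and the NUMA info refresh are
 * rate-limited to roughly once per 1024 page events.
 */
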
struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEMCG_NR_EVENTS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct reclaim_iter	iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

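/*
 * Worked example (illustrative, not taken from the original comments): with
 * entries[] holding thresholds of 4, 8 and 16 (in the counter's units) and a
 * current usage of 10, current_threshold is 1, i.e. it points at the "8"
 * entry - the largest threshold still below or equal to the usage. When the
 * usage crosses 16, mem_cgroup_threshold() advances it to 2.
 */
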
/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter memsw;
	struct page_counter kmem;

	/* Normal memory consumption range */
	unsigned long low;
	unsigned long high;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/* css_online() has been completed */
	int initialized;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;
	atomic_t	oom_wakeups;

	int		swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	struct task_struct	*move_lock_task;
	unsigned long		move_lock_flags;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	bool kmem_acct_activated;
	bool kmem_acct_active;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

#ifdef CONFIG_MEMCG_KMEM
bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return memcg->kmem_acct_active;
}
#endif

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

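/*
 * Illustrative example of the encoding above (not from the original
 * comments): MEMFILE_PRIVATE(_MEMSWAP, 2) packs the resource type in the
 * upper 16 bits and the attribute in the lower 16 bits, giving 0x10002
 * since _MEMSWAP == 1; MEMFILE_TYPE() then recovers 1 and MEMFILE_ATTR()
 * recovers 2.
 */
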
/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg->css.id;
}

/*
 * A helper function to get mem_cgroup from ID. Must be called under
 * rcu_read_lock(). The caller is responsible for calling
 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
 * refcnt from swap can be called against removed memcg.)
 */
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id, &memory_cgrp_subsys);
	return mem_cgroup_from_css(css);
}

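/*
 * Illustrative lookup pattern (a sketch, not copied from a particular
 * caller): the id is only stable under RCU, and a reference must be taken
 * before the memcg can be used for charging.
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */
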
/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) &&
		    css_tryget_online(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* CONFIG_MEMCG_KMEM */

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement a periodic synchronization of the counter in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because he accounts memory. Even if we provide quick-and-fuzzy
 * reads, we always have to visit all online cpus and make the sum. So,
 * for now, unnecessary synchronization is not implemented. (just
 * implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

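/*
 * Illustrative note on the signed comparison above (not from the original
 * comments): it is wraparound-safe in the same way as time_after(). E.g.
 * with next == 100 and val == 130, (long)next - (long)val == -30 < 0, so
 * the target fires and is re-armed at val plus the per-target interval;
 * the result stays correct even after nr_page_events wraps around.
 */
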
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		do {
			pos = READ_ONCE(iter->position);
			/*
			 * A racing update may change the position and
			 * put the last reference, hence css_tryget(),
			 * or retry to see the updated position.
			 */
		} while (pos && !css_tryget(&pos->css));
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css)) {
			/*
			 * Make sure the memcg is initialized:
			 * mem_cgroup_css_online() orders the
			 * initialization against setting the flag.
			 */
			if (smp_load_acquire(&memcg->initialized))
				break;

			css_put(css);
		}

		memcg = NULL;
	}

	if (reclaim) {
		if (cmpxchg(&iter->position, pos, memcg) == pos) {
			if (memcg)
				css_get(&memcg->css);
			if (pos)
				css_put(&pos->css);
		}

		/*
		 * pairs with css_tryget when dereferencing iter->position
		 * above.
		 */
		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

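/*
 * Typical usage of the iterators above (an illustrative sketch;
 * some_condition() is a placeholder, not a real helper): a walk that stops
 * early must drop the reference held on the last returned memcg via
 * mem_cgroup_iter_break().
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, memcg) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(memcg, iter);
 *			break;
 *		}
 *	}
 */
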
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mem_cgroup_count_vm_event);

Johannes Weiner925b7672012-01-12 17:18:15 -08001181/**
1182 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1183 * @zone: zone of the wanted lruvec
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001184 * @memcg: memcg of the wanted lruvec
Johannes Weiner925b7672012-01-12 17:18:15 -08001185 *
1186 * Returns the lru list vector holding pages for the given @zone and
 1187 * @memcg. This can be the global zone lruvec, if the memory controller
1188 * is disabled.
1189 */
1190struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1191 struct mem_cgroup *memcg)
1192{
1193 struct mem_cgroup_per_zone *mz;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001194 struct lruvec *lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001195
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001196 if (mem_cgroup_disabled()) {
1197 lruvec = &zone->lruvec;
1198 goto out;
1199 }
Johannes Weiner925b7672012-01-12 17:18:15 -08001200
Jianyu Zhane2318752014-06-06 14:38:20 -07001201 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001202 lruvec = &mz->lruvec;
1203out:
1204 /*
1205 * Since a node can be onlined after the mem_cgroup was created,
1206 * we have to be prepared to initialize lruvec->zone here;
1207 * and if offlined then reonlined, we need to reinitialize it.
1208 */
1209 if (unlikely(lruvec->zone != zone))
1210 lruvec->zone = zone;
1211 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001212}
1213
Johannes Weiner925b7672012-01-12 17:18:15 -08001214/**
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001215 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
Johannes Weiner925b7672012-01-12 17:18:15 -08001216 * @page: the page
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001217 * @zone: zone of the page
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001218 *
1219 * This function is only safe when following the LRU page isolation
1220 * and putback protocol: the LRU lock must be held, and the page must
1221 * either be PageLRU() or the caller must have isolated/allocated it.
Minchan Kim3f58a822011-03-22 16:32:53 -07001222 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001223struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
Minchan Kim3f58a822011-03-22 16:32:53 -07001224{
1225 struct mem_cgroup_per_zone *mz;
Johannes Weiner925b7672012-01-12 17:18:15 -08001226 struct mem_cgroup *memcg;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001227 struct lruvec *lruvec;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001228
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001229 if (mem_cgroup_disabled()) {
1230 lruvec = &zone->lruvec;
1231 goto out;
1232 }
Christoph Lameterb69408e2008-10-18 20:26:14 -07001233
Johannes Weiner1306a852014-12-10 15:44:52 -08001234 memcg = page->mem_cgroup;
Hugh Dickins75121022012-03-05 14:59:18 -08001235 /*
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001236 * Swapcache readahead pages are added to the LRU - and
Johannes Weiner29833312014-12-10 15:44:02 -08001237 * possibly migrated - before they are charged.
Hugh Dickins75121022012-03-05 14:59:18 -08001238 */
Johannes Weiner29833312014-12-10 15:44:02 -08001239 if (!memcg)
1240 memcg = root_mem_cgroup;
Hugh Dickins75121022012-03-05 14:59:18 -08001241
Jianyu Zhane2318752014-06-06 14:38:20 -07001242 mz = mem_cgroup_page_zoneinfo(memcg, page);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001243 lruvec = &mz->lruvec;
1244out:
1245 /*
1246 * Since a node can be onlined after the mem_cgroup was created,
1247 * we have to be prepared to initialize lruvec->zone here;
1248 * and if offlined then reonlined, we need to reinitialize it.
1249 */
1250 if (unlikely(lruvec->zone != zone))
1251 lruvec->zone = zone;
1252 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001253}
1254
1255/**
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001256 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1257 * @lruvec: mem_cgroup per zone lru vector
1258 * @lru: index of lru list the page is sitting on
1259 * @nr_pages: positive when adding or negative when removing
Johannes Weiner925b7672012-01-12 17:18:15 -08001260 *
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001261 * This function must be called when a page is added to or removed from an
1262 * lru list.
Johannes Weiner925b7672012-01-12 17:18:15 -08001263 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001264void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1265 int nr_pages)
Johannes Weiner925b7672012-01-12 17:18:15 -08001266{
1267 struct mem_cgroup_per_zone *mz;
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001268 unsigned long *lru_size;
Johannes Weiner925b7672012-01-12 17:18:15 -08001269
1270 if (mem_cgroup_disabled())
1271 return;
1272
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001273 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1274 lru_size = mz->lru_size + lru;
1275 *lru_size += nr_pages;
1276 VM_BUG_ON((long)(*lru_size) < 0);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001277}
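/*
 * Illustrative call pattern, a sketch rather than a statement about
 * actual call sites: an LRU add/del helper passes a positive delta
 * when linking a page and a negative one when unlinking it, e.g.
 *
 *	mem_cgroup_update_lru_size(lruvec, lru, hpage_nr_pages(page));
 *	...
 *	mem_cgroup_update_lru_size(lruvec, lru, -hpage_nr_pages(page));
 */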
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08001278
Johannes Weiner2314b422014-12-10 15:44:33 -08001279bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root)
Michal Hocko3e920412011-07-26 16:08:29 -07001280{
Johannes Weiner2314b422014-12-10 15:44:33 -08001281 if (root == memcg)
Johannes Weiner91c637342012-05-29 15:06:24 -07001282 return true;
Johannes Weiner2314b422014-12-10 15:44:33 -08001283 if (!root->use_hierarchy)
Johannes Weiner91c637342012-05-29 15:06:24 -07001284 return false;
Johannes Weiner2314b422014-12-10 15:44:33 -08001285 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001286}
1287
Johannes Weiner2314b422014-12-10 15:44:33 -08001288bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001289{
Johannes Weiner2314b422014-12-10 15:44:33 -08001290 struct mem_cgroup *task_memcg;
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001291 struct task_struct *p;
David Rientjesffbdccf2013-07-03 15:01:23 -07001292 bool ret;
David Rientjes4c4a2212008-02-07 00:14:06 -08001293
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001294 p = find_lock_task_mm(task);
David Rientjesde077d22012-01-12 17:18:52 -08001295 if (p) {
Johannes Weiner2314b422014-12-10 15:44:33 -08001296 task_memcg = get_mem_cgroup_from_mm(p->mm);
David Rientjesde077d22012-01-12 17:18:52 -08001297 task_unlock(p);
1298 } else {
1299 /*
1300 * All threads may have already detached their mm's, but the oom
1301 * killer still needs to detect if they have already been oom
1302 * killed to prevent needlessly killing additional tasks.
1303 */
David Rientjesffbdccf2013-07-03 15:01:23 -07001304 rcu_read_lock();
Johannes Weiner2314b422014-12-10 15:44:33 -08001305 task_memcg = mem_cgroup_from_task(task);
1306 css_get(&task_memcg->css);
David Rientjesffbdccf2013-07-03 15:01:23 -07001307 rcu_read_unlock();
David Rientjesde077d22012-01-12 17:18:52 -08001308 }
Johannes Weiner2314b422014-12-10 15:44:33 -08001309 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1310 css_put(&task_memcg->css);
David Rientjes4c4a2212008-02-07 00:14:06 -08001311 return ret;
1312}
1313
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001314int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001315{
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001316 unsigned long inactive_ratio;
Johannes Weiner9b272972011-11-02 13:38:23 -07001317 unsigned long inactive;
1318 unsigned long active;
1319 unsigned long gb;
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001320
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001321 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1322 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001323
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001324 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1325 if (gb)
1326 inactive_ratio = int_sqrt(10 * gb);
1327 else
1328 inactive_ratio = 1;
1329
Johannes Weiner9b272972011-11-02 13:38:23 -07001330 return inactive * inactive_ratio < active;
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001331}
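/*
 * Worked example of the ratio above, assuming 4KB pages: with 4GB of
 * anon pages on this lruvec the shift gives gb = 4, so inactive_ratio
 * becomes int_sqrt(40) = 6 and the inactive list counts as low once
 * 6 * inactive < active.  Below 1GB, gb is 0 and the ratio falls back
 * to 1:1.
 */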
1332
Vladimir Davydov90cbc252015-02-11 15:25:55 -08001333bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
1334{
1335 struct mem_cgroup_per_zone *mz;
1336 struct mem_cgroup *memcg;
1337
1338 if (mem_cgroup_disabled())
1339 return true;
1340
1341 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1342 memcg = mz->memcg;
1343
1344 return !!(memcg->css.flags & CSS_ONLINE);
1345}
1346
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001347#define mem_cgroup_from_counter(counter, member) \
Balbir Singh6d61ef42009-01-07 18:08:06 -08001348 container_of(counter, struct mem_cgroup, member)
1349
Johannes Weiner19942822011-02-01 15:52:43 -08001350/**
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001351 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
Wanpeng Lidad75572012-06-20 12:53:01 -07001352 * @memcg: the memory cgroup
Johannes Weiner19942822011-02-01 15:52:43 -08001353 *
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001354 * Returns the maximum amount of memory @mem can be charged with, in
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001355 * pages.
Johannes Weiner19942822011-02-01 15:52:43 -08001356 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001357static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
Johannes Weiner19942822011-02-01 15:52:43 -08001358{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001359 unsigned long margin = 0;
1360 unsigned long count;
1361 unsigned long limit;
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001362
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001363 count = page_counter_read(&memcg->memory);
Jason Low4db0c3c2015-04-15 16:14:08 -07001364 limit = READ_ONCE(memcg->memory.limit);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001365 if (count < limit)
1366 margin = limit - count;
1367
1368 if (do_swap_account) {
1369 count = page_counter_read(&memcg->memsw);
Jason Low4db0c3c2015-04-15 16:14:08 -07001370 limit = READ_ONCE(memcg->memsw.limit);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001371 if (count <= limit)
1372 margin = min(margin, limit - count);
1373 }
1374
1375 return margin;
Johannes Weiner19942822011-02-01 15:52:43 -08001376}
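/*
 * Worked example: with memory at 80 of 100 pages and, with swap
 * accounting enabled, memsw at 98 of 100 pages, the margin is
 * min(100 - 80, 100 - 98) = 2 pages; the headroom is bounded by
 * whichever counter is closer to its limit.
 */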
1377
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001378int mem_cgroup_swappiness(struct mem_cgroup *memcg)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001379{
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001380 /* root ? */
Linus Torvalds14208b02014-06-09 15:03:33 -07001381 if (mem_cgroup_disabled() || !memcg->css.parent)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001382 return vm_swappiness;
1383
Johannes Weinerbf1ff262011-03-23 16:42:32 -07001384 return memcg->swappiness;
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001385}
1386
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001387/*
Qiang Huangbdcbb652014-06-04 16:08:21 -07001388 * A routine for checking whether a memcg is under move_account() or not.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001389 *
Qiang Huangbdcbb652014-06-04 16:08:21 -07001390 * Check whether a cgroup is mc.from, mc.to, or in the hierarchy of the
 1391 * moving cgroups. This is used for waiting at high memory pressure
 1392 * caused by "move".
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001393 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001394static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001395{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001396 struct mem_cgroup *from;
1397 struct mem_cgroup *to;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001398 bool ret = false;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001399 /*
 1400	 * Unlike the task_move routines, we access mc.to and mc.from without
 1401	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1402 */
1403 spin_lock(&mc.lock);
1404 from = mc.from;
1405 to = mc.to;
1406 if (!from)
1407 goto unlock;
Michal Hocko3e920412011-07-26 16:08:29 -07001408
Johannes Weiner2314b422014-12-10 15:44:33 -08001409 ret = mem_cgroup_is_descendant(from, memcg) ||
1410 mem_cgroup_is_descendant(to, memcg);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001411unlock:
1412 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001413 return ret;
1414}
1415
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001416static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001417{
1418 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001419 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001420 DEFINE_WAIT(wait);
1421 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1422 /* moving charge context might have finished. */
1423 if (mc.moving_task)
1424 schedule();
1425 finish_wait(&mc.waitq, &wait);
1426 return true;
1427 }
1428 }
1429 return false;
1430}
1431
Sha Zhengju58cf1882013-02-22 16:32:05 -08001432#define K(x) ((x) << (PAGE_SHIFT-10))
Balbir Singhe2224322009-04-02 16:57:39 -07001433/**
Sha Zhengju58cf1882013-02-22 16:32:05 -08001434 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
Balbir Singhe2224322009-04-02 16:57:39 -07001435 * @memcg: The memory cgroup that went over limit
1436 * @p: Task that is going to be killed
1437 *
1438 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1439 * enabled
1440 */
1441void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1442{
Tejun Heoe61734c2014-02-12 09:29:50 -05001443 /* oom_info_lock ensures that parallel ooms do not interleave */
Michal Hocko08088cb2014-02-25 15:01:44 -08001444 static DEFINE_MUTEX(oom_info_lock);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001445 struct mem_cgroup *iter;
1446 unsigned int i;
Balbir Singhe2224322009-04-02 16:57:39 -07001447
Michal Hocko08088cb2014-02-25 15:01:44 -08001448 mutex_lock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001449 rcu_read_lock();
1450
Balasubramani Vivekanandan2415b9f2015-04-14 15:48:18 -07001451 if (p) {
1452 pr_info("Task in ");
1453 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1454 pr_cont(" killed as a result of limit of ");
1455 } else {
1456 pr_info("Memory limit reached of cgroup ");
1457 }
1458
Tejun Heoe61734c2014-02-12 09:29:50 -05001459 pr_cont_cgroup_path(memcg->css.cgroup);
Greg Thelen0346dad2015-01-26 12:58:38 -08001460 pr_cont("\n");
Balbir Singhe2224322009-04-02 16:57:39 -07001461
Balbir Singhe2224322009-04-02 16:57:39 -07001462 rcu_read_unlock();
1463
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001464 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1465 K((u64)page_counter_read(&memcg->memory)),
1466 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1467 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1468 K((u64)page_counter_read(&memcg->memsw)),
1469 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1470 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1471 K((u64)page_counter_read(&memcg->kmem)),
1472 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001473
1474 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heoe61734c2014-02-12 09:29:50 -05001475 pr_info("Memory cgroup stats for ");
1476 pr_cont_cgroup_path(iter->css.cgroup);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001477 pr_cont(":");
1478
1479 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1480 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1481 continue;
1482 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1483 K(mem_cgroup_read_stat(iter, i)));
1484 }
1485
1486 for (i = 0; i < NR_LRU_LISTS; i++)
1487 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1488 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1489
1490 pr_cont("\n");
1491 }
Michal Hocko08088cb2014-02-25 15:01:44 -08001492 mutex_unlock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001493}
1494
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001495/*
 1496 * This function returns the number of memcgs under the hierarchy tree.
 1497 * Returns 1 (the self count) if there are no children.
1498 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001499static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001500{
1501 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001502 struct mem_cgroup *iter;
1503
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001504 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001505 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001506 return num;
1507}
1508
Balbir Singh6d61ef42009-01-07 18:08:06 -08001509/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001510 * Return the memory (and swap, if configured) limit for a memcg.
1511 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001512static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001513{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001514 unsigned long limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001515
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001516 limit = memcg->memory.limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001517 if (mem_cgroup_swappiness(memcg)) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001518 unsigned long memsw_limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001519
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001520 memsw_limit = memcg->memsw.limit;
1521 limit = min(limit + total_swap_pages, memsw_limit);
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001522 }
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001523 return limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001524}
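/*
 * Worked example, assuming non-zero swappiness: with a 512MB memory
 * limit, 1GB of total swap and a 768MB memsw limit, the result is
 * min(512MB + 1GB, 768MB) = 768MB worth of pages; the memsw ceiling
 * wins because swap cannot push usage beyond it.  This value is used
 * as the totalpages base for OOM badness scoring below.
 */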
1525
David Rientjes19965462012-12-11 16:00:26 -08001526static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1527 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001528{
1529 struct mem_cgroup *iter;
1530 unsigned long chosen_points = 0;
1531 unsigned long totalpages;
1532 unsigned int points = 0;
1533 struct task_struct *chosen = NULL;
1534
David Rientjes876aafb2012-07-31 16:43:48 -07001535 /*
David Rientjes465adcf2013-04-29 15:08:45 -07001536 * If current has a pending SIGKILL or is exiting, then automatically
1537 * select it. The goal is to allow it to allocate so that it may
1538 * quickly exit and free its memory.
David Rientjes876aafb2012-07-31 16:43:48 -07001539 */
Oleg Nesterovd003f372014-12-12 16:56:24 -08001540 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
Michal Hocko49550b62015-02-11 15:26:12 -08001541 mark_tsk_oom_victim(current);
David Rientjes876aafb2012-07-31 16:43:48 -07001542 return;
1543 }
1544
Balasubramani Vivekanandan2415b9f2015-04-14 15:48:18 -07001545 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001546 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001547 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heo72ec7022013-08-08 20:11:26 -04001548 struct css_task_iter it;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001549 struct task_struct *task;
1550
Tejun Heo72ec7022013-08-08 20:11:26 -04001551 css_task_iter_start(&iter->css, &it);
1552 while ((task = css_task_iter_next(&it))) {
David Rientjes9cbb78b2012-07-31 16:43:44 -07001553 switch (oom_scan_process_thread(task, totalpages, NULL,
1554 false)) {
1555 case OOM_SCAN_SELECT:
1556 if (chosen)
1557 put_task_struct(chosen);
1558 chosen = task;
1559 chosen_points = ULONG_MAX;
1560 get_task_struct(chosen);
1561 /* fall through */
1562 case OOM_SCAN_CONTINUE:
1563 continue;
1564 case OOM_SCAN_ABORT:
Tejun Heo72ec7022013-08-08 20:11:26 -04001565 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001566 mem_cgroup_iter_break(memcg, iter);
1567 if (chosen)
1568 put_task_struct(chosen);
1569 return;
1570 case OOM_SCAN_OK:
1571 break;
1572 };
1573 points = oom_badness(task, memcg, NULL, totalpages);
David Rientjesd49ad932014-01-23 15:53:34 -08001574 if (!points || points < chosen_points)
1575 continue;
1576 /* Prefer thread group leaders for display purposes */
1577 if (points == chosen_points &&
1578 thread_group_leader(chosen))
1579 continue;
1580
1581 if (chosen)
1582 put_task_struct(chosen);
1583 chosen = task;
1584 chosen_points = points;
1585 get_task_struct(chosen);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001586 }
Tejun Heo72ec7022013-08-08 20:11:26 -04001587 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001588 }
1589
1590 if (!chosen)
1591 return;
1592 points = chosen_points * 1000 / totalpages;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001593 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1594 NULL, "Memory cgroup out of memory");
David Rientjes9cbb78b2012-07-31 16:43:44 -07001595}
1596
Michele Curtiae6e71d2014-12-12 16:56:35 -08001597#if MAX_NUMNODES > 1
1598
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001599/**
1600 * test_mem_cgroup_node_reclaimable
Wanpeng Lidad75572012-06-20 12:53:01 -07001601 * @memcg: the target memcg
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001602 * @nid: the node ID to be checked.
 1603 * @noswap : specify true here if the user wants file-only information.
1604 *
1605 * This function returns whether the specified memcg contains any
1606 * reclaimable pages on a node. Returns true if there are any reclaimable
1607 * pages in the node.
1608 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001609static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001610 int nid, bool noswap)
1611{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001612 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001613 return true;
1614 if (noswap || !total_swap_pages)
1615 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001616 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001617 return true;
1618 return false;
1619
1620}
Ying Han889976d2011-05-26 16:25:33 -07001621
1622/*
1623 * Always updating the nodemask is not very good - even if we have an empty
1624 * list or the wrong list here, we can start from some node and traverse all
1625 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1626 *
1627 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001628static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001629{
1630 int nid;
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001631 /*
 1632	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1633 * pagein/pageout changes since the last update.
1634 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001635 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001636 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001637 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001638 return;
1639
Ying Han889976d2011-05-26 16:25:33 -07001640 /* make a nodemask where this memcg uses memory from */
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001641 memcg->scan_nodes = node_states[N_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001642
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001643 for_each_node_mask(nid, node_states[N_MEMORY]) {
Ying Han889976d2011-05-26 16:25:33 -07001644
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001645 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1646 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001647 }
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001648
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001649 atomic_set(&memcg->numainfo_events, 0);
1650 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001651}
1652
1653/*
 1654 * Select a node to start reclaim from. Because all we need is to reduce the
 1655 * usage counter, starting from any node is OK. Reclaiming from the current
 1656 * node has both pros and cons.
 1657 *
 1658 * Freeing memory from the current node means freeing memory from a node
 1659 * which we'll use or have used, so it may hurt that node's LRU. And if
 1660 * several threads hit their limits, they will contend on one node. But
 1661 * freeing from a remote node costs more for reclaim because of memory latency.
 1662 *
 1663 * For now, we use round-robin. A better algorithm is welcome.
1664 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001665int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001666{
1667 int node;
1668
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001669 mem_cgroup_may_update_nodemask(memcg);
1670 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001671
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001672 node = next_node(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001673 if (node == MAX_NUMNODES)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001674 node = first_node(memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001675 /*
 1676	 * We call this when we hit the limit, not when pages are added to the LRU.
 1677	 * No LRU may hold pages because all pages are UNEVICTABLE, or the memcg
 1678	 * is too small and none of its pages are on an LRU. In that case, we
 1679	 * use the current node.
1680 */
1681 if (unlikely(node == MAX_NUMNODES))
1682 node = numa_node_id();
1683
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001684 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001685 return node;
1686}
Ying Han889976d2011-05-26 16:25:33 -07001687#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001688int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001689{
1690 return 0;
1691}
1692#endif
1693
Andrew Morton0608f432013-09-24 15:27:41 -07001694static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1695 struct zone *zone,
1696 gfp_t gfp_mask,
1697 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001698{
Andrew Morton0608f432013-09-24 15:27:41 -07001699 struct mem_cgroup *victim = NULL;
1700 int total = 0;
1701 int loop = 0;
1702 unsigned long excess;
1703 unsigned long nr_scanned;
1704 struct mem_cgroup_reclaim_cookie reclaim = {
1705 .zone = zone,
1706 .priority = 0,
1707 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001708
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001709 excess = soft_limit_excess(root_memcg);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001710
Andrew Morton0608f432013-09-24 15:27:41 -07001711 while (1) {
1712 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1713 if (!victim) {
1714 loop++;
1715 if (loop >= 2) {
1716 /*
1717 * If we have not been able to reclaim
 1718			 * anything, it might be because there are
1719 * no reclaimable pages under this hierarchy
1720 */
1721 if (!total)
1722 break;
1723 /*
1724 * We want to do more targeted reclaim.
 1725			 * excess >> 2 is not so excessive that we
 1726			 * reclaim too much, nor so small that we keep
 1727			 * coming back to reclaim from this cgroup
1728 */
1729 if (total >= (excess >> 2) ||
1730 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1731 break;
1732 }
1733 continue;
1734 }
Andrew Morton0608f432013-09-24 15:27:41 -07001735 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1736 zone, &nr_scanned);
1737 *total_scanned += nr_scanned;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001738 if (!soft_limit_excess(root_memcg))
Andrew Morton0608f432013-09-24 15:27:41 -07001739 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001740 }
Andrew Morton0608f432013-09-24 15:27:41 -07001741 mem_cgroup_iter_break(root_memcg, victim);
1742 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001743}
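/*
 * Worked example of the termination logic above: with the root memcg
 * 1024 pages over its soft limit, the walk ends as soon as the excess
 * is gone, or (after at least two full passes) once 256 pages
 * (excess >> 2) have been reclaimed in total, nothing was reclaimed at
 * all, or more than MEM_CGROUP_MAX_RECLAIM_LOOPS passes were made.
 */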
1744
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001745#ifdef CONFIG_LOCKDEP
1746static struct lockdep_map memcg_oom_lock_dep_map = {
1747 .name = "memcg_oom_lock",
1748};
1749#endif
1750
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001751static DEFINE_SPINLOCK(memcg_oom_lock);
1752
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001753/*
 1754 * Check whether the OOM killer is already running under our hierarchy.
 1755 * If someone is already running it, return false.
1756 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001757static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001758{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001759 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001760
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001761 spin_lock(&memcg_oom_lock);
1762
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001763 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001764 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001765 /*
 1766			 * This subtree of our hierarchy is already locked,
 1767			 * so we cannot grant the lock.
1768 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001769 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001770 mem_cgroup_iter_break(memcg, iter);
1771 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001772 } else
1773 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001774 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001775
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001776 if (failed) {
1777 /*
 1778		 * OK, we failed to lock the whole subtree, so we have
 1779		 * to clean up what we set up, up to the failing subtree.
1780 */
1781 for_each_mem_cgroup_tree(iter, memcg) {
1782 if (iter == failed) {
1783 mem_cgroup_iter_break(memcg, iter);
1784 break;
1785 }
1786 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001787 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001788 } else
1789 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001790
1791 spin_unlock(&memcg_oom_lock);
1792
1793 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001794}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001795
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001796static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001797{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001798 struct mem_cgroup *iter;
1799
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001800 spin_lock(&memcg_oom_lock);
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001801 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001802 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001803 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001804 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001805}
1806
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001807static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001808{
1809 struct mem_cgroup *iter;
1810
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001811 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001812 atomic_inc(&iter->under_oom);
1813}
1814
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001815static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001816{
1817 struct mem_cgroup *iter;
1818
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001819 /*
 1820	 * When a new child is created while the hierarchy is under OOM,
1821 * mem_cgroup_oom_lock() may not be called. We have to use
1822 * atomic_add_unless() here.
1823 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001824 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001825 atomic_add_unless(&iter->under_oom, -1, 0);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001826}
1827
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001828static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1829
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001830struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001831 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001832 wait_queue_t wait;
1833};
1834
1835static int memcg_oom_wake_function(wait_queue_t *wait,
1836 unsigned mode, int sync, void *arg)
1837{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001838 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1839 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001840 struct oom_wait_info *oom_wait_info;
1841
1842 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001843 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001844
Johannes Weiner2314b422014-12-10 15:44:33 -08001845 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1846 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001847 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001848 return autoremove_wake_function(wait, mode, sync, arg);
1849}
1850
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001851static void memcg_wakeup_oom(struct mem_cgroup *memcg)
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001852{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001853 atomic_inc(&memcg->oom_wakeups);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001854 /* for filtering, pass "memcg" as argument. */
1855 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001856}
1857
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001858static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001859{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001860 if (memcg && atomic_read(&memcg->under_oom))
1861 memcg_wakeup_oom(memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001862}
1863
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001864static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001865{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001866 if (!current->memcg_oom.may_oom)
1867 return;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001868 /*
Johannes Weiner49426422013-10-16 13:46:59 -07001869 * We are in the middle of the charge context here, so we
1870 * don't want to block when potentially sitting on a callstack
1871 * that holds all kinds of filesystem and mm locks.
1872 *
1873 * Also, the caller may handle a failed allocation gracefully
1874 * (like optional page cache readahead) and so an OOM killer
1875 * invocation might not even be necessary.
1876 *
1877 * That's why we don't do anything here except remember the
1878 * OOM context and then deal with it at the end of the page
1879 * fault when the stack is unwound, the locks are released,
1880 * and when we know whether the fault was overall successful.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001881 */
Johannes Weiner49426422013-10-16 13:46:59 -07001882 css_get(&memcg->css);
1883 current->memcg_oom.memcg = memcg;
1884 current->memcg_oom.gfp_mask = mask;
1885 current->memcg_oom.order = order;
1886}
1887
1888/**
1889 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1890 * @handle: actually kill/wait or just clean up the OOM state
1891 *
1892 * This has to be called at the end of a page fault if the memcg OOM
1893 * handler was enabled.
1894 *
1895 * Memcg supports userspace OOM handling where failed allocations must
1896 * sleep on a waitqueue until the userspace task resolves the
1897 * situation. Sleeping directly in the charge context with all kinds
1898 * of locks held is not a good idea, instead we remember an OOM state
1899 * in the task and mem_cgroup_oom_synchronize() has to be called at
1900 * the end of the page fault to complete the OOM handling.
1901 *
1902 * Returns %true if an ongoing memcg OOM situation was detected and
1903 * completed, %false otherwise.
1904 */
1905bool mem_cgroup_oom_synchronize(bool handle)
1906{
1907 struct mem_cgroup *memcg = current->memcg_oom.memcg;
1908 struct oom_wait_info owait;
1909 bool locked;
1910
1911 /* OOM is global, do not handle */
1912 if (!memcg)
1913 return false;
1914
Michal Hockoc32b3cb2015-02-11 15:26:24 -08001915 if (!handle || oom_killer_disabled)
Johannes Weiner49426422013-10-16 13:46:59 -07001916 goto cleanup;
1917
1918 owait.memcg = memcg;
1919 owait.wait.flags = 0;
1920 owait.wait.func = memcg_oom_wake_function;
1921 owait.wait.private = current;
1922 INIT_LIST_HEAD(&owait.wait.task_list);
1923
1924 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001925 mem_cgroup_mark_under_oom(memcg);
1926
1927 locked = mem_cgroup_oom_trylock(memcg);
1928
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001929 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001930 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001931
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001932 if (locked && !memcg->oom_kill_disable) {
1933 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07001934 finish_wait(&memcg_oom_waitq, &owait.wait);
1935 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
1936 current->memcg_oom.order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001937 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001938 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07001939 mem_cgroup_unmark_under_oom(memcg);
1940 finish_wait(&memcg_oom_waitq, &owait.wait);
1941 }
1942
1943 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001944 mem_cgroup_oom_unlock(memcg);
1945 /*
1946 * There is no guarantee that an OOM-lock contender
1947 * sees the wakeups triggered by the OOM kill
 1948		 * uncharges. Wake any sleepers explicitly.
1949 */
1950 memcg_oom_recover(memcg);
1951 }
Johannes Weiner49426422013-10-16 13:46:59 -07001952cleanup:
1953 current->memcg_oom.memcg = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001954 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001955 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001956}
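/*
 * Illustrative flow, assuming the usual page fault exit path (the
 * exact call site lives outside this file): a failed charge records
 * the OOM context via mem_cgroup_oom(), the fault handler unwinds its
 * locks, and the exit path then calls
 *
 *	mem_cgroup_oom_synchronize(true);
 *
 * to either invoke the OOM killer or sleep until the situation is
 * resolved.  Passing false only cleans up the recorded state.
 */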
1957
Johannes Weinerd7365e72014-10-29 14:50:48 -07001958/**
1959 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
1960 * @page: page that is going to change accounted state
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001961 *
Johannes Weinerd7365e72014-10-29 14:50:48 -07001962 * This function must mark the beginning of an accounted page state
1963 * change to prevent double accounting when the page is concurrently
1964 * being moved to another memcg:
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001965 *
Johannes Weiner6de22612015-02-11 15:25:01 -08001966 * memcg = mem_cgroup_begin_page_stat(page);
Johannes Weinerd7365e72014-10-29 14:50:48 -07001967 * if (TestClearPageState(page))
1968 * mem_cgroup_update_page_stat(memcg, state, -1);
Johannes Weiner6de22612015-02-11 15:25:01 -08001969 * mem_cgroup_end_page_stat(memcg);
Balbir Singhd69b0422009-06-17 16:26:34 -07001970 */
Johannes Weiner6de22612015-02-11 15:25:01 -08001971struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001972{
1973 struct mem_cgroup *memcg;
Johannes Weiner6de22612015-02-11 15:25:01 -08001974 unsigned long flags;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001975
Johannes Weiner6de22612015-02-11 15:25:01 -08001976 /*
1977 * The RCU lock is held throughout the transaction. The fast
1978 * path can get away without acquiring the memcg->move_lock
1979 * because page moving starts with an RCU grace period.
1980 *
1981 * The RCU lock also protects the memcg from being freed when
1982 * the page state that is going to change is the only thing
1983 * preventing the page from being uncharged.
1984 * E.g. end-writeback clearing PageWriteback(), which allows
1985 * migration to go ahead and uncharge the page before the
1986 * account transaction might be complete.
1987 */
Johannes Weinerd7365e72014-10-29 14:50:48 -07001988 rcu_read_lock();
1989
1990 if (mem_cgroup_disabled())
1991 return NULL;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001992again:
Johannes Weiner1306a852014-12-10 15:44:52 -08001993 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08001994 if (unlikely(!memcg))
Johannes Weinerd7365e72014-10-29 14:50:48 -07001995 return NULL;
1996
Qiang Huangbdcbb652014-06-04 16:08:21 -07001997 if (atomic_read(&memcg->moving_account) <= 0)
Johannes Weinerd7365e72014-10-29 14:50:48 -07001998 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001999
Johannes Weiner6de22612015-02-11 15:25:01 -08002000 spin_lock_irqsave(&memcg->move_lock, flags);
Johannes Weiner1306a852014-12-10 15:44:52 -08002001 if (memcg != page->mem_cgroup) {
Johannes Weiner6de22612015-02-11 15:25:01 -08002002 spin_unlock_irqrestore(&memcg->move_lock, flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002003 goto again;
2004 }
Johannes Weiner6de22612015-02-11 15:25:01 -08002005
2006 /*
2007 * When charge migration first begins, we can have locked and
2008 * unlocked page stat updates happening concurrently. Track
 2009	 * the task that has the lock for mem_cgroup_end_page_stat().
2010 */
2011 memcg->move_lock_task = current;
2012 memcg->move_lock_flags = flags;
Johannes Weinerd7365e72014-10-29 14:50:48 -07002013
2014 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002015}
Greg Thelen7c9d3ff2015-05-22 17:13:16 -04002016EXPORT_SYMBOL(mem_cgroup_begin_page_stat);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002017
Johannes Weinerd7365e72014-10-29 14:50:48 -07002018/**
2019 * mem_cgroup_end_page_stat - finish a page state statistics transaction
2020 * @memcg: the memcg that was accounted against
Johannes Weinerd7365e72014-10-29 14:50:48 -07002021 */
Johannes Weiner6de22612015-02-11 15:25:01 -08002022void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002023{
Johannes Weiner6de22612015-02-11 15:25:01 -08002024 if (memcg && memcg->move_lock_task == current) {
2025 unsigned long flags = memcg->move_lock_flags;
2026
2027 memcg->move_lock_task = NULL;
2028 memcg->move_lock_flags = 0;
2029
2030 spin_unlock_irqrestore(&memcg->move_lock, flags);
2031 }
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002032
Johannes Weinerd7365e72014-10-29 14:50:48 -07002033 rcu_read_unlock();
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002034}
Greg Thelen7c9d3ff2015-05-22 17:13:16 -04002035EXPORT_SYMBOL(mem_cgroup_end_page_stat);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002036
Johannes Weinerd7365e72014-10-29 14:50:48 -07002037/**
2038 * mem_cgroup_update_page_stat - update page state statistics
2039 * @memcg: memcg to account against
2040 * @idx: page state item to account
2041 * @val: number of pages (positive or negative)
2042 *
2043 * See mem_cgroup_begin_page_stat() for locking requirements.
2044 */
2045void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
Sha Zhengju68b48762013-09-12 15:13:50 -07002046 enum mem_cgroup_stat_index idx, int val)
Balbir Singhd69b0422009-06-17 16:26:34 -07002047{
Sha Zhengju658b72c2013-09-12 15:13:52 -07002048 VM_BUG_ON(!rcu_read_lock_held());
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002049
Johannes Weinerd7365e72014-10-29 14:50:48 -07002050 if (memcg)
2051 this_cpu_add(memcg->stat->count[idx], val);
Balbir Singhd69b0422009-06-17 16:26:34 -07002052}
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002053
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002054/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002055 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 2056 * TODO: it may be necessary to use bigger numbers on big iron.
2057 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002058#define CHARGE_BATCH 32U
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002059struct memcg_stock_pcp {
 2060	struct mem_cgroup *cached; /* this is never the root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002061 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002062 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002063 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07002064#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002065};
2066static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002067static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002068
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002069/**
2070 * consume_stock: Try to consume stocked charge on this cpu.
2071 * @memcg: memcg to consume from.
2072 * @nr_pages: how many pages to charge.
2073 *
2074 * The charges will only happen if @memcg matches the current cpu's memcg
2075 * stock, and at least @nr_pages are available in that stock. Failure to
2076 * service an allocation will refill the stock.
2077 *
2078 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002079 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002080static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002081{
2082 struct memcg_stock_pcp *stock;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002083 bool ret = false;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002084
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002085 if (nr_pages > CHARGE_BATCH)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002086 return ret;
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002087
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002088 stock = &get_cpu_var(memcg_stock);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002089 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002090 stock->nr_pages -= nr_pages;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002091 ret = true;
2092 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002093 put_cpu_var(memcg_stock);
2094 return ret;
2095}
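/*
 * Fast-path note (illustrative): try_charge() below tries
 * consume_stock() before touching the page counters, so a charge that
 * fits into the pre-charged per-cpu stock completes without atomic
 * operations on the shared counters; only a miss falls through to
 * page_counter_try_charge() and a later refill_stock().
 */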
2096
2097/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002098 * Return the stocked charges to the counters and reset the cached information.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002099 */
2100static void drain_stock(struct memcg_stock_pcp *stock)
2101{
2102 struct mem_cgroup *old = stock->cached;
2103
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002104 if (stock->nr_pages) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002105 page_counter_uncharge(&old->memory, stock->nr_pages);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002106 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002107 page_counter_uncharge(&old->memsw, stock->nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002108 css_put_many(&old->css, stock->nr_pages);
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002109 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002110 }
2111 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002112}
2113
2114/*
 2115 * This must be called with preemption disabled, or by a thread which
 2116 * is pinned to the local cpu.
2117 */
2118static void drain_local_stock(struct work_struct *dummy)
2119{
Christoph Lameter7c8e0182014-06-04 16:07:56 -07002120 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002121 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002122 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002123}
2124
2125/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002126 * Cache charges (nr_pages) in the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01002127 * They will be consumed by consume_stock() later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002128 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002129static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002130{
2131 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2132
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002133 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002134 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002135 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002136 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002137 stock->nr_pages += nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002138 put_cpu_var(memcg_stock);
2139}
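/*
 * Worked example: when try_charge() needs a single page it still
 * charges a full CHARGE_BATCH (32 pages) worth against the counters
 * and hands the unused 31 pages to refill_stock(), so the next 31
 * single-page charges for the same memcg on this cpu are served from
 * the stock alone.
 */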
2140
2141/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002142 * Drains all per-CPU charge caches for the given root_memcg and the
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002143 * subtree of the hierarchy under it.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002144 */
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002145static void drain_all_stock(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002146{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002147 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07002148
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002149	/* If someone's already draining, avoid adding/running more workers. */
2150 if (!mutex_trylock(&percpu_charge_mutex))
2151 return;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002152 /* Notify other cpus that system-wide "drain" is running */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002153 get_online_cpus();
Johannes Weiner5af12d02011-08-25 15:59:07 -07002154 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002155 for_each_online_cpu(cpu) {
2156 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002157 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002158
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002159 memcg = stock->cached;
2160 if (!memcg || !stock->nr_pages)
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002161 continue;
Johannes Weiner2314b422014-12-10 15:44:33 -08002162 if (!mem_cgroup_is_descendant(memcg, root_memcg))
Michal Hocko3e920412011-07-26 16:08:29 -07002163 continue;
Michal Hockod1a05b62011-07-26 16:08:27 -07002164 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2165 if (cpu == curcpu)
2166 drain_local_stock(&stock->work);
2167 else
2168 schedule_work_on(cpu, &stock->work);
2169 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002170 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07002171 put_cpu();
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002172 put_online_cpus();
Michal Hocko9f50fad2011-08-09 11:56:26 +02002173 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002174}
2175
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002176/*
 2177 * This function drains the percpu counter values from a DEAD cpu and
 2178 * moves them to the local cpu. Note that this function can be preempted.
2179 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002180static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002181{
2182 int i;
2183
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002184 spin_lock(&memcg->pcp_counter_lock);
Johannes Weiner61046212012-05-29 15:07:05 -07002185 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002186 long x = per_cpu(memcg->stat->count[i], cpu);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002187
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002188 per_cpu(memcg->stat->count[i], cpu) = 0;
2189 memcg->nocpu_base.count[i] += x;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002190 }
Johannes Weinere9f89742011-03-23 16:42:37 -07002191 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002192 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
Johannes Weinere9f89742011-03-23 16:42:37 -07002193
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002194 per_cpu(memcg->stat->events[i], cpu) = 0;
2195 memcg->nocpu_base.events[i] += x;
Johannes Weinere9f89742011-03-23 16:42:37 -07002196 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002197 spin_unlock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002198}
2199
Paul Gortmaker0db06282013-06-19 14:53:51 -04002200static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002201 unsigned long action,
2202 void *hcpu)
2203{
2204 int cpu = (unsigned long)hcpu;
2205 struct memcg_stock_pcp *stock;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002206 struct mem_cgroup *iter;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002207
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07002208 if (action == CPU_ONLINE)
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002209 return NOTIFY_OK;
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002210
Kirill A. Shutemovd8330492012-04-12 12:49:11 -07002211 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002212 return NOTIFY_OK;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002213
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08002214 for_each_mem_cgroup(iter)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002215 mem_cgroup_drain_pcp_counter(iter, cpu);
2216
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002217 stock = &per_cpu(memcg_stock, cpu);
2218 drain_stock(stock);
2219 return NOTIFY_OK;
2220}
2221
Johannes Weiner00501b52014-08-08 14:19:20 -07002222static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2223 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002224{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002225 unsigned int batch = max(CHARGE_BATCH, nr_pages);
Johannes Weiner9b130612014-08-06 16:05:51 -07002226 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002227 struct mem_cgroup *mem_over_limit;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002228 struct page_counter *counter;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002229 unsigned long nr_reclaimed;
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002230 bool may_swap = true;
2231 bool drained = false;
Johannes Weiner05b84302014-08-06 16:05:59 -07002232 int ret = 0;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002233
Johannes Weinerce00a962014-09-05 08:43:57 -04002234 if (mem_cgroup_is_root(memcg))
2235 goto done;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002236retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07002237 if (consume_stock(memcg, nr_pages))
2238 goto done;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002239
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002240 if (!do_swap_account ||
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002241 !page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2242 if (!page_counter_try_charge(&memcg->memory, batch, &counter))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002243 goto done_restock;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002244 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002245 page_counter_uncharge(&memcg->memsw, batch);
2246 mem_over_limit = mem_cgroup_from_counter(counter, memory);
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002247 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002248 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002249 may_swap = false;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002250 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002251
Johannes Weiner6539cc02014-08-06 16:05:42 -07002252 if (batch > nr_pages) {
2253 batch = nr_pages;
2254 goto retry;
2255 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002256
Johannes Weiner06b078f2014-08-06 16:05:44 -07002257 /*
2258 * Unlike in global OOM situations, memcg is not in a physical
2259 * memory shortage. Allow dying and OOM-killed tasks to
2260 * bypass the last charges so that they can exit quickly and
2261 * free their memory.
2262 */
2263 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2264 fatal_signal_pending(current) ||
2265 current->flags & PF_EXITING))
2266 goto bypass;
2267
2268 if (unlikely(task_in_memcg_oom(current)))
2269 goto nomem;
2270
Johannes Weiner6539cc02014-08-06 16:05:42 -07002271 if (!(gfp_mask & __GFP_WAIT))
2272 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002273
Johannes Weiner241994ed2015-02-11 15:26:06 -08002274 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
2275
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002276 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2277 gfp_mask, may_swap);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002278
Johannes Weiner61e02c72014-08-06 16:08:16 -07002279 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner6539cc02014-08-06 16:05:42 -07002280 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07002281
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002282 if (!drained) {
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002283 drain_all_stock(mem_over_limit);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002284 drained = true;
2285 goto retry;
2286 }
2287
Johannes Weiner28c34c22014-08-06 16:05:47 -07002288 if (gfp_mask & __GFP_NORETRY)
2289 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002290 /*
2291 * Even though the limit is exceeded at this point, reclaim
2292 * may have been able to free some pages. Retry the charge
2293 * before killing the task.
2294 *
2295 * Only for regular pages, though: huge pages are rather
2296 * unlikely to succeed so close to the limit, and we fall back
2297 * to regular pages anyway in case of failure.
2298 */
Johannes Weiner61e02c72014-08-06 16:08:16 -07002299 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002300 goto retry;
2301 /*
	2302	 * During a task move, charge accounts can be doubly counted. So it's
	2303	 * better to wait until the end of the move if one is in progress.
2304 */
2305 if (mem_cgroup_wait_acct_move(mem_over_limit))
2306 goto retry;
2307
Johannes Weiner9b130612014-08-06 16:05:51 -07002308 if (nr_retries--)
2309 goto retry;
2310
Johannes Weiner06b078f2014-08-06 16:05:44 -07002311 if (gfp_mask & __GFP_NOFAIL)
2312 goto bypass;
2313
Johannes Weiner6539cc02014-08-06 16:05:42 -07002314 if (fatal_signal_pending(current))
2315 goto bypass;
2316
Johannes Weiner241994ed2015-02-11 15:26:06 -08002317 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2318
Johannes Weiner61e02c72014-08-06 16:08:16 -07002319 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002320nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002321 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002322 return -ENOMEM;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002323bypass:
Johannes Weinerce00a962014-09-05 08:43:57 -04002324 return -EINTR;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002325
2326done_restock:
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002327 css_get_many(&memcg->css, batch);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002328 if (batch > nr_pages)
2329 refill_stock(memcg, batch - nr_pages);
Vladimir Davydov7d638092015-06-10 11:14:46 -07002330 if (!(gfp_mask & __GFP_WAIT))
2331 goto done;
Johannes Weiner241994ed2015-02-11 15:26:06 -08002332 /*
2333 * If the hierarchy is above the normal consumption range,
	2334	 * make the charging task trim its excess contribution.
2335 */
2336 do {
2337 if (page_counter_read(&memcg->memory) <= memcg->high)
2338 continue;
2339 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
2340 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2341 } while ((memcg = parent_mem_cgroup(memcg)));
Johannes Weiner6539cc02014-08-06 16:05:42 -07002342done:
Johannes Weiner05b84302014-08-06 16:05:59 -07002343 return ret;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002344}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002345
Johannes Weiner00501b52014-08-08 14:19:20 -07002346static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002347{
Johannes Weinerce00a962014-09-05 08:43:57 -04002348 if (mem_cgroup_is_root(memcg))
2349 return;
2350
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002351 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner05b84302014-08-06 16:05:59 -07002352 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002353 page_counter_uncharge(&memcg->memsw, nr_pages);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002354
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002355 css_put_many(&memcg->css, nr_pages);
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002356}
2357
2358/*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002359 * try_get_mem_cgroup_from_page - look up page's memcg association
2360 * @page: the page
2361 *
2362 * Look up, get a css reference, and return the memcg that owns @page.
2363 *
2364 * The page must be locked to prevent racing with swap-in and page
2365 * cache charges. If coming from an unlocked page table, the caller
2366 * must ensure the page is on the LRU or this can race with charging.
2367 */
Wu Fengguange42d9d52009-12-16 12:19:59 +01002368struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002369{
Johannes Weiner29833312014-12-10 15:44:02 -08002370 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002371 unsigned short id;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002372 swp_entry_t ent;
2373
Sasha Levin309381fea2014-01-23 15:52:54 -08002374 VM_BUG_ON_PAGE(!PageLocked(page), page);
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002375
Johannes Weiner1306a852014-12-10 15:44:52 -08002376 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08002377 if (memcg) {
2378 if (!css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002379 memcg = NULL;
Wu Fengguange42d9d52009-12-16 12:19:59 +01002380 } else if (PageSwapCache(page)) {
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002381 ent.val = page_private(page);
Bob Liu9fb4b7c2012-01-12 17:18:48 -08002382 id = lookup_swap_cgroup_id(ent);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002383 rcu_read_lock();
Vladimir Davydovadbe4272015-04-15 16:13:00 -07002384 memcg = mem_cgroup_from_id(id);
Tejun Heoec903c02014-05-13 12:11:01 -04002385 if (memcg && !css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002386 memcg = NULL;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002387 rcu_read_unlock();
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002388 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002389 return memcg;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002390}
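
/*
 * Editor's illustrative sketch (not part of memcontrol.c): a typical,
 * hypothetical caller of the lookup above. The page lock pins the
 * page's memcg association during the lookup; the css reference that
 * is returned must be dropped with css_put() once the caller is done.
 */
static void example_inspect_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;

	lock_page(page);
	memcg = try_get_mem_cgroup_from_page(page);
	unlock_page(page);

	if (!memcg)
		return;
	/* memcg is safe to use here: we hold a css reference */
	css_put(&memcg->css);
}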
2391
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002392static void lock_page_lru(struct page *page, int *isolated)
2393{
2394 struct zone *zone = page_zone(page);
2395
2396 spin_lock_irq(&zone->lru_lock);
2397 if (PageLRU(page)) {
2398 struct lruvec *lruvec;
2399
2400 lruvec = mem_cgroup_page_lruvec(page, zone);
2401 ClearPageLRU(page);
2402 del_page_from_lru_list(page, lruvec, page_lru(page));
2403 *isolated = 1;
2404 } else
2405 *isolated = 0;
2406}
2407
2408static void unlock_page_lru(struct page *page, int isolated)
2409{
2410 struct zone *zone = page_zone(page);
2411
2412 if (isolated) {
2413 struct lruvec *lruvec;
2414
2415 lruvec = mem_cgroup_page_lruvec(page, zone);
2416 VM_BUG_ON_PAGE(PageLRU(page), page);
2417 SetPageLRU(page);
2418 add_page_to_lru_list(page, lruvec, page_lru(page));
2419 }
2420 spin_unlock_irq(&zone->lru_lock);
2421}
2422
Johannes Weiner00501b52014-08-08 14:19:20 -07002423static void commit_charge(struct page *page, struct mem_cgroup *memcg,
Johannes Weiner6abb5a82014-08-08 14:19:33 -07002424 bool lrucare)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002425{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002426 int isolated;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002427
Johannes Weiner1306a852014-12-10 15:44:52 -08002428 VM_BUG_ON_PAGE(page->mem_cgroup, page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002429
2430 /*
	2431	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2432 * may already be on some other mem_cgroup's LRU. Take care of it.
2433 */
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002434 if (lrucare)
2435 lock_page_lru(page, &isolated);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002436
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002437 /*
2438 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08002439 * page->mem_cgroup at this point:
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002440 *
2441 * - the page is uncharged
2442 *
2443 * - the page is off-LRU
2444 *
2445 * - an anonymous fault has exclusive page access, except for
2446 * a locked page table
2447 *
2448 * - a page cache insertion, a swapin fault, or a migration
2449 * have the page locked
2450 */
Johannes Weiner1306a852014-12-10 15:44:52 -08002451 page->mem_cgroup = memcg;
Hugh Dickins3be91272008-02-07 00:14:19 -08002452
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002453 if (lrucare)
2454 unlock_page_lru(page, isolated);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002455}
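
/*
 * Editor's illustrative sketch (not part of memcontrol.c): roughly how
 * a charge path strings the two internal helpers above together. The
 * real entry points are the public mem_cgroup_try_charge()/commit/
 * cancel wrappers; example_charge_page() is hypothetical and omits the
 * -EINTR "bypass to root" handling done by those wrappers.
 */
static int example_charge_page(struct page *page, struct mem_cgroup *memcg,
			       gfp_t gfp_mask, bool lrucare)
{
	int ret;

	ret = try_charge(memcg, gfp_mask, 1);	/* reserve one page */
	if (ret)
		return ret;	/* -ENOMEM, or -EINTR when bypassed to root */

	commit_charge(page, memcg, lrucare);	/* bind page->mem_cgroup */
	return 0;
}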
2456
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002457#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydovdbf22eb2015-02-10 14:11:41 -08002458int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
2459 unsigned long nr_pages)
Glauber Costa749c5412012-12-18 14:23:01 -08002460{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002461 struct page_counter *counter;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002462 int ret = 0;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002463
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002464 ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
2465 if (ret < 0)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002466 return ret;
2467
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002468 ret = try_charge(memcg, gfp, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002469 if (ret == -EINTR) {
2470 /*
Johannes Weiner00501b52014-08-08 14:19:20 -07002471 * try_charge() chose to bypass to root due to OOM kill or
2472 * fatal signal. Since our only options are to either fail
2473 * the allocation or charge it to this cgroup, do it as a
2474 * temporary condition. But we can't fail. From a kmem/slab
2475 * perspective, the cache has already been selected, by
2476 * mem_cgroup_kmem_get_cache(), so it is too late to change
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002477 * our minds.
2478 *
2479 * This condition will only trigger if the task entered
Johannes Weiner00501b52014-08-08 14:19:20 -07002480 * memcg_charge_kmem in a sane state, but was OOM-killed
2481 * during try_charge() above. Tasks that were already dying
2482 * when the allocation triggers should have been already
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002483 * directed to the root cgroup in memcontrol.h
2484 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002485 page_counter_charge(&memcg->memory, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002486 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002487 page_counter_charge(&memcg->memsw, nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002488 css_get_many(&memcg->css, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002489 ret = 0;
2490 } else if (ret)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002491 page_counter_uncharge(&memcg->kmem, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002492
2493 return ret;
2494}
2495
Vladimir Davydovdbf22eb2015-02-10 14:11:41 -08002496void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002497{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002498 page_counter_uncharge(&memcg->memory, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002499 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002500 page_counter_uncharge(&memcg->memsw, nr_pages);
Glauber Costa7de37682012-12-18 14:22:07 -08002501
Johannes Weiner64f21992014-12-10 15:42:45 -08002502 page_counter_uncharge(&memcg->kmem, nr_pages);
Glauber Costa7de37682012-12-18 14:22:07 -08002503
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002504 css_put_many(&memcg->css, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002505}
2506
Glauber Costa2633d7a2012-12-18 14:22:34 -08002507/*
	2508	 * Helper for accessing a memcg's index. It will be used as an index in the
2509 * child cache array in kmem_cache, and also to derive its name. This function
2510 * will return -1 when this is not a kmem-limited memcg.
2511 */
2512int memcg_cache_id(struct mem_cgroup *memcg)
2513{
2514 return memcg ? memcg->kmemcg_id : -1;
2515}
2516
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002517static int memcg_alloc_cache_id(void)
Glauber Costa55007d82012-12-18 14:22:38 -08002518{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002519 int id, size;
2520 int err;
Glauber Costa55007d82012-12-18 14:22:38 -08002521
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002522 id = ida_simple_get(&memcg_cache_ida,
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002523 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2524 if (id < 0)
2525 return id;
2526
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002527 if (id < memcg_nr_cache_ids)
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002528 return id;
2529
2530 /*
2531 * There's no space for the new id in memcg_caches arrays,
2532 * so we have to grow them.
2533 */
Vladimir Davydov05257a12015-02-12 14:59:01 -08002534 down_write(&memcg_cache_ids_sem);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002535
2536 size = 2 * (id + 1);
Glauber Costa55007d82012-12-18 14:22:38 -08002537 if (size < MEMCG_CACHES_MIN_SIZE)
2538 size = MEMCG_CACHES_MIN_SIZE;
2539 else if (size > MEMCG_CACHES_MAX_SIZE)
2540 size = MEMCG_CACHES_MAX_SIZE;
2541
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002542 err = memcg_update_all_caches(size);
Vladimir Davydov05257a12015-02-12 14:59:01 -08002543 if (!err)
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002544 err = memcg_update_all_list_lrus(size);
2545 if (!err)
Vladimir Davydov05257a12015-02-12 14:59:01 -08002546 memcg_nr_cache_ids = size;
2547
2548 up_write(&memcg_cache_ids_sem);
2549
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002550 if (err) {
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002551 ida_simple_remove(&memcg_cache_ida, id);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002552 return err;
2553 }
2554 return id;
2555}
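
/*
 * Worked example (editor's addition): if the ida hands out id == 7
 * while memcg_nr_cache_ids is still, say, 4, the arrays are grown to
 * size = 2 * (7 + 1) = 16 and then clamped into the range
 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE]. memcg_nr_cache_ids
 * is only bumped after all root caches and list_lrus have been resized
 * successfully, so the new id is never used against a too-small array.
 */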
2556
2557static void memcg_free_cache_id(int id)
2558{
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002559 ida_simple_remove(&memcg_cache_ida, id);
Glauber Costa55007d82012-12-18 14:22:38 -08002560}
2561
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002562struct memcg_kmem_cache_create_work {
Vladimir Davydov5722d092014-04-07 15:39:24 -07002563 struct mem_cgroup *memcg;
2564 struct kmem_cache *cachep;
2565 struct work_struct work;
2566};
2567
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002568static void memcg_kmem_cache_create_func(struct work_struct *w)
Glauber Costad7f25f82012-12-18 14:22:40 -08002569{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002570 struct memcg_kmem_cache_create_work *cw =
2571 container_of(w, struct memcg_kmem_cache_create_work, work);
Vladimir Davydov5722d092014-04-07 15:39:24 -07002572 struct mem_cgroup *memcg = cw->memcg;
2573 struct kmem_cache *cachep = cw->cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002574
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002575 memcg_create_kmem_cache(memcg, cachep);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002576
Vladimir Davydov5722d092014-04-07 15:39:24 -07002577 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002578 kfree(cw);
2579}
2580
2581/*
2582 * Enqueue the creation of a per-memcg kmem_cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002583 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002584static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2585 struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002586{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002587 struct memcg_kmem_cache_create_work *cw;
Glauber Costad7f25f82012-12-18 14:22:40 -08002588
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002589 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002590 if (!cw)
Glauber Costad7f25f82012-12-18 14:22:40 -08002591 return;
Vladimir Davydov8135be52014-12-12 16:56:38 -08002592
2593 css_get(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002594
2595 cw->memcg = memcg;
2596 cw->cachep = cachep;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002597 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
Glauber Costad7f25f82012-12-18 14:22:40 -08002598
Glauber Costad7f25f82012-12-18 14:22:40 -08002599 schedule_work(&cw->work);
2600}
2601
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002602static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2603 struct kmem_cache *cachep)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002604{
2605 /*
2606 * We need to stop accounting when we kmalloc, because if the
2607 * corresponding kmalloc cache is not yet created, the first allocation
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002608 * in __memcg_schedule_kmem_cache_create will recurse.
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002609 *
2610 * However, it is better to enclose the whole function. Depending on
2611 * the debugging options enabled, INIT_WORK(), for instance, can
2612 * trigger an allocation. This too, will make us recurse. Because at
2613 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2614 * the safest choice is to do it like this, wrapping the whole function.
2615 */
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002616 current->memcg_kmem_skip_account = 1;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002617 __memcg_schedule_kmem_cache_create(memcg, cachep);
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002618 current->memcg_kmem_skip_account = 0;
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002619}
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002620
Glauber Costad7f25f82012-12-18 14:22:40 -08002621/*
2622 * Return the kmem_cache we're supposed to use for a slab allocation.
2623 * We try to use the current memcg's version of the cache.
2624 *
	2625	 * If the cache does not exist yet, i.e. we are the first user of it,
2626 * we either create it immediately, if possible, or create it asynchronously
2627 * in a workqueue.
2628 * In the latter case, we will let the current allocation go through with
2629 * the original cache.
2630 *
2631 * Can't be called in interrupt context or from kernel threads.
2632 * This function needs to be called with rcu_read_lock() held.
2633 */
Zhang Zhen056b7cc2014-12-12 16:55:38 -08002634struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002635{
2636 struct mem_cgroup *memcg;
Vladimir Davydov959c8962014-01-23 15:52:59 -08002637 struct kmem_cache *memcg_cachep;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002638 int kmemcg_id;
Glauber Costad7f25f82012-12-18 14:22:40 -08002639
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002640 VM_BUG_ON(!is_root_cache(cachep));
Glauber Costad7f25f82012-12-18 14:22:40 -08002641
Vladimir Davydov9d100c52014-12-12 16:54:53 -08002642 if (current->memcg_kmem_skip_account)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002643 return cachep;
2644
Vladimir Davydov8135be52014-12-12 16:56:38 -08002645 memcg = get_mem_cgroup_from_mm(current->mm);
Jason Low4db0c3c2015-04-15 16:14:08 -07002646 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002647 if (kmemcg_id < 0)
Li Zefanca0dde92013-04-29 15:08:57 -07002648 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08002649
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002650 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002651 if (likely(memcg_cachep))
2652 return memcg_cachep;
Li Zefanca0dde92013-04-29 15:08:57 -07002653
2654 /*
2655 * If we are in a safe context (can wait, and not in interrupt
	2656	 * context), we could be predictable and return right away.
2657 * This would guarantee that the allocation being performed
2658 * already belongs in the new cache.
2659 *
	2660	 * However, there are some clashes that can arise from locking.
2661 * For instance, because we acquire the slab_mutex while doing
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002662 * memcg_create_kmem_cache, this means no further allocation
2663 * could happen with the slab_mutex held. So it's better to
2664 * defer everything.
Li Zefanca0dde92013-04-29 15:08:57 -07002665 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002666 memcg_schedule_kmem_cache_create(memcg, cachep);
Li Zefanca0dde92013-04-29 15:08:57 -07002667out:
Vladimir Davydov8135be52014-12-12 16:56:38 -08002668 css_put(&memcg->css);
Li Zefanca0dde92013-04-29 15:08:57 -07002669 return cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002670}
Glauber Costad7f25f82012-12-18 14:22:40 -08002671
Vladimir Davydov8135be52014-12-12 16:56:38 -08002672void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2673{
2674 if (!is_root_cache(cachep))
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002675 css_put(&cachep->memcg_params.memcg->css);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002676}
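
/*
 * Editor's illustrative sketch (not part of memcontrol.c): how the
 * slab allocator is expected to bracket an accounted allocation with
 * the two helpers above. example_accounted_alloc() is hypothetical;
 * the real call sites are the memcg_kmem_get_cache()/put_cache()
 * wrappers used by the slab allocation hooks.
 */
static void *example_accounted_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	void *obj;

	cachep = __memcg_kmem_get_cache(cachep); /* per-memcg cache, or root */
	obj = kmem_cache_alloc(cachep, gfp);
	__memcg_kmem_put_cache(cachep);		 /* drop the css reference */
	return obj;
}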
2677
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002678/*
2679 * We need to verify if the allocation against current->mm->owner's memcg is
2680 * possible for the given order. But the page is not allocated yet, so we'll
2681 * need a further commit step to do the final arrangements.
2682 *
	2683	 * It is possible for the task to switch cgroups in the meantime, so at
2684 * commit time, we can't rely on task conversion any longer. We'll then use
2685 * the handle argument to return to the caller which cgroup we should commit
2686 * against. We could also return the memcg directly and avoid the pointer
2687 * passing, but a boolean return value gives better semantics considering
2688 * the compiled-out case as well.
2689 *
2690 * Returning true means the allocation is possible.
2691 */
2692bool
2693__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
2694{
2695 struct mem_cgroup *memcg;
2696 int ret;
2697
2698 *_memcg = NULL;
Glauber Costa6d42c232013-07-08 16:00:00 -07002699
Johannes Weinerdf381972014-04-07 15:37:43 -07002700 memcg = get_mem_cgroup_from_mm(current->mm);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002701
Vladimir Davydovcf2b8fb2014-10-09 15:28:59 -07002702 if (!memcg_kmem_is_active(memcg)) {
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002703 css_put(&memcg->css);
2704 return true;
2705 }
2706
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002707 ret = memcg_charge_kmem(memcg, gfp, 1 << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002708 if (!ret)
2709 *_memcg = memcg;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002710
2711 css_put(&memcg->css);
2712 return (ret == 0);
2713}
2714
2715void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
2716 int order)
2717{
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002718 VM_BUG_ON(mem_cgroup_is_root(memcg));
2719
2720 /* The page allocation failed. Revert */
2721 if (!page) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002722 memcg_uncharge_kmem(memcg, 1 << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002723 return;
2724 }
Johannes Weiner1306a852014-12-10 15:44:52 -08002725 page->mem_cgroup = memcg;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002726}
2727
2728void __memcg_kmem_uncharge_pages(struct page *page, int order)
2729{
Johannes Weiner1306a852014-12-10 15:44:52 -08002730 struct mem_cgroup *memcg = page->mem_cgroup;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002731
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002732 if (!memcg)
2733 return;
2734
Sasha Levin309381fea2014-01-23 15:52:54 -08002735 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Johannes Weiner29833312014-12-10 15:44:02 -08002736
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002737 memcg_uncharge_kmem(memcg, 1 << order);
Johannes Weiner1306a852014-12-10 15:44:52 -08002738 page->mem_cgroup = NULL;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002739}
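
/*
 * Editor's illustrative sketch (not part of memcontrol.c): how the
 * page-level hooks above pair up around a kernel page allocation.
 * example_alloc_kmem_pages() is hypothetical; the real callers are the
 * alloc_kmem_pages() wrappers, and __memcg_kmem_uncharge_pages() is
 * the matching hook on the free path.
 */
static struct page *example_alloc_kmem_pages(gfp_t gfp_mask, int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page *page;

	if (!__memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
		return NULL;			/* charge was refused */

	page = alloc_pages(gfp_mask, order);
	if (memcg)
		/* binds page->mem_cgroup, or uncharges if @page is NULL */
		__memcg_kmem_commit_charge(page, memcg, order);
	return page;
}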
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002740
2741struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
2742{
2743 struct mem_cgroup *memcg = NULL;
2744 struct kmem_cache *cachep;
2745 struct page *page;
2746
2747 page = virt_to_head_page(ptr);
2748 if (PageSlab(page)) {
2749 cachep = page->slab_cache;
2750 if (!is_root_cache(cachep))
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002751 memcg = cachep->memcg_params.memcg;
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002752 } else
2753 /* page allocated by alloc_kmem_pages */
2754 memcg = page->mem_cgroup;
2755
2756 return memcg;
2757}
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002758#endif /* CONFIG_MEMCG_KMEM */
2759
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002760#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2761
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002762/*
2763 * Because tail pages are not marked as "used", set it. We're under
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002764 * zone->lru_lock, 'splitting on pmd' and compound_lock.
	2765	 * charge/uncharge will never happen and move_account() is done under
2766 * compound_lock(), so we don't have to take care of races.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002767 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002768void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002769{
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002770 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002771
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002772 if (mem_cgroup_disabled())
2773 return;
David Rientjesb070e652013-05-07 16:18:09 -07002774
Johannes Weiner29833312014-12-10 15:44:02 -08002775 for (i = 1; i < HPAGE_PMD_NR; i++)
Johannes Weiner1306a852014-12-10 15:44:52 -08002776 head[i].mem_cgroup = head->mem_cgroup;
Michal Hockob9982f82014-12-10 15:43:51 -08002777
Johannes Weiner1306a852014-12-10 15:44:52 -08002778 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
David Rientjesb070e652013-05-07 16:18:09 -07002779 HPAGE_PMD_NR);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002780}
Hugh Dickins12d27102012-01-12 17:19:52 -08002781#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002782
Andrew Mortonc255a452012-07-31 16:43:02 -07002783#ifdef CONFIG_MEMCG_SWAP
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002784static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2785 bool charge)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002786{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002787 int val = (charge) ? 1 : -1;
2788 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002789}
Daisuke Nishimura02491442010-03-10 15:22:17 -08002790
2791/**
2792 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2793 * @entry: swap entry to be moved
2794 * @from: mem_cgroup which the entry is moved from
2795 * @to: mem_cgroup which the entry is moved to
2796 *
2797 * It succeeds only when the swap_cgroup's record for this entry is the same
2798 * as the mem_cgroup's id of @from.
2799 *
2800 * Returns 0 on success, -EINVAL on failure.
2801 *
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002802 * The caller must have charged to @to, IOW, called page_counter_charge() about
Daisuke Nishimura02491442010-03-10 15:22:17 -08002803 * both res and memsw, and called css_get().
2804 */
2805static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002806 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002807{
2808 unsigned short old_id, new_id;
2809
Li Zefan34c00c32013-09-23 16:56:01 +08002810 old_id = mem_cgroup_id(from);
2811 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002812
2813 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08002814 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002815 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002816 return 0;
2817 }
2818 return -EINVAL;
2819}
2820#else
2821static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002822 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002823{
2824 return -EINVAL;
2825}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002826#endif
2827
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002828static DEFINE_MUTEX(memcg_limit_mutex);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07002829
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08002830static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002831 unsigned long limit)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002832{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002833 unsigned long curusage;
2834 unsigned long oldusage;
2835 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002836 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002837 int ret;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002838
2839 /*
	2840	 * To keep hierarchical_reclaim simple, how long we should retry
	2841	 * depends on the caller. We set our retry count to be a function
	2842	 * of the number of children we should visit in this loop.
2843 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002844 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2845 mem_cgroup_count_children(memcg);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002846
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002847 oldusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002848
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002849 do {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002850 if (signal_pending(current)) {
2851 ret = -EINTR;
2852 break;
2853 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002854
2855 mutex_lock(&memcg_limit_mutex);
2856 if (limit > memcg->memsw.limit) {
2857 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002858 ret = -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002859 break;
2860 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002861 if (limit > memcg->memory.limit)
2862 enlarge = true;
2863 ret = page_counter_limit(&memcg->memory, limit);
2864 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002865
2866 if (!ret)
2867 break;
2868
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002869 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2870
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002871 curusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002872 /* Usage is reduced ? */
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002873 if (curusage >= oldusage)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002874 retry_count--;
2875 else
2876 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002877 } while (retry_count);
2878
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002879 if (!ret && enlarge)
2880 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08002881
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002882 return ret;
2883}
2884
Li Zefan338c8432009-06-17 16:27:15 -07002885static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002886 unsigned long limit)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002887{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002888 unsigned long curusage;
2889 unsigned long oldusage;
2890 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002891 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002892 int ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002893
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002894 /* see mem_cgroup_resize_res_limit */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002895 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2896 mem_cgroup_count_children(memcg);
2897
2898 oldusage = page_counter_read(&memcg->memsw);
2899
2900 do {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002901 if (signal_pending(current)) {
2902 ret = -EINTR;
2903 break;
2904 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002905
2906 mutex_lock(&memcg_limit_mutex);
2907 if (limit < memcg->memory.limit) {
2908 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002909 ret = -EINVAL;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002910 break;
2911 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002912 if (limit > memcg->memsw.limit)
2913 enlarge = true;
2914 ret = page_counter_limit(&memcg->memsw, limit);
2915 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002916
2917 if (!ret)
2918 break;
2919
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002920 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2921
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002922 curusage = page_counter_read(&memcg->memsw);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002923 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002924 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002925 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002926 else
2927 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002928 } while (retry_count);
2929
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002930 if (!ret && enlarge)
2931 memcg_oom_recover(memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002932
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002933 return ret;
2934}
2935
Andrew Morton0608f432013-09-24 15:27:41 -07002936unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2937 gfp_t gfp_mask,
2938 unsigned long *total_scanned)
2939{
2940 unsigned long nr_reclaimed = 0;
2941 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2942 unsigned long reclaimed;
2943 int loop = 0;
2944 struct mem_cgroup_tree_per_zone *mctz;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002945 unsigned long excess;
Andrew Morton0608f432013-09-24 15:27:41 -07002946 unsigned long nr_scanned;
2947
2948 if (order > 0)
2949 return 0;
2950
2951 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2952 /*
	2953	 * This loop can run for a while, especially if mem_cgroups continuously
	2954	 * keep exceeding their soft limit and putting the system under
	2955	 * pressure.
2956 */
2957 do {
2958 if (next_mz)
2959 mz = next_mz;
2960 else
2961 mz = mem_cgroup_largest_soft_limit_node(mctz);
2962 if (!mz)
2963 break;
2964
2965 nr_scanned = 0;
2966 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2967 gfp_mask, &nr_scanned);
2968 nr_reclaimed += reclaimed;
2969 *total_scanned += nr_scanned;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002970 spin_lock_irq(&mctz->lock);
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002971 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07002972
2973 /*
2974 * If we failed to reclaim anything from this memory cgroup
2975 * it is time to move on to the next cgroup
2976 */
2977 next_mz = NULL;
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002978 if (!reclaimed)
2979 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2980
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002981 excess = soft_limit_excess(mz->memcg);
Andrew Morton0608f432013-09-24 15:27:41 -07002982 /*
2983 * One school of thought says that we should not add
2984 * back the node to the tree if reclaim returns 0.
	2985	 * But our reclaim could return 0 simply because, due
	2986	 * to priority, we are exposing a smaller subset of
2987 * memory to reclaim from. Consider this as a longer
2988 * term TODO.
2989 */
2990 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07002991 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002992 spin_unlock_irq(&mctz->lock);
Andrew Morton0608f432013-09-24 15:27:41 -07002993 css_put(&mz->memcg->css);
2994 loop++;
2995 /*
2996 * Could not reclaim anything and there are no more
2997 * mem cgroups to try or we seem to be looping without
2998 * reclaiming anything.
2999 */
3000 if (!nr_reclaimed &&
3001 (next_mz == NULL ||
3002 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3003 break;
3004 } while (!nr_reclaimed);
3005 if (next_mz)
3006 css_put(&next_mz->memcg->css);
3007 return nr_reclaimed;
3008}
3009
Tejun Heoea280e72014-05-16 13:22:48 -04003010/*
3011 * Test whether @memcg has children, dead or alive. Note that this
3012 * function doesn't care whether @memcg has use_hierarchy enabled and
3013 * returns %true if there are child csses according to the cgroup
	3014	 * hierarchy. Testing use_hierarchy is the caller's responsibility.
3015 */
Glauber Costab5f99b52013-02-22 16:34:53 -08003016static inline bool memcg_has_children(struct mem_cgroup *memcg)
3017{
Tejun Heoea280e72014-05-16 13:22:48 -04003018 bool ret;
3019
Johannes Weiner696ac172013-10-31 16:34:15 -07003020 /*
Tejun Heoea280e72014-05-16 13:22:48 -04003021 * The lock does not prevent addition or deletion of children, but
3022 * it prevents a new child from being initialized based on this
3023 * parent in css_online(), so it's enough to decide whether
3024 * hierarchically inherited attributes can still be changed or not.
Johannes Weiner696ac172013-10-31 16:34:15 -07003025 */
Tejun Heoea280e72014-05-16 13:22:48 -04003026 lockdep_assert_held(&memcg_create_mutex);
3027
3028 rcu_read_lock();
3029 ret = css_next_child(NULL, &memcg->css);
3030 rcu_read_unlock();
3031 return ret;
Glauber Costab5f99b52013-02-22 16:34:53 -08003032}
3033
3034/*
Michal Hockoc26251f2012-10-26 13:37:28 +02003035 * Reclaims as many pages from the given memcg as possible and moves
3036 * the rest to the parent.
3037 *
3038 * Caller is responsible for holding css reference for memcg.
3039 */
3040static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3041{
3042 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02003043
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003044	/* we call try-to-free pages to make this cgroup empty */
3045 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003046 /* try to free all pages in this cgroup */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003047 while (nr_retries && page_counter_read(&memcg->memory)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003048 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003049
Michal Hockoc26251f2012-10-26 13:37:28 +02003050 if (signal_pending(current))
3051 return -EINTR;
3052
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003053 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3054 GFP_KERNEL, true);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003055 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003056 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003057 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02003058 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003059 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003060
3061 }
Michal Hockoab5196c2012-10-26 13:37:32 +02003062
3063 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003064}
3065
Tejun Heo6770c642014-05-13 12:16:21 -04003066static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3067 char *buf, size_t nbytes,
3068 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003069{
Tejun Heo6770c642014-05-13 12:16:21 -04003070 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02003071
Michal Hockod8423012012-10-26 13:37:29 +02003072 if (mem_cgroup_is_root(memcg))
3073 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04003074 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003075}
3076
Tejun Heo182446d2013-08-08 20:11:24 -04003077static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3078 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003079{
Tejun Heo182446d2013-08-08 20:11:24 -04003080 return mem_cgroup_from_css(css)->use_hierarchy;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003081}
3082
Tejun Heo182446d2013-08-08 20:11:24 -04003083static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3084 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003085{
3086 int retval = 0;
Tejun Heo182446d2013-08-08 20:11:24 -04003087 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04003088 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003089
Glauber Costa09998212013-02-22 16:34:55 -08003090 mutex_lock(&memcg_create_mutex);
Glauber Costa567fb432012-07-31 16:43:07 -07003091
3092 if (memcg->use_hierarchy == val)
3093 goto out;
3094
Balbir Singh18f59ea2009-01-07 18:08:07 -08003095 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003096 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08003097 * in the child subtrees. If it is unset, then the change can
3098 * occur, provided the current cgroup has no children.
3099 *
	3100	 * For the root cgroup, parent_mem is NULL; we allow the value to be
	3101	 * set if there are no children.
3102 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003103 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08003104 (val == 1 || val == 0)) {
Tejun Heoea280e72014-05-16 13:22:48 -04003105 if (!memcg_has_children(memcg))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003106 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003107 else
3108 retval = -EBUSY;
3109 } else
3110 retval = -EINVAL;
Glauber Costa567fb432012-07-31 16:43:07 -07003111
3112out:
Glauber Costa09998212013-02-22 16:34:55 -08003113 mutex_unlock(&memcg_create_mutex);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003114
3115 return retval;
3116}
3117
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003118static unsigned long tree_stat(struct mem_cgroup *memcg,
3119 enum mem_cgroup_stat_index idx)
Johannes Weinerce00a962014-09-05 08:43:57 -04003120{
3121 struct mem_cgroup *iter;
3122 long val = 0;
3123
3124 /* Per-cpu values can be negative, use a signed accumulator */
3125 for_each_mem_cgroup_tree(iter, memcg)
3126 val += mem_cgroup_read_stat(iter, idx);
3127
3128 if (val < 0) /* race ? */
3129 val = 0;
3130 return val;
3131}
3132
3133static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3134{
3135 u64 val;
3136
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003137 if (mem_cgroup_is_root(memcg)) {
3138 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
3139 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
3140 if (swap)
3141 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
3142 } else {
Johannes Weinerce00a962014-09-05 08:43:57 -04003143 if (!swap)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003144 val = page_counter_read(&memcg->memory);
Johannes Weinerce00a962014-09-05 08:43:57 -04003145 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003146 val = page_counter_read(&memcg->memsw);
Johannes Weinerce00a962014-09-05 08:43:57 -04003147 }
Johannes Weinerce00a962014-09-05 08:43:57 -04003148 return val << PAGE_SHIFT;
3149}
3150
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003151enum {
3152 RES_USAGE,
3153 RES_LIMIT,
3154 RES_MAX_USAGE,
3155 RES_FAILCNT,
3156 RES_SOFT_LIMIT,
3157};
Johannes Weinerce00a962014-09-05 08:43:57 -04003158
Tejun Heo791badb2013-12-05 12:28:02 -05003159static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07003160 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003161{
Tejun Heo182446d2013-08-08 20:11:24 -04003162 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003163 struct page_counter *counter;
Tejun Heoaf36f902012-04-01 12:09:55 -07003164
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003165 switch (MEMFILE_TYPE(cft->private)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003166 case _MEM:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003167 counter = &memcg->memory;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003168 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003169 case _MEMSWAP:
3170 counter = &memcg->memsw;
3171 break;
3172 case _KMEM:
3173 counter = &memcg->kmem;
3174 break;
3175 default:
3176 BUG();
3177 }
3178
3179 switch (MEMFILE_ATTR(cft->private)) {
3180 case RES_USAGE:
3181 if (counter == &memcg->memory)
3182 return mem_cgroup_usage(memcg, false);
3183 if (counter == &memcg->memsw)
3184 return mem_cgroup_usage(memcg, true);
3185 return (u64)page_counter_read(counter) * PAGE_SIZE;
3186 case RES_LIMIT:
3187 return (u64)counter->limit * PAGE_SIZE;
3188 case RES_MAX_USAGE:
3189 return (u64)counter->watermark * PAGE_SIZE;
3190 case RES_FAILCNT:
3191 return counter->failcnt;
3192 case RES_SOFT_LIMIT:
3193 return (u64)memcg->soft_limit * PAGE_SIZE;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003194 default:
3195 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003196 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003197}
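
/*
 * Worked example (editor's addition): the counters are kept in pages,
 * while the cgroup files report bytes. With a typical (but
 * architecture-dependent) 4KiB PAGE_SIZE, a limit of 25600 pages is
 * reported through RES_LIMIT as 25600 * 4096 = 104857600 bytes, i.e.
 * 100M in memory.limit_in_bytes.
 */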
Glauber Costa510fc4e2012-12-18 14:21:47 -08003198
Glauber Costa510fc4e2012-12-18 14:21:47 -08003199#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003200static int memcg_activate_kmem(struct mem_cgroup *memcg,
3201 unsigned long nr_pages)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003202{
3203 int err = 0;
3204 int memcg_id;
3205
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003206 BUG_ON(memcg->kmemcg_id >= 0);
Vladimir Davydov2788cf0c2015-02-12 14:59:38 -08003207 BUG_ON(memcg->kmem_acct_activated);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003208 BUG_ON(memcg->kmem_acct_active);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003209
3210 /*
Glauber Costa510fc4e2012-12-18 14:21:47 -08003211 * For simplicity, we won't allow this to be disabled. It also can't
3212 * be changed if the cgroup has children already, or if tasks had
3213 * already joined.
3214 *
3215 * If tasks join before we set the limit, a person looking at
3216 * kmem.usage_in_bytes will have no way to determine when it took
3217 * place, which makes the value quite meaningless.
3218 *
3219 * After it first became limited, changes in the value of the limit are
3220 * of course permitted.
Glauber Costa510fc4e2012-12-18 14:21:47 -08003221 */
Glauber Costa09998212013-02-22 16:34:55 -08003222 mutex_lock(&memcg_create_mutex);
Tejun Heoea280e72014-05-16 13:22:48 -04003223 if (cgroup_has_tasks(memcg->css.cgroup) ||
3224 (memcg->use_hierarchy && memcg_has_children(memcg)))
Vladimir Davydovd6441632014-01-23 15:53:09 -08003225 err = -EBUSY;
Glauber Costa09998212013-02-22 16:34:55 -08003226 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003227 if (err)
3228 goto out;
3229
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003230 memcg_id = memcg_alloc_cache_id();
Vladimir Davydovd6441632014-01-23 15:53:09 -08003231 if (memcg_id < 0) {
3232 err = memcg_id;
3233 goto out;
3234 }
3235
Vladimir Davydovd6441632014-01-23 15:53:09 -08003236 /*
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003237	 * We couldn't have accounted to this cgroup, because it hasn't been
	3238	 * activated yet, so this should succeed.
Vladimir Davydovd6441632014-01-23 15:53:09 -08003239 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003240 err = page_counter_limit(&memcg->kmem, nr_pages);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003241 VM_BUG_ON(err);
3242
3243 static_key_slow_inc(&memcg_kmem_enabled_key);
3244 /*
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003245 * A memory cgroup is considered kmem-active as soon as it gets
3246 * kmemcg_id. Setting the id after enabling static branching will
Vladimir Davydovd6441632014-01-23 15:53:09 -08003247 * guarantee no one starts accounting before all call sites are
3248 * patched.
3249 */
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003250 memcg->kmemcg_id = memcg_id;
Vladimir Davydov2788cf0c2015-02-12 14:59:38 -08003251 memcg->kmem_acct_activated = true;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003252 memcg->kmem_acct_active = true;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003253out:
Vladimir Davydovd6441632014-01-23 15:53:09 -08003254 return err;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003255}
3256
Vladimir Davydovd6441632014-01-23 15:53:09 -08003257static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003258 unsigned long limit)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003259{
3260 int ret;
3261
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003262 mutex_lock(&memcg_limit_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003263 if (!memcg_kmem_is_active(memcg))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003264 ret = memcg_activate_kmem(memcg, limit);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003265 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003266 ret = page_counter_limit(&memcg->kmem, limit);
3267 mutex_unlock(&memcg_limit_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003268 return ret;
3269}
3270
Glauber Costa55007d82012-12-18 14:22:38 -08003271static int memcg_propagate_kmem(struct mem_cgroup *memcg)
Glauber Costa510fc4e2012-12-18 14:21:47 -08003272{
Glauber Costa55007d82012-12-18 14:22:38 -08003273 int ret = 0;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003274 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003275
Glauber Costa510fc4e2012-12-18 14:21:47 -08003276 if (!parent)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003277 return 0;
Glauber Costa55007d82012-12-18 14:22:38 -08003278
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003279 mutex_lock(&memcg_limit_mutex);
Glauber Costaa8964b92012-12-18 14:22:09 -08003280 /*
Vladimir Davydovd6441632014-01-23 15:53:09 -08003281 * If the parent cgroup is not kmem-active now, it cannot be activated
3282 * after this point, because it has at least one child already.
Glauber Costaa8964b92012-12-18 14:22:09 -08003283 */
Vladimir Davydovd6441632014-01-23 15:53:09 -08003284 if (memcg_kmem_is_active(parent))
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003285 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
3286 mutex_unlock(&memcg_limit_mutex);
Glauber Costa55007d82012-12-18 14:22:38 -08003287 return ret;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003288}
Vladimir Davydovd6441632014-01-23 15:53:09 -08003289#else
3290static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003291 unsigned long limit)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003292{
3293 return -EINVAL;
3294}
Hugh Dickins6d0439902013-02-22 16:35:50 -08003295#endif /* CONFIG_MEMCG_KMEM */
Glauber Costa510fc4e2012-12-18 14:21:47 -08003296
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003297/*
	3298	 * The user of this function is the write handler for the limit files:
	3299	 * RES_LIMIT and RES_SOFT_LIMIT.
3300 */
Tejun Heo451af502014-05-13 12:16:21 -04003301static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3302 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003303{
Tejun Heo451af502014-05-13 12:16:21 -04003304 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003305 unsigned long nr_pages;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003306 int ret;
3307
Tejun Heo451af502014-05-13 12:16:21 -04003308 buf = strstrip(buf);
Johannes Weiner650c5e52015-02-11 15:26:03 -08003309 ret = page_counter_memparse(buf, "-1", &nr_pages);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003310 if (ret)
3311 return ret;
Tejun Heoaf36f902012-04-01 12:09:55 -07003312
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003313 switch (MEMFILE_ATTR(of_cft(of)->private)) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003314 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07003315 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3316 ret = -EINVAL;
3317 break;
3318 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003319 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3320 case _MEM:
3321 ret = mem_cgroup_resize_limit(memcg, nr_pages);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003322 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003323 case _MEMSWAP:
3324 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3325 break;
3326 case _KMEM:
3327 ret = memcg_update_kmem_limit(memcg, nr_pages);
3328 break;
3329 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003330 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07003331 case RES_SOFT_LIMIT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003332 memcg->soft_limit = nr_pages;
3333 ret = 0;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003334 break;
3335 }
Tejun Heo451af502014-05-13 12:16:21 -04003336 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003337}
3338
Tejun Heo6770c642014-05-13 12:16:21 -04003339static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3340 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003341{
Tejun Heo6770c642014-05-13 12:16:21 -04003342 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003343 struct page_counter *counter;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003344
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003345 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3346 case _MEM:
3347 counter = &memcg->memory;
3348 break;
3349 case _MEMSWAP:
3350 counter = &memcg->memsw;
3351 break;
3352 case _KMEM:
3353 counter = &memcg->kmem;
3354 break;
3355 default:
3356 BUG();
3357 }
Tejun Heoaf36f902012-04-01 12:09:55 -07003358
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003359 switch (MEMFILE_ATTR(of_cft(of)->private)) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003360 case RES_MAX_USAGE:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003361 page_counter_reset_watermark(counter);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003362 break;
3363 case RES_FAILCNT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003364 counter->failcnt = 0;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003365 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003366 default:
3367 BUG();
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003368 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003369
Tejun Heo6770c642014-05-13 12:16:21 -04003370 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003371}
3372
Tejun Heo182446d2013-08-08 20:11:24 -04003373static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003374 struct cftype *cft)
3375{
Tejun Heo182446d2013-08-08 20:11:24 -04003376 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003377}
3378
Daisuke Nishimura02491442010-03-10 15:22:17 -08003379#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04003380static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003381 struct cftype *cft, u64 val)
3382{
Tejun Heo182446d2013-08-08 20:11:24 -04003383 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003384
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08003385 if (val & ~MOVE_MASK)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003386 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003387
Glauber Costaee5e8472013-02-22 16:34:50 -08003388 /*
3389 * No kind of locking is needed in here, because ->can_attach() will
3390 * check this value once in the beginning of the process, and then carry
	3391	 * check this value once at the beginning of the process, and then carry
3392 * affect task migrations starting after the change.
3393 */
3394 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003395 return 0;
3396}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003397#else
Tejun Heo182446d2013-08-08 20:11:24 -04003398static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08003399 struct cftype *cft, u64 val)
3400{
3401 return -ENOSYS;
3402}
3403#endif
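/*
 * Example (illustrative, assuming the MOVE_ANON/MOVE_FILE bit layout of
 * MOVE_MASK): bit 0 selects anonymous pages, bit 1 selects file pages, so
 *
 *   # echo 3 > memory.move_charge_at_immigrate    (move anon + file charges)
 *   # echo 0 > memory.move_charge_at_immigrate    (disable charge moving)
 *
 * Values with bits outside MOVE_MASK are rejected with -EINVAL above.
 */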
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003404
Ying Han406eb0c2011-05-26 16:25:37 -07003405#ifdef CONFIG_NUMA
Tejun Heo2da8ca82013-12-05 12:28:04 -05003406static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003407{
Greg Thelen25485de2013-11-12 15:07:40 -08003408 struct numa_stat {
3409 const char *name;
3410 unsigned int lru_mask;
3411 };
3412
3413 static const struct numa_stat stats[] = {
3414 { "total", LRU_ALL },
3415 { "file", LRU_ALL_FILE },
3416 { "anon", LRU_ALL_ANON },
3417 { "unevictable", BIT(LRU_UNEVICTABLE) },
3418 };
3419 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07003420 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08003421 unsigned long nr;
Tejun Heo2da8ca82013-12-05 12:28:04 -05003422 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Ying Han406eb0c2011-05-26 16:25:37 -07003423
Greg Thelen25485de2013-11-12 15:07:40 -08003424 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3425 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3426 seq_printf(m, "%s=%lu", stat->name, nr);
3427 for_each_node_state(nid, N_MEMORY) {
3428 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3429 stat->lru_mask);
3430 seq_printf(m, " N%d=%lu", nid, nr);
3431 }
3432 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003433 }
Ying Han406eb0c2011-05-26 16:25:37 -07003434
Ying Han071aee12013-11-12 15:07:41 -08003435 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3436 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07003437
Ying Han071aee12013-11-12 15:07:41 -08003438 nr = 0;
3439 for_each_mem_cgroup_tree(iter, memcg)
3440 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3441 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3442 for_each_node_state(nid, N_MEMORY) {
3443 nr = 0;
3444 for_each_mem_cgroup_tree(iter, memcg)
3445 nr += mem_cgroup_node_nr_lru_pages(
3446 iter, nid, stat->lru_mask);
3447 seq_printf(m, " N%d=%lu", nid, nr);
3448 }
3449 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003450 }
Ying Han406eb0c2011-05-26 16:25:37 -07003451
Ying Han406eb0c2011-05-26 16:25:37 -07003452 return 0;
3453}
3454#endif /* CONFIG_NUMA */
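/*
 * Sample memory.numa_stat output produced by memcg_numa_stat_show() (the
 * numbers are illustrative):
 *
 *   total=2456 N0=1342 N1=1114
 *   file=1034 N0=520 N1=514
 *   anon=1422 N0=822 N1=600
 *   unevictable=0 N0=0 N1=0
 *   hierarchical_total=... (same layout, summed over the whole subtree)
 */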
3455
Tejun Heo2da8ca82013-12-05 12:28:04 -05003456static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003457{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003458 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003459 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003460 struct mem_cgroup *mi;
3461 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003462
Greg Thelen0ca44b12015-02-11 15:25:58 -08003463 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3464 MEM_CGROUP_STAT_NSTATS);
3465 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3466 MEM_CGROUP_EVENTS_NSTATS);
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08003467 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3468
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003469 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07003470 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003471 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003472 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
3473 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003474 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003475
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003476 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3477 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3478 mem_cgroup_read_events(memcg, i));
3479
3480 for (i = 0; i < NR_LRU_LISTS; i++)
3481 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3482 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3483
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003484 /* Hierarchical information */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003485 memory = memsw = PAGE_COUNTER_MAX;
3486 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3487 memory = min(memory, mi->memory.limit);
3488 memsw = min(memsw, mi->memsw.limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003489 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003490 seq_printf(m, "hierarchical_memory_limit %llu\n",
3491 (u64)memory * PAGE_SIZE);
3492 if (do_swap_account)
3493 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3494 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003495
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003496 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3497 long long val = 0;
3498
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07003499 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003500 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003501 for_each_mem_cgroup_tree(mi, memcg)
3502 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3503 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
3504 }
3505
3506 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3507 unsigned long long val = 0;
3508
3509 for_each_mem_cgroup_tree(mi, memcg)
3510 val += mem_cgroup_read_events(mi, i);
3511 seq_printf(m, "total_%s %llu\n",
3512 mem_cgroup_events_names[i], val);
3513 }
3514
3515 for (i = 0; i < NR_LRU_LISTS; i++) {
3516 unsigned long long val = 0;
3517
3518 for_each_mem_cgroup_tree(mi, memcg)
3519 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3520 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003521 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003522
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003523#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003524 {
3525 int nid, zid;
3526 struct mem_cgroup_per_zone *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07003527 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003528 unsigned long recent_rotated[2] = {0, 0};
3529 unsigned long recent_scanned[2] = {0, 0};
3530
3531 for_each_online_node(nid)
3532 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
Jianyu Zhane2318752014-06-06 14:38:20 -07003533 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
Hugh Dickins89abfab2012-05-29 15:06:53 -07003534 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003535
Hugh Dickins89abfab2012-05-29 15:06:53 -07003536 recent_rotated[0] += rstat->recent_rotated[0];
3537 recent_rotated[1] += rstat->recent_rotated[1];
3538 recent_scanned[0] += rstat->recent_scanned[0];
3539 recent_scanned[1] += rstat->recent_scanned[1];
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003540 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07003541 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3542 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3543 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3544 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003545 }
3546#endif
3547
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003548 return 0;
3549}
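/*
 * Abridged memory.stat output as produced above (values are illustrative):
 *
 *   cache 1234567168
 *   rss 987654144
 *   ...
 *   hierarchical_memory_limit 9223372036854771712
 *   total_cache 2345678848
 *   total_rss 1987654144
 *   ...
 *   recent_rotated_anon 0        (CONFIG_DEBUG_VM only)
 */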
3550
Tejun Heo182446d2013-08-08 20:11:24 -04003551static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3552 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003553{
Tejun Heo182446d2013-08-08 20:11:24 -04003554 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003555
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07003556 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003557}
3558
Tejun Heo182446d2013-08-08 20:11:24 -04003559static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3560 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003561{
Tejun Heo182446d2013-08-08 20:11:24 -04003562 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08003563
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003564 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003565 return -EINVAL;
3566
Linus Torvalds14208b02014-06-09 15:03:33 -07003567 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003568 memcg->swappiness = val;
3569 else
3570 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08003571
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003572 return 0;
3573}
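/*
 * Example (illustrative): per-memcg swappiness accepts 0-100; writing the
 * root cgroup's file falls through to the global vm_swappiness instead:
 *
 *   # echo 10 > memory.swappiness
 */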
3574
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003575static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3576{
3577 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003578 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003579 int i;
3580
3581 rcu_read_lock();
3582 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003583 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003584 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003585 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003586
3587 if (!t)
3588 goto unlock;
3589
Johannes Weinerce00a962014-09-05 08:43:57 -04003590 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003591
3592 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07003593	 * current_threshold points to the threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003594	 * If that is not the case, a threshold was crossed after the last
3595	 * call of __mem_cgroup_threshold().
3596 */
Phil Carmody5407a562010-05-26 14:42:42 -07003597 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003598
3599 /*
3600 * Iterate backward over array of thresholds starting from
3601 * current_threshold and check if a threshold is crossed.
3602	 * If none of the thresholds below usage is crossed, we read
3603 * only one element of the array here.
3604 */
3605 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3606 eventfd_signal(t->entries[i].eventfd, 1);
3607
3608 /* i = current_threshold + 1 */
3609 i++;
3610
3611 /*
3612 * Iterate forward over array of thresholds starting from
3613 * current_threshold+1 and check if a threshold is crossed.
3614	 * If none of the thresholds above usage is crossed, we read
3615 * only one element of the array here.
3616 */
3617 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3618 eventfd_signal(t->entries[i].eventfd, 1);
3619
3620 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003621 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003622unlock:
3623 rcu_read_unlock();
3624}
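/*
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and usage
 * initially below 4M (current_threshold == -1), a usage reading of 12M makes
 * the forward scan signal the 4M and 8M eventfds and leaves current_threshold
 * at the 8M entry; a later reading of 3M makes the backward scan signal 8M
 * and 4M again (crossed downwards) and resets current_threshold to -1.
 */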
3625
3626static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3627{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003628 while (memcg) {
3629 __mem_cgroup_threshold(memcg, false);
3630 if (do_swap_account)
3631 __mem_cgroup_threshold(memcg, true);
3632
3633 memcg = parent_mem_cgroup(memcg);
3634 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003635}
3636
3637static int compare_thresholds(const void *a, const void *b)
3638{
3639 const struct mem_cgroup_threshold *_a = a;
3640 const struct mem_cgroup_threshold *_b = b;
3641
Greg Thelen2bff24a2013-09-11 14:23:08 -07003642 if (_a->threshold > _b->threshold)
3643 return 1;
3644
3645 if (_a->threshold < _b->threshold)
3646 return -1;
3647
3648 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003649}
3650
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003651static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003652{
3653 struct mem_cgroup_eventfd_list *ev;
3654
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003655 spin_lock(&memcg_oom_lock);
3656
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003657 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003658 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003659
3660 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003661 return 0;
3662}
3663
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003664static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003665{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003666 struct mem_cgroup *iter;
3667
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003668 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003669 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003670}
3671
Tejun Heo59b6f872013-11-22 18:20:43 -05003672static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003673 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003674{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003675 struct mem_cgroup_thresholds *thresholds;
3676 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003677 unsigned long threshold;
3678 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003679 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003680
Johannes Weiner650c5e52015-02-11 15:26:03 -08003681 ret = page_counter_memparse(args, "-1", &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003682 if (ret)
3683 return ret;
3684
3685 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003686
Johannes Weiner05b84302014-08-06 16:05:59 -07003687 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003688 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003689 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003690 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003691 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003692 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003693 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003694 BUG();
3695
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003696 /* Check if a threshold crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003697 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003698 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3699
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003700 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003701
3702 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003703 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003704 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003705 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003706 ret = -ENOMEM;
3707 goto unlock;
3708 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003709 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003710
3711 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003712 if (thresholds->primary) {
3713 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003714 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003715 }
3716
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003717 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003718 new->entries[size - 1].eventfd = eventfd;
3719 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003720
3721	 /* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003722 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003723 compare_thresholds, NULL);
3724
3725 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003726 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003727 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07003728 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003729 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003730 * new->current_threshold will not be used until
3731 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003732 * it here.
3733 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003734 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07003735 } else
3736 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003737 }
3738
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003739 /* Free old spare buffer and save old primary buffer as spare */
3740 kfree(thresholds->spare);
3741 thresholds->spare = thresholds->primary;
3742
3743 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003744
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003745 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003746 synchronize_rcu();
3747
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003748unlock:
3749 mutex_unlock(&memcg->thresholds_lock);
3750
3751 return ret;
3752}
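/*
 * Sketch of the double buffering used above: the old spare array is freed,
 * the current primary is kept as the new spare, the freshly built sorted
 * array is published with rcu_assign_pointer(), and synchronize_rcu()
 * guarantees no reader still holds a reference to the old primary before
 * it can be handed out again.
 */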
3753
Tejun Heo59b6f872013-11-22 18:20:43 -05003754static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003755 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003756{
Tejun Heo59b6f872013-11-22 18:20:43 -05003757 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003758}
3759
Tejun Heo59b6f872013-11-22 18:20:43 -05003760static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003761 struct eventfd_ctx *eventfd, const char *args)
3762{
Tejun Heo59b6f872013-11-22 18:20:43 -05003763 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003764}
3765
Tejun Heo59b6f872013-11-22 18:20:43 -05003766static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003767 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003768{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003769 struct mem_cgroup_thresholds *thresholds;
3770 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003771 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003772 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003773
3774 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07003775
3776 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003777 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003778 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003779 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003780 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003781 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003782 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003783 BUG();
3784
Anton Vorontsov371528c2012-02-24 05:14:46 +04003785 if (!thresholds->primary)
3786 goto unlock;
3787
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003788 /* Check if a threshold crossed before removing */
3789 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3790
3791	 /* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003792 size = 0;
3793 for (i = 0; i < thresholds->primary->size; i++) {
3794 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003795 size++;
3796 }
3797
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003798 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003799
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003800 /* Set thresholds array to NULL if we don't have thresholds */
3801 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003802 kfree(new);
3803 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003804 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003805 }
3806
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003807 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003808
3809 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003810 new->current_threshold = -1;
3811 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3812 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003813 continue;
3814
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003815 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07003816 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003817 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003818 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003819 * until rcu_assign_pointer(), so it's safe to increment
3820 * it here.
3821 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003822 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003823 }
3824 j++;
3825 }
3826
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003827swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003828 /* Swap primary and spare array */
3829 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07003830 /* If all events are unregistered, free the spare array */
3831 if (!new) {
3832 kfree(thresholds->spare);
3833 thresholds->spare = NULL;
3834 }
3835
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003836 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003837
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003838 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003839 synchronize_rcu();
Anton Vorontsov371528c2012-02-24 05:14:46 +04003840unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003841 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003842}
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003843
Tejun Heo59b6f872013-11-22 18:20:43 -05003844static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003845 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003846{
Tejun Heo59b6f872013-11-22 18:20:43 -05003847 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003848}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003849
Tejun Heo59b6f872013-11-22 18:20:43 -05003850static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003851 struct eventfd_ctx *eventfd)
3852{
Tejun Heo59b6f872013-11-22 18:20:43 -05003853 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003854}
3855
Tejun Heo59b6f872013-11-22 18:20:43 -05003856static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003857 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003858{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003859 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003860
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003861 event = kmalloc(sizeof(*event), GFP_KERNEL);
3862 if (!event)
3863 return -ENOMEM;
3864
Michal Hocko1af8efe2011-07-26 16:08:24 -07003865 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003866
3867 event->eventfd = eventfd;
3868 list_add(&event->list, &memcg->oom_notify);
3869
3870 /* already in OOM ? */
Michal Hocko79dfdac2011-07-26 16:08:23 -07003871 if (atomic_read(&memcg->under_oom))
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003872 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07003873 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003874
3875 return 0;
3876}
3877
Tejun Heo59b6f872013-11-22 18:20:43 -05003878static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003879 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003880{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003881 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003882
Michal Hocko1af8efe2011-07-26 16:08:24 -07003883 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003884
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003885 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003886 if (ev->eventfd == eventfd) {
3887 list_del(&ev->list);
3888 kfree(ev);
3889 }
3890 }
3891
Michal Hocko1af8efe2011-07-26 16:08:24 -07003892 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003893}
3894
Tejun Heo2da8ca82013-12-05 12:28:04 -05003895static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003896{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003897 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003898
Tejun Heo791badb2013-12-05 12:28:02 -05003899 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3900 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003901 return 0;
3902}
3903
Tejun Heo182446d2013-08-08 20:11:24 -04003904static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003905 struct cftype *cft, u64 val)
3906{
Tejun Heo182446d2013-08-08 20:11:24 -04003907 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003908
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003909	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07003910 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003911 return -EINVAL;
3912
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003913 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07003914 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003915 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003916
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003917 return 0;
3918}
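/*
 * Example (illustrative): reading memory.oom_control reports
 * "oom_kill_disable <0|1>" and "under_oom <0|1>"; writing 1 from a non-root
 * cgroup disables the OOM killer for that memcg:
 *
 *   # echo 1 > memory.oom_control
 */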
3919
Andrew Mortonc255a452012-07-31 16:43:02 -07003920#ifdef CONFIG_MEMCG_KMEM
Glauber Costacbe128e32012-04-09 19:36:34 -03003921static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00003922{
Glauber Costa55007d82012-12-18 14:22:38 -08003923 int ret;
3924
Glauber Costa55007d82012-12-18 14:22:38 -08003925 ret = memcg_propagate_kmem(memcg);
3926 if (ret)
3927 return ret;
Glauber Costa2633d7a2012-12-18 14:22:34 -08003928
Glauber Costa1d62e432012-04-09 19:36:33 -03003929 return mem_cgroup_sockets_init(memcg, ss);
Michel Lespinasse573b4002013-04-29 15:08:13 -07003930}
Glauber Costae5671df2011-12-11 21:47:01 +00003931
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003932static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3933{
Vladimir Davydov2788cf0c2015-02-12 14:59:38 -08003934 struct cgroup_subsys_state *css;
3935 struct mem_cgroup *parent, *child;
3936 int kmemcg_id;
3937
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003938 if (!memcg->kmem_acct_active)
3939 return;
3940
3941 /*
3942	 * Clear the 'active' flag before clearing memcg_caches array entries.
3943 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
3944 * guarantees no cache will be created for this cgroup after we are
3945 * done (see memcg_create_kmem_cache()).
3946 */
3947 memcg->kmem_acct_active = false;
3948
3949 memcg_deactivate_kmem_caches(memcg);
Vladimir Davydov2788cf0c2015-02-12 14:59:38 -08003950
3951 kmemcg_id = memcg->kmemcg_id;
3952 BUG_ON(kmemcg_id < 0);
3953
3954 parent = parent_mem_cgroup(memcg);
3955 if (!parent)
3956 parent = root_mem_cgroup;
3957
3958 /*
3959 * Change kmemcg_id of this cgroup and all its descendants to the
3960 * parent's id, and then move all entries from this cgroup's list_lrus
3961	 * to the parent's. After we have finished, all list_lrus
3962 * corresponding to this cgroup are guaranteed to remain empty. The
3963 * ordering is imposed by list_lru_node->lock taken by
3964 * memcg_drain_all_list_lrus().
3965 */
3966 css_for_each_descendant_pre(css, &memcg->css) {
3967 child = mem_cgroup_from_css(css);
3968 BUG_ON(child->kmemcg_id != kmemcg_id);
3969 child->kmemcg_id = parent->kmemcg_id;
3970 if (!memcg->use_hierarchy)
3971 break;
3972 }
3973 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
3974
3975 memcg_free_cache_id(kmemcg_id);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003976}
3977
Li Zefan10d5ebf2013-07-08 16:00:33 -07003978static void memcg_destroy_kmem(struct mem_cgroup *memcg)
Glauber Costad1a4c0b2011-12-11 21:47:04 +00003979{
Vladimir Davydovf48b80a2015-02-12 14:59:56 -08003980 if (memcg->kmem_acct_activated) {
3981 memcg_destroy_kmem_caches(memcg);
3982 static_key_slow_dec(&memcg_kmem_enabled_key);
3983 WARN_ON(page_counter_read(&memcg->kmem));
3984 }
Glauber Costa1d62e432012-04-09 19:36:33 -03003985 mem_cgroup_sockets_destroy(memcg);
Li Zefan10d5ebf2013-07-08 16:00:33 -07003986}
Glauber Costae5671df2011-12-11 21:47:01 +00003987#else
Glauber Costacbe128e32012-04-09 19:36:34 -03003988static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00003989{
3990 return 0;
3991}
Glauber Costad1a4c0b2011-12-11 21:47:04 +00003992
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003993static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3994{
3995}
3996
Li Zefan10d5ebf2013-07-08 16:00:33 -07003997static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3998{
3999}
Glauber Costae5671df2011-12-11 21:47:01 +00004000#endif
4001
Tejun Heo79bd9812013-11-22 18:20:42 -05004002/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004003 * DO NOT USE IN NEW FILES.
4004 *
4005 * "cgroup.event_control" implementation.
4006 *
4007 * This is way over-engineered. It tries to support fully configurable
4008	 * events for each user. Such a level of flexibility is completely
4009	 * unnecessary, especially in light of the planned unified hierarchy.
4010 *
4011 * Please deprecate this and replace with something simpler if at all
4012 * possible.
4013 */
4014
4015/*
Tejun Heo79bd9812013-11-22 18:20:42 -05004016 * Unregister event and free resources.
4017 *
4018 * Gets called from workqueue.
4019 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004020static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05004021{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004022 struct mem_cgroup_event *event =
4023 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05004024 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004025
4026 remove_wait_queue(event->wqh, &event->wait);
4027
Tejun Heo59b6f872013-11-22 18:20:43 -05004028 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05004029
4030 /* Notify userspace the event is going away. */
4031 eventfd_signal(event->eventfd, 1);
4032
4033 eventfd_ctx_put(event->eventfd);
4034 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05004035 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004036}
4037
4038/*
4039 * Gets called on POLLHUP on eventfd when user closes it.
4040 *
4041 * Called with wqh->lock held and interrupts disabled.
4042 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004043static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
4044 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05004045{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004046 struct mem_cgroup_event *event =
4047 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05004048 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004049 unsigned long flags = (unsigned long)key;
4050
4051 if (flags & POLLHUP) {
4052 /*
4053 * If the event has been detached at cgroup removal, we
4054	 * can simply return, knowing the other side will clean up
4055 * for us.
4056 *
4057 * We can't race against event freeing since the other
4058 * side will require wqh->lock via remove_wait_queue(),
4059 * which we hold.
4060 */
Tejun Heofba94802013-11-22 18:20:43 -05004061 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004062 if (!list_empty(&event->list)) {
4063 list_del_init(&event->list);
4064 /*
4065	 * We are in atomic context, but memcg_event_remove()
4066	 * may sleep, so we have to call it from a workqueue.
4067 */
4068 schedule_work(&event->remove);
4069 }
Tejun Heofba94802013-11-22 18:20:43 -05004070 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004071 }
4072
4073 return 0;
4074}
4075
Tejun Heo3bc942f2013-11-22 18:20:44 -05004076static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05004077 wait_queue_head_t *wqh, poll_table *pt)
4078{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004079 struct mem_cgroup_event *event =
4080 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05004081
4082 event->wqh = wqh;
4083 add_wait_queue(wqh, &event->wait);
4084}
4085
4086/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004087 * DO NOT USE IN NEW FILES.
4088 *
Tejun Heo79bd9812013-11-22 18:20:42 -05004089 * Parse input and register new cgroup event handler.
4090 *
4091 * Input must be in format '<event_fd> <control_fd> <args>'.
4092 * Interpretation of args is defined by control file implementation.
4093 */
Tejun Heo451af502014-05-13 12:16:21 -04004094static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4095 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05004096{
Tejun Heo451af502014-05-13 12:16:21 -04004097 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05004098 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004099 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05004100 struct cgroup_subsys_state *cfile_css;
4101 unsigned int efd, cfd;
4102 struct fd efile;
4103 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05004104 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05004105 char *endp;
4106 int ret;
4107
Tejun Heo451af502014-05-13 12:16:21 -04004108 buf = strstrip(buf);
4109
4110 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004111 if (*endp != ' ')
4112 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004113 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004114
Tejun Heo451af502014-05-13 12:16:21 -04004115 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004116 if ((*endp != ' ') && (*endp != '\0'))
4117 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004118 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004119
4120 event = kzalloc(sizeof(*event), GFP_KERNEL);
4121 if (!event)
4122 return -ENOMEM;
4123
Tejun Heo59b6f872013-11-22 18:20:43 -05004124 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004125 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004126 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4127 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4128 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05004129
4130 efile = fdget(efd);
4131 if (!efile.file) {
4132 ret = -EBADF;
4133 goto out_kfree;
4134 }
4135
4136 event->eventfd = eventfd_ctx_fileget(efile.file);
4137 if (IS_ERR(event->eventfd)) {
4138 ret = PTR_ERR(event->eventfd);
4139 goto out_put_efile;
4140 }
4141
4142 cfile = fdget(cfd);
4143 if (!cfile.file) {
4144 ret = -EBADF;
4145 goto out_put_eventfd;
4146 }
4147
4148	 /* the process needs read permission on the control file */
4149 /* AV: shouldn't we check that it's been opened for read instead? */
4150 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4151 if (ret < 0)
4152 goto out_put_cfile;
4153
Tejun Heo79bd9812013-11-22 18:20:42 -05004154 /*
Tejun Heofba94802013-11-22 18:20:43 -05004155 * Determine the event callbacks and set them in @event. This used
4156 * to be done via struct cftype but cgroup core no longer knows
4157 * about these events. The following is crude but the whole thing
4158 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05004159 *
4160 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05004161 */
Al Virob5830432014-10-31 01:22:04 -04004162 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05004163
4164 if (!strcmp(name, "memory.usage_in_bytes")) {
4165 event->register_event = mem_cgroup_usage_register_event;
4166 event->unregister_event = mem_cgroup_usage_unregister_event;
4167 } else if (!strcmp(name, "memory.oom_control")) {
4168 event->register_event = mem_cgroup_oom_register_event;
4169 event->unregister_event = mem_cgroup_oom_unregister_event;
4170 } else if (!strcmp(name, "memory.pressure_level")) {
4171 event->register_event = vmpressure_register_event;
4172 event->unregister_event = vmpressure_unregister_event;
4173 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05004174 event->register_event = memsw_cgroup_usage_register_event;
4175 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05004176 } else {
4177 ret = -EINVAL;
4178 goto out_put_cfile;
4179 }
4180
4181 /*
Tejun Heob5557c42013-11-22 18:20:42 -05004182	 * Verify that @cfile belongs to @css. Also, remaining events are
4183 * automatically removed on cgroup destruction but the removal is
4184 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05004185 */
Al Virob5830432014-10-31 01:22:04 -04004186 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04004187 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05004188 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05004189 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05004190 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05004191 if (cfile_css != css) {
4192 css_put(cfile_css);
4193 goto out_put_cfile;
4194 }
Tejun Heo79bd9812013-11-22 18:20:42 -05004195
Tejun Heo451af502014-05-13 12:16:21 -04004196 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05004197 if (ret)
4198 goto out_put_css;
4199
4200 efile.file->f_op->poll(efile.file, &event->pt);
4201
Tejun Heofba94802013-11-22 18:20:43 -05004202 spin_lock(&memcg->event_list_lock);
4203 list_add(&event->list, &memcg->event_list);
4204 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004205
4206 fdput(cfile);
4207 fdput(efile);
4208
Tejun Heo451af502014-05-13 12:16:21 -04004209 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05004210
4211out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05004212 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004213out_put_cfile:
4214 fdput(cfile);
4215out_put_eventfd:
4216 eventfd_ctx_put(event->eventfd);
4217out_put_efile:
4218 fdput(efile);
4219out_kfree:
4220 kfree(event);
4221
4222 return ret;
4223}
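/*
 * Example registration through cgroup.event_control (illustrative, legacy
 * hierarchy only): assuming fd 3 is an eventfd and fd 4 is an open
 * memory.usage_in_bytes, a usage threshold at 100M could be armed with
 *
 *   echo "3 4 104857600" > cgroup.event_control
 *
 * after which the eventfd is signalled whenever usage crosses 100M in
 * either direction.
 */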
4224
Johannes Weiner241994ed2015-02-11 15:26:06 -08004225static struct cftype mem_cgroup_legacy_files[] = {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004226 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004227 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004228 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004229 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004230 },
4231 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004232 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004233 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004234 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004235 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004236 },
4237 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004238 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004239 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004240 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004241 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004242 },
4243 {
Balbir Singh296c81d2009-09-23 15:56:36 -07004244 .name = "soft_limit_in_bytes",
4245 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004246 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004247 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07004248 },
4249 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004250 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004251 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004252 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004253 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004254 },
Balbir Singh8697d332008-02-07 00:13:59 -08004255 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004256 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004257 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004258 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004259 {
4260 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04004261 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004262 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08004263 {
4264 .name = "use_hierarchy",
4265 .write_u64 = mem_cgroup_hierarchy_write,
4266 .read_u64 = mem_cgroup_hierarchy_read,
4267 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004268 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05004269 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04004270 .write = memcg_write_event_control,
Tejun Heo79bd9812013-11-22 18:20:42 -05004271 .flags = CFTYPE_NO_PREFIX,
4272 .mode = S_IWUGO,
4273 },
4274 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004275 .name = "swappiness",
4276 .read_u64 = mem_cgroup_swappiness_read,
4277 .write_u64 = mem_cgroup_swappiness_write,
4278 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004279 {
4280 .name = "move_charge_at_immigrate",
4281 .read_u64 = mem_cgroup_move_charge_read,
4282 .write_u64 = mem_cgroup_move_charge_write,
4283 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004284 {
4285 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004286 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004287 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004288 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4289 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004290 {
4291 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004292 },
Ying Han406eb0c2011-05-26 16:25:37 -07004293#ifdef CONFIG_NUMA
4294 {
4295 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004296 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07004297 },
4298#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004299#ifdef CONFIG_MEMCG_KMEM
4300 {
4301 .name = "kmem.limit_in_bytes",
4302 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004303 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004304 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004305 },
4306 {
4307 .name = "kmem.usage_in_bytes",
4308 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004309 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004310 },
4311 {
4312 .name = "kmem.failcnt",
4313 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004314 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004315 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004316 },
4317 {
4318 .name = "kmem.max_usage_in_bytes",
4319 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004320 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004321 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004322 },
Glauber Costa749c5412012-12-18 14:23:01 -08004323#ifdef CONFIG_SLABINFO
4324 {
4325 .name = "kmem.slabinfo",
Vladimir Davydovb0475012014-12-10 15:44:19 -08004326 .seq_start = slab_start,
4327 .seq_next = slab_next,
4328 .seq_stop = slab_stop,
4329 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08004330 },
4331#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004332#endif
Tejun Heo6bc10342012-04-01 12:09:55 -07004333 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07004334};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004335
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004336static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004337{
4338 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004339 struct mem_cgroup_per_zone *mz;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004340 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004341 /*
4342	 * This routine is called for each possible node.
4343	 * But it is a BUG to call kmalloc() against an offline node.
4344	 *
4345	 * TODO: this routine can waste a lot of memory for nodes which will
4346 * never be onlined. It's better to use memory hotplug callback
4347 * function.
4348 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004349 if (!node_state(node, N_NORMAL_MEMORY))
4350 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004351 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004352 if (!pn)
4353 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004354
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004355 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4356 mz = &pn->zoneinfo[zone];
Hugh Dickinsbea8c152012-11-16 14:14:54 -08004357 lruvec_init(&mz->lruvec);
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -07004358 mz->usage_in_excess = 0;
4359 mz->on_tree = false;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004360 mz->memcg = memcg;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004361 }
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004362 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004363 return 0;
4364}
4365
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004366static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004367{
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004368 kfree(memcg->nodeinfo[node]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004369}
4370
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004371static struct mem_cgroup *mem_cgroup_alloc(void)
4372{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004373 struct mem_cgroup *memcg;
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004374 size_t size;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004375
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004376 size = sizeof(struct mem_cgroup);
4377 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004378
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004379 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004380 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004381 return NULL;
4382
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004383 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4384 if (!memcg->stat)
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004385 goto out_free;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004386 spin_lock_init(&memcg->pcp_counter_lock);
4387 return memcg;
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004388
4389out_free:
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004390 kfree(memcg);
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004391 return NULL;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004392}
4393
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004394/*
Glauber Costac8b2a362012-12-18 14:22:13 -08004395 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4396 * (scanning all at force_empty is too costly...)
4397 *
4398 * Instead of clearing all references at force_empty, we remember
4399 * the number of references from swap_cgroup and free the mem_cgroup when
4400 * it goes down to 0.
4401 *
4402 * Removal of cgroup itself succeeds regardless of refs from swap.
Hugh Dickins59927fb2012-03-15 15:17:07 -07004403 */
Glauber Costac8b2a362012-12-18 14:22:13 -08004404
4405static void __mem_cgroup_free(struct mem_cgroup *memcg)
Hugh Dickins59927fb2012-03-15 15:17:07 -07004406{
Glauber Costac8b2a362012-12-18 14:22:13 -08004407 int node;
Hugh Dickins59927fb2012-03-15 15:17:07 -07004408
Andrew Mortonbb4cc1a2013-09-24 15:27:40 -07004409 mem_cgroup_remove_from_trees(memcg);
Glauber Costac8b2a362012-12-18 14:22:13 -08004410
4411 for_each_node(node)
4412 free_mem_cgroup_per_zone_info(memcg, node);
4413
4414 free_percpu(memcg->stat);
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004415 kfree(memcg);
Hugh Dickins59927fb2012-03-15 15:17:07 -07004416}
Glauber Costa3afe36b2012-05-29 15:07:10 -07004417
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004418/*
4419 * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled.
4420 */
Glauber Costae1aab162011-12-11 21:47:03 +00004421struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004422{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004423 if (!memcg->memory.parent)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004424 return NULL;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004425 return mem_cgroup_from_counter(memcg->memory.parent, memory);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004426}
Glauber Costae1aab162011-12-11 21:47:03 +00004427EXPORT_SYMBOL(parent_mem_cgroup);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004428
Li Zefan0eb253e2009-01-15 13:51:25 -08004429static struct cgroup_subsys_state * __ref
Tejun Heoeb954192013-08-08 20:11:23 -04004430mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004431{
Glauber Costad142e3e2013-02-22 16:34:52 -08004432 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004433 long error = -ENOMEM;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004434 int node;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004435
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004436 memcg = mem_cgroup_alloc();
4437 if (!memcg)
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004438 return ERR_PTR(error);
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004439
Bob Liu3ed28fa2012-01-12 17:19:04 -08004440 for_each_node(node)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004441 if (alloc_mem_cgroup_per_zone_info(memcg, node))
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004442 goto free_out;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004443
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004444 /* root ? */
Tejun Heoeb954192013-08-08 20:11:23 -04004445 if (parent_css == NULL) {
Hillf Dantona41c58a2011-12-19 17:11:57 -08004446 root_mem_cgroup = memcg;
Tejun Heodbee2272015-05-22 17:13:20 -04004447 mem_cgroup_root_css = &memcg->css;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004448 page_counter_init(&memcg->memory, NULL);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004449 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004450 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004451 page_counter_init(&memcg->memsw, NULL);
4452 page_counter_init(&memcg->kmem, NULL);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004453 }
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004454
Glauber Costad142e3e2013-02-22 16:34:52 -08004455 memcg->last_scanned_node = MAX_NUMNODES;
4456 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08004457 memcg->move_charge_at_immigrate = 0;
4458 mutex_init(&memcg->thresholds_lock);
4459 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004460 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05004461 INIT_LIST_HEAD(&memcg->event_list);
4462 spin_lock_init(&memcg->event_list_lock);
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004463#ifdef CONFIG_MEMCG_KMEM
4464 memcg->kmemcg_id = -1;
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004465#endif
Glauber Costad142e3e2013-02-22 16:34:52 -08004466
4467 return &memcg->css;
4468
4469free_out:
4470 __mem_cgroup_free(memcg);
4471 return ERR_PTR(error);
4472}
4473
4474static int
Tejun Heoeb954192013-08-08 20:11:23 -04004475mem_cgroup_css_online(struct cgroup_subsys_state *css)
Glauber Costad142e3e2013-02-22 16:34:52 -08004476{
Tejun Heoeb954192013-08-08 20:11:23 -04004477 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04004478 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004479 int ret;
Glauber Costad142e3e2013-02-22 16:34:52 -08004480
Tejun Heo15a4c832014-05-04 15:09:14 -04004481 if (css->id > MEM_CGROUP_ID_MAX)
Li Zefan4219b2d2013-09-23 16:56:29 +08004482 return -ENOSPC;
4483
Tejun Heo63876982013-08-08 20:11:23 -04004484 if (!parent)
Glauber Costad142e3e2013-02-22 16:34:52 -08004485 return 0;
4486
Glauber Costa09998212013-02-22 16:34:55 -08004487 mutex_lock(&memcg_create_mutex);
Glauber Costad142e3e2013-02-22 16:34:52 -08004488
4489 memcg->use_hierarchy = parent->use_hierarchy;
4490 memcg->oom_kill_disable = parent->oom_kill_disable;
4491 memcg->swappiness = mem_cgroup_swappiness(parent);
4492
4493 if (parent->use_hierarchy) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004494 page_counter_init(&memcg->memory, &parent->memory);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004495 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004496 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004497 page_counter_init(&memcg->memsw, &parent->memsw);
4498 page_counter_init(&memcg->kmem, &parent->kmem);
Glauber Costa55007d82012-12-18 14:22:38 -08004499
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004500 /*
Li Zefan8d76a972013-07-08 16:00:36 -07004501 * No need to take a reference to the parent because cgroup
4502 * core guarantees its existence.
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004503 */
Balbir Singh18f59ea2009-01-07 18:08:07 -08004504 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004505 page_counter_init(&memcg->memory, NULL);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004506 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004507 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004508 page_counter_init(&memcg->memsw, NULL);
4509 page_counter_init(&memcg->kmem, NULL);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07004510 /*
4511 * Deeper hierachy with use_hierarchy == false doesn't make
4512 * much sense so let cgroup subsystem know about this
4513 * unfortunate state in our controller.
4514 */
Glauber Costad142e3e2013-02-22 16:34:52 -08004515 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05004516 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004517 }
Glauber Costa09998212013-02-22 16:34:55 -08004518 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004519
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004520 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
4521 if (ret)
4522 return ret;
4523
4524 /*
4525 * Make sure the memcg is initialized: mem_cgroup_iter()
4526 * orders reading memcg->initialized against its callers
4527 * reading the memcg members.
4528 */
4529 smp_store_release(&memcg->initialized, 1);
4530
4531 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004532}
4533
Tejun Heoeb954192013-08-08 20:11:23 -04004534static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004535{
Tejun Heoeb954192013-08-08 20:11:23 -04004536 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004537 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05004538
4539 /*
4540 * Unregister events and notify userspace.
4541	 * Notify userspace about the cgroup's removal only after rmdir of the
4542	 * cgroup directory to avoid races between userspace and kernelspace.
4543 */
Tejun Heofba94802013-11-22 18:20:43 -05004544 spin_lock(&memcg->event_list_lock);
4545 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004546 list_del_init(&event->list);
4547 schedule_work(&event->remove);
4548 }
Tejun Heofba94802013-11-22 18:20:43 -05004549 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004550
Michal Hocko33cb8762013-07-31 13:53:51 -07004551 vmpressure_cleanup(&memcg->vmpressure);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08004552
4553 memcg_deactivate_kmem(memcg);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004554}
4555
Tejun Heoeb954192013-08-08 20:11:23 -04004556static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004557{
Tejun Heoeb954192013-08-08 20:11:23 -04004558 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004559
Li Zefan10d5ebf2013-07-08 16:00:33 -07004560 memcg_destroy_kmem(memcg);
Li Zefan465939a2013-07-08 16:00:38 -07004561 __mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004562}
4563
Tejun Heo1ced9532014-07-08 18:02:57 -04004564/**
4565 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4566 * @css: the target css
4567 *
4568 * Reset the states of the mem_cgroup associated with @css. This is
4569 * invoked when the userland requests disabling on the default hierarchy
4570 * but the memcg is pinned through dependency. The memcg should stop
4571 * applying policies and should revert to the vanilla state as it may be
4572 * made visible again.
4573 *
4574 * The current implementation only resets the essential configurations.
4575 * This needs to be expanded to cover all the visible parts.
4576 */
4577static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4578{
4579 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4580
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004581 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4582 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4583 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004584 memcg->low = 0;
4585 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004586 memcg->soft_limit = PAGE_COUNTER_MAX;
Tejun Heo1ced9532014-07-08 18:02:57 -04004587}
4588
Daisuke Nishimura02491442010-03-10 15:22:17 -08004589#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004590/* Handlers for move charge at task migration. */
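/*
 * Pre-charge @count pages against mc.to before moving charges: a single
 * bulk charge without reclaim is attempted first, and if that fails the
 * charges are retried one page at a time with reclaim so that the move
 * can still make partial progress.
 */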
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004591static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004592{
Johannes Weiner05b84302014-08-06 16:05:59 -07004593 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07004594
4595 /* Try a single bulk charge without reclaim first */
Johannes Weiner00501b52014-08-08 14:19:20 -07004596 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07004597 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004598 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004599 return ret;
4600 }
Johannes Weiner692e7c42014-08-06 16:05:57 -07004601 if (ret == -EINTR) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004602 cancel_charge(root_mem_cgroup, count);
Johannes Weiner692e7c42014-08-06 16:05:57 -07004603 return ret;
4604 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004605
4606 /* Try charges one by one with reclaim */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004607 while (count--) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004608 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
Johannes Weiner9476db92014-08-06 16:05:55 -07004609 /*
4610 * In case of failure, any residual charges against
4611 * mc.to will be dropped by mem_cgroup_clear_mc()
Johannes Weiner692e7c42014-08-06 16:05:57 -07004612 * later on. However, cancel any charges that are
4613 * bypassed to root right away or they'll be lost.
Johannes Weiner9476db92014-08-06 16:05:55 -07004614 */
Johannes Weiner692e7c42014-08-06 16:05:57 -07004615 if (ret == -EINTR)
Johannes Weiner00501b52014-08-08 14:19:20 -07004616 cancel_charge(root_mem_cgroup, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004617 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004618 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004619 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07004620 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004621 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004622 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004623}
4624
4625/**
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004626 * get_mctgt_type - get target type of moving charge
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004627 * @vma: the vma to which the pte to be checked belongs
4628 * @addr: the address corresponding to the pte to be checked
4629 * @ptent: the pte to be checked
Daisuke Nishimura02491442010-03-10 15:22:17 -08004630 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004631 *
4632 * Returns
4633 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4634 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4635 * move charge. If @target is not NULL, the page is stored in target->page
4636 * with an extra refcount taken (callers should handle it).
Daisuke Nishimura02491442010-03-10 15:22:17 -08004637 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4638 * target for charge migration. If @target is not NULL, the entry is stored
4639 * in target->ent.
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004640 *
4641 * Called with pte lock held.
4642 */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004643union mc_target {
4644 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004645 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004646};
4647
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004648enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004649 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004650 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004651 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004652};
4653
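/*
 * For a present pte, return the backing page with an extra reference if
 * its type (anonymous or file-backed) matches the requested move flags,
 * or NULL if it is not a move target.
 */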
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004654static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4655 unsigned long addr, pte_t ptent)
4656{
4657 struct page *page = vm_normal_page(vma, addr, ptent);
4658
4659 if (!page || !page_mapped(page))
4660 return NULL;
4661 if (PageAnon(page)) {
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004662 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004663 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004664 } else {
4665 if (!(mc.flags & MOVE_FILE))
4666 return NULL;
4667 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004668 if (!get_page_unless_zero(page))
4669 return NULL;
4670
4671 return page;
4672}
4673
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004674#ifdef CONFIG_SWAP
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004675static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4676 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4677{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004678 struct page *page = NULL;
4679 swp_entry_t ent = pte_to_swp_entry(ptent);
4680
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004681 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004682 return NULL;
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004683 /*
4684	 * Because lookup_swap_cache() updates some statistics counters,
4685 * we call find_get_page() with swapper_space directly.
4686 */
Shaohua Li33806f02013-02-22 16:34:37 -08004687 page = find_get_page(swap_address_space(ent), ent.val);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004688 if (do_swap_account)
4689 entry->val = ent.val;
4690
4691 return page;
4692}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004693#else
4694static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4695 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4696{
4697 return NULL;
4698}
4699#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004700
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004701static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4702 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4703{
4704 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004705 struct address_space *mapping;
4706 pgoff_t pgoff;
4707
4708 if (!vma->vm_file) /* anonymous vma */
4709 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004710 if (!(mc.flags & MOVE_FILE))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004711 return NULL;
4712
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004713 mapping = vma->vm_file->f_mapping;
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004714 pgoff = linear_page_index(vma, addr);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004715
4716 /* page is moved even if it's not RSS of this task(page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004717#ifdef CONFIG_SWAP
4718 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07004719 if (shmem_mapping(mapping)) {
4720 page = find_get_entry(mapping, pgoff);
4721 if (radix_tree_exceptional_entry(page)) {
4722 swp_entry_t swp = radix_to_swp_entry(page);
4723 if (do_swap_account)
4724 *entry = swp;
4725 page = find_get_page(swap_address_space(swp), swp.val);
4726 }
4727 } else
4728 page = find_get_page(mapping, pgoff);
4729#else
4730 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004731#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004732 return page;
4733}
4734
Chen Gangb1b0dea2015-04-14 15:47:35 -07004735/**
4736 * mem_cgroup_move_account - move account of the page
4737 * @page: the page
4738 * @nr_pages: number of regular pages (>1 for huge pages)
4739 * @from: mem_cgroup which the page is moved from.
4740 * @to: mem_cgroup which the page is moved to. @from != @to.
4741 *
4742 * The caller must confirm following.
4743 * - page is not on LRU (isolate_page() is useful.)
4744 * - compound_lock is held when nr_pages > 1
4745 *
4746 * This function doesn't do "charge" to the new cgroup and doesn't do
4747 * "uncharge" from the old cgroup.
4748 */
4749static int mem_cgroup_move_account(struct page *page,
4750 unsigned int nr_pages,
4751 struct mem_cgroup *from,
4752 struct mem_cgroup *to)
4753{
4754 unsigned long flags;
4755 int ret;
Greg Thelen7c9d3ff2015-05-22 17:13:16 -04004756 bool anon;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004757
4758 VM_BUG_ON(from == to);
4759 VM_BUG_ON_PAGE(PageLRU(page), page);
4760 /*
4761 * The page is isolated from LRU. So, collapse function
4762 * will not handle this page. But page splitting can happen.
4763 * Do this check under compound_page_lock(). The caller should
4764 * hold it.
4765 */
4766 ret = -EBUSY;
4767 if (nr_pages > 1 && !PageTransHuge(page))
4768 goto out;
4769
4770 /*
4771 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
4772 * of its source page while we change it: page migration takes
4773 * both pages off the LRU, but page cache replacement doesn't.
4774 */
4775 if (!trylock_page(page))
4776 goto out;
4777
4778 ret = -EINVAL;
4779 if (page->mem_cgroup != from)
4780 goto out_unlock;
4781
Greg Thelen7c9d3ff2015-05-22 17:13:16 -04004782 anon = PageAnon(page);
4783
Chen Gangb1b0dea2015-04-14 15:47:35 -07004784 spin_lock_irqsave(&from->move_lock, flags);
4785
Greg Thelen7c9d3ff2015-05-22 17:13:16 -04004786 if (!anon && page_mapped(page)) {
Chen Gangb1b0dea2015-04-14 15:47:35 -07004787 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4788 nr_pages);
4789 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4790 nr_pages);
4791 }
4792
Greg Thelen7c9d3ff2015-05-22 17:13:16 -04004793 /*
4794 * move_lock grabbed above and caller set from->moving_account, so
4795 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4796 * So mapping should be stable for dirty pages.
4797 */
4798 if (!anon && PageDirty(page)) {
4799 struct address_space *mapping = page_mapping(page);
4800
4801 if (mapping_cap_account_dirty(mapping)) {
4802 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4803 nr_pages);
4804 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4805 nr_pages);
4806 }
4807 }
4808
Chen Gangb1b0dea2015-04-14 15:47:35 -07004809 if (PageWriteback(page)) {
4810 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4811 nr_pages);
4812 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4813 nr_pages);
4814 }
4815
4816 /*
4817 * It is safe to change page->mem_cgroup here because the page
4818 * is referenced, charged, and isolated - we can't race with
4819 * uncharging, charging, migration, or LRU putback.
4820 */
4821
4822 /* caller should have done css_get */
4823 page->mem_cgroup = to;
4824 spin_unlock_irqrestore(&from->move_lock, flags);
4825
4826 ret = 0;
4827
4828 local_irq_disable();
4829 mem_cgroup_charge_statistics(to, page, nr_pages);
4830 memcg_check_events(to, page);
4831 mem_cgroup_charge_statistics(from, page, -nr_pages);
4832 memcg_check_events(from, page);
4833 local_irq_enable();
4834out_unlock:
4835 unlock_page(page);
4836out:
4837 return ret;
4838}
4839
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004840static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004841 unsigned long addr, pte_t ptent, union mc_target *target)
4842{
Daisuke Nishimura02491442010-03-10 15:22:17 -08004843 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004844 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004845 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004846
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004847 if (pte_present(ptent))
4848 page = mc_handle_present_pte(vma, addr, ptent);
4849 else if (is_swap_pte(ptent))
4850 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004851 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004852 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004853
4854 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004855 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004856 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004857 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004858		 * Only do a loose check without serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08004859		 * mem_cgroup_move_account() checks whether the page
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004860		 * is valid under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08004861 */
Johannes Weiner1306a852014-12-10 15:44:52 -08004862 if (page->mem_cgroup == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004863 ret = MC_TARGET_PAGE;
4864 if (target)
4865 target->page = page;
4866 }
4867 if (!ret || !target)
4868 put_page(page);
4869 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004870	/* There is a swap entry and the page doesn't exist or isn't charged */
4871 if (ent.val && !ret &&
Li Zefan34c00c32013-09-23 16:56:01 +08004872 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07004873 ret = MC_TARGET_SWAP;
4874 if (target)
4875 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004876 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004877 return ret;
4878}
4879
Naoya Horiguchi12724852012-03-21 16:34:28 -07004880#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4881/*
4882 * We don't consider swapping or file mapped pages because THP does not
4883 * support them for now.
4884 * Caller should make sure that pmd_trans_huge(pmd) is true.
4885 */
4886static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4887 unsigned long addr, pmd_t pmd, union mc_target *target)
4888{
4889 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004890 enum mc_target_type ret = MC_TARGET_NONE;
4891
4892 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08004893 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004894 if (!(mc.flags & MOVE_ANON))
Naoya Horiguchi12724852012-03-21 16:34:28 -07004895 return ret;
Johannes Weiner1306a852014-12-10 15:44:52 -08004896 if (page->mem_cgroup == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004897 ret = MC_TARGET_PAGE;
4898 if (target) {
4899 get_page(page);
4900 target->page = page;
4901 }
4902 }
4903 return ret;
4904}
4905#else
4906static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4907 unsigned long addr, pmd_t pmd, union mc_target *target)
4908{
4909 return MC_TARGET_NONE;
4910}
4911#endif
4912
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004913static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4914 unsigned long addr, unsigned long end,
4915 struct mm_walk *walk)
4916{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004917 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004918 pte_t *pte;
4919 spinlock_t *ptl;
4920
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004921 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004922 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4923 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004924 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07004925 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004926 }
Dave Hansen03319322011-03-22 16:32:56 -07004927
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07004928 if (pmd_trans_unstable(pmd))
4929 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004930 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4931 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004932 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004933 mc.precharge++; /* increment precharge temporarily */
4934 pte_unmap_unlock(pte - 1, ptl);
4935 cond_resched();
4936
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004937 return 0;
4938}
4939
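/*
 * Walk the whole address space of @mm counting the ptes that are move
 * targets; the result sizes the precharge taken against mc.to.
 */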
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004940static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4941{
4942 unsigned long precharge;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004943
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004944 struct mm_walk mem_cgroup_count_precharge_walk = {
4945 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4946 .mm = mm,
4947 };
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004948 down_read(&mm->mmap_sem);
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004949 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004950 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004951
4952 precharge = mc.precharge;
4953 mc.precharge = 0;
4954
4955 return precharge;
4956}
4957
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004958static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4959{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004960 unsigned long precharge = mem_cgroup_count_precharge(mm);
4961
4962 VM_BUG_ON(mc.moving_task);
4963 mc.moving_task = current;
4964 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004965}
4966
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004967/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4968static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004969{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004970 struct mem_cgroup *from = mc.from;
4971 struct mem_cgroup *to = mc.to;
4972
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004973 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004974 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004975 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004976 mc.precharge = 0;
4977 }
4978 /*
4979 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4980 * we must uncharge here.
4981 */
4982 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004983 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004984 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004985 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004986 /* we must fixup refcnts and charges */
4987 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004988 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04004989 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004990 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004991
Johannes Weiner05b84302014-08-06 16:05:59 -07004992 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004993 * we charged both to->memory and to->memsw, so we
4994 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07004995 */
Johannes Weinerce00a962014-09-05 08:43:57 -04004996 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004997 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004998
Johannes Weinere8ea14c2014-12-10 15:42:42 -08004999 css_put_many(&mc.from->css, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005000
Li Zefan40503772013-07-08 16:00:34 -07005001 /* we've already done css_get(mc.to) */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005002 mc.moved_swap = 0;
5003 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005004 memcg_oom_recover(from);
5005 memcg_oom_recover(to);
5006 wake_up_all(&mc.waitq);
5007}
5008
5009static void mem_cgroup_clear_mc(void)
5010{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005011 /*
5012 * we must clear moving_task before waking up waiters at the end of
5013 * task migration.
5014 */
5015 mc.moving_task = NULL;
5016 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005017 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005018 mc.from = NULL;
5019 mc.to = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005020 spin_unlock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005021}
5022
Tejun Heoeb954192013-08-08 20:11:23 -04005023static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005024 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005025{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005026 struct task_struct *p = cgroup_taskset_first(tset);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005027 int ret = 0;
Tejun Heoeb954192013-08-08 20:11:23 -04005028 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005029 unsigned long move_flags;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005030
Glauber Costaee5e8472013-02-22 16:34:50 -08005031 /*
5032	 * We are now committed to this value whatever it is. Changes in this
5033 * tunable will only affect upcoming migrations, not the current one.
5034 * So we need to save it, and keep it going.
5035 */
Jason Low4db0c3c2015-04-15 16:14:08 -07005036 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005037 if (move_flags) {
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005038 struct mm_struct *mm;
5039 struct mem_cgroup *from = mem_cgroup_from_task(p);
5040
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005041 VM_BUG_ON(from == memcg);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005042
5043 mm = get_task_mm(p);
5044 if (!mm)
5045 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005046		/* We move charges only when we move an owner of the mm */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005047 if (mm->owner == p) {
5048 VM_BUG_ON(mc.from);
5049 VM_BUG_ON(mc.to);
5050 VM_BUG_ON(mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005051 VM_BUG_ON(mc.moved_charge);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005052 VM_BUG_ON(mc.moved_swap);
Johannes Weiner247b1442014-12-10 15:44:11 -08005053
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005054 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005055 mc.from = from;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005056 mc.to = memcg;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005057 mc.flags = move_flags;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005058 spin_unlock(&mc.lock);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005059 /* We set mc.moving_task later */
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005060
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005061 ret = mem_cgroup_precharge_mc(mm);
5062 if (ret)
5063 mem_cgroup_clear_mc();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005064 }
5065 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005066 }
5067 return ret;
5068}
5069
Tejun Heoeb954192013-08-08 20:11:23 -04005070static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005071 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005072{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08005073 if (mc.to)
5074 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005075}
5076
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005077static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5078 unsigned long addr, unsigned long end,
5079 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005080{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005081 int ret = 0;
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005082 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005083 pte_t *pte;
5084 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005085 enum mc_target_type target_type;
5086 union mc_target target;
5087 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005088
Naoya Horiguchi12724852012-03-21 16:34:28 -07005089 /*
5090 * We don't take compound_lock() here but no race with splitting thp
5091 * happens because:
5092 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
5093 * under splitting, which means there's no concurrent thp split,
5094 * - if another thread runs into split_huge_page() just after we
5095 * entered this if-block, the thread must wait for page table lock
5096 * to be unlocked in __split_huge_page_splitting(), where the main
5097 * part of thp split is not executed yet.
5098 */
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005099 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Hugh Dickins62ade862012-05-18 11:28:34 -07005100 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005101 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07005102 return 0;
5103 }
5104 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5105 if (target_type == MC_TARGET_PAGE) {
5106 page = target.page;
5107 if (!isolate_lru_page(page)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005108 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
Johannes Weiner1306a852014-12-10 15:44:52 -08005109 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005110 mc.precharge -= HPAGE_PMD_NR;
5111 mc.moved_charge += HPAGE_PMD_NR;
5112 }
5113 putback_lru_page(page);
5114 }
5115 put_page(page);
5116 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005117 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005118 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005119 }
5120
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005121 if (pmd_trans_unstable(pmd))
5122 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005123retry:
5124 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5125 for (; addr != end; addr += PAGE_SIZE) {
5126 pte_t ptent = *(pte++);
Daisuke Nishimura02491442010-03-10 15:22:17 -08005127 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005128
5129 if (!mc.precharge)
5130 break;
5131
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005132 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005133 case MC_TARGET_PAGE:
5134 page = target.page;
5135 if (isolate_lru_page(page))
5136 goto put;
Johannes Weiner1306a852014-12-10 15:44:52 -08005137 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005138 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005139 /* we uncharge from mc.from later. */
5140 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005141 }
5142 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005143put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005144 put_page(page);
5145 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005146 case MC_TARGET_SWAP:
5147 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07005148 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005149 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005150 /* we fixup refcnts and charges later. */
5151 mc.moved_swap++;
5152 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08005153 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005154 default:
5155 break;
5156 }
5157 }
5158 pte_unmap_unlock(pte - 1, ptl);
5159 cond_resched();
5160
5161 if (addr != end) {
5162 /*
5163 * We have consumed all precharges we got in can_attach().
5164 * We try charge one by one, but don't do any additional
5165 * charges to mc.to if we have failed in charge once in attach()
5166 * phase.
5167 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005168 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005169 if (!ret)
5170 goto retry;
5171 }
5172
5173 return ret;
5174}
5175
5176static void mem_cgroup_move_charge(struct mm_struct *mm)
5177{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005178 struct mm_walk mem_cgroup_move_charge_walk = {
5179 .pmd_entry = mem_cgroup_move_charge_pte_range,
5180 .mm = mm,
5181 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005182
5183 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08005184 /*
5185 * Signal mem_cgroup_begin_page_stat() to take the memcg's
5186 * move_lock while we're moving its pages to another memcg.
5187 * Then wait for already started RCU-only updates to finish.
5188 */
5189 atomic_inc(&mc.from->moving_account);
5190 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005191retry:
5192 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5193 /*
5194		 * Someone who is holding the mmap_sem might be waiting in
5195 * waitq. So we cancel all extra charges, wake up all waiters,
5196 * and retry. Because we cancel precharges, we might not be able
5197 * to move enough charges, but moving charge is a best-effort
5198 * feature anyway, so it wouldn't be a big problem.
5199 */
5200 __mem_cgroup_clear_mc();
5201 cond_resched();
5202 goto retry;
5203 }
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005204 /*
5205	 * When we have consumed all precharges and failed to do an
5206	 * additional charge, the page walk just aborts.
5207 */
5208 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005209 up_read(&mm->mmap_sem);
Johannes Weiner312722c2014-12-10 15:44:25 -08005210 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005211}
5212
Tejun Heoeb954192013-08-08 20:11:23 -04005213static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005214 struct cgroup_taskset *tset)
Balbir Singh67e465a2008-02-07 00:13:54 -08005215{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005216 struct task_struct *p = cgroup_taskset_first(tset);
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005217 struct mm_struct *mm = get_task_mm(p);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005218
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005219 if (mm) {
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005220 if (mc.to)
5221 mem_cgroup_move_charge(mm);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005222 mmput(mm);
5223 }
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005224 if (mc.to)
5225 mem_cgroup_clear_mc();
Balbir Singh67e465a2008-02-07 00:13:54 -08005226}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005227#else /* !CONFIG_MMU */
Tejun Heoeb954192013-08-08 20:11:23 -04005228static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005229 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005230{
5231 return 0;
5232}
Tejun Heoeb954192013-08-08 20:11:23 -04005233static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005234 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005235{
5236}
Tejun Heoeb954192013-08-08 20:11:23 -04005237static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef2012-01-31 13:47:36 +08005238 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005239{
5240}
5241#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08005242
Tejun Heof00baae2013-04-15 13:41:15 -07005243/*
5244 * Cgroup retains root cgroups across [un]mount cycles making it necessary
Tejun Heoaa6ec292014-07-09 10:08:08 -04005245 * to verify whether we're attached to the default hierarchy on each mount
5246 * attempt.
Tejun Heof00baae2013-04-15 13:41:15 -07005247 */
Tejun Heoeb954192013-08-08 20:11:23 -04005248static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
Tejun Heof00baae2013-04-15 13:41:15 -07005249{
5250 /*
Tejun Heoaa6ec292014-07-09 10:08:08 -04005251 * use_hierarchy is forced on the default hierarchy. cgroup core
Tejun Heof00baae2013-04-15 13:41:15 -07005252 * guarantees that @root doesn't have any children, so turning it
5253 * on for the root memcg is enough.
5254 */
Tejun Heoaa6ec292014-07-09 10:08:08 -04005255 if (cgroup_on_dfl(root_css->cgroup))
Vladimir Davydov7feee592015-03-12 16:26:19 -07005256 root_mem_cgroup->use_hierarchy = true;
5257 else
5258 root_mem_cgroup->use_hierarchy = false;
Tejun Heof00baae2013-04-15 13:41:15 -07005259}
5260
Johannes Weiner241994ed2015-02-11 15:26:06 -08005261static u64 memory_current_read(struct cgroup_subsys_state *css,
5262 struct cftype *cft)
5263{
5264 return mem_cgroup_usage(mem_cgroup_from_css(css), false);
5265}
5266
5267static int memory_low_show(struct seq_file *m, void *v)
5268{
5269 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005270 unsigned long low = READ_ONCE(memcg->low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005271
5272 if (low == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005273 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005274 else
5275 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5276
5277 return 0;
5278}
5279
5280static ssize_t memory_low_write(struct kernfs_open_file *of,
5281 char *buf, size_t nbytes, loff_t off)
5282{
5283 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5284 unsigned long low;
5285 int err;
5286
5287 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005288 err = page_counter_memparse(buf, "max", &low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005289 if (err)
5290 return err;
5291
5292 memcg->low = low;
5293
5294 return nbytes;
5295}
5296
5297static int memory_high_show(struct seq_file *m, void *v)
5298{
5299 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005300 unsigned long high = READ_ONCE(memcg->high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005301
5302 if (high == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005303 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005304 else
5305 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5306
5307 return 0;
5308}
5309
5310static ssize_t memory_high_write(struct kernfs_open_file *of,
5311 char *buf, size_t nbytes, loff_t off)
5312{
5313 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5314 unsigned long high;
5315 int err;
5316
5317 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005318 err = page_counter_memparse(buf, "max", &high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005319 if (err)
5320 return err;
5321
5322 memcg->high = high;
5323
5324 return nbytes;
5325}
5326
5327static int memory_max_show(struct seq_file *m, void *v)
5328{
5329 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005330 unsigned long max = READ_ONCE(memcg->memory.limit);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005331
5332 if (max == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005333 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005334 else
5335 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5336
5337 return 0;
5338}
5339
5340static ssize_t memory_max_write(struct kernfs_open_file *of,
5341 char *buf, size_t nbytes, loff_t off)
5342{
5343 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5344 unsigned long max;
5345 int err;
5346
5347 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005348 err = page_counter_memparse(buf, "max", &max);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005349 if (err)
5350 return err;
5351
5352 err = mem_cgroup_resize_limit(memcg, max);
5353 if (err)
5354 return err;
5355
5356 return nbytes;
5357}
5358
5359static int memory_events_show(struct seq_file *m, void *v)
5360{
5361 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5362
5363 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5364 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5365 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5366 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5367
5368 return 0;
5369}
5370
5371static struct cftype memory_files[] = {
5372 {
5373 .name = "current",
5374 .read_u64 = memory_current_read,
5375 },
5376 {
5377 .name = "low",
5378 .flags = CFTYPE_NOT_ON_ROOT,
5379 .seq_show = memory_low_show,
5380 .write = memory_low_write,
5381 },
5382 {
5383 .name = "high",
5384 .flags = CFTYPE_NOT_ON_ROOT,
5385 .seq_show = memory_high_show,
5386 .write = memory_high_write,
5387 },
5388 {
5389 .name = "max",
5390 .flags = CFTYPE_NOT_ON_ROOT,
5391 .seq_show = memory_max_show,
5392 .write = memory_max_write,
5393 },
5394 {
5395 .name = "events",
5396 .flags = CFTYPE_NOT_ON_ROOT,
5397 .seq_show = memory_events_show,
5398 },
5399 { } /* terminate */
5400};
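/*
 * These files make up the memory interface on the unified hierarchy
 * (cgroup v2). A rough usage sketch from userspace, assuming cgroup2 is
 * mounted at /sys/fs/cgroup and a child group "mygroup" exists:
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.high
 *	echo max  > /sys/fs/cgroup/mygroup/memory.max
 *	cat /sys/fs/cgroup/mygroup/memory.current
 *
 * The limit files are parsed by page_counter_memparse(), so suffixed
 * byte sizes and the literal "max" are both accepted.
 */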
5401
Tejun Heo073219e2014-02-08 10:36:58 -05005402struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08005403 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08005404 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08005405 .css_offline = mem_cgroup_css_offline,
5406 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04005407 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005408 .can_attach = mem_cgroup_can_attach,
5409 .cancel_attach = mem_cgroup_cancel_attach,
Balbir Singh67e465a2008-02-07 00:13:54 -08005410 .attach = mem_cgroup_move_task,
Tejun Heof00baae2013-04-15 13:41:15 -07005411 .bind = mem_cgroup_bind,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005412 .dfl_cftypes = memory_files,
5413 .legacy_cftypes = mem_cgroup_legacy_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005414 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005415};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005416
Johannes Weiner241994ed2015-02-11 15:26:06 -08005417/**
5418 * mem_cgroup_events - count memory events against a cgroup
5419 * @memcg: the memory cgroup
5420 * @idx: the event index
5421 * @nr: the number of events to account for
5422 */
5423void mem_cgroup_events(struct mem_cgroup *memcg,
5424 enum mem_cgroup_events_index idx,
5425 unsigned int nr)
5426{
5427 this_cpu_add(memcg->stat->events[idx], nr);
5428}
5429
5430/**
5431 * mem_cgroup_low - check if memory consumption is below the normal range
5432 * @root: the highest ancestor to consider
5433 * @memcg: the memory cgroup to check
5434 *
5435 * Returns %true if memory consumption of @memcg, and that of all
5436 * configurable ancestors up to @root, is below the normal range.
5437 */
5438bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5439{
5440 if (mem_cgroup_disabled())
5441 return false;
5442
5443 /*
5444 * The toplevel group doesn't have a configurable range, so
5445 * it's never low when looked at directly, and it is not
5446 * considered an ancestor when assessing the hierarchy.
5447 */
5448
5449 if (memcg == root_mem_cgroup)
5450 return false;
5451
Michal Hocko4e54ded2015-02-27 15:51:46 -08005452 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005453 return false;
5454
5455 while (memcg != root) {
5456 memcg = parent_mem_cgroup(memcg);
5457
5458 if (memcg == root_mem_cgroup)
5459 break;
5460
Michal Hocko4e54ded2015-02-27 15:51:46 -08005461 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005462 return false;
5463 }
5464 return true;
5465}
5466
Johannes Weiner00501b52014-08-08 14:19:20 -07005467/**
5468 * mem_cgroup_try_charge - try charging a page
5469 * @page: page to charge
5470 * @mm: mm context of the victim
5471 * @gfp_mask: reclaim mode
5472 * @memcgp: charged memcg return
5473 *
5474 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5475 * pages according to @gfp_mask if necessary.
5476 *
5477 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5478 * Otherwise, an error code is returned.
5479 *
5480 * After page->mapping has been set up, the caller must finalize the
5481 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5482 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5483 */
5484int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5485 gfp_t gfp_mask, struct mem_cgroup **memcgp)
5486{
5487 struct mem_cgroup *memcg = NULL;
5488 unsigned int nr_pages = 1;
5489 int ret = 0;
5490
5491 if (mem_cgroup_disabled())
5492 goto out;
5493
5494 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005495 /*
5496 * Every swap fault against a single page tries to charge the
5497 * page, bail as early as possible. shmem_unuse() encounters
5498 * already charged pages, too. The USED bit is protected by
5499 * the page lock, which serializes swap cache removal, which
5500 * in turn serializes uncharging.
5501 */
Johannes Weiner1306a852014-12-10 15:44:52 -08005502 if (page->mem_cgroup)
Johannes Weiner00501b52014-08-08 14:19:20 -07005503 goto out;
5504 }
5505
5506 if (PageTransHuge(page)) {
5507 nr_pages <<= compound_order(page);
5508 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5509 }
5510
5511 if (do_swap_account && PageSwapCache(page))
5512 memcg = try_get_mem_cgroup_from_page(page);
5513 if (!memcg)
5514 memcg = get_mem_cgroup_from_mm(mm);
5515
5516 ret = try_charge(memcg, gfp_mask, nr_pages);
5517
5518 css_put(&memcg->css);
5519
5520 if (ret == -EINTR) {
5521 memcg = root_mem_cgroup;
5522 ret = 0;
5523 }
5524out:
5525 *memcgp = memcg;
5526 return ret;
5527}
5528
5529/**
5530 * mem_cgroup_commit_charge - commit a page charge
5531 * @page: page to charge
5532 * @memcg: memcg to charge the page to
5533 * @lrucare: page might be on LRU already
5534 *
5535 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5536 * after page->mapping has been set up. This must happen atomically
5537 * as part of the page instantiation, i.e. under the page table lock
5538 * for anonymous pages, under the page lock for page and swap cache.
5539 *
5540 * In addition, the page must not be on the LRU during the commit, to
5541 * prevent racing with task migration. If it might be, use @lrucare.
5542 *
5543 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5544 */
5545void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5546 bool lrucare)
5547{
5548 unsigned int nr_pages = 1;
5549
5550 VM_BUG_ON_PAGE(!page->mapping, page);
5551 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5552
5553 if (mem_cgroup_disabled())
5554 return;
5555 /*
5556 * Swap faults will attempt to charge the same page multiple
5557 * times. But reuse_swap_page() might have removed the page
5558 * from swapcache already, so we can't check PageSwapCache().
5559 */
5560 if (!memcg)
5561 return;
5562
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005563 commit_charge(page, memcg, lrucare);
5564
Johannes Weiner00501b52014-08-08 14:19:20 -07005565 if (PageTransHuge(page)) {
5566 nr_pages <<= compound_order(page);
5567 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5568 }
5569
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005570 local_irq_disable();
5571 mem_cgroup_charge_statistics(memcg, page, nr_pages);
5572 memcg_check_events(memcg, page);
5573 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07005574
5575 if (do_swap_account && PageSwapCache(page)) {
5576 swp_entry_t entry = { .val = page_private(page) };
5577 /*
5578 * The swap entry might not get freed for a long time,
5579 * let's not wait for it. The page already received a
5580 * memory+swap charge, drop the swap entry duplicate.
5581 */
5582 mem_cgroup_uncharge_swap(entry);
5583 }
5584}
5585
5586/**
5587 * mem_cgroup_cancel_charge - cancel a page charge
5588 * @page: page to charge
5589 * @memcg: memcg to charge the page to
5590 *
5591 * Cancel a charge transaction started by mem_cgroup_try_charge().
5592 */
5593void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
5594{
5595 unsigned int nr_pages = 1;
5596
5597 if (mem_cgroup_disabled())
5598 return;
5599 /*
5600 * Swap faults will attempt to charge the same page multiple
5601 * times. But reuse_swap_page() might have removed the page
5602 * from swapcache already, so we can't check PageSwapCache().
5603 */
5604 if (!memcg)
5605 return;
5606
5607 if (PageTransHuge(page)) {
5608 nr_pages <<= compound_order(page);
5609 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5610 }
5611
5612 cancel_charge(memcg, nr_pages);
5613}
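/*
 * A simplified sketch of the charge lifecycle from a caller's point of
 * view; map_new_page() is a hypothetical stand-in for the caller's page
 * instantiation step, and real call sites differ in locking and detail:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		return -ENOMEM;
 *	if (map_new_page(page)) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return -EFAULT;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 */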
5614
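/*
 * Flush one batch of uncharges accumulated by uncharge_list(): give the
 * pages back to the memcg's page counters (unless it is the root memcg),
 * update the per-cpu statistics and event counters, and drop the css
 * references that the charges held.
 */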
Johannes Weiner747db952014-08-08 14:19:24 -07005615static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
Johannes Weiner747db952014-08-08 14:19:24 -07005616 unsigned long nr_anon, unsigned long nr_file,
5617 unsigned long nr_huge, struct page *dummy_page)
5618{
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005619 unsigned long nr_pages = nr_anon + nr_file;
Johannes Weiner747db952014-08-08 14:19:24 -07005620 unsigned long flags;
5621
Johannes Weinerce00a962014-09-05 08:43:57 -04005622 if (!mem_cgroup_is_root(memcg)) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005623 page_counter_uncharge(&memcg->memory, nr_pages);
5624 if (do_swap_account)
5625 page_counter_uncharge(&memcg->memsw, nr_pages);
Johannes Weinerce00a962014-09-05 08:43:57 -04005626 memcg_oom_recover(memcg);
5627 }
Johannes Weiner747db952014-08-08 14:19:24 -07005628
5629 local_irq_save(flags);
5630 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5631 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5632 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5633 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005634 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005635 memcg_check_events(memcg, dummy_page);
5636 local_irq_restore(flags);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005637
5638 if (!mem_cgroup_is_root(memcg))
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005639 css_put_many(&memcg->css, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005640}
5641
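/*
 * Uncharge a list of pages in a single pass, batching the expensive
 * counter updates per memcg: consecutive pages belonging to the same
 * memcg are accumulated and flushed together via uncharge_batch().
 */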
5642static void uncharge_list(struct list_head *page_list)
5643{
5644 struct mem_cgroup *memcg = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005645 unsigned long nr_anon = 0;
5646 unsigned long nr_file = 0;
5647 unsigned long nr_huge = 0;
5648 unsigned long pgpgout = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005649 struct list_head *next;
5650 struct page *page;
5651
5652 next = page_list->next;
5653 do {
5654 unsigned int nr_pages = 1;
Johannes Weiner747db952014-08-08 14:19:24 -07005655
5656 page = list_entry(next, struct page, lru);
5657 next = page->lru.next;
5658
5659 VM_BUG_ON_PAGE(PageLRU(page), page);
5660 VM_BUG_ON_PAGE(page_count(page), page);
5661
Johannes Weiner1306a852014-12-10 15:44:52 -08005662 if (!page->mem_cgroup)
Johannes Weiner747db952014-08-08 14:19:24 -07005663 continue;
5664
5665 /*
5666 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08005667		 * page->mem_cgroup at this point; we have fully
Johannes Weiner29833312014-12-10 15:44:02 -08005668 * exclusive access to the page.
Johannes Weiner747db952014-08-08 14:19:24 -07005669 */
5670
Johannes Weiner1306a852014-12-10 15:44:52 -08005671 if (memcg != page->mem_cgroup) {
Johannes Weiner747db952014-08-08 14:19:24 -07005672 if (memcg) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005673 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5674 nr_huge, page);
5675 pgpgout = nr_anon = nr_file = nr_huge = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005676 }
Johannes Weiner1306a852014-12-10 15:44:52 -08005677 memcg = page->mem_cgroup;
Johannes Weiner747db952014-08-08 14:19:24 -07005678 }
5679
5680 if (PageTransHuge(page)) {
5681 nr_pages <<= compound_order(page);
5682 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5683 nr_huge += nr_pages;
5684 }
5685
5686 if (PageAnon(page))
5687 nr_anon += nr_pages;
5688 else
5689 nr_file += nr_pages;
5690
Johannes Weiner1306a852014-12-10 15:44:52 -08005691 page->mem_cgroup = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005692
5693 pgpgout++;
5694 } while (next != page_list);
5695
5696 if (memcg)
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005697 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5698 nr_huge, page);
Johannes Weiner747db952014-08-08 14:19:24 -07005699}
5700
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005701/**
5702 * mem_cgroup_uncharge - uncharge a page
5703 * @page: page to uncharge
5704 *
5705 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5706 * mem_cgroup_commit_charge().
5707 */
5708void mem_cgroup_uncharge(struct page *page)
5709{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005710 if (mem_cgroup_disabled())
5711 return;
5712
Johannes Weiner747db952014-08-08 14:19:24 -07005713 /* Don't touch page->lru of any random page, pre-check: */
Johannes Weiner1306a852014-12-10 15:44:52 -08005714 if (!page->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005715 return;
5716
Johannes Weiner747db952014-08-08 14:19:24 -07005717 INIT_LIST_HEAD(&page->lru);
5718 uncharge_list(&page->lru);
5719}
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005720
Johannes Weiner747db952014-08-08 14:19:24 -07005721/**
5722 * mem_cgroup_uncharge_list - uncharge a list of pages
5723 * @page_list: list of pages to uncharge
5724 *
5725 * Uncharge a list of pages previously charged with
5726 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5727 */
5728void mem_cgroup_uncharge_list(struct list_head *page_list)
5729{
5730 if (mem_cgroup_disabled())
5731 return;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005732
Johannes Weiner747db952014-08-08 14:19:24 -07005733 if (!list_empty(page_list))
5734 uncharge_list(page_list);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005735}
5736
5737/**
5738 * mem_cgroup_migrate - migrate a charge to another page
5739 * @oldpage: currently charged page
5740 * @newpage: page to transfer the charge to
Michal Hockof5e03a42015-02-05 12:25:14 -08005741 * @lrucare: either or both pages might be on the LRU already
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005742 *
5743 * Migrate the charge from @oldpage to @newpage.
5744 *
5745 * Both pages must be locked, @newpage->mapping must be set up.
5746 */
5747void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
5748 bool lrucare)
5749{
Johannes Weiner29833312014-12-10 15:44:02 -08005750 struct mem_cgroup *memcg;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005751 int isolated;
5752
5753 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5754 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5755 VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
5756 VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
5757 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005758 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5759 newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005760
5761 if (mem_cgroup_disabled())
5762 return;
5763
5764 /* Page cache replacement: new page already charged? */
Johannes Weiner1306a852014-12-10 15:44:52 -08005765 if (newpage->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005766 return;
5767
Johannes Weiner7d5e3242014-12-10 15:43:46 -08005768 /*
5769 * Swapcache readahead pages can get migrated before being
5770 * charged, and migration from compaction can happen to an
5771 * uncharged page when the PFN walker finds a page that
5772 * reclaim just put back on the LRU but has not released yet.
5773 */
Johannes Weiner1306a852014-12-10 15:44:52 -08005774 memcg = oldpage->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08005775 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005776 return;
5777
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005778 if (lrucare)
5779 lock_page_lru(oldpage, &isolated);
5780
Johannes Weiner1306a852014-12-10 15:44:52 -08005781 oldpage->mem_cgroup = NULL;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005782
5783 if (lrucare)
5784 unlock_page_lru(oldpage, isolated);
5785
Johannes Weiner29833312014-12-10 15:44:02 -08005786 commit_charge(newpage, memcg, lrucare);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005787}
5788
/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;
		int zone;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			struct mem_cgroup_tree_per_zone *rtpz;

			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_swap_account)
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	/* Caller disabled preemption with mapping->tree_lock */
	mem_cgroup_charge_statistics(memcg, page, -1);
	memcg_check_events(memcg, page);
}

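/*
 * Illustrative sketch (not part of the original file), modelled on the
 * reclaim path that hands a swapcache page over to its swap slot: the
 * entry comes from page_private(), mem_cgroup_swapout() runs under the
 * irq-disabling mapping->tree_lock (see the preemption comment above),
 * and the swapcache reference is dropped afterwards.  The page is
 * assumed to be off the LRU with its reference count already frozen to
 * zero by the caller.  The helper name is hypothetical.
 */
static void __maybe_unused
example_swapout_page(struct page *page, struct address_space *mapping)
{
	swp_entry_t swap = { .val = page_private(page) };

	spin_lock_irq(&mapping->tree_lock);
	mem_cgroup_swapout(page, swap);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&mapping->tree_lock);

	swapcache_free(swap);
}
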
/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the memsw charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg))
			page_counter_uncharge(&memcg->memsw, 1);
		mem_cgroup_swap_statistics(memcg, false);
		css_put(&memcg->css);
	}
	rcu_read_unlock();
}

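/*
 * Illustrative sketch (not part of the original file): once the last
 * user of a swap slot is gone, the memsw charge that
 * mem_cgroup_swapout() parked on the entry is dropped again.  The
 * helper name is hypothetical; in the kernel this happens when the
 * swap entry itself is finally freed.
 */
static void __maybe_unused example_swap_slot_freed(swp_entry_t entry)
{
	/* Return the memsw charge to the owning cgroup. */
	mem_cgroup_uncharge_swap(entry);
}
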
/* For remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

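/*
 * Usage note (not part of the original file): the accounting default can
 * be overridden on the kernel command line, e.g.
 *
 *	swapaccount=0	disable swap accounting even when
 *			CONFIG_MEMCG_SWAP_ENABLED is set
 *	swapaccount=1	enable it when the compile-time default is off
 *
 * Any other value leaves really_do_swap_account at its default.
 */
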
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

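/*
 * Usage note (not part of the original file): on the legacy hierarchy,
 * cgroup core prefixes these names with the controller name, so they
 * appear as memory.memsw.usage_in_bytes, memory.memsw.max_usage_in_bytes,
 * memory.memsw.limit_in_bytes and memory.memsw.failcnt in each memory
 * cgroup directory, e.g. (mount point and group name are examples only):
 *
 *	echo 1G > /sys/fs/cgroup/memory/example/memory.memsw.limit_in_bytes
 */
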
static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */