/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

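/*
 * Illustrative note (not from this file): merging can be disabled at boot by
 * passing "slab_nomerge" on the kernel command line; when CONFIG_SLUB is
 * enabled the legacy "slub_nomerge" spelling is accepted as well, as wired up
 * by the __setup()/__setup_param() lines above.
 */
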
/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		kmem_cache_free(s, p[i]);
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
			    void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}

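/*
 * Illustrative sketch (not part of this file): callers normally reach these
 * generic fallback loops through the public kmem_cache_alloc_bulk() and
 * kmem_cache_free_bulk() wrappers, which an allocator may override with a
 * faster batched path. The return value is the number of objects actually
 * allocated (0 on failure):
 *
 *	void *objs[16];
 *	int got = kmem_cache_alloc_bulk(cache, GFP_KERNEL,
 *					ARRAY_SIZE(objs), objs);
 *	...
 *	kmem_cache_free_bulk(cache, got, objs);
 */
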
#ifdef CONFIG_MEMCG_KMEM
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

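/*
 * Worked example (assuming a 64-byte cache line): for a 24-byte object
 * created with SLAB_HWCACHE_ALIGN, ralign starts at 64 and is halved while
 * the object still fits in half of it (24 <= 32), leaving ralign = 32. The
 * result is max(align, 32), subject to the ARCH_SLAB_MINALIGN floor and
 * rounded up to a multiple of sizeof(void *), so small objects stay densely
 * packed instead of each burning a full cache line.
 */
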
static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a pointer to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

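/*
 * Illustrative sketch (not part of this file): the typical lifecycle as seen
 * by a caller; struct foo and foo_cache are made-up names.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */
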
static int shutdown_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

	list_move(&s->list, release);
	return 0;
}

static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_remove(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been deactivated while the cache
	 * creation work was pending.
	 */
	if (!memcg_kmem_is_active(memcg))
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags, root_cache->ctor,
			 memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmem_cache_shrink(c, true);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int __shutdown_memcg_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	BUG_ON(is_root_cache(s));

	if (shutdown_cache(s, release, need_rcu_barrier))
		return -EBUSY;

	list_del(&s->memcg_params.list);
	return 0;
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
			continue;
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}

static int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.list, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
				 memcg_params.list)
		__shutdown_memcg_cache(c, release, need_rcu_barrier);

	list_splice(&busy, &s->memcg_params.list);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.list))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
	if (!err)
		err = shutdown_cache(s, &release, &need_rcu_barrier);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep, false);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

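/*
 * Illustrative example (not part of this file; foo_cache is a made-up name):
 * a cache owner that has just freed many objects can give unused slabs back
 * to the page allocator, and a zero return means everything was released:
 *
 *	if (kmem_cache_shrink(foo_cache) == 0)
 *		pr_debug("all slabs of foo_cache were released\n");
 */
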
bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}

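/*
 * Worked example: kmalloc(20, GFP_KERNEL) looks up size_index_elem(20) =
 * (20 - 1) / 8 = 2, and size_index[2] = 5, i.e. the 32-byte cache
 * (kmalloc_caches[5]). A 300-byte request instead takes the fls() path:
 * fls(299) = 9, so it is served from the 512-byte cache (kmalloc_caches[9]).
 * This assumes the default table, before any adjustments made by
 * setup_kmalloc_cache_index_table() for a large ARCH_KMALLOC_MINALIGN.
 */
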
/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
static struct {
	const char *name;
	unsigned long size;
} const kmalloc_info[] __initconst = {
	{NULL, 0},			{"kmalloc-96", 96},
	{"kmalloc-192", 192},		{"kmalloc-8", 8},
	{"kmalloc-16", 16},		{"kmalloc-32", 32},
	{"kmalloc-64", 64},		{"kmalloc-128", 128},
	{"kmalloc-256", 256},		{"kmalloc-512", 512},
	{"kmalloc-1024", 1024},		{"kmalloc-2048", 2048},
	{"kmalloc-4096", 4096},		{"kmalloc-8192", 8192},
	{"kmalloc-16384", 16384},	{"kmalloc-32768", 32768},
	{"kmalloc-65536", 65536},	{"kmalloc-131072", 131072},
	{"kmalloc-262144", 262144},	{"kmalloc-524288", 524288},
	{"kmalloc-1048576", 1048576},	{"kmalloc-2097152", 2097152},
	{"kmalloc-4194304", 4194304},	{"kmalloc-8388608", 8388608},
	{"kmalloc-16777216", 16777216},	{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc caches
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
					"dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

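/*
 * Illustrative example (assuming 4 KiB pages): a request too large for the
 * kmalloc caches (e.g. with SLUB, anything larger than two pages), say
 * kmalloc(70000, GFP_KERNEL), ends up here with order = get_order(70000) = 5,
 * i.e. a 32-page (131072-byte) compound allocation whose order kfree() can
 * later recover from the compound page itself.
 */
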
#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want the original buffer to be
 * freed immediately, for example when it may still be referenced under RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

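/*
 * Illustrative sketch (not part of this file): growing a buffer with
 * krealloc(). On failure the old buffer is left allocated and untouched,
 * so the caller still owns it:
 *
 *	char *bigger = krealloc(buf, 256, GFP_KERNEL);
 *	if (!bigger)
 *		kfree(buf);
 *	else
 *		buf = bigger;
 */
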
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

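/*
 * Illustrative example (not part of this file): kzfree() is typically used
 * for buffers holding key material or other secrets, so the contents do not
 * linger in freed slab memory:
 *
 *	u8 *key = kmalloc(32, GFP_KERNEL);
 *	...
 *	kzfree(key);
 */
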
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);