/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_NOTRACK | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}

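/*
 * Usage sketch (editorial note, not part of the upstream file): callers
 * normally go through the public wrappers kmem_cache_alloc_bulk() and
 * kmem_cache_free_bulk() from <linux/slab.h>; the per-allocator fast paths
 * may fall back to the generic loops above. The "my_cache" and "objs" names
 * below are made up for illustration only.
 *
 *	void *objs[16];
 *	size_t got;
 *
 *	got = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!got)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cache, got, objs);
 */
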
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user-specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

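/*
 * Worked example (editorial note): with SLAB_HWCACHE_ALIGN, a 64-byte cache
 * line and a 24-byte object, ralign is halved from 64 to 32 (24 <= 32, but
 * 24 > 16), so the object is aligned to 32 bytes rather than padded out to
 * a full cache line; small objects can therefore share a line instead of
 * wasting space.
 */
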
static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

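/*
 * Usage sketch (editorial note; "struct foo" and foo_cachep are made-up
 * names, not part of this file):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */
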
static int shutdown_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

	list_move(&s->list, release);
	return 0;
}

static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags & CACHE_CREATE_MASK,
			 root_cache->ctor, memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmem_cache_shrink(c, true);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int __shutdown_memcg_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	BUG_ON(is_root_cache(s));

	if (shutdown_cache(s, release, need_rcu_barrier))
		return -EBUSY;

	list_del(&s->memcg_params.list);
	return 0;
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
			continue;
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}

static int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.list, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
				 memcg_params.list)
		__shutdown_memcg_cache(c, release, need_rcu_barrier);

	list_splice(&busy, &s->memcg_params.list);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.list))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	kasan_cache_destroy(s);
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
	if (!err)
		err = shutdown_cache(s, &release, &need_rcu_barrier);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep, false);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

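/*
 * Worked example (editorial note): a 72-byte request gives
 * size_index_elem(72) == (72 - 1) / 8 == 8, and size_index[8] == 1, i.e. the
 * kmalloc-96 cache, since no power-of-two cache between 64 and 128 fits it
 * more tightly.
 */
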
/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	{NULL, 0},			{"kmalloc-96", 96},
	{"kmalloc-192", 192},		{"kmalloc-8", 8},
	{"kmalloc-16", 16},		{"kmalloc-32", 32},
	{"kmalloc-64", 64},		{"kmalloc-128", 128},
	{"kmalloc-256", 256},		{"kmalloc-512", 512},
	{"kmalloc-1024", 1024},		{"kmalloc-2048", 2048},
	{"kmalloc-4096", 4096},		{"kmalloc-8192", 8192},
	{"kmalloc-16384", 16384},	{"kmalloc-32768", 32768},
	{"kmalloc-65536", 65536},	{"kmalloc-131072", 131072},
	{"kmalloc-262144", 262144},	{"kmalloc-524288", 524288},
	{"kmalloc-1048576", 1048576},	{"kmalloc-2097152", 2097152},
	{"kmalloc-4194304", 4194304},	{"kmalloc-8388608", 8388608},
	{"kmalloc-16777216", 16777216},	{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			size_t count)
{
	size_t i;
	unsigned int rand;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

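/*
 * Effect sketch (editorial note): for count == 4 the list starts as
 * {0, 1, 2, 3}; each iteration swaps slot i with a random slot in [0, i],
 * so one possible result is {2, 0, 3, 1}. The allocators use the resulting
 * sequence to lay out each new slab's free list in a randomized order.
 */
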
/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

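/*
 * Usage sketch (editorial note; "buf" and "new_len" are made-up names):
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	buf = new;
 *
 * On failure the old buffer is left untouched, so the caller decides whether
 * to keep it or free it. krealloc() may also return the original pointer
 * unchanged when ksize(buf) already covers new_len.
 */
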
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);