#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

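/*
 * Example (illustrative sketch, not part of the original file): a
 * typical kstrdup() pattern, copying a caller-supplied name so the
 * copy outlives the caller's buffer. struct my_label and
 * my_label_create() are made-up names.
 *
 *	struct my_label {
 *		char *name;
 *	};
 *
 *	static struct my_label *my_label_create(const char *name)
 *	{
 *		struct my_label *label = kzalloc(sizeof(*label), GFP_KERNEL);
 *
 *		if (!label)
 *			return NULL;
 *		label->name = kstrdup(name, GFP_KERNEL);
 *		if (!label->name) {
 *			kfree(label);
 *			return NULL;
 *		}
 *		return label;
 *	}
 */
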
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

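/*
 * Example (illustrative sketch): taking a bounded copy of a buffer
 * that may not be NUL-terminated, such as a fixed-width field parsed
 * out of a record. FIELD_LEN and raw are made-up names. Unlike
 * kstrdup(), at most @max bytes of the source are read and the copy
 * is always NUL-terminated.
 *
 *	char *field;
 *
 *	field = kstrndup(raw, FIELD_LEN, GFP_KERNEL);
 *	if (!field)
 *		return -ENOMEM;
 *	pr_debug("field: %s\n", field);
 *	kfree(field);
 */
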
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

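/*
 * Example (illustrative sketch): snapshotting a template structure so
 * the copy can be modified without touching the original. struct
 * my_config, default_config and MY_CFG_ENABLED are made-up names.
 *
 *	struct my_config *cfg;
 *
 *	cfg = kmemdup(&default_config, sizeof(default_config), GFP_KERNEL);
 *	if (!cfg)
 *		return -ENOMEM;
 *	cfg->flags |= MY_CFG_ENABLED;
 */
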
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

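/*
 * Example (illustrative sketch): the common ioctl pattern of pulling a
 * fixed-size argument block in from user space. struct my_args and
 * my_do_ioctl() are made-up names. Note the IS_ERR()/PTR_ERR()
 * handling: unlike kmalloc(), this function returns an ERR_PTR(),
 * never NULL, on failure.
 *
 *	struct my_args *args;
 *	int ret;
 *
 *	args = memdup_user(uarg, sizeof(*args));
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	ret = my_do_ioctl(args);
 *	kfree(args);
 *	return ret;
 */
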
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately, for example when it may still be visible to RCU readers.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

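/*
 * Example (illustrative sketch): growing an array geometrically as
 * elements arrive. The b->buf, b->count and b->capacity fields are
 * made-up names. As with realloc() in user space, assign the result to
 * a temporary first: on failure the original buffer is left allocated
 * and untouched.
 *
 *	if (b->count == b->capacity) {
 *		size_t new_cap = b->capacity ? 2 * b->capacity : 16;
 *		int *tmp;
 *
 *		tmp = krealloc(b->buf, new_cap * sizeof(*tmp), GFP_KERNEL);
 *		if (!tmp)
 *			return -ENOMEM;
 *		b->buf = tmp;
 *		b->capacity = new_cap;
 *	}
 *	b->buf[b->count++] = value;
 */
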
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a
 * good deal bigger than the requested buffer size passed to kmalloc().
 * So be careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

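/*
 * Example (illustrative sketch): releasing a buffer that held key
 * material, so the secret does not linger in freed memory. struct
 * my_session and its fields are made-up names; only the sensitive
 * buffer needs kzfree(), the container can use plain kfree().
 *
 *	static void my_session_destroy(struct my_session *s)
 *	{
 *		kzfree(s->key);
 *		kfree(s);
 *	}
 */
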
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

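/*
 * Example (illustrative sketch): copying a user-supplied path in a
 * syscall or ioctl handler, capped at PATH_MAX. my_lookup() is a
 * made-up name. Like memdup_user(), failure is reported via an
 * ERR_PTR(), not NULL.
 *
 *	char *path;
 *	int ret;
 *
 *	path = strndup_user(upath, PATH_MAX);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ret = my_lookup(path);
 *	kfree(path);
 *	return ret;
 */
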
/*
 * Insert @vma into @mm's VMA list after @prev, deriving the successor
 * from @prev or, at the head of the list, from @rb_parent.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check the entire thread group; otherwise
 * check only @task itself. Returns the pid of the task that the vma is
 * a stack for, or 0 if it is not a stack.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;

		rcu_read_lock();
		for_each_thread(task, t) {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		}
done:
		rcu_read_unlock();
	}

	return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return
 * with no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

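/*
 * Example (illustrative sketch): pinning a user buffer so a device can
 * write into it (hence write=1), then dirtying and releasing the pages
 * afterwards. MY_NPAGES and my_dma_to_pages() are made-up names, and a
 * real caller must also cope with a partial pin (npinned < MY_NPAGES).
 *
 *	struct page *pages[MY_NPAGES];
 *	int i, npinned, ret;
 *
 *	npinned = get_user_pages_fast(uaddr, MY_NPAGES, 1, pages);
 *	if (npinned < 0)
 *		return npinned;
 *	ret = my_dma_to_pages(pages, npinned);
 *	for (i = 0; i < npinned; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */
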
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

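/*
 * Example (illustrative sketch): an in-kernel equivalent of
 * mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0), mapping the
 * first page of an already opened struct file into the current task.
 * Errors come back as negative values encoded in the unsigned return.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, PAGE_SIZE, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */
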
struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/*
 * The overcommit_ratio and overcommit_kbytes sysctls are mutually
 * exclusive: writing one clears the other.
 */
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

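/*
 * Worked example (illustrative, made-up numbers): with 4 GiB of RAM,
 * no hugetlb pages, 2 GiB of swap and the default overcommit_ratio of
 * 50, the limit (returned in pages) corresponds to
 * 4 GiB * 50 / 100 + 2 GiB = 4 GiB of committable memory.
 */
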
/* Tracepoint definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);