/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched/sysctl.h>
#include <linux/notifier.h>
#include <linux/memory.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)		(addr)
#endif

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

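/*
 * Combine the generic protection_map[] entry selected by the
 * VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits of vm_flags with any
 * architecture-specific protection bits.
 */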
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = (totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100;
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
	allowed += total_swap_pages;

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/*
 * Requires inode->i_mapping->i_mmap_mutex
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file_inode(file)->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping->i_mmap_writable--;

	flush_dcache_mmap_lock(mapping);
	if (unlikely(vma->vm_flags & VM_NONLINEAR))
		list_del_init(&vma->shared.nonlinear);
	else
		vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
		__remove_shared_vm_struct(vma, file, mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}

static unsigned long do_brk(unsigned long addr, unsigned long len);

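/*
 * brk(2): move the program break.  Shrinking simply unmaps the freed
 * range; growing is checked against RLIMIT_DATA and existing mappings
 * before do_brk() extends the heap.  If mm->def_flags has VM_LOCKED,
 * the newly added range is populated after dropping mmap_sem.
 */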
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;
	bool populate;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;

set_brk:
	mm->brk = brk;
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}

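/*
 * Largest free gap (in bytes) preceding any vma in the subtree rooted
 * at this vma: either the gap between the vma and its predecessor, or
 * the biggest rb_subtree_gap recorded in its left/right child subtrees.
 */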
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max, subtree_gap;
	max = vma->vm_start;
	if (vma->vm_prev)
		max -= vma->vm_prev->vm_end;
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	if (vma->vm_rb.rb_right) {
		subtree_gap = rb_entry(vma->vm_rb.rb_right,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	return max;
}

#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct rb_root *root)
{
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			printk("vm_start %lx prev %lx\n", vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			printk("vm_end %lx < vm_start %lx\n",
				vma->vm_end, vma->vm_start);
			bug = 1;
		}
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			printk("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		printk("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}

static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		BUG_ON(vma != ignore &&
		       vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
	}
}

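/*
 * Walk the vma list verifying the anon_vma interval trees and counting
 * vmas, then cross-check map_count, highest_vm_end and the rbtree
 * (via browse_rb()).  BUG()s if any inconsistency is found.
 */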
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;
	while (vma) {
		struct anon_vma_chain *avc;
		vma_lock_anon_vma(vma);
		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
			anon_vma_interval_tree_verify(avc);
		vma_unlock_anon_vma(vma);
		highest_address = vma->vm_end;
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		printk("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		printk("mm->highest_vm_end %lx, found %lx\n",
		       mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(&mm->mm_rb);
	if (i != mm->map_count) {
		printk("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	BUG_ON(bug);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif

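/*
 * Generate the augmented-rbtree callbacks that keep each node's
 * rb_subtree_gap field in sync with vma_compute_subtree_gap() as nodes
 * are inserted, erased and rotated.
 */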
RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
	 * function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}

static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of the vma being erased.
	 */
	validate_mm_rb(root, vma);

	/*
	 * Note rb_erase_augmented is a fairly large inline function,
	 * so make sure we instantiate it only once with our desired
	 * augmented rbtree callbacks.
	 */
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_sem and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

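/*
 * Find where a new vma covering [addr, end) would go in the rbtree:
 * return the rb_link/rb_parent insertion point, and the vma preceding
 * the new range in *pprev.  Fails with -ENOMEM if an existing vma
 * already overlaps the range.
 */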
static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Fail if an existing vma overlaps the area */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}

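/*
 * Count how many pages in the range [addr, end) are already covered by
 * existing vmas.
 */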
static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct vm_area_struct *vma;

	/* Find first overlapping mapping */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma)
		return 0;

	nr_pages = (min(end, vma->vm_end) -
		max(addr, vma->vm_start)) >> PAGE_SHIFT;

	/* Iterate over the rest of the overlaps */
	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
		unsigned long overlap_len;

		if (vma->vm_start > end)
			break;

		overlap_len = min(end, vma->vm_end) - vma->vm_start;
		nr_pages += overlap_len >> PAGE_SHIFT;
	}

	return nr_pages;
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vma->vm_end;

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap to the correct value. Finally we
	 * rebalance the rbtree after all augmented values have been set.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}

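/*
 * If the vma is file backed, account VM_DENYWRITE and VM_SHARED against
 * the inode/address_space and insert the vma into the mapping's
 * nonlinear list or interval tree.
 */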
static void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&file_inode(file)->i_writecount);
		if (vma->vm_flags & VM_SHARED)
			mapping->i_mmap_writable++;

		flush_dcache_mmap_lock(mapping);
		if (unlikely(vma->vm_flags & VM_NONLINEAR))
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		else
			vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file)
		mapping = vma->vm_file->f_mapping;

	if (mapping)
		mutex_lock(&mapping->i_mmap_mutex);

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);

	mm->map_count++;
	validate_mm(mm);
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree. It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

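/*
 * Unlink the vma from the mm rbtree and the vma list (fixing up
 * vm_prev/vm_next), and drop mmap_cache if it pointed at the vma.
 */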
static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma_rb_erase(vma, &mm->mm_rb);
	prev->vm_next = next = vma->vm_next;
	if (next)
		next->vm_prev = prev;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = prev;
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary. The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next;
	struct vm_area_struct *importer = NULL;
	struct address_space *mapping = NULL;
	struct rb_root *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 */
again:			remove_next = 1 + (end > next->vm_end);
			end = next->vm_end;
			exporter = next;
			importer = vma;
		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			exporter = next;
			importer = vma;
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
			exporter = vma;
			importer = next;
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			if (anon_vma_clone(importer, exporter))
				return -ENOMEM;
			importer->anon_vma = exporter->anon_vma;
		}
	}

	if (file) {
		mapping = file->f_mapping;
		if (!(vma->vm_flags & VM_NONLINEAR)) {
			root = &mapping->i_mmap;
			uprobe_munmap(vma, vma->vm_start, vma->vm_end);

			if (adjust_next)
				uprobe_munmap(next, next->vm_start,
							next->vm_end);
		}

		mutex_lock(&mapping->i_mmap_mutex);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	vma_adjust_trans_huge(vma, start, end, adjust_next);

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_BUG_ON(adjust_next && next->anon_vma &&
			  anon_vma != next->anon_vma);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	} else {
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = end;
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);

	if (root) {
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		kmem_cache_free(vm_area_cachep, next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		next = vma->vm_next;
		if (remove_next == 2)
			goto again;
		else if (next)
			vma_gap_update(next);
		else
			mm->highest_vm_end = end;
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
			struct file *file, unsigned long vm_flags,
			const char __user *anon_name)
{
	if (vma->vm_flags ^ vm_flags)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	if (vma_get_anon_name(vma) != anon_name)
		return 0;
	return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
	const char __user *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
	const char __user *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor.  Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			const char __user *anon_name)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags, anon_vma,
						file, pgoff, anon_name)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags, anon_vma,
						file, pgoff+pglen, anon_name) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			err = vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags, anon_vma,
					file, pgoff+pglen, anon_name)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			err = vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area);
		return area;
	}

	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mm_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mm_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *near;

	near = vma->vm_next;
	if (!near)
		goto try_prev;

	anon_vma = reusable_anon_vma(near, vma, near);
	if (anon_vma)
		return anon_vma;
try_prev:
	near = vma->vm_prev;
	if (!near)
		goto none;

	anon_vma = reusable_anon_vma(near, near, vma);
	if (anon_vma)
		return anon_vma;
none:
	/*
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return NULL;
}

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
						struct file *file, long pages)
{
	const unsigned long stack_flags
		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

	mm->total_vm += pages;

	if (file) {
		mm->shared_vm += pages;
		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
			mm->exec_vm += pages;
	} else if (flags & stack_flags)
		mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */

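/*
 * do_mmap_pgoff(): validate the request (length, hint, rlimits, file
 * mode vs. MAP_* flags), work out the vm_flags for the new mapping and
 * hand off to mmap_region().  On success, *populate is set to the
 * length the caller should pre-fault when VM_LOCKED or MAP_POPULATE
 * applies.
 */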
Al Viroe3fc6292012-05-30 20:08:42 -04001205unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 unsigned long len, unsigned long prot,
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001207 unsigned long flags, unsigned long pgoff,
Michel Lespinasse41badc12013-02-22 16:32:47 -08001208 unsigned long *populate)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209{
1210 struct mm_struct * mm = current->mm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 struct inode *inode;
KOSAKI Motohiroca16d142011-05-26 19:16:19 +09001212 vm_flags_t vm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213
Michel Lespinasse41badc12013-02-22 16:32:47 -08001214 *populate = 0;
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001215
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 /*
1217 * Does the application expect PROT_READ to imply PROT_EXEC?
1218 *
1219 * (the exception is when the underlying filesystem is noexec
1220 * mounted, in which case we dont add PROT_EXEC.)
1221 */
1222 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
Josef "Jeff" Sipekd3ac7f82006-12-08 02:36:44 -08001223 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 prot |= PROT_EXEC;
1225
1226 if (!len)
1227 return -EINVAL;
1228
Eric Paris7cd94142007-11-26 18:47:40 -05001229 if (!(flags & MAP_FIXED))
1230 addr = round_hint_to_min(addr);
1231
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 /* Careful about overflows.. */
1233 len = PAGE_ALIGN(len);
Al Viro9206de92009-12-03 15:23:11 -05001234 if (!len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 return -ENOMEM;
1236
1237 /* offset overflow? */
1238 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1239 return -EOVERFLOW;
1240
1241 /* Too many mappings? */
1242 if (mm->map_count > sysctl_max_map_count)
1243 return -ENOMEM;
1244
1245 /* Obtain the address to map to. we verify (or select) it and ensure
1246 * that it represents a valid section of the address space.
1247 */
1248 addr = get_unmapped_area(file, addr, len, pgoff, flags);
1249 if (addr & ~PAGE_MASK)
1250 return addr;
1251
1252 /* Do simple checking here so the lower-level routines won't have
1253 * to. we assume access permissions have been handled by the open
1254 * of the memory object, so we don't do any here.
1255 */
1256 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
1257 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1258
Huang Shijiecdf7b342009-09-21 17:03:36 -07001259 if (flags & MAP_LOCKED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 if (!can_do_mlock())
1261 return -EPERM;
Rik van Rielba470de2008-10-18 20:26:50 -07001262
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 /* mlock MCL_FUTURE? */
1264 if (vm_flags & VM_LOCKED) {
1265 unsigned long locked, lock_limit;
Chris Wright93ea1d02005-05-01 08:58:38 -07001266 locked = len >> PAGE_SHIFT;
1267 locked += mm->locked_vm;
Jiri Slaby59e99e52010-03-05 13:41:44 -08001268 lock_limit = rlimit(RLIMIT_MEMLOCK);
Chris Wright93ea1d02005-05-01 08:58:38 -07001269 lock_limit >>= PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1271 return -EAGAIN;
1272 }
1273
Al Viro496ad9a2013-01-23 17:07:38 -05001274 inode = file ? file_inode(file) : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
1276 if (file) {
1277 switch (flags & MAP_TYPE) {
1278 case MAP_SHARED:
1279 if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1280 return -EACCES;
1281
1282 /*
1283 * Make sure we don't allow writing to an append-only
1284 * file..
1285 */
1286 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1287 return -EACCES;
1288
1289 /*
1290 * Make sure there are no mandatory locks on the file.
1291 */
1292 if (locks_verify_locked(inode))
1293 return -EAGAIN;
1294
1295 vm_flags |= VM_SHARED | VM_MAYSHARE;
1296 if (!(file->f_mode & FMODE_WRITE))
1297 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1298
1299 /* fall through */
1300 case MAP_PRIVATE:
1301 if (!(file->f_mode & FMODE_READ))
1302 return -EACCES;
Josef "Jeff" Sipekd3ac7f82006-12-08 02:36:44 -08001303 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
Linus Torvalds80c56062006-10-15 14:09:55 -07001304 if (vm_flags & VM_EXEC)
1305 return -EPERM;
1306 vm_flags &= ~VM_MAYEXEC;
1307 }
Linus Torvalds80c56062006-10-15 14:09:55 -07001308
1309 if (!file->f_op || !file->f_op->mmap)
1310 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 break;
1312
1313 default:
1314 return -EINVAL;
1315 }
1316 } else {
1317 switch (flags & MAP_TYPE) {
1318 case MAP_SHARED:
Tejun Heoce363942008-09-03 16:09:47 +02001319 /*
1320 * Ignore pgoff.
1321 */
1322 pgoff = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 vm_flags |= VM_SHARED | VM_MAYSHARE;
1324 break;
1325 case MAP_PRIVATE:
1326 /*
1327 * Set pgoff according to addr for anon_vma.
1328 */
1329 pgoff = addr >> PAGE_SHIFT;
1330 break;
1331 default:
1332 return -EINVAL;
1333 }
1334 }
1335
Michel Lespinassec22c0d62013-02-22 16:32:43 -08001336 /*
1337 * Set 'VM_NORESERVE' if we should not account for the
1338 * memory use of this mapping.
1339 */
1340 if (flags & MAP_NORESERVE) {
1341 /* We honor MAP_NORESERVE if allowed to overcommit */
1342 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1343 vm_flags |= VM_NORESERVE;
1344
1345 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1346 if (file && is_file_hugepages(file))
1347 vm_flags |= VM_NORESERVE;
1348 }
1349
1350 addr = mmap_region(file, addr, len, vm_flags, pgoff);
Michel Lespinasse09a9f1d2013-03-28 16:26:23 -07001351 if (!IS_ERR_VALUE(addr) &&
1352 ((vm_flags & VM_LOCKED) ||
1353 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
Michel Lespinasse41badc12013-02-22 16:32:47 -08001354 *populate = len;
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001355 return addr;
Miklos Szeredi0165ab42007-07-15 23:38:26 -07001356}
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001357
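/*
 * Note: in this kernel tree the usual path for a userspace mmap() call is
 * expected to be sys_mmap_pgoff() -> vm_mmap_pgoff() (mm/util.c) ->
 * do_mmap_pgoff() above -> mmap_region(), with mm_populate() run afterwards
 * when *populate is set (MAP_POPULATE or MAP_LOCKED mappings).
 */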
Hugh Dickins66f0dc42009-12-30 20:17:34 +00001358SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1359 unsigned long, prot, unsigned long, flags,
1360 unsigned long, fd, unsigned long, pgoff)
1361{
1362 struct file *file = NULL;
1363 unsigned long retval = -EBADF;
1364
1365 if (!(flags & MAP_ANONYMOUS)) {
Al Viro120a7952010-10-30 02:54:44 -04001366 audit_mmap_fd(fd, flags);
Hugh Dickins66f0dc42009-12-30 20:17:34 +00001367 if (unlikely(flags & MAP_HUGETLB))
1368 return -EINVAL;
1369 file = fget(fd);
1370 if (!file)
1371 goto out;
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001372 if (is_file_hugepages(file))
1373 len = ALIGN(len, huge_page_size(hstate_file(file)));
Hugh Dickins66f0dc42009-12-30 20:17:34 +00001374 } else if (flags & MAP_HUGETLB) {
1375 struct user_struct *user = NULL;
Li Zefan091d0d52013-05-09 15:08:15 +08001376 struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
1377 SHM_HUGE_MASK);
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001378
Li Zefan091d0d52013-05-09 15:08:15 +08001379 if (!hs)
1380 return -EINVAL;
1381
1382 len = ALIGN(len, huge_page_size(hs));
Hugh Dickins66f0dc42009-12-30 20:17:34 +00001383 /*
1384 * VM_NORESERVE is used because the reservations will be
1385 * taken when vm_ops->mmap() is called.
1386 * A dummy user value is used because we are not locking
1387 * memory, so no accounting is necessary.
1388 */
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001389 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
Andi Kleen42d73952012-12-11 16:01:34 -08001390 VM_NORESERVE,
1391 &user, HUGETLB_ANONHUGE_INODE,
1392 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
Hugh Dickins66f0dc42009-12-30 20:17:34 +00001393 if (IS_ERR(file))
1394 return PTR_ERR(file);
1395 }
1396
1397 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1398
Al Viroeb36c582012-05-30 20:17:35 -04001399 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
Hugh Dickins66f0dc42009-12-30 20:17:34 +00001400 if (file)
1401 fput(file);
1402out:
1403 return retval;
1404}
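/*
 * Illustrative userspace example (an assumption, not part of this file):
 * an anonymous MAP_HUGETLB request that takes the hugetlb branch above,
 * assuming 2MB huge pages are configured on the system:
 *
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 */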
1405
Christoph Hellwiga4679372010-03-10 15:21:15 -08001406#ifdef __ARCH_WANT_SYS_OLD_MMAP
1407struct mmap_arg_struct {
1408 unsigned long addr;
1409 unsigned long len;
1410 unsigned long prot;
1411 unsigned long flags;
1412 unsigned long fd;
1413 unsigned long offset;
1414};
1415
1416SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1417{
1418 struct mmap_arg_struct a;
1419
1420 if (copy_from_user(&a, arg, sizeof(a)))
1421 return -EFAULT;
1422 if (a.offset & ~PAGE_MASK)
1423 return -EINVAL;
1424
1425 return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1426 a.offset >> PAGE_SHIFT);
1427}
1428#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1429
Alexey Dobriyan4e950f62007-07-30 02:36:13 +04001430/*
1431 * Some shared mappings will want the pages marked read-only
1432 * to track write events. If so, we'll downgrade vm_page_prot
1433 * to the private version (using protection_map[] without the
1434 * VM_SHARED bit).
1435 */
1436int vma_wants_writenotify(struct vm_area_struct *vma)
1437{
KOSAKI Motohiroca16d142011-05-26 19:16:19 +09001438 vm_flags_t vm_flags = vma->vm_flags;
Alexey Dobriyan4e950f62007-07-30 02:36:13 +04001439
1440 /* If it was private or non-writable, the write bit is already clear */
1441 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1442 return 0;
1443
1444 /* The backer wishes to know when pages are first written to? */
1445 if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1446 return 1;
1447
1448 /* The open routine did something to the protections already? */
1449 if (pgprot_val(vma->vm_page_prot) !=
Coly Li3ed75eb2007-10-18 23:39:15 -07001450 pgprot_val(vm_get_page_prot(vm_flags)))
Alexey Dobriyan4e950f62007-07-30 02:36:13 +04001451 return 0;
1452
1453 /* Specialty mapping? */
Konstantin Khlebnikov4b6e1e32012-10-08 16:28:40 -07001454 if (vm_flags & VM_PFNMAP)
Alexey Dobriyan4e950f62007-07-30 02:36:13 +04001455 return 0;
1456
1457 /* Can the mapping track the dirty pages? */
1458 return vma->vm_file && vma->vm_file->f_mapping &&
1459 mapping_cap_account_dirty(vma->vm_file->f_mapping);
1460}
1461
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001462/*
1463 * We account for memory if it's a private writeable mapping,
Mel Gorman5a6fe122009-02-10 14:02:27 +00001464 * not hugepages and VM_NORESERVE wasn't set.
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001465 */
KOSAKI Motohiroca16d142011-05-26 19:16:19 +09001466static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001467{
Mel Gorman5a6fe122009-02-10 14:02:27 +00001468 /*
1469 * hugetlb has its own accounting separate from the core VM
1470 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1471 */
1472 if (file && is_file_hugepages(file))
1473 return 0;
1474
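	/*
	 * The test below is true only for a private (no VM_SHARED) writable
	 * mapping with VM_NORESERVE clear.
	 */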
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001475 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1476}
1477
Miklos Szeredi0165ab42007-07-15 23:38:26 -07001478unsigned long mmap_region(struct file *file, unsigned long addr,
Michel Lespinassec22c0d62013-02-22 16:32:43 -08001479 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
Miklos Szeredi0165ab42007-07-15 23:38:26 -07001480{
1481 struct mm_struct *mm = current->mm;
1482 struct vm_area_struct *vma, *prev;
1483 int correct_wcount = 0;
1484 int error;
1485 struct rb_node **rb_link, *rb_parent;
1486 unsigned long charged = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001487 struct inode *inode = file ? file_inode(file) : NULL;
Miklos Szeredi0165ab42007-07-15 23:38:26 -07001488
Cyril Hrubise8420a82013-04-29 15:08:33 -07001489 /* Check against address space limit. */
1490 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
1491 unsigned long nr_pages;
1492
1493 /*
1494 * MAP_FIXED may remove pages of mappings that intersect with the
1495 * requested mapping. Account for the pages it would unmap.
1496 */
1497 if (!(vm_flags & MAP_FIXED))
1498 return -ENOMEM;
1499
1500 nr_pages = count_vma_pages_range(mm, addr, addr + len);
1501
1502 if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
1503 return -ENOMEM;
1504 }
1505
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 /* Clear old maps */
1507 error = -ENOMEM;
1508munmap_back:
Hugh Dickins6597d782012-10-08 16:29:07 -07001509 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 if (do_munmap(mm, addr, len))
1511 return -ENOMEM;
1512 goto munmap_back;
1513 }
1514
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001515 /*
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001516 * Private writable mapping: check memory availability
1517 */
Mel Gorman5a6fe122009-02-10 14:02:27 +00001518 if (accountable_mapping(file, vm_flags)) {
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001519 charged = len >> PAGE_SHIFT;
Al Viro191c5422012-02-13 03:58:52 +00001520 if (security_vm_enough_memory_mm(mm, charged))
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001521 return -ENOMEM;
1522 vm_flags |= VM_ACCOUNT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 }
1524
1525 /*
Linus Torvaldsde33c8d2009-01-29 17:46:42 -08001526 * Can we just expand an old mapping?
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 */
Colin Cross6ebfe582013-06-26 17:26:01 -07001528 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
1529 NULL, NULL);
Linus Torvaldsde33c8d2009-01-29 17:46:42 -08001530 if (vma)
1531 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
1533 /*
1534 * Determine the object being mapped and call the appropriate
1535 * specific mapper. The address has already been validated, but
1536 * not unmapped, though the maps are removed from the list.
1537 */
Pekka Enbergc5e3b832006-03-25 03:06:43 -08001538 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 if (!vma) {
1540 error = -ENOMEM;
1541 goto unacct_error;
1542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
1544 vma->vm_mm = mm;
1545 vma->vm_start = addr;
1546 vma->vm_end = addr + len;
1547 vma->vm_flags = vm_flags;
Coly Li3ed75eb2007-10-18 23:39:15 -07001548 vma->vm_page_prot = vm_get_page_prot(vm_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 vma->vm_pgoff = pgoff;
Rik van Riel5beb4932010-03-05 13:42:07 -08001550 INIT_LIST_HEAD(&vma->anon_vma_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
Hugh Dickinsce8fea72012-03-06 12:28:52 -08001552 error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
1553
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 if (file) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1556 goto free_vma;
1557 if (vm_flags & VM_DENYWRITE) {
1558 error = deny_write_access(file);
1559 if (error)
1560 goto free_vma;
1561 correct_wcount = 1;
1562 }
Al Virocb0942b2012-08-27 14:48:26 -04001563 vma->vm_file = get_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 error = file->f_op->mmap(file, vma);
1565 if (error)
1566 goto unmap_and_free_vma;
Huang Shijief8dbf0a72009-09-21 17:03:41 -07001567
1568 /* Can addr have changed??
1569 *
1570 * Answer: Yes, several device drivers can do it in their
1571 * f_op->mmap method. -DaveM
Joonsoo Kim2897b4d2012-12-12 13:51:53 -08001572 * Bug: If addr is changed, prev, rb_link, rb_parent should
1573 * be updated for vma_link()
Huang Shijief8dbf0a72009-09-21 17:03:41 -07001574 */
Joonsoo Kim2897b4d2012-12-12 13:51:53 -08001575 WARN_ON_ONCE(addr != vma->vm_start);
1576
Huang Shijief8dbf0a72009-09-21 17:03:41 -07001577 addr = vma->vm_start;
1578 pgoff = vma->vm_pgoff;
1579 vm_flags = vma->vm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 } else if (vm_flags & VM_SHARED) {
Al Viro835ee792012-03-05 06:39:47 +00001581 if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
1582 goto free_vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 error = shmem_zero_setup(vma);
1584 if (error)
1585 goto free_vma;
1586 }
1587
Magnus Dammc9d0bf22009-12-14 17:59:49 -08001588 if (vma_wants_writenotify(vma)) {
1589 pgprot_t pprot = vma->vm_page_prot;
1590
1591 /* Can vma->vm_page_prot have changed??
1592 *
1593 * Answer: Yes, drivers may have changed it in their
1594 * f_op->mmap method.
1595 *
1596 * Ensures that vmas marked as uncached stay that way.
1597 */
Hugh Dickins1ddd4392007-10-22 20:45:12 -07001598 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
Magnus Dammc9d0bf22009-12-14 17:59:49 -08001599 if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1600 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1601 }
Peter Zijlstrad08b3852006-09-25 23:30:57 -07001602
Linus Torvaldsde33c8d2009-01-29 17:46:42 -08001603 vma_link(mm, vma, prev, rb_link, rb_parent);
1604 file = vma->vm_file;
Oleg Nesterov4d3d5b42008-04-28 02:12:10 -07001605
1606 /* Once vma denies write, undo our temporary denial count */
1607 if (correct_wcount)
1608 atomic_inc(&inode->i_writecount);
1609out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001610 perf_event_mmap(vma);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02001611
Hugh Dickinsab50b8e2005-10-29 18:15:56 -07001612 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 if (vm_flags & VM_LOCKED) {
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001614 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
1615 vma == get_gate_vma(current->mm)))
KOSAKI Motohiro06f9d8c2010-03-05 13:41:43 -08001616 mm->locked_vm += (len >> PAGE_SHIFT);
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001617 else
1618 vma->vm_flags &= ~VM_LOCKED;
1619 }
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301620
Oleg Nesterovc7a3a882012-08-19 19:10:42 +02001621 if (file)
1622 uprobe_mmap(vma);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301623
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 return addr;
1625
1626unmap_and_free_vma:
1627 if (correct_wcount)
1628 atomic_inc(&inode->i_writecount);
1629 vma->vm_file = NULL;
1630 fput(file);
1631
1632 /* Undo any partial mapping done by a device driver. */
Hugh Dickinse0da3822005-04-19 13:29:15 -07001633 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1634 charged = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635free_vma:
1636 kmem_cache_free(vm_area_cachep, vma);
1637unacct_error:
1638 if (charged)
1639 vm_unacct_memory(charged);
1640 return error;
1641}
1642
Michel Lespinassedb4fbfb2012-12-11 16:01:49 -08001643unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1644{
1645 /*
1646 * We implement the search by looking for an rbtree node that
1647 * immediately follows a suitable gap. That is,
1648 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1649 * - gap_end = vma->vm_start >= info->low_limit + length;
1650 * - gap_end - gap_start >= length
1651 */
1652
1653 struct mm_struct *mm = current->mm;
1654 struct vm_area_struct *vma;
1655 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1656
1657 /* Adjust search length to account for worst case alignment overhead */
1658 length = info->length + info->align_mask;
1659 if (length < info->length)
1660 return -ENOMEM;
1661
1662 /* Adjust search limits by the desired length */
1663 if (info->high_limit < length)
1664 return -ENOMEM;
1665 high_limit = info->high_limit - length;
1666
1667 if (info->low_limit > high_limit)
1668 return -ENOMEM;
1669 low_limit = info->low_limit + length;
1670
1671 /* Check if rbtree root looks promising */
1672 if (RB_EMPTY_ROOT(&mm->mm_rb))
1673 goto check_highest;
1674 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1675 if (vma->rb_subtree_gap < length)
1676 goto check_highest;
1677
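	/*
	 * Walk down the tree: each node's rb_subtree_gap caches the largest
	 * gap below any vma in its subtree, so whole subtrees that cannot
	 * contain a big enough gap are skipped. Preferring the left subtree
	 * first yields the lowest suitable address.
	 */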
1678 while (true) {
1679 /* Visit left subtree if it looks promising */
1680 gap_end = vma->vm_start;
1681 if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1682 struct vm_area_struct *left =
1683 rb_entry(vma->vm_rb.rb_left,
1684 struct vm_area_struct, vm_rb);
1685 if (left->rb_subtree_gap >= length) {
1686 vma = left;
1687 continue;
1688 }
1689 }
1690
1691 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1692check_current:
1693 /* Check if current node has a suitable gap */
1694 if (gap_start > high_limit)
1695 return -ENOMEM;
1696 if (gap_end >= low_limit && gap_end - gap_start >= length)
1697 goto found;
1698
1699 /* Visit right subtree if it looks promising */
1700 if (vma->vm_rb.rb_right) {
1701 struct vm_area_struct *right =
1702 rb_entry(vma->vm_rb.rb_right,
1703 struct vm_area_struct, vm_rb);
1704 if (right->rb_subtree_gap >= length) {
1705 vma = right;
1706 continue;
1707 }
1708 }
1709
1710 /* Go back up the rbtree to find next candidate node */
1711 while (true) {
1712 struct rb_node *prev = &vma->vm_rb;
1713 if (!rb_parent(prev))
1714 goto check_highest;
1715 vma = rb_entry(rb_parent(prev),
1716 struct vm_area_struct, vm_rb);
1717 if (prev == vma->vm_rb.rb_left) {
1718 gap_start = vma->vm_prev->vm_end;
1719 gap_end = vma->vm_start;
1720 goto check_current;
1721 }
1722 }
1723 }
1724
1725check_highest:
1726 /* Check highest gap, which does not precede any rbtree node */
1727 gap_start = mm->highest_vm_end;
1728 gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
1729 if (gap_start > high_limit)
1730 return -ENOMEM;
1731
1732found:
1733 /* We found a suitable gap. Clip it with the original low_limit. */
1734 if (gap_start < info->low_limit)
1735 gap_start = info->low_limit;
1736
1737 /* Adjust gap address to the desired alignment */
1738 gap_start += (info->align_offset - gap_start) & info->align_mask;
1739
1740 VM_BUG_ON(gap_start + info->length > info->high_limit);
1741 VM_BUG_ON(gap_start + info->length > gap_end);
1742 return gap_start;
1743}
1744
1745unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1746{
1747 struct mm_struct *mm = current->mm;
1748 struct vm_area_struct *vma;
1749 unsigned long length, low_limit, high_limit, gap_start, gap_end;
1750
1751 /* Adjust search length to account for worst case alignment overhead */
1752 length = info->length + info->align_mask;
1753 if (length < info->length)
1754 return -ENOMEM;
1755
1756 /*
1757 * Adjust search limits by the desired length.
1758 * See implementation comment at top of unmapped_area().
1759 */
1760 gap_end = info->high_limit;
1761 if (gap_end < length)
1762 return -ENOMEM;
1763 high_limit = gap_end - length;
1764
1765 if (info->low_limit > high_limit)
1766 return -ENOMEM;
1767 low_limit = info->low_limit + length;
1768
1769 /* Check highest gap, which does not precede any rbtree node */
1770 gap_start = mm->highest_vm_end;
1771 if (gap_start <= high_limit)
1772 goto found_highest;
1773
1774 /* Check if rbtree root looks promising */
1775 if (RB_EMPTY_ROOT(&mm->mm_rb))
1776 return -ENOMEM;
1777 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1778 if (vma->rb_subtree_gap < length)
1779 return -ENOMEM;
1780
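	/*
	 * Mirror of the search in unmapped_area(): walk the tree using the
	 * cached rb_subtree_gap values, but prefer the right subtree first
	 * so the highest suitable gap (closest to high_limit) is found.
	 */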
1781 while (true) {
1782 /* Visit right subtree if it looks promising */
1783 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1784 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1785 struct vm_area_struct *right =
1786 rb_entry(vma->vm_rb.rb_right,
1787 struct vm_area_struct, vm_rb);
1788 if (right->rb_subtree_gap >= length) {
1789 vma = right;
1790 continue;
1791 }
1792 }
1793
1794check_current:
1795 /* Check if current node has a suitable gap */
1796 gap_end = vma->vm_start;
1797 if (gap_end < low_limit)
1798 return -ENOMEM;
1799 if (gap_start <= high_limit && gap_end - gap_start >= length)
1800 goto found;
1801
1802 /* Visit left subtree if it looks promising */
1803 if (vma->vm_rb.rb_left) {
1804 struct vm_area_struct *left =
1805 rb_entry(vma->vm_rb.rb_left,
1806 struct vm_area_struct, vm_rb);
1807 if (left->rb_subtree_gap >= length) {
1808 vma = left;
1809 continue;
1810 }
1811 }
1812
1813 /* Go back up the rbtree to find next candidate node */
1814 while (true) {
1815 struct rb_node *prev = &vma->vm_rb;
1816 if (!rb_parent(prev))
1817 return -ENOMEM;
1818 vma = rb_entry(rb_parent(prev),
1819 struct vm_area_struct, vm_rb);
1820 if (prev == vma->vm_rb.rb_right) {
1821 gap_start = vma->vm_prev ?
1822 vma->vm_prev->vm_end : 0;
1823 goto check_current;
1824 }
1825 }
1826 }
1827
1828found:
1829 /* We found a suitable gap. Clip it with the original high_limit. */
1830 if (gap_end > info->high_limit)
1831 gap_end = info->high_limit;
1832
1833found_highest:
1834 /* Compute highest gap address at the desired alignment */
1835 gap_end -= info->length;
1836 gap_end -= (gap_end - info->align_offset) & info->align_mask;
1837
1838 VM_BUG_ON(gap_end < info->low_limit);
1839 VM_BUG_ON(gap_end < gap_start);
1840 return gap_end;
1841}
1842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843/* Get an address range which is currently unmapped.
1844 * For shmat() with addr=0.
1845 *
1846 * Ugly calling convention alert:
1847 * Return value with the low bits set means error value,
1848 * ie
1849 * if (ret & ~PAGE_MASK)
1850 * error = ret;
1851 *
1852 * This function "knows" that -ENOMEM has the bits set.
1853 */
1854#ifndef HAVE_ARCH_UNMAPPED_AREA
1855unsigned long
1856arch_get_unmapped_area(struct file *filp, unsigned long addr,
1857 unsigned long len, unsigned long pgoff, unsigned long flags)
1858{
1859 struct mm_struct *mm = current->mm;
1860 struct vm_area_struct *vma;
Michel Lespinassedb4fbfb2012-12-11 16:01:49 -08001861 struct vm_unmapped_area_info info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862
Akira Takeuchi3cbafaa2013-11-12 15:08:21 -08001863 if (len > TASK_SIZE - mmap_min_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 return -ENOMEM;
1865
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001866 if (flags & MAP_FIXED)
1867 return addr;
1868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 if (addr) {
1870 addr = PAGE_ALIGN(addr);
1871 vma = find_vma(mm, addr);
Akira Takeuchi3cbafaa2013-11-12 15:08:21 -08001872 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 (!vma || addr + len <= vma->vm_start))
1874 return addr;
1875 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
Michel Lespinassedb4fbfb2012-12-11 16:01:49 -08001877 info.flags = 0;
1878 info.length = len;
1879 info.low_limit = TASK_UNMAPPED_BASE;
1880 info.high_limit = TASK_SIZE;
1881 info.align_mask = 0;
1882 return vm_unmapped_area(&info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883}
1884#endif
1885
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001886void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887{
1888 /*
1889 * Is this a new hole at the lowest possible address?
1890 */
Xiao Guangrongf44d2192012-03-21 16:33:56 -07001891 if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001892 mm->free_area_cache = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893}
1894
1895/*
1896 * This mmap-allocator allocates new areas top-down from below the
1897 * stack's low limit (the base):
1898 */
1899#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1900unsigned long
1901arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1902 const unsigned long len, const unsigned long pgoff,
1903 const unsigned long flags)
1904{
1905 struct vm_area_struct *vma;
1906 struct mm_struct *mm = current->mm;
Michel Lespinassedb4fbfb2012-12-11 16:01:49 -08001907 unsigned long addr = addr0;
1908 struct vm_unmapped_area_info info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
1910 /* requested length too big for entire address space */
Akira Takeuchi3cbafaa2013-11-12 15:08:21 -08001911 if (len > TASK_SIZE - mmap_min_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 return -ENOMEM;
1913
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001914 if (flags & MAP_FIXED)
1915 return addr;
1916
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 /* requesting a specific address */
1918 if (addr) {
1919 addr = PAGE_ALIGN(addr);
1920 vma = find_vma(mm, addr);
Akira Takeuchi3cbafaa2013-11-12 15:08:21 -08001921 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 (!vma || addr + len <= vma->vm_start))
1923 return addr;
1924 }
1925
Michel Lespinassedb4fbfb2012-12-11 16:01:49 -08001926 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1927 info.length = len;
Akira Takeuchi3cbafaa2013-11-12 15:08:21 -08001928 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
Michel Lespinassedb4fbfb2012-12-11 16:01:49 -08001929 info.high_limit = mm->mmap_base;
1930 info.align_mask = 0;
1931 addr = vm_unmapped_area(&info);
Xiao Guangrongb716ad92012-03-21 16:33:56 -07001932
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 /*
1934 * A failed mmap() very likely causes application failure,
1935 * so fall back to the bottom-up function here. This scenario
1936 * can happen with large stack limits and large mmap()
1937 * allocations.
1938 */
Michel Lespinassedb4fbfb2012-12-11 16:01:49 -08001939 if (addr & ~PAGE_MASK) {
1940 VM_BUG_ON(addr != -ENOMEM);
1941 info.flags = 0;
1942 info.low_limit = TASK_UNMAPPED_BASE;
1943 info.high_limit = TASK_SIZE;
1944 addr = vm_unmapped_area(&info);
1945 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
1947 return addr;
1948}
1949#endif
1950
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001951void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952{
1953 /*
1954 * Is this a new hole at the highest possible address?
1955 */
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001956 if (addr > mm->free_area_cache)
1957 mm->free_area_cache = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
1959 /* don't allow allocations above current base */
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001960 if (mm->free_area_cache > mm->mmap_base)
1961 mm->free_area_cache = mm->mmap_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962}
1963
1964unsigned long
1965get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1966 unsigned long pgoff, unsigned long flags)
1967{
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001968 unsigned long (*get_area)(struct file *, unsigned long,
1969 unsigned long, unsigned long, unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
Al Viro9206de92009-12-03 15:23:11 -05001971 unsigned long error = arch_mmap_check(addr, len, flags);
1972 if (error)
1973 return error;
1974
1975 /* Careful about overflows.. */
1976 if (len > TASK_SIZE)
1977 return -ENOMEM;
1978
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001979 get_area = current->mm->get_unmapped_area;
1980 if (file && file->f_op && file->f_op->get_unmapped_area)
1981 get_area = file->f_op->get_unmapped_area;
1982 addr = get_area(file, addr, len, pgoff, flags);
1983 if (IS_ERR_VALUE(addr))
1984 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
Linus Torvalds07ab67c2005-05-19 22:43:37 -07001986 if (addr > TASK_SIZE - len)
1987 return -ENOMEM;
1988 if (addr & ~PAGE_MASK)
1989 return -EINVAL;
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001990
Al Viro9ac4ed42012-05-30 17:13:15 -04001991 addr = arch_rebalance_pgtables(addr, len);
1992 error = security_mmap_addr(addr);
1993 return error ? error : addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994}
1995
1996EXPORT_SYMBOL(get_unmapped_area);
1997
1998/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
ZhenwenXu48aae422009-01-06 14:40:21 -08001999struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000{
2001 struct vm_area_struct *vma = NULL;
2002
Rajman Mekaco841e31e2012-05-29 15:06:21 -07002003 /* Check the cache first. */
2004 /* (Cache hit rate is typically around 35%.) */
Jan Stancekb6a9b7f2013-04-04 11:35:10 -07002005 vma = ACCESS_ONCE(mm->mmap_cache);
Rajman Mekaco841e31e2012-05-29 15:06:21 -07002006 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
2007 struct rb_node *rb_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
Rajman Mekaco841e31e2012-05-29 15:06:21 -07002009 rb_node = mm->mm_rb.rb_node;
2010 vma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011
Rajman Mekaco841e31e2012-05-29 15:06:21 -07002012 while (rb_node) {
2013 struct vm_area_struct *vma_tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
Rajman Mekaco841e31e2012-05-29 15:06:21 -07002015 vma_tmp = rb_entry(rb_node,
2016 struct vm_area_struct, vm_rb);
2017
2018 if (vma_tmp->vm_end > addr) {
2019 vma = vma_tmp;
2020 if (vma_tmp->vm_start <= addr)
2021 break;
2022 rb_node = rb_node->rb_left;
2023 } else
2024 rb_node = rb_node->rb_right;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 }
Rajman Mekaco841e31e2012-05-29 15:06:21 -07002026 if (vma)
2027 mm->mmap_cache = vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 }
2029 return vma;
2030}
2031
2032EXPORT_SYMBOL(find_vma);
2033
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08002034/*
2035 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08002036 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037struct vm_area_struct *
2038find_vma_prev(struct mm_struct *mm, unsigned long addr,
2039 struct vm_area_struct **pprev)
2040{
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08002041 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08002043 vma = find_vma(mm, addr);
Mikulas Patocka83cd9042012-03-04 19:52:03 -05002044 if (vma) {
2045 *pprev = vma->vm_prev;
2046 } else {
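		/*
		 * addr lies beyond the last vma (or the tree is empty): walk
		 * to the rightmost node so *pprev ends up as the last vma,
		 * or stays NULL for an empty mm.
		 */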
2047 struct rb_node *rb_node = mm->mm_rb.rb_node;
2048 *pprev = NULL;
2049 while (rb_node) {
2050 *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2051 rb_node = rb_node->rb_right;
2052 }
2053 }
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08002054 return vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055}
2056
2057/*
2058 * Verify that the stack growth is acceptable and
2059 * update accounting. This is shared with both the
2060 * grow-up and grow-down cases.
2061 */
ZhenwenXu48aae422009-01-06 14:40:21 -08002062static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063{
2064 struct mm_struct *mm = vma->vm_mm;
2065 struct rlimit *rlim = current->signal->rlim;
Linus Torvalds7d702b42015-01-11 11:33:57 -08002066 unsigned long new_start, actual_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
2068 /* address space limit tests */
akpm@osdl.org119f6572005-05-01 08:58:35 -07002069 if (!may_expand_vm(mm, grow))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 return -ENOMEM;
2071
2072 /* Stack limit test */
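	/* One page is excluded for growable mappings so the stack guard page
	 * does not count against RLIMIT_STACK. */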
Linus Torvalds7d702b42015-01-11 11:33:57 -08002073 actual_size = size;
2074 if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
2075 actual_size -= PAGE_SIZE;
2076 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 return -ENOMEM;
2078
2079 /* mlock limit tests */
2080 if (vma->vm_flags & VM_LOCKED) {
2081 unsigned long locked;
2082 unsigned long limit;
2083 locked = mm->locked_vm + grow;
Jiri Slaby59e99e52010-03-05 13:41:44 -08002084 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
2085 limit >>= PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 if (locked > limit && !capable(CAP_IPC_LOCK))
2087 return -ENOMEM;
2088 }
2089
Adam Litke0d59a012007-01-30 14:35:39 -08002090 /* Check to ensure the stack will not grow into a hugetlb-only region */
2091 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2092 vma->vm_end - size;
2093 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2094 return -EFAULT;
2095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 /*
2097 * Overcommit.. This must be the final test, as it will
2098 * update security statistics.
2099 */
Hugh Dickins05fa1992009-04-16 21:58:12 +01002100 if (security_vm_enough_memory_mm(mm, grow))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 return -ENOMEM;
2102
2103 /* Ok, everything looks good - let it rip */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 if (vma->vm_flags & VM_LOCKED)
2105 mm->locked_vm += grow;
Hugh Dickinsab50b8e2005-10-29 18:15:56 -07002106 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 return 0;
2108}
2109
Hugh Dickins46dea3d2005-10-29 18:16:20 -07002110#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111/*
Hugh Dickins46dea3d2005-10-29 18:16:20 -07002112 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2113 * vma is the last one with address > vma->vm_end. Have to extend vma.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 */
Hugh Dickins46dea3d2005-10-29 18:16:20 -07002115int expand_upwards(struct vm_area_struct *vma, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116{
2117 int error;
2118
2119 if (!(vma->vm_flags & VM_GROWSUP))
2120 return -EFAULT;
2121
2122 /*
2123 * We must make sure the anon_vma is allocated
2124 * so that the anon_vma locking is not a noop.
2125 */
2126 if (unlikely(anon_vma_prepare(vma)))
2127 return -ENOMEM;
Rik van Rielbb4a3402010-08-09 17:18:37 -07002128 vma_lock_anon_vma(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130 /*
2131 * vma->vm_start/vm_end cannot change under us because the caller
2132 * is required to hold the mmap_sem in read mode. We need the
2133 * anon_vma lock to serialize against concurrent expand_stacks.
Helge Deller06b32f32006-12-19 19:28:33 +01002134 * Also guard against wrapping around to address 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 */
Helge Deller06b32f32006-12-19 19:28:33 +01002136 if (address < PAGE_ALIGN(address+4))
2137 address = PAGE_ALIGN(address+4);
2138 else {
Rik van Rielbb4a3402010-08-09 17:18:37 -07002139 vma_unlock_anon_vma(vma);
Helge Deller06b32f32006-12-19 19:28:33 +01002140 return -ENOMEM;
2141 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 error = 0;
2143
2144 /* Somebody else might have raced and expanded it already */
2145 if (address > vma->vm_end) {
2146 unsigned long size, grow;
2147
2148 size = address - vma->vm_start;
2149 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2150
Hugh Dickins42c36f62011-05-09 17:44:42 -07002151 error = -ENOMEM;
2152 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2153 error = acct_stack_growth(vma, size, grow);
2154 if (!error) {
Michel Lespinasse41289972012-12-12 13:52:25 -08002155 /*
2156 * vma_gap_update() doesn't support concurrent
2157 * updates, but we only hold a shared mmap_sem
2158 * lock here, so we need to protect against
2159 * concurrent vma expansions.
2160 * vma_lock_anon_vma() doesn't help here, as
2161 * we don't guarantee that all growable vmas
2162 * in a mm share the same root anon vma.
2163 * So, we reuse mm->page_table_lock to guard
2164 * against concurrent vma expansions.
2165 */
2166 spin_lock(&vma->vm_mm->page_table_lock);
Michel Lespinassebf181b92012-10-08 16:31:39 -07002167 anon_vma_interval_tree_pre_update_vma(vma);
Hugh Dickins42c36f62011-05-09 17:44:42 -07002168 vma->vm_end = address;
Michel Lespinassebf181b92012-10-08 16:31:39 -07002169 anon_vma_interval_tree_post_update_vma(vma);
Michel Lespinassed3737182012-12-11 16:01:38 -08002170 if (vma->vm_next)
2171 vma_gap_update(vma->vm_next);
2172 else
2173 vma->vm_mm->highest_vm_end = address;
Michel Lespinasse41289972012-12-12 13:52:25 -08002174 spin_unlock(&vma->vm_mm->page_table_lock);
2175
Hugh Dickins42c36f62011-05-09 17:44:42 -07002176 perf_event_mmap(vma);
2177 }
Eric B Munson3af9e852010-05-18 15:30:49 +01002178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 }
Rik van Rielbb4a3402010-08-09 17:18:37 -07002180 vma_unlock_anon_vma(vma);
Andrea Arcangelib15d00b2011-01-13 15:46:59 -08002181 khugepaged_enter_vma_merge(vma);
Michel Lespinasseed8ea812012-10-08 16:31:45 -07002182 validate_mm(vma->vm_mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 return error;
2184}
Hugh Dickins46dea3d2005-10-29 18:16:20 -07002185#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187/*
2188 * vma is the first one with address < vma->vm_start. Have to extend vma.
2189 */
Michal Hockod05f3162011-05-24 17:11:44 -07002190int expand_downwards(struct vm_area_struct *vma,
Ollie Wildb6a2fea2007-07-19 01:48:16 -07002191 unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192{
2193 int error;
2194
2195 /*
2196 * We must make sure the anon_vma is allocated
2197 * so that the anon_vma locking is not a noop.
2198 */
2199 if (unlikely(anon_vma_prepare(vma)))
2200 return -ENOMEM;
Eric Paris88694772007-11-26 18:47:26 -05002201
2202 address &= PAGE_MASK;
Al Viroe5467852012-05-30 13:30:51 -04002203 error = security_mmap_addr(address);
Eric Paris88694772007-11-26 18:47:26 -05002204 if (error)
2205 return error;
2206
Rik van Rielbb4a3402010-08-09 17:18:37 -07002207 vma_lock_anon_vma(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208
2209 /*
2210 * vma->vm_start/vm_end cannot change under us because the caller
2211 * is required to hold the mmap_sem in read mode. We need the
2212 * anon_vma lock to serialize against concurrent expand_stacks.
2213 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
2215 /* Somebody else might have raced and expanded it already */
2216 if (address < vma->vm_start) {
2217 unsigned long size, grow;
2218
2219 size = vma->vm_end - address;
2220 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2221
Linus Torvaldsa626ca62011-04-13 08:07:28 -07002222 error = -ENOMEM;
2223 if (grow <= vma->vm_pgoff) {
2224 error = acct_stack_growth(vma, size, grow);
2225 if (!error) {
Michel Lespinasse41289972012-12-12 13:52:25 -08002226 /*
2227 * vma_gap_update() doesn't support concurrent
2228 * updates, but we only hold a shared mmap_sem
2229 * lock here, so we need to protect against
2230 * concurrent vma expansions.
2231 * vma_lock_anon_vma() doesn't help here, as
2232 * we don't guarantee that all growable vmas
2233 * in a mm share the same root anon vma.
2234 * So, we reuse mm->page_table_lock to guard
2235 * against concurrent vma expansions.
2236 */
2237 spin_lock(&vma->vm_mm->page_table_lock);
Michel Lespinassebf181b92012-10-08 16:31:39 -07002238 anon_vma_interval_tree_pre_update_vma(vma);
Linus Torvaldsa626ca62011-04-13 08:07:28 -07002239 vma->vm_start = address;
2240 vma->vm_pgoff -= grow;
Michel Lespinassebf181b92012-10-08 16:31:39 -07002241 anon_vma_interval_tree_post_update_vma(vma);
Michel Lespinassed3737182012-12-11 16:01:38 -08002242 vma_gap_update(vma);
Michel Lespinasse41289972012-12-12 13:52:25 -08002243 spin_unlock(&vma->vm_mm->page_table_lock);
2244
Linus Torvaldsa626ca62011-04-13 08:07:28 -07002245 perf_event_mmap(vma);
2246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 }
2248 }
Rik van Rielbb4a3402010-08-09 17:18:37 -07002249 vma_unlock_anon_vma(vma);
Andrea Arcangelib15d00b2011-01-13 15:46:59 -08002250 khugepaged_enter_vma_merge(vma);
Michel Lespinasseed8ea812012-10-08 16:31:45 -07002251 validate_mm(vma->vm_mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 return error;
2253}
2254
Linus Torvalds09884962013-02-27 08:36:04 -08002255/*
2256 * Note how expand_stack() refuses to expand the stack all the way to
2257 * abut the next virtual mapping, *unless* that mapping itself is also
2258 * a stack mapping. We want to leave room for a guard page, after all
2259 * (the guard page itself is not added here, that is done by the
2260 * actual page faulting logic)
2261 *
2262 * This matches the behavior of the guard page logic (see mm/memory.c:
2263 * check_stack_guard_page()), which only allows the guard page to be
2264 * removed under these circumstances.
2265 */
Ollie Wildb6a2fea2007-07-19 01:48:16 -07002266#ifdef CONFIG_STACK_GROWSUP
2267int expand_stack(struct vm_area_struct *vma, unsigned long address)
2268{
Linus Torvalds09884962013-02-27 08:36:04 -08002269 struct vm_area_struct *next;
2270
2271 address &= PAGE_MASK;
2272 next = vma->vm_next;
2273 if (next && next->vm_start == address + PAGE_SIZE) {
2274 if (!(next->vm_flags & VM_GROWSUP))
2275 return -ENOMEM;
2276 }
Ollie Wildb6a2fea2007-07-19 01:48:16 -07002277 return expand_upwards(vma, address);
2278}
2279
2280struct vm_area_struct *
2281find_extend_vma(struct mm_struct *mm, unsigned long addr)
2282{
2283 struct vm_area_struct *vma, *prev;
2284
2285 addr &= PAGE_MASK;
2286 vma = find_vma_prev(mm, addr, &prev);
2287 if (vma && (vma->vm_start <= addr))
2288 return vma;
Denys Vlasenko1c127182008-11-12 01:24:41 +01002289 if (!prev || expand_stack(prev, addr))
Ollie Wildb6a2fea2007-07-19 01:48:16 -07002290 return NULL;
Michel Lespinassecea10a12013-02-22 16:32:44 -08002291 if (prev->vm_flags & VM_LOCKED)
2292 __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
Ollie Wildb6a2fea2007-07-19 01:48:16 -07002293 return prev;
2294}
2295#else
2296int expand_stack(struct vm_area_struct *vma, unsigned long address)
2297{
Linus Torvalds09884962013-02-27 08:36:04 -08002298 struct vm_area_struct *prev;
2299
2300 address &= PAGE_MASK;
2301 prev = vma->vm_prev;
2302 if (prev && prev->vm_end == address) {
2303 if (!(prev->vm_flags & VM_GROWSDOWN))
2304 return -ENOMEM;
2305 }
Ollie Wildb6a2fea2007-07-19 01:48:16 -07002306 return expand_downwards(vma, address);
2307}
2308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309struct vm_area_struct *
2310find_extend_vma(struct mm_struct * mm, unsigned long addr)
2311{
2312 struct vm_area_struct * vma;
2313 unsigned long start;
2314
2315 addr &= PAGE_MASK;
2316 vma = find_vma(mm,addr);
2317 if (!vma)
2318 return NULL;
2319 if (vma->vm_start <= addr)
2320 return vma;
2321 if (!(vma->vm_flags & VM_GROWSDOWN))
2322 return NULL;
2323 start = vma->vm_start;
2324 if (expand_stack(vma, addr))
2325 return NULL;
Michel Lespinassecea10a12013-02-22 16:32:44 -08002326 if (vma->vm_flags & VM_LOCKED)
2327 __mlock_vma_pages_range(vma, addr, start, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 return vma;
2329}
2330#endif
2331
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332/*
Hugh Dickins2c0b3812005-10-29 18:15:56 -07002333 * Ok - we have the memory areas we should free on the vma list,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 * so release them, and do the vma updates.
Hugh Dickins2c0b3812005-10-29 18:15:56 -07002335 *
2336 * Called with the mm semaphore held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 */
Hugh Dickins2c0b3812005-10-29 18:15:56 -07002338static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339{
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07002340 unsigned long nr_accounted = 0;
2341
Hugh Dickins365e9c872005-10-29 18:16:18 -07002342 /* Update high watermark before we lower total_vm */
2343 update_hiwater_vm(mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 do {
Hugh Dickins2c0b3812005-10-29 18:15:56 -07002345 long nrpages = vma_pages(vma);
2346
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07002347 if (vma->vm_flags & VM_ACCOUNT)
2348 nr_accounted += nrpages;
Hugh Dickins2c0b3812005-10-29 18:15:56 -07002349 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
Hugh Dickinsa8fb5612005-10-29 18:15:57 -07002350 vma = remove_vma(vma);
Hugh Dickins146425a2005-04-19 13:29:18 -07002351 } while (vma);
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07002352 vm_unacct_memory(nr_accounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 validate_mm(mm);
2354}
2355
2356/*
2357 * Get rid of page table information in the indicated region.
2358 *
Paolo 'Blaisorblade' Giarrussof10df682005-09-21 09:55:37 -07002359 * Called with the mm semaphore held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 */
2361static void unmap_region(struct mm_struct *mm,
Hugh Dickinse0da3822005-04-19 13:29:15 -07002362 struct vm_area_struct *vma, struct vm_area_struct *prev,
2363 unsigned long start, unsigned long end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364{
Hugh Dickinse0da3822005-04-19 13:29:15 -07002365 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07002366 struct mmu_gather tlb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367
2368 lru_add_drain();
Linus Torvalds8e220cf2013-08-15 11:42:25 -07002369 tlb_gather_mmu(&tlb, mm, start, end);
Hugh Dickins365e9c872005-10-29 18:16:18 -07002370 update_hiwater_rss(mm);
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07002371 unmap_vmas(&tlb, vma, start, end);
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07002372 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
Hugh Dickins6ee86302013-04-29 15:07:44 -07002373 next ? next->vm_start : USER_PGTABLES_CEILING);
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07002374 tlb_finish_mmu(&tlb, start, end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375}
2376
2377/*
2378 * Create a list of vma's touched by the unmap, removing them from the mm's
2379 * vma list as we go..
2380 */
2381static void
2382detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2383 struct vm_area_struct *prev, unsigned long end)
2384{
2385 struct vm_area_struct **insertion_point;
2386 struct vm_area_struct *tail_vma = NULL;
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07002387 unsigned long addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388
2389 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
Linus Torvalds297c5ee2010-08-20 16:24:55 -07002390 vma->vm_prev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 do {
Michel Lespinassed3737182012-12-11 16:01:38 -08002392 vma_rb_erase(vma, &mm->mm_rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 mm->map_count--;
2394 tail_vma = vma;
2395 vma = vma->vm_next;
2396 } while (vma && vma->vm_start < end);
2397 *insertion_point = vma;
Michel Lespinassed3737182012-12-11 16:01:38 -08002398 if (vma) {
Linus Torvalds297c5ee2010-08-20 16:24:55 -07002399 vma->vm_prev = prev;
Michel Lespinassed3737182012-12-11 16:01:38 -08002400 vma_gap_update(vma);
2401 } else
2402 mm->highest_vm_end = prev ? prev->vm_end : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 tail_vma->vm_next = NULL;
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07002404 if (mm->unmap_area == arch_unmap_area)
2405 addr = prev ? prev->vm_end : mm->mmap_base;
2406 else
2407 addr = vma ? vma->vm_start : mm->mmap_base;
2408 mm->unmap_area(mm, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 mm->mmap_cache = NULL; /* Kill the cache. */
2410}
2411
2412/*
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002413 * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
2414 * munmap path where it doesn't make sense to fail.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 */
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002416static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 unsigned long addr, int new_below)
2418{
2419 struct mempolicy *pol;
2420 struct vm_area_struct *new;
Rik van Riel5beb4932010-03-05 13:42:07 -08002421 int err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422
Andi Kleena5516432008-07-23 21:27:41 -07002423 if (is_vm_hugetlb_page(vma) && (addr &
2424 ~(huge_page_mask(hstate_vma(vma)))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 return -EINVAL;
2426
Christoph Lametere94b1762006-12-06 20:33:17 -08002427 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 if (!new)
Rik van Riel5beb4932010-03-05 13:42:07 -08002429 goto out_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430
2431 /* most fields are the same, copy all, and then fixup */
2432 *new = *vma;
2433
Rik van Riel5beb4932010-03-05 13:42:07 -08002434 INIT_LIST_HEAD(&new->anon_vma_chain);
2435
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 if (new_below)
2437 new->vm_end = addr;
2438 else {
2439 new->vm_start = addr;
2440 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2441 }
2442
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002443 pol = mpol_dup(vma_policy(vma));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 if (IS_ERR(pol)) {
Rik van Riel5beb4932010-03-05 13:42:07 -08002445 err = PTR_ERR(pol);
2446 goto out_free_vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 }
2448 vma_set_policy(new, pol);
2449
Rik van Riel5beb4932010-03-05 13:42:07 -08002450 if (anon_vma_clone(new, vma))
2451 goto out_free_mpol;
2452
Konstantin Khlebnikove9714ac2012-10-08 16:28:54 -07002453 if (new->vm_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 get_file(new->vm_file);
2455
2456 if (new->vm_ops && new->vm_ops->open)
2457 new->vm_ops->open(new);
2458
2459 if (new_below)
Rik van Riel5beb4932010-03-05 13:42:07 -08002460 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 ((addr - new->vm_start) >> PAGE_SHIFT), new);
2462 else
Rik van Riel5beb4932010-03-05 13:42:07 -08002463 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464
Rik van Riel5beb4932010-03-05 13:42:07 -08002465 /* Success. */
2466 if (!err)
2467 return 0;
2468
2469 /* Clean everything up if vma_adjust failed. */
Rik van Riel58927532010-04-26 12:33:03 -04002470 if (new->vm_ops && new->vm_ops->close)
2471 new->vm_ops->close(new);
Konstantin Khlebnikove9714ac2012-10-08 16:28:54 -07002472 if (new->vm_file)
Rik van Riel5beb4932010-03-05 13:42:07 -08002473 fput(new->vm_file);
Andrea Arcangeli2aeadc32010-09-22 13:05:12 -07002474 unlink_anon_vmas(new);
Rik van Riel5beb4932010-03-05 13:42:07 -08002475 out_free_mpol:
2476 mpol_put(pol);
2477 out_free_vma:
2478 kmem_cache_free(vm_area_cachep, new);
2479 out_err:
2480 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481}
2482
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002483/*
2484 * Split a vma into two pieces at address 'addr', a new vma is allocated
2485 * either for the first part or the tail.
2486 */
2487int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2488 unsigned long addr, int new_below)
2489{
2490 if (mm->map_count >= sysctl_max_map_count)
2491 return -ENOMEM;
2492
2493 return __split_vma(mm, vma, addr, new_below);
2494}
2495
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496/* Munmap is split into 2 main parts -- this part which finds
2497 * what needs doing, and the areas themselves, which do the
2498 * work. This now handles partial unmappings.
2499 * Jeremy Fitzhardinge <jeremy@goop.org>
2500 */
2501int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2502{
2503 unsigned long end;
Hugh Dickins146425a2005-04-19 13:29:18 -07002504 struct vm_area_struct *vma, *prev, *last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
2506 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2507 return -EINVAL;
2508
2509 if ((len = PAGE_ALIGN(len)) == 0)
2510 return -EINVAL;
2511
2512 /* Find the first overlapping VMA */
Linus Torvalds9be34c92011-06-16 00:35:09 -07002513 vma = find_vma(mm, start);
Hugh Dickins146425a2005-04-19 13:29:18 -07002514 if (!vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 return 0;
Linus Torvalds9be34c92011-06-16 00:35:09 -07002516 prev = vma->vm_prev;
Hugh Dickins146425a2005-04-19 13:29:18 -07002517 /* we have start < vma->vm_end */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518
2519 /* if it doesn't overlap, we have nothing.. */
2520 end = start + len;
Hugh Dickins146425a2005-04-19 13:29:18 -07002521 if (vma->vm_start >= end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 return 0;
2523
2524 /*
2525 * If we need to split any vma, do it now to save pain later.
2526 *
2527 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2528 * unmapped vm_area_struct will remain in use: so lower split_vma
2529 * places tmp vma above, and higher split_vma places tmp vma below.
2530 */
Hugh Dickins146425a2005-04-19 13:29:18 -07002531 if (start > vma->vm_start) {
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002532 int error;
2533
2534 /*
2535 * Make sure that map_count on return from munmap() will
2536 * not exceed its limit; but let map_count go just above
2537 * its limit temporarily, to help free resources as expected.
2538 */
2539 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2540 return -ENOMEM;
2541
2542 error = __split_vma(mm, vma, start, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 if (error)
2544 return error;
Hugh Dickins146425a2005-04-19 13:29:18 -07002545 prev = vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 }
2547
2548 /* Does it split the last one? */
2549 last = find_vma(mm, end);
2550 if (last && end > last->vm_start) {
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002551 int error = __split_vma(mm, last, end, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 if (error)
2553 return error;
2554 }
Hugh Dickins146425a2005-04-19 13:29:18 -07002555 vma = prev? prev->vm_next: mm->mmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
2557 /*
Rik van Rielba470de2008-10-18 20:26:50 -07002558 * unlock any mlock()ed ranges before detaching vmas
2559 */
2560 if (mm->locked_vm) {
2561 struct vm_area_struct *tmp = vma;
2562 while (tmp && tmp->vm_start < end) {
2563 if (tmp->vm_flags & VM_LOCKED) {
2564 mm->locked_vm -= vma_pages(tmp);
2565 munlock_vma_pages_all(tmp);
2566 }
2567 tmp = tmp->vm_next;
2568 }
2569 }
2570
2571 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 * Remove the vma's, and unmap the actual pages
2573 */
Hugh Dickins146425a2005-04-19 13:29:18 -07002574 detach_vmas_to_be_unmapped(mm, vma, prev, end);
2575 unmap_region(mm, vma, prev, start, end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576
2577 /* Fix up all other VM information */
Hugh Dickins2c0b3812005-10-29 18:15:56 -07002578 remove_vma_list(mm, vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579
2580 return 0;
2581}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582
Al Virobfce2812012-04-20 21:57:04 -04002583int vm_munmap(unsigned long start, size_t len)
Linus Torvaldsa46ef992012-04-20 16:20:01 -07002584{
2585 int ret;
Al Virobfce2812012-04-20 21:57:04 -04002586 struct mm_struct *mm = current->mm;
Linus Torvaldsa46ef992012-04-20 16:20:01 -07002587
2588 down_write(&mm->mmap_sem);
2589 ret = do_munmap(mm, start, len);
2590 up_write(&mm->mmap_sem);
2591 return ret;
2592}
2593EXPORT_SYMBOL(vm_munmap);
2594
Heiko Carstens6a6160a2009-01-14 14:14:15 +01002595SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 profile_munmap(addr);
Al Virobfce2812012-04-20 21:57:04 -04002598 return vm_munmap(addr, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599}
2600
2601static inline void verify_mm_writelocked(struct mm_struct *mm)
2602{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002603#ifdef CONFIG_DEBUG_VM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2605 WARN_ON(1);
2606 up_read(&mm->mmap_sem);
2607 }
2608#endif
2609}
2610
2611/*
2612 * This is really a simplified "do_mmap". It only handles
2613 * anonymous maps. Eventually we may be able to do some
2614 * brk-specific accounting here.
2615 */
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -07002616static unsigned long do_brk(unsigned long addr, unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617{
2618 struct mm_struct * mm = current->mm;
2619 struct vm_area_struct * vma, * prev;
2620 unsigned long flags;
2621 struct rb_node ** rb_link, * rb_parent;
2622 pgoff_t pgoff = addr >> PAGE_SHIFT;
Kirill Korotaev3a459752006-09-07 14:17:04 +04002623 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624
2625 len = PAGE_ALIGN(len);
2626 if (!len)
2627 return addr;
2628
Kirill Korotaev3a459752006-09-07 14:17:04 +04002629 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2630
Al Viro2c6a1012009-12-03 19:40:46 -05002631 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2632 if (error & ~PAGE_MASK)
Kirill Korotaev3a459752006-09-07 14:17:04 +04002633 return error;
2634
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 /*
2636 * mlock MCL_FUTURE?
2637 */
2638 if (mm->def_flags & VM_LOCKED) {
2639 unsigned long locked, lock_limit;
Chris Wright93ea1d02005-05-01 08:58:38 -07002640 locked = len >> PAGE_SHIFT;
2641 locked += mm->locked_vm;
Jiri Slaby59e99e52010-03-05 13:41:44 -08002642 lock_limit = rlimit(RLIMIT_MEMLOCK);
Chris Wright93ea1d02005-05-01 08:58:38 -07002643 lock_limit >>= PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2645 return -EAGAIN;
2646 }
2647
2648 /*
2649 * mm->mmap_sem is required to protect against another thread
2650 * changing the mappings in case we sleep.
2651 */
2652 verify_mm_writelocked(mm);
2653
2654 /*
2655 * Clear old maps. This also does some error checking for us
2656 */
2657 munmap_back:
Hugh Dickins6597d782012-10-08 16:29:07 -07002658 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 if (do_munmap(mm, addr, len))
2660 return -ENOMEM;
2661 goto munmap_back;
2662 }
2663
2664 /* Check against address space limits *after* clearing old maps... */
akpm@osdl.org119f6572005-05-01 08:58:35 -07002665 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 return -ENOMEM;
2667
2668 if (mm->map_count > sysctl_max_map_count)
2669 return -ENOMEM;
2670
Al Viro191c5422012-02-13 03:58:52 +00002671 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 return -ENOMEM;
2673
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 /* Can we just expand an old private anonymous mapping? */
Rik van Rielba470de2008-10-18 20:26:50 -07002675 vma = vma_merge(mm, prev, addr, addr + len, flags,
Colin Cross6ebfe582013-06-26 17:26:01 -07002676 NULL, NULL, pgoff, NULL, NULL);
Rik van Rielba470de2008-10-18 20:26:50 -07002677 if (vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 goto out;
2679
2680 /*
2681 * create a vma struct for an anonymous mapping
2682 */
Pekka Enbergc5e3b832006-03-25 03:06:43 -08002683 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 if (!vma) {
2685 vm_unacct_memory(len >> PAGE_SHIFT);
2686 return -ENOMEM;
2687 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688
Rik van Riel5beb4932010-03-05 13:42:07 -08002689 INIT_LIST_HEAD(&vma->anon_vma_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 vma->vm_mm = mm;
2691 vma->vm_start = addr;
2692 vma->vm_end = addr + len;
2693 vma->vm_pgoff = pgoff;
2694 vma->vm_flags = flags;
Coly Li3ed75eb2007-10-18 23:39:15 -07002695 vma->vm_page_prot = vm_get_page_prot(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 vma_link(mm, vma, prev, rb_link, rb_parent);
2697out:
Eric B Munson3af9e852010-05-18 15:30:49 +01002698 perf_event_mmap(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 mm->total_vm += len >> PAGE_SHIFT;
Michel Lespinasse128557f2013-02-22 16:32:40 -08002700 if (flags & VM_LOCKED)
2701 mm->locked_vm += (len >> PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 return addr;
2703}
2704
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -07002705unsigned long vm_brk(unsigned long addr, unsigned long len)
2706{
2707 struct mm_struct *mm = current->mm;
2708 unsigned long ret;
Michel Lespinasse128557f2013-02-22 16:32:40 -08002709 bool populate;
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -07002710
2711 down_write(&mm->mmap_sem);
2712 ret = do_brk(addr, len);
Michel Lespinasse128557f2013-02-22 16:32:40 -08002713 populate = ((mm->def_flags & VM_LOCKED) != 0);
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -07002714 up_write(&mm->mmap_sem);
Michel Lespinasse128557f2013-02-22 16:32:40 -08002715 if (populate)
2716 mm_populate(addr, len);
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -07002717 return ret;
2718}
2719EXPORT_SYMBOL(vm_brk);
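
/*
 * Illustrative sketch (hypothetical, loosely modelled on the binfmt
 * loaders): growing an anonymous, zero-filled region with vm_brk().
 * Like do_brk() above, vm_brk() returns the start address on success and
 * a non-page-aligned error code on failure.
 */
static unsigned long __maybe_unused example_zero_map(unsigned long start,
						     unsigned long end)
{
	unsigned long addr;

	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	if (end <= start)
		return start;

	addr = vm_brk(start, end - start);
	if (addr & ~PAGE_MASK)		/* an error code, not an address */
		return addr;

	return start;
}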

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;

	/* mm's last user has gone, and it's about to be pulled down */
	mmu_notifier_release(mm);

	if (mm->locked_vm) {
		vma = mm->mmap;
		while (vma) {
			if (vma->vm_flags & VM_LOCKED)
				munlock_vma_pages_all(vma);
			vma = vma->vm_next;
		}
	}

	arch_exit_mmap(mm);

	vma = mm->mmap;
	if (!vma)	/* Can happen if dup_mmap() received an OOM */
		return;

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu(&tlb, mm, 0, -1);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use -1 here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, vma, 0, -1);

	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);

	/*
	 * Walk the list again, actually closing and freeing it,
	 * with preemption enabled, without holding any MM locks.
	 */
	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);
	}
	vm_unacct_memory(nr_accounted);

	WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS + PMD_SIZE - 1) >> PMD_SHIFT);
}

/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree.  If vm_file is non-NULL
 * then i_mmap_mutex is taken here.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first wfault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap_pgoff and in do_brk.
	 */
	if (!vma->vm_file) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}
	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		return -ENOMEM;
	if ((vma->vm_flags & VM_ACCOUNT) &&
	    security_vm_enough_memory_mm(mm, vma_pages(vma)))
		return -ENOMEM;

	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}

/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	struct mempolicy *pol;
	bool faulted_in_anon_vma = true;

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(!vma->vm_file && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
		return NULL;	/* should never get here */
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			vma_get_anon_name(vma));
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen. mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON(faulted_in_anon_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (new_vma) {
			*new_vma = *vma;
			new_vma->vm_start = addr;
			new_vma->vm_end = addr + len;
			new_vma->vm_pgoff = pgoff;
			pol = mpol_dup(vma_policy(vma));
			if (IS_ERR(pol))
				goto out_free_vma;
			vma_set_policy(new_vma, pol);
			INIT_LIST_HEAD(&new_vma->anon_vma_chain);
			if (anon_vma_clone(new_vma, vma))
				goto out_free_mempol;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			vma_link(mm, new_vma, prev, rb_link, rb_parent);
			*need_rmap_locks = false;
		}
	}
	return new_vma;

 out_free_mempol:
	mpol_put(pol);
 out_free_vma:
	kmem_cache_free(vm_area_cachep, new_vma);
	return NULL;
}
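
/*
 * Illustrative sketch (hypothetical, condensed from the mremap move path):
 * copy_vma() is called before any page table entries are moved, and
 * need_rmap_locks tells move_page_tables() whether the rmap locks must be
 * held while entries are in flight.  Error unwinding is omitted here.
 */
static unsigned long __maybe_unused example_move_mapping(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr,
		unsigned long len)
{
	struct vm_area_struct *new_vma;
	unsigned long new_pgoff;
	unsigned long moved_len;
	bool need_rmap_locks;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, len, new_pgoff, &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, len,
				     need_rmap_locks);
	if (moved_len < len)
		return -ENOMEM;		/* the real code moves the ptes back */

	return new_addr;
}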

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages
 */
int may_expand_vm(struct mm_struct *mm, unsigned long npages)
{
	unsigned long cur = mm->total_vm;	/* pages */
	unsigned long lim;

	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;

	if (cur + npages > lim)
		return 0;
	return 1;
}
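
/*
 * Illustrative sketch (hypothetical helper): callers such as the mmap and
 * mremap paths check may_expand_vm() before charging new pages against
 * mm->total_vm, so that RLIMIT_AS is enforced.
 */
static int __maybe_unused example_charge_pages(struct mm_struct *mm,
					       unsigned long npages)
{
	if (!may_expand_vm(mm, npages))
		return -ENOMEM;

	mm->total_vm += npages;		/* caller holds mmap_sem for writing */
	return 0;
}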


static int special_mapping_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	pgoff_t pgoff;
	struct page **pages;

	/*
	 * special mappings have no vm_file, and in that case, the mm
	 * uses vm_pgoff internally. So we have to subtract it from here.
	 * We are allowed to do this because we are the mm; do not copy
	 * this code into drivers!
	 */
	pgoff = vmf->pgoff - vma->vm_pgoff;

	for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;
		get_page(page);
		vmf->page = page;
		return 0;
	}

	return VM_FAULT_SIGBUS;
}

/*
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
};

/*
 * Called with mm->mmap_sem held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
int install_special_mapping(struct mm_struct *mm,
			    unsigned long addr, unsigned long len,
			    unsigned long vm_flags, struct page **pages)
{
	int ret;
	struct vm_area_struct *vma;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (unlikely(vma == NULL))
		return -ENOMEM;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;

	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = &special_mapping_vmops;
	vma->vm_private_data = pages;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	mm->total_vm += len >> PAGE_SHIFT;

	perf_event_mmap(vma);

	return 0;

out:
	kmem_cache_free(vm_area_cachep, vma);
	return ret;
}
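
/*
 * Illustrative sketch (hypothetical, modelled on the way architectures map
 * their vDSO): installing a NULL-terminated page array as a special mapping
 * in a new process.  The page array is assumed to be allocated once and to
 * outlive every mm that maps it, as the comment above requires.
 */
static int __maybe_unused example_install_pages(struct mm_struct *mm,
						struct page **pages,
						unsigned long size)
{
	unsigned long addr;
	int ret;

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, 0, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = (int)addr;
		goto up_fail;
	}

	ret = install_special_mapping(mm, addr, size,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				      pages);
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}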

static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_node))
			BUG();
	}
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
	}
}

/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_sem in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_sem until mm_drop_all_locks() returns.
 *
 * mmap_sem in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout (for example populate_range() with
 * nonlinear vmas). It's also needed in write mode to prevent new
 * anon_vmas from being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We can take all the locks in random order because the VM code
 * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
 * takes more than one of them in a row. Secondly we're protected
 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(down_read_trylock(&mm->mmap_sem));

	mutex_lock(&mm_all_locks_mutex);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
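
/*
 * Illustrative sketch (hypothetical helper): the standard pairing of
 * mm_take_all_locks() and mm_drop_all_locks(), as done for instance when an
 * mmu notifier is registered.  The helper takes mmap_sem for writing itself,
 * since mm_take_all_locks() requires it to be held.
 */
static int __maybe_unused example_quiesce_mm(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);

	ret = mm_take_all_locks(mm);
	if (ret)
		goto out;	/* -EINTR: a signal was pending */

	/*
	 * Every i_mmap_mutex and anon_vma lock in this mm is now held, so no
	 * page faults or vma changes can race with whatever is done here.
	 */

	mm_drop_all_locks(mm);
out:
	up_write(&mm->mmap_sem);
	return ret;
}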

static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		mutex_unlock(&mapping->i_mmap_mutex);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_sem cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(down_read_trylock(&mm->mmap_sem));
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}

/*
 * initialise the percpu counter used for committed-memory accounting
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB).
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
module_init(init_user_reserve)
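
/*
 * Worked example (illustrative, not from the original source): with 2GB of
 * free memory, free_kbytes is 2097152, so free_kbytes / 32 is 65536kB (64MB),
 * which is below the 1UL << 17 kB (128MB) cap, and the user reserve becomes
 * 64MB.  With 8GB free, 3% would be 256MB, so the cap applies and the
 * reserve is 128MB.
 */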

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
module_init(init_admin_reserve)

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 17))
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 13))
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block reserve_mem_nb = {
	.notifier_call = reserve_mem_notifier,
};

static int __meminit init_reserve_notifier(void)
{
	if (register_hotmemory_notifier(&reserve_mem_nb))
		printk("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
module_init(init_reserve_notifier)