author	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-04 21:30:28 -0700
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-05-09 15:06:51 -0700
commit	1e79fafb77908cd89d3ece2b297ff21ac55e0bef (patch)
tree	129dec7d181cfa584dba3ed443fa63b59f3c2c76 /mm/mlock.c
parent	a4dbc2902ff426f9ded7542eeb3f347442f7fc1f (diff)
VM: skip the stack guard page lookup in get_user_pages only for mlock
commit a1fde08c74e90accd62d4cfdbf580d2ede938fe7 upstream.

The logic in __get_user_pages() used to skip the stack guard page lookup whenever the caller wasn't interested in seeing what the actual page was. But Michel Lespinasse points out that there are cases where we don't care about the physical page itself (so 'pages' may be NULL), but do want to make sure a page is mapped into the virtual address space.

So using the existence of the "pages" array as an indication of whether to look up the guard page or not isn't actually so great, and we really should just use the FOLL_MLOCK bit. But because that bit was only set for the VM_LOCKED case (and not all vma's necessarily have it, even for mlock()), we couldn't do that originally.

Fix that by moving the VM_LOCKED check deeper into the call-chain, which actually simplifies many things. Now mlock() gets simpler, and we can also check for FOLL_MLOCK in __get_user_pages() and the code ends up much more straightforward.

Reported-and-reviewed-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
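The __get_user_pages() side of this change lives in mm/memory.c and is therefore not part of the mm/mlock.c-limited diff below. As an illustrative sketch only (not a hunk from this commit; the helper stack_guard_page() and the locals pages/foll_flags are assumed names from kernel code of that era), the idea described above amounts to keying the guard-page skip off FOLL_MLOCK instead of off the presence of the 'pages' array:

	/* Old heuristic: skip the guard page only when no page pointers were requested. */
	if (!pages && stack_guard_page(vma, start))
		goto next_page;

	/* New test: skip it whenever the walk is driven by mlock, i.e. FOLL_MLOCK is set. */
	if ((foll_flags & FOLL_MLOCK) && stack_guard_page(vma, start))
		goto next_page;

Since __mlock_vma_pages_range() is only ever used to fault pages in for mlock, it can now set FOLL_MLOCK unconditionally, which is exactly what the hunk below does.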
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index da23be42c90..c8e77909c04 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -162,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH;
+	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
@@ -178,9 +178,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
 		gup_flags |= FOLL_FORCE;
 
-	if (vma->vm_flags & VM_LOCKED)
-		gup_flags |= FOLL_MLOCK;
-
 	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
 				NULL, NULL, nonblocking);
 }
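The VM_LOCKED test removed above does not simply disappear; per the commit message it moves deeper into the call-chain. A hedged sketch of where it plausibly ends up, assuming it is folded into the FOLL_MLOCK handling of follow_page() in mm/memory.c (the placement and surrounding calls are assumptions, not shown by this mm/mlock.c-only diff):

	/* Only treat the faulted page as mlocked when the vma itself is VM_LOCKED. */
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* drain pagevecs so the page sits on an LRU list */
			mlock_vma_page(page);	/* move it to the unevictable list */
			unlock_page(page);
		}
	}

With the check performed there, callers such as __mlock_vma_pages_range() no longer need to test VM_LOCKED themselves before setting FOLL_MLOCK.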