#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
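
/*
 * Illustrative sketch (not part of this header; the struct and function
 * names below are hypothetical): the typical mmgrab()/mmdrop() pairing
 * when an mm_struct pointer is stashed for later, possibly unbounded, use:
 *
 *	struct mm_tracker {
 *		struct mm_struct *mm;
 *	};
 *
 *	static void tracker_init(struct mm_tracker *t, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);
 *		t->mm = mm;
 *	}
 *
 *	static void tracker_destroy(struct mm_tracker *t)
 *	{
 *		mmdrop(t->mm);
 *	}
 *
 * Note that mmgrab() only keeps the mm_struct allocation alive; touching
 * the address space itself still requires mmget_not_zero()/mmput().
 */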

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
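
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * combining mmget_not_zero() with mmput() to access an address space whose
 * mm_struct is only pinned via mmgrab(), as the mmgrab() comment describes:
 *
 *	static int walk_pinned_mm(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return -ESRCH;
 *
 *		down_read(&mm->mmap_sem);
 *		... inspect mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *
 *		mmput(mm);
 *		return 0;
 *	}
 */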

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but the slow path is deferred to async context, so it
 * can also be called from atomic context.
 */
extern void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away and if
 * ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
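/*
 * Illustrative sketch (hypothetical caller): the usual pattern for looking
 * at another task's address space. mm_access() performs the
 * ptrace_may_access() check and takes the reference, which the caller
 * releases with mmput():
 *
 *	static int dump_task_mm(struct task_struct *task)
 *	{
 *		struct mm_struct *mm;
 *
 *		mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 *		if (IS_ERR_OR_NULL(mm))
 *			return mm ? PTR_ERR(mm) : -ESRCH;
 *
 *		... use mm, e.g. walk its VMAs ...
 *
 *		mmput(mm);
 *		return 0;
 *	}
 */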
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * In that case we also can't trust the real_parent->mm == tsk->mm
	 * check: it can be a false negative. But we do not care; if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
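
/*
 * Illustrative sketch (hypothetical caller): skipping tasks that are only
 * borrowing their parent's mm across vfork(), e.g. when attributing an
 * address space to a task:
 *
 *	static bool skip_mm_accounting(struct task_struct *tsk)
 *	{
 *		return !tsk->mm || in_vfork(tsk);
 *	}
 */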

#endif /* _LINUX_SCHED_MM_H */