#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}


/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
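
/*
 * Illustrative sketch, not part of this header: a hypothetical tracker that
 * must keep the mm_struct itself alive (but not its address space) pairs
 * mmgrab() with mmdrop().  The structure and function names below are
 * made up for the example.
 *
 *	struct mm_tracker {
 *		struct mm_struct *mm;
 *	};
 *
 *	static void mm_tracker_attach(struct mm_tracker *t, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);
 *		t->mm = mm;
 *	}
 *
 *	static void mm_tracker_detach(struct mm_tracker *t)
 *	{
 *		mmdrop(t->mm);
 *		t->mm = NULL;
 *	}
 */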

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

/* Asynchronous variant of mmdrop(): the final __mmdrop() runs from a workqueue. */
static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

/* Pin the address space only if it is still live (mm_users is non-zero). */
static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput(), but the slow path is performed from async (workqueue)
 * context.  Can be called from atomic context as well.
 */
extern void mmput_async(struct mm_struct *);
#endif
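
/*
 * Illustrative sketch, not part of this header: a hypothetical walker that
 * holds only an mm_count reference (via mmgrab()) and needs to touch the
 * address space for a short, bounded operation upgrades it with
 * mmget_not_zero() and releases it with mmput() when done.
 *
 *	static void walk_if_alive(struct mm_struct *mm)
 *	{
 *		if (!mmget_not_zero(mm))
 *			return;
 *		walk_address_space(mm);
 *		mmput(mm);
 *	}
 *
 * Here walk_address_space() is a placeholder for whatever bounded work the
 * caller needs to do against the still-live address space.
 */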

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
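
/*
 * Illustrative sketch, not part of this header: a hypothetical reader of
 * another task's memory counters.  get_task_mm() returns NULL if the task
 * has no mm (e.g. a kernel thread or an exiting task); every successful
 * call must be balanced by mmput().
 *
 *	static int read_task_mm_stats(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return -ESRCH;
 *		read_mm_counters(mm);
 *		mmput(mm);
 *		return 0;
 *	}
 *
 * read_mm_counters() stands in for the caller's own accounting code.
 */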
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#endif /* _LINUX_SCHED_MM_H */