#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
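
/*
 * Called when the scheduler is about to run a kernel thread that
 * borrows this mm.  Moving this CPU's tlbstate from TLBSTATE_OK to
 * TLBSTATE_LAZY tells the TLB flush IPI path that it may switch this
 * CPU off of the mm (via leave_mm()) instead of flushing it again
 * for every subsequent flush.
 */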
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
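
/*
 * Switch this CPU from @prev's address space to @next's: update
 * cpu_tlbstate and mm_cpumask, reload CR3, and reload the LDT when
 * the two mms have different descriptor tables.  Called from the
 * scheduler's context switch and, via activate_mm(), from exec.
 */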
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
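		/*
		 * Set this CPU in next's cpumask before loading its page
		 * tables, so a concurrent TLB flush for next cannot miss
		 * this CPU once CR3 points at next's pgd.
		 */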
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);

		/* Stop TLB flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load the LDT, if the LDT is different: */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
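		/*
		 * prev == next: this CPU already has next loaded (we came
		 * back from lazy TLB mode, or two threads share the mm),
		 * so CR3 normally still points at next's page tables.
		 */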
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm() disabled
			 * TLB flush IPI delivery for this CPU.  We must
			 * reload CR3 to make sure we don't keep using freed
			 * page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
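
/*
 * Activate a fresh mm at exec time: run the paravirt hook, then do a
 * full switch_mm().  Passing a NULL task is fine here because the
 * x86 switch_mm() ignores @tsk.
 */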
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
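
/*
 * Clear the user segment registers when the old mm is dropped during
 * exec, so stale selectors cannot leak into the new program: %gs on
 * 32-bit; the %gs selector and %fs on 64-bit.
 */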
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

#endif /* _ASM_X86_MMU_CONTEXT_H */