path: root/arch/tile/include/asm/mmu_context.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H

#include <linux/smp.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>

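/*
 * No per-mm setup is needed here: ASIDs are allocated per-cpu at
 * switch_mm() time, so a newly created mm has no context state of
 * its own to initialize.
 */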
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
	/* FIXME: DIRECTIO should not always be set. */
	int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
	if (rc < 0)
		panic("hv_install_context failed: %d", rc);
}

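/*
 * Find the kernel PTE that maps the pgdir page itself and pass it as
 * the "prot" argument, so the hypervisor uses the same caching
 * attributes for the page table that the kernel already does.
 */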
static inline void install_page_table(pgd_t *pgdir, int asid)
{
	pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
	__install_page_table(pgdir, asid, *ptep);
}

/*
 * "Lazy" TLB mode is entered when we are switching to a kernel task,
 * which borrows the mm of the previous task.  The goal of this
 * optimization is to avoid having to install a new page table.  On
 * early x86 machines (where the concept originated) you couldn't do
 * anything short of a full page table install for invalidation, so
 * handling a remote TLB invalidate required doing a page table
 * re-install.  Someone clearly decided that it was silly to keep
 * doing this while in "lazy" TLB mode, so the optimization instead
 * installs the swapper page table the first time an invalidate
 * occurs, and clears the cpu out of cpu_vm_mask, so the cpu running
 * the kernel task doesn't need to take any more interrupts.  The
 * original page table must then be explicitly reinstalled when
 * context switching back to the original mm.
 *
 * On Tile, we have to do a page-table install whenever DMA is enabled,
 * so in that case lazy mode doesn't help anyway.  And more generally,
 * we have efficient per-page TLB shootdown, and don't expect to spend
 * that much time in kernel tasks in general, so just leaving the
 * kernel task borrowing the old page table, but handling TLB
 * shootdowns, is a reasonable thing to do.  And importantly, this
 * lets us use the hypervisor's internal APIs for TLB shootdown, which
 * means we don't have to worry about TLB shootdowns being blocked
 * while Linux has interrupts disabled; see the page migration code for
 * an example of where it's important for TLB shootdowns to complete
 * even when interrupts are disabled at the Linux level.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
	/*
	 * We have to do an "identity" page table switch in order to
	 * clear any pending DMA interrupts.
	 */
	if (current->thread.tile_dma_state.enabled)
		install_page_table(mm->pgd, __get_cpu_var(current_asid));
#endif
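	/* Otherwise this is deliberately a no-op; see the comment above. */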
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (likely(prev != next)) {
		int cpu = smp_processor_id();

		/* Pick new ASID. */
		int asid = __get_cpu_var(current_asid) + 1;
		if (asid > max_asid) {
			asid = min_asid;
			local_flush_tlb();
		}
		__get_cpu_var(current_asid) = asid;

		/* Clear cpu from the old mm, and set it in the new one. */
		cpumask_clear_cpu(cpu, &prev->cpu_vm_mask);
		cpumask_set_cpu(cpu, &next->cpu_vm_mask);

		/* Re-load page tables */
		install_page_table(next->pgd, asid);

		/* See how we should set the red/black cache info */
		check_mm_caching(prev, next);

		/*
		 * Since we're changing to a new mm, we have to flush
		 * the icache in case some physical page now being mapped
		 * has subsequently been repurposed and has new code.
		 */
		__flush_icache();
	}
}
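
/*
 * For illustration (hypothetical standalone sketch, not used in this
 * file): the per-cpu ASID rotation in switch_mm() above behaves
 * roughly like the helper below.  Each cpu hands out min_asid,
 * min_asid+1, ..., max_asid in turn, and only pays for a full local
 * TLB flush when the space wraps and recycled ASIDs could otherwise
 * alias stale translations.
 *
 *	static int next_asid(int cur)
 *	{
 *		if (++cur > max_asid) {
 *			cur = min_asid;
 *			local_flush_tlb();
 *		}
 *		return cur;
 *	}
 */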

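/*
 * Called when there is no real outgoing task (e.g. when a new mm is
 * activated at exec time); the NULL "tsk" is fine since switch_mm()
 * above never dereferences it.
 */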
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, NULL);
}

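/* init_new_context() allocates nothing, so these are simple no-ops. */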
#define destroy_context(mm)		do { } while (0)
#define deactivate_mm(tsk, mm)		do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */