/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/mach/map.h>

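/*
 * Bounds of the HYP identity-mapped text section, provided by the linker
 * script.
 */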
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

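/* Serializes all updates to the Hyp page tables below. */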
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	pte_val(*pte) = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

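/*
 * Free the level-3 (pte) tables hanging off each present entry of a Hyp-mode
 * level-2 (pmd) table; the pmd page itself is freed by the caller.
 */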
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			pte_free_kernel(NULL, pte);
		}
		pmd++;
	}
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
void free_hyp_pmds(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr;

	mutex_lock(&kvm_hyp_pgd_mutex);
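	/*
	 * Walk every pgd entry covering kernel space; the loop ends when
	 * addr wraps past the top of the address space back to zero.
	 */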
	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
		pgd = hyp_pgd + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none(*pud))
			continue;
		BUG_ON(pud_bad(*pud));

		pmd = pmd_offset(pud, addr);
		free_ptes(pmd, addr);
		pmd_free(NULL, pmd);
		pud_clear(pud);
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

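/*
 * Fill the level-3 (pte) entries for [start, end) with mappings to the kernel
 * pages backing the same virtual addresses, using HYP attributes.
 */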
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end)
{
	pte_t *pte;
	unsigned long addr;
	struct page *page;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		pte = pte_offset_kernel(pmd, addr);
		BUG_ON(!virt_addr_valid(addr));
		page = virt_to_page(addr);
		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
	}
}

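/*
 * Fill the level-3 (pte) entries for [start, end) with device mappings to
 * consecutive physical frames starting at *pfn_base, advancing *pfn_base as
 * each page is mapped.
 */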
static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
				       unsigned long end,
				       unsigned long *pfn_base)
{
	pte_t *pte;
	unsigned long addr;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		pte = pte_offset_kernel(pmd, addr);
		BUG_ON(pfn_valid(*pfn_base));
		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
		(*pfn_base)++;
	}
}

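/*
 * Allocate level-3 tables as needed and populate the level-2 (pmd) entries
 * covering [start, end).
 */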
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long *pfn_base)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
		}

		next = pmd_addr_end(addr, end);

		/*
		 * If pfn_base is NULL, we map kernel pages into HYP with the
		 * virtual address. Otherwise, this is considered an I/O
		 * mapping and we map the physical region starting at
		 * *pfn_base to [start, end).
		 */
		if (!pfn_base)
			create_hyp_pte_mappings(pmd, addr, next);
		else
			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
	}

	return 0;
}

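/*
 * Walk the Hyp pgd over [from, to), allocating intermediate tables as needed,
 * all under kvm_hyp_pgd_mutex; only addresses above PAGE_OFFSET are accepted.
 */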
static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	BUG_ON(start > end);
	if (start < PAGE_OFFSET)
		return -EINVAL;

	mutex_lock(&kvm_hyp_pgd_mutex);
	for (addr = start; addr < end; addr = next) {
		pgd = hyp_pgd + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - map a kernel virtual address range in Hyp mode
 * @from: The virtual kernel start address of the range
 * @to: The virtual kernel end address of the range (exclusive)
 *
 * The range is mapped into Hyp mode at the same virtual addresses it has in
 * the kernel, backed by the same underlying physical pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
	return __create_hyp_mappings(from, to, NULL);
}

/**
 * create_hyp_io_mappings - map a physical IO range in Hyp mode
 * @from: The virtual HYP start address of the range
 * @to: The virtual HYP end address of the range (exclusive)
 * @addr: The physical start address which gets mapped
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
	unsigned long pfn = __phys_to_pfn(addr);
	return __create_hyp_mappings(from, to, &pfn);
}

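/*
 * Not implemented yet: guest stage-2 aborts are simply rejected for now.
 */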
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return -EINVAL;
}

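/*
 * Return the physical address of the Hyp-mode page table base, suitable for
 * programming into HTTBR.
 */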
phys_addr_t kvm_mmu_get_httbr(void)
{
	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
	return virt_to_phys(hyp_pgd);
}

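/*
 * hyp_pgd is allocated elsewhere during early boot; all we verify here is
 * that the allocation succeeded.
 */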
int kvm_mmu_init(void)
{
	return hyp_pgd ? 0 : -ENOMEM;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
	unsigned long addr, end;
	unsigned long next;
	pgd_t *pgd = hyp_pgd;
	pud_t *pud;
	pmd_t *pmd;

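	/* The idmap maps VA == PA, so index the tables by physical address. */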
	addr = virt_to_phys(__hyp_idmap_text_start);
	end = virt_to_phys(__hyp_idmap_text_end);

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pud = pud_offset(pgd, addr);
		pmd = pmd_offset(pud, addr);

		pud_clear(pud);
		clean_pmd_entry(pmd);
		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
	} while (pgd++, addr = next, addr < end);
}