/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        /*
         * This function also gets called when dealing with HYP page
         * tables. As HYP doesn't have an associated struct kvm (and
         * the HYP page tables are fairly static), we don't do
         * anything there.
         */
        if (kvm)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

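/*
 * A sketch of how the three helpers above are used together in this
 * file: top up the cache in a context that may still sleep, then
 * allocate from it under kvm->mmu_lock, where GFP_KERNEL allocations
 * are not allowed:
 *
 *      ret = mmu_topup_memory_cache(&cache, 2, KVM_NR_MEM_OBJS);
 *      if (ret)
 *              return ret;
 *      spin_lock(&kvm->mmu_lock);
 *      new_table = mmu_memory_cache_alloc(&cache); -- cannot fail now
 *      spin_unlock(&kvm->mmu_lock);
 */
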
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
        struct page *pmd_page = virt_to_page(pmd);
        return page_count(pmd_page) == 1;
}

static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        }
}

static bool pte_empty(pte_t *pte)
{
        struct page *pte_page = virt_to_page(pte);
        return page_count(pte_page) == 1;
}

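/*
 * Note on pmd_empty()/pte_empty() above: each entry installed in a
 * table page takes a get_page() reference on that page (see
 * stage2_set_pte() and the create_hyp_*() helpers below), so a
 * page_count() of exactly 1 -- just the allocation's own reference --
 * means the table holds no entries and may be torn down.
 */
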
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        unsigned long long start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long long addr = start, end = start + size;
        u64 range;

        while (addr < end) {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(kvm, pte, addr);
                range = PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
                        clear_pmd_entry(kvm, pmd, addr);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
                                clear_pud_entry(kvm, pud, addr);
                                range = PUD_SIZE;
                        }
                }

                addr += range;
        }
}

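/*
 * Note on the walk above: the range is normally scanned in page-sized
 * steps, but once a table has been emptied and freed, range is bumped
 * to the size covered by the freed level, so the loop skips ahead
 * instead of revisiting addresses whose translations are already gone.
 */
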
/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
        mutex_lock(&kvm_hyp_pgd_mutex);

        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
                kfree(boot_hyp_pgd);
                boot_hyp_pgd = NULL;
        }

        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

        kfree(init_bounce_page);
        init_bounce_page = NULL;

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
        unsigned long addr;

        free_boot_hyp_pgd();

        mutex_lock(&kvm_hyp_pgd_mutex);

        if (hyp_pgd) {
                for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

                kfree(hyp_pgd);
                hyp_pgd = NULL;
        }

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end, unsigned long pfn,
                                    pgprot_t prot)
{
        pte_t *pte;
        unsigned long addr;

        addr = start;
        do {
                pte = pte_offset_kernel(pmd, addr);
                kvm_set_pte(pte, pfn_pte(pfn, prot));
                get_page(virt_to_page(pte));
                kvm_flush_dcache_to_poc(pte, sizeof(*pte));
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        addr = start;
        do {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                        get_page(virt_to_page(pmd));
                        kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
                }

                next = pmd_addr_end(addr, end);

                create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}

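/*
 * Common worker for the Hyp mapping functions below: walks pgdp for
 * [start, end), allocating intermediate tables as needed, and installs
 * translations to consecutive pfns with the requested protection.
 * Serialized by kvm_hyp_pgd_mutex.
 */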
static int __create_hyp_mappings(pgd_t *pgdp,
                                 unsigned long start, unsigned long end,
                                 unsigned long pfn, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        mutex_lock(&kvm_hyp_pgd_mutex);
        addr = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
        do {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                        get_page(virt_to_page(pud));
                        kvm_flush_dcache_to_poc(pud, sizeof(*pud));
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
                if (err)
                        goto out;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

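/*
 * Translate a kernel virtual address to a physical address: lowmem
 * addresses go through __pa(), while vmalloc/ioremap addresses need a
 * page-table lookup via vmalloc_to_page().
 */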
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
        if (!is_vmalloc_addr(kaddr)) {
                BUG_ON(!virt_addr_valid(kaddr));
                return __pa(kaddr);
        } else {
                return page_to_phys(vmalloc_to_page(kaddr)) +
                       offset_in_page(kaddr);
        }
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from: The virtual kernel start address of the range
 * @to: The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
        phys_addr_t phys_addr;
        unsigned long virt_addr;
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
                err = __create_hyp_mappings(hyp_pgd, virt_addr,
                                            virt_addr + PAGE_SIZE,
                                            __phys_to_pfn(phys_addr),
                                            PAGE_HYP);
                if (err)
                        return err;
        }

        return 0;
}

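/*
 * Hedged usage sketch (the real call sites are in the KVM init code
 * outside this file), e.g. making a per-CPU Hyp stack page visible to
 * Hyp mode:
 *
 *      err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
 */
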
/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from: The kernel start VA of the range
 * @to: The kernel end VA of the range (exclusive)
 * @phys_addr: The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel IO mapping */
        if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

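/*
 * Hedged usage sketch (callers live outside this file): the VGIC code
 * makes the GIC virtual interface control registers reachable from Hyp
 * mode along these lines, where the names are illustrative:
 *
 *      err = create_hyp_io_mappings(vctrl_base,
 *                                   vctrl_base + vctrl_size,
 *                                   vctrl_res_start);
 */
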
/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm: The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        /* stage-2 pgd must be aligned to its size */
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;

        return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm: The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}


static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pte));

        return 0;
}

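/*
 * Caller contract for stage2_set_pte(), as exercised in this file:
 * callers that may need to install missing intermediate tables pass a
 * cache topped up with at least two pages (one pmd-level and one
 * pte-level table) and hold kvm->mmu_lock; kvm_set_spte_hva() passes a
 * NULL cache and relies on the early returns above to skip unmapped
 * ranges.
 */
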
/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm: The KVM pointer
 * @guest_ipa: The IPA at which to insert the mapping
 * @pa: The physical address of the device
 * @size: The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
                kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

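/*
 * Illustrative caller (hedged; the real call sites live outside this
 * file): the VGIC code uses this to make the GIC virtual CPU interface
 * appear at the expected IPA in the guest, along the lines of:
 *
 *      ret = kvm_phys_addr_ioremap(kvm, guest_cpu_if_ipa,
 *                                  host_cpu_if_pa, PAGE_SIZE);
 */
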
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
{
        pte_t new_pte;
        pfn_t pfn;
        int ret;
        bool write_fault, writable;
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't
         * risk the page we just got a reference to getting unmapped before
         * we have a chance to grab the mmu_lock, which ensures that if the
         * page gets unmapped afterwards, the call to kvm_unmap_hva will
         * take it away from us again properly. This smp_rmb() interacts
         * with the smp_wmb() in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        new_pte = pfn_pte(pfn, PAGE_S2);
        coherent_icache_guest_page(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
                kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu: the VCPU pointer
 * @run: the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry: either the guest simply
 * needs more memory and we must allocate an appropriate page, or the
 * guest tried to access I/O memory, which is emulated by user space. The
 * distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        bool is_iabt;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check the stage-2 fault is trans. fault or write fault */
        fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                if (fault_status != FSC_FAULT) {
                        kvm_err("Unsupported fault status on io memory: %#lx\n",
                                fault_status);
                        ret = -EFAULT;
                        goto out_unlock;
                }

                /*
                 * The IPA is reported as [MAX:12], so we need to
                 * complement it with the bottom 12 bits from the
                 * faulting VA. This is always 12 bits, irrespective
                 * of the page size.
                 */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
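                /*
                 * Worked example (illustrative values): a reported IPA
                 * page of 0x10000000 and a faulting VA of 0xc0d0eabc
                 * combine to 0x10000000 | 0xabc = 0x10000abc.
                 */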
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        memslot = gfn_to_memslot(vcpu->kvm, gfn);

        ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

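/*
 * Invoke a handler for every guest physical page backed by the HVA
 * range [start, end): each intersecting memslot contributes the gfns
 * covering the overlap, and the handler is called once per page.
 */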
static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}


void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
        return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
        return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
        return hyp_idmap_vector;
}

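/*
 * Set up the Hyp-mode page tables: allocate the boot and runtime PGDs,
 * idmap the Hyp init code (copying it to a bounce page first if it
 * straddles a page boundary), and map the trampoline page into both
 * sets of tables.
 */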
int kvm_mmu_init(void)
{
        int err;

        hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
        hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
        hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

        if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
                /*
                 * Our init code is crossing a page boundary. Allocate
                 * a bounce page, copy the code over and use that.
                 */
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;

                init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
                        goto out;
                }

                memcpy(init_bounce_page, __hyp_idmap_text_start, len);
                /*
                 * Warning: the code we just copied to the bounce page
                 * must be flushed to the point of coherency.
                 * Otherwise, the data may be sitting in L2, and HYP
                 * mode won't be able to observe it as it runs with
                 * caches off at that point.
                 */
                kvm_flush_dcache_to_poc(init_bounce_page, len);

                phys_base = virt_to_phys(init_bounce_page);
                hyp_idmap_vector += phys_base - hyp_idmap_start;
                hyp_idmap_start = phys_base;
                hyp_idmap_end = phys_base + len;

                kvm_info("Using HYP init bounce page @%lx\n",
                         (unsigned long)phys_base);
        }

        hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
        boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
                goto out;
        }

        /* Create the idmap in the boot page tables */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    hyp_idmap_start, hyp_idmap_end,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);

        if (err) {
                kvm_err("Failed to idmap %lx-%lx\n",
                        hyp_idmap_start, hyp_idmap_end);
                goto out;
        }

        /* Map the very same page at the trampoline VA */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        /* Map the same page again into the runtime page tables */
        err = __create_hyp_mappings(hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        return 0;
out:
        free_hyp_pgds();
        return err;
}