/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void kvm_tlb_flush_vmid(struct kvm *kvm)
{
        kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}
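
/*
 * Illustrative note (an added sketch, not from the original file): callers
 * are expected to top up a cache in a context that may sleep and only pull
 * pages out of it under kvm->mmu_lock, where allocation cannot fail, e.g.:
 *
 *      ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
 *      if (ret)
 *              return ret;
 *      spin_lock(&kvm->mmu_lock);
 *      new_table = mmu_memory_cache_alloc(memcache);
 *      spin_unlock(&kvm->mmu_lock);
 *
 * "new_table" is a placeholder name; user_mem_abort() and stage2_set_pte()
 * below cooperate in exactly this way.
 */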

static void free_ptes(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
                if (!pmd_none(*pmd) && pmd_table(*pmd)) {
                        pte = pte_offset_kernel(pmd, addr);
                        pte_free_kernel(NULL, pte);
                }
                pmd++;
        }
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
void free_hyp_pmds(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);
                pgd = hyp_pgd + pgd_index(hyp_addr);
                pud = pud_offset(pgd, hyp_addr);

                if (pud_none(*pud))
                        continue;
                BUG_ON(pud_bad(*pud));

                pmd = pmd_offset(pud, hyp_addr);
                free_ptes(pmd, addr);
                pmd_free(NULL, pmd);
                pud_clear(pud);
        }
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end)
{
        pte_t *pte;
        unsigned long addr;
        struct page *page;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);

                pte = pte_offset_kernel(pmd, hyp_addr);
                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
                kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
        }
}

static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
                                       unsigned long end,
                                       unsigned long *pfn_base)
{
        pte_t *pte;
        unsigned long addr;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);

                pte = pte_offset_kernel(pmd, hyp_addr);
                BUG_ON(pfn_valid(*pfn_base));
                kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
                (*pfn_base)++;
        }
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long *pfn_base)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        for (addr = start; addr < end; addr = next) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);
                pmd = pmd_offset(pud, hyp_addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, hyp_addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                }

                next = pmd_addr_end(addr, end);

                /*
                 * If pfn_base is NULL, we map kernel pages into HYP with the
                 * virtual address. Otherwise, this is considered an I/O
                 * mapping and we map the physical region starting at
                 * *pfn_base to [start, end[.
                 */
                if (!pfn_base)
                        create_hyp_pte_mappings(pmd, addr, next);
                else
                        create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
        }

        return 0;
}

static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        BUG_ON(start > end);
        if (start < PAGE_OFFSET)
                return -EINVAL;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = start; addr < end; addr = next) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);
                pgd = hyp_pgd + pgd_index(hyp_addr);
                pud = pud_offset(pgd, hyp_addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, hyp_addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
                if (err)
                        goto out;
        }
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:       The virtual kernel start address of the range
 * @to:         The virtual kernel end address of the range (exclusive)
 *
 * The Hyp-mode mapping uses the same virtual addresses as the kernel
 * (modulo HYP_PAGE_OFFSET) and points at the same underlying physical
 * pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
        return __create_hyp_mappings(from, to, NULL);
}
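
/*
 * Illustrative use (an added sketch; the caller and symbols are assumptions,
 * not taken from this file): the Hyp-mode init code is expected to map the
 * sections it needs before installing the Hyp vectors, roughly:
 *
 *      err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 *      if (err)
 *              goto out_err;
 *
 * after which the range is reachable from Hyp mode at KERN_TO_HYP(va).
 */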

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:       The kernel start VA of the range
 * @to:         The kernel end VA of the range (exclusive)
 * @addr:       The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
        unsigned long pfn = __phys_to_pfn(addr);
        return __create_hyp_mappings(from, to, &pfn);
}
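
/*
 * Illustrative use (an added sketch; the names are placeholders): mapping a
 * device region, such as the GIC virtual interface control registers, into
 * Hyp mode could look like:
 *
 *      err = create_hyp_io_mappings(gich_va, gich_va + resource_size(&res),
 *                                   res.start);
 *
 * The Hyp VA mirrors the kernel VA passed in; only the backing pages come
 * from the physical range starting at res.start.
 */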

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:        The KVM struct pointer for the VM.
 *
 * Allocates only the level-1 table, whose size is defined by S2_PGD_ORDER
 * (it can cover either the full 40-bit input address range or be limited to
 * 32-bit input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        /* stage-2 pgd must be aligned to its size */
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;

        return 0;
}

static void clear_pud_entry(pud_t *pud)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
        struct page *pmd_page = virt_to_page(pmd);
        return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
        }
}

static bool pte_empty(pte_t *pte)
{
        struct page *pte_page = virt_to_page(pte);
        return page_count(pte_page) == 1;
}
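
/*
 * Note added for clarity (not from the original file): the *_empty() helpers
 * rely on the struct page refcount of each stage-2 table page. A table page
 * starts life with a count of 1 from its allocation, and stage2_set_pte()
 * takes one extra reference per entry it installs, so a count of 1 means the
 * table holds no entries and can be torn down by unmap_stage2_range() below.
 */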

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_addr_t addr = start, end = start + size;
        u64 range;

        while (addr < end) {
                pgd = kvm->arch.pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(pte);
                range = PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
                        clear_pmd_entry(pmd);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
                                clear_pud_entry(pud);
                                range = PUD_SIZE;
                        }
                }

                addr += range;
        }
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:        The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}


static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid(kvm);
        else
                get_page(virt_to_page(pte));

        return 0;
}
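
/*
 * Illustrative sketch (added; the surrounding code is the authority): a
 * typical RAM mapping is installed under kvm->mmu_lock with a topped-up
 * cache, along these lines:
 *
 *      pte_t pte = pfn_pte(pfn, PAGE_S2);
 *      kvm_set_s2pte_writable(&pte);
 *      stage2_set_pte(kvm, cache, fault_ipa, &pte, false);
 *
 * Passing iomap == true instead makes the call return -EFAULT if a mapping
 * already exists, which kvm_phys_addr_ioremap() below relies on.
 */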

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:        The KVM pointer
 * @guest_ipa:  The IPA at which to insert the mapping
 * @pa:         The physical address of the device
 * @size:       The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
                kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}
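
/*
 * Illustrative use (an added sketch; the names are placeholders): this is
 * the hook for placing a device window, e.g. the GIC virtual CPU interface,
 * into the guest IPA space:
 *
 *      ret = kvm_phys_addr_ioremap(kvm, guest_cpuif_base, host_gicv_base,
 *                                  cpuif_size);
 *
 * Each page ends up mapped with PAGE_S2_DEVICE and marked writable, as set
 * up in the loop above.
 */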

static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
{
        pte_t new_pte;
        pfn_t pfn;
        int ret;
        bool write_fault, writable;
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
         * the page we just got a reference to getting unmapped before we have
         * a chance to grab the mmu_lock, which ensures that if the page gets
         * unmapped afterwards, the call to kvm_unmap_hva will take it away
         * from us again properly. This smp_rmb() interacts with the smp_wmb()
         * in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        new_pte = pfn_pte(pfn, PAGE_S2);
        coherent_icache_guest_page(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
                kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:       the VCPU pointer
 * @run:        the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean either that
 * the guest simply needs more memory and we must allocate an appropriate
 * page, or that the guest tried to access I/O memory, which is emulated by
 * user space. The distinction is based on the IPA causing the fault and
 * whether this memory region has been registered as standard RAM by user
 * space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        bool is_iabt;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check that the stage-2 fault is a translation or permission fault */
        fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                if (fault_status != FSC_FAULT) {
                        kvm_err("Unsupported fault status on io memory: %#lx\n",
                                fault_status);
                        ret = -EFAULT;
                        goto out_unlock;
                }

                /* Adjust page offset */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK;
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        memslot = gfn_to_memslot(vcpu->kvm, gfn);

        ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
        kvm_tlb_flush_vmid(kvm);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}


void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
        VM_BUG_ON(!virt_addr_valid(hyp_pgd));
        return virt_to_phys(hyp_pgd);
}

int kvm_mmu_init(void)
{
        if (!hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                return -ENOMEM;
        }

        return 0;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
        unsigned long addr, end;
        unsigned long next;
        pgd_t *pgd = hyp_pgd;
        pud_t *pud;
        pmd_t *pmd;

        addr = virt_to_phys(__hyp_idmap_text_start);
        end = virt_to_phys(__hyp_idmap_text_end);

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pud = pud_offset(pgd, addr);
                pmd = pmd_offset(pud, addr);

                pud_clear(pud);
                kvm_clean_pmd_entry(pmd);
                pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
        } while (pgd++, addr = next, addr < end);
}
757}