/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
	pgd_clear(pgd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pud_free(NULL, pud_table);
	put_page(virt_to_page(pgd));
}

static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	VM_BUG_ON(pud_huge(*pud));
	pud_clear(pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	VM_BUG_ON(kvm_pmd_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

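/*
 * The unmap_* helpers below walk the page tables bottom-up when tearing
 * down a range (they serve both stage-2 and Hyp tables, see the kvm ==
 * NULL case in kvm_tlb_flush_vmid_ipa): leaf entries are cleared first,
 * each removed entry gets a TLB invalidation, and a table page whose
 * entries are all gone is freed and unhooked from its parent level.
 */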
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}

static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				put_page(virt_to_page(pmd));
			} else {
				unmap_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (kvm_pmd_table_empty(kvm, start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}

static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				pud_clear(pud);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				put_page(virt_to_page(pud));
			} else {
				unmap_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (kvm_pud_table_empty(kvm, start_pud))
		clear_pgd_entry(kvm, pgd, start_addr);
}


static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	pgd = pgdp + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		unmap_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

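/*
 * The stage2_flush_* walkers below clean the data cache to the point of
 * coherency for every page currently mapped at stage 2, using the
 * userspace (hva) alias of each page, so that memory already mapped to
 * the VM is coherent with the caches.
 */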
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
			kvm_flush_dcache_to_poc((void *)hva, PAGE_SIZE);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PMD_SIZE);
			} else {
				stage2_flush_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PUD_SIZE);
			} else {
				stage2_flush_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		stage2_flush_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	free_page((unsigned long)init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

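/*
 * The create_hyp_*_mappings helpers below each populate one level of the
 * Hyp page tables (PTE, PMD, PUD), allocating intermediate tables as
 * needed and cleaning every updated entry to the PoC so the new tables
 * are visible to the MMU even while Hyp mode still runs with caches off.
 */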
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int ret;

	addr = start;
	do {
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				return -ENOMEM;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pud_addr_end(addr, end);
		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (ret)
			return ret;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);

		if (pgd_none(*pgd)) {
			pud = pud_alloc_one(NULL, addr);
			if (!pud) {
				kvm_err("Cannot allocate Hyp pud\n");
				err = -ENOMEM;
				goto out;
			}
			pgd_populate(NULL, pgd, pud);
			get_page(virt_to_page(pgd));
			kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(hyp_pgd, virt_addr,
					    virt_addr + PAGE_SIZE,
					    __phys_to_pfn(phys_addr),
					    PAGE_HYP);
		if (err)
			return err;
	}

	return 0;
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	int ret;
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	if (KVM_PREALLOC_LEVEL > 0) {
		/*
		 * Allocate fake pgd for the page table manipulation macros to
		 * work.  This is not used by the hardware and we have no
		 * alignment requirement for this allocation.
		 */
		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
				       GFP_KERNEL | __GFP_ZERO);
	} else {
		/*
		 * Allocate actual first-level Stage-2 page table used by the
		 * hardware for Stage-2 page table walks.
		 */
		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
	}

	if (!pgd)
		return -ENOMEM;

	ret = kvm_prealloc_hwpgd(kvm, pgd);
	if (ret)
		goto out_err;

	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;
	return 0;
out_err:
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(pgd);
	else
		free_pages((unsigned long)pgd, S2_PGD_ORDER);
	return ret;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:	The VM pointer
 * @start:	The intermediate physical base address of the range to unmap
 * @size:	The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	kvm_free_hwpgd(kvm);
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(kvm->arch.pgd);
	else
		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

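/*
 * stage2_get_pud/stage2_get_pmd walk the stage-2 tables for a guest IPA,
 * allocating any missing intermediate table from the supplied memory
 * cache, or returning NULL when no cache is available (e.g. on the MMU
 * notifier paths, which must not allocate).
 */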
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = kvm->arch.pgd + pgd_index(addr);
	if (WARN_ON(pgd_none(*pgd))) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		pgd_populate(NULL, pgd, pud);
		get_page(virt_to_page(pgd));
	}

	return pud_offset(pgd, addr);
}

static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = stage2_get_pud(kvm, cache, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}

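/*
 * Install a huge (PMD-level) stage-2 mapping. The new PMD fully replaces
 * any existing entry: if one was present the TLB entry for this IPA is
 * invalidated, otherwise the refcount of the PMD table page is raised.
 */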
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	/*
	 * Mapping in huge pages should only happen through a fault.  If a
	 * page is merged into a transparent huge page, the individual
	 * subpages of that huge page should be unmapped through MMU
	 * notifiers before we get here.
	 *
	 * Merging of CompoundPages is not supported; they should instead be
	 * split first, unmapped, merged, and mapped back in on-demand.
	 */
	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

	old_pmd = *pmd;
	kvm_set_pmd(pmd, *new_pmd);
	if (pmd_present(old_pmd))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pmd));
	return 0;
}

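/*
 * Install a page-sized stage-2 mapping, allocating a PTE table from the
 * cache if needed. With @iomap set, an already present PTE is treated as
 * an error instead of being silently replaced.
 */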
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create stage-2 page table mapping - Levels 0 and 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

		if (writable)
			kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
					     KVM_NR_MEM_OBJS);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *ipap >> PAGE_SHIFT;

	if (PageTransCompound(pfn_to_page(pfn))) {
		unsigned long mask;
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		mask = PTRS_PER_PMD - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			*ipap &= PMD_MASK;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}

		return true;
	}

	return false;
}

static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}

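/*
 * user_mem_abort() handles a stage-2 fault on an address backed by a
 * memslot: it resolves the faulting IPA to a host page (using a PMD-sized
 * block mapping for hugetlbfs or transparent huge pages when possible),
 * makes the guest page coherent with the cache, and installs the stage-2
 * mapping under mmu_lock after checking mmu_notifier_seq to detect a
 * concurrent host-side unmap.
 */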
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false, force_pte = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;
	pgprot_t mem_type = PAGE_S2;
	bool fault_ipa_uncached;

	write_fault = kvm_is_write_fault(vcpu);
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma)) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to memslots that don't have the same
		 * alignment for userspace and IPA cannot be mapped using
		 * block descriptors even if the pages belong to a THP for
		 * the process, because the stage-2 block descriptor will
		 * cover more than a single THP and we lose atomicity for
		 * unmapping, updates, and splits of the THP or other pages
		 * in the stage-2 block range.
		 */
		if ((memslot->userspace_addr & ~PMD_MASK) !=
		    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
			force_pte = true;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn))
		mem_type = PAGE_S2_DEVICE;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;
	if (!hugetlb && !force_pte)
		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
					  fault_ipa_uncached);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, mem_type);
		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
					  fault_ipa_uncached);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_hsr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

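/*
 * handle_hva_to_gpa() translates a host virtual address range into the
 * guest physical pages it backs (the range may intersect several
 * memslots) and invokes @handler on every guest page; the MMU notifier
 * callbacks below are fanned out through it.
 */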
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
			      (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}


void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

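/*
 * kvm_mmu_init() prepares the page tables needed to install KVM in Hyp
 * mode: an identity mapping of the init code (copied to a bounce page if
 * it crosses a page boundary), plus a trampoline mapping in both the boot
 * and the runtime Hyp page tables.
 */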
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = kvm_virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);

	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);

	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
}

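/*
 * When a memslot is created or moved, walk the VMAs covered by the new
 * region: PFNMAP (device) VMAs are mapped into stage 2 right away via
 * kvm_phys_addr_ioremap(), everything else is left to be faulted in on
 * demand, and the whole range is rolled back if any mapping fails.
 */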
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	bool writable = !(mem->flags & KVM_MEM_READONLY);
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the
	 * IPA space addressable by the KVM guest.
	 */
	if (memslot->base_gfn + memslot->npages >=
	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
		return -EFAULT;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we can map
	 * any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = mem->guest_phys_addr +
				    (vm_start - mem->userspace_addr);
			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
					 vm_start - vma->vm_start;

			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
						    vm_end - vm_start,
						    writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	spin_lock(&kvm->mmu_lock);
	if (ret)
		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
	else
		stage2_flush_memslot(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);
	return ret;
}


void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	/*
	 * Readonly memslots are not incoherent with the caches by definition,
	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
	 * that the guest may consider devices and hence map as uncached.
	 * To prevent incoherency issues in these cases, tag all readonly
	 * regions as incoherent.
	 */
	if (slot->flags & KVM_MEM_READONLY)
		slot->flags |= KVM_MEMSLOT_INCOHERENT;
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(kvm, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}