/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
#define kvm_pud_huge(_x)	pud_huge(_x)

#define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)

static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

/**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @kvm:	pointer to kvm structure.
 * @addr:	IPA
 * @pmd:	pmd pointer for IPA
 *
 * Clears the huge PMD entry and flushes the stage-1 and stage-2 TLB entries
 * for @addr, so that subsequent faults repopulate the range with page-sized
 * mappings that can be tracked individually.
 */
static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
{
	if (!kvm_pmd_huge(*pmd))
		return;

	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	put_page(virt_to_page(pmd));
}
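
/*
 * Illustrative sketch of how the dissolve path is reached (not an extra call
 * site, just the pattern used by the fault handler later in this file): when
 * dirty logging is enabled for a memslot, stage2_set_pte() is invoked with
 * KVM_S2_FLAG_LOGGING_ACTIVE so any huge mapping covering the faulting IPA
 * is broken up before a page-sized PTE is installed:
 *
 *	stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 *		       KVM_S2_FLAG_LOGGING_ACTIVE);
 */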

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}
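
/*
 * Typical usage pattern for the cache above (a sketch based on the fault
 * handlers later in this file; the names are placeholders): top up the cache
 * outside the mmu_lock, where allocations may sleep, then let the stage-2
 * table walkers draw pre-allocated pages from it while the lock is held:
 *
 *	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
 *				     KVM_NR_MEM_OBJS);
 *	if (ret)
 *		return ret;
 *	spin_lock(&kvm->mmu_lock);
 *	ret = stage2_set_pte(kvm, memcache, addr, &pte, flags);
 *	spin_unlock(&kvm->mmu_lock);
 */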

static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
	pgd_clear(pgd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pud_free(NULL, pud_table);
	put_page(virt_to_page(pgd));
}

static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	VM_BUG_ON(pud_huge(*pud));
	pud_clear(pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	VM_BUG_ON(kvm_pmd_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}

static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				put_page(virt_to_page(pmd));
			} else {
				unmap_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (kvm_pmd_table_empty(kvm, start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}

static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				pud_clear(pud);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				put_page(virt_to_page(pud));
			} else {
				unmap_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (kvm_pud_table_empty(kvm, start_pud))
		clear_pgd_entry(kvm, pgd, start_addr);
}


static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	pgd = pgdp + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			unmap_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
			kvm_flush_dcache_to_poc((void *)hva, PAGE_SIZE);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PMD_SIZE);
			} else {
				stage2_flush_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PUD_SIZE);
			} else {
				stage2_flush_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		stage2_flush_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	free_page((unsigned long)init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int ret;

	addr = start;
	do {
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				return -ENOMEM;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pud_addr_end(addr, end);
		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (ret)
			return ret;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);

		if (pgd_none(*pgd)) {
			pud = pud_alloc_one(NULL, addr);
			if (!pud) {
				kvm_err("Cannot allocate Hyp pud\n");
				err = -ENOMEM;
				goto out;
			}
			pgd_populate(NULL, pgd, pud);
			get_page(virt_to_page(pgd));
			kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(hyp_pgd, virt_addr,
					    virt_addr + PAGE_SIZE,
					    __phys_to_pfn(phys_addr),
					    PAGE_HYP);
		if (err)
			return err;
	}

	return 0;
}
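
/*
 * Minimal usage sketch (the real call sites live in the KVM init code,
 * outside this file; "obj" and "size" are hypothetical placeholders for a
 * kernel object's VA and length): mirror the object into Hyp so the
 * world-switch code can dereference it at its HYP VA:
 *
 *	err = create_hyp_mappings(obj, obj + size);
 *	if (err)
 *		kvm_err("Cannot map object into Hyp\n");
 */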

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
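
/*
 * Sketch of a typical caller ("res" and "base" are hypothetical names): the
 * device is first ioremap()ed, so its kernel VA lies in the vmalloc range,
 * and that mapping is then mirrored into Hyp at the corresponding HYP VA:
 *
 *	base = ioremap(res.start, resource_size(&res));
 *	if (!base)
 *		return -ENOMEM;
 *	ret = create_hyp_io_mappings(base, base + resource_size(&res),
 *				     res.start);
 */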

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the level-1 table, whose size is defined by S2_PGD_ORDER
 * (it can support either full 40-bit input addresses or be limited to
 * 32-bit input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	int ret;
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	if (KVM_PREALLOC_LEVEL > 0) {
		/*
		 * Allocate a fake pgd for the page table manipulation macros
		 * to work. This is not used by the hardware and we have no
		 * alignment requirement for this allocation.
		 */
		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
				       GFP_KERNEL | __GFP_ZERO);
	} else {
		/*
		 * Allocate the actual first-level Stage-2 page table used by
		 * the hardware for Stage-2 page table walks.
		 */
		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
	}

	if (!pgd)
		return -ENOMEM;

	ret = kvm_prealloc_hwpgd(kvm, pgd);
	if (ret)
		goto out_err;

	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;
	return 0;
out_err:
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(pgd);
	else
		free_pages((unsigned long)pgd, S2_PGD_ORDER);
	return ret;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:	The VM pointer
 * @start:	The intermediate physical base address of the range to unmap
 * @size:	The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memory regions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	kvm_free_hwpgd(kvm);
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(kvm->arch.pgd);
	else
		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = kvm->arch.pgd + pgd_index(addr);
	if (WARN_ON(pgd_none(*pgd))) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		pgd_populate(NULL, pgd, pud);
		get_page(virt_to_page(pgd));
	}

	return pud_offset(pgd, addr);
}

static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = stage2_get_pud(kvm, cache, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}

static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	/*
	 * Mapping in huge pages should only happen through a fault. If a
	 * page is merged into a transparent huge page, the individual
	 * subpages of that huge page should be unmapped through MMU
	 * notifiers before we get here.
	 *
	 * Merging of CompoundPages is not supported; they should instead be
	 * split first, unmapped, merged, and mapped back in on demand.
	 */
	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

	old_pmd = *pmd;
	kvm_set_pmd(pmd, *new_pmd);
	if (pmd_present(old_pmd))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pmd));
	return 0;
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte,
			  unsigned long flags)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;
	bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
	bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;

	VM_BUG_ON(logging_active && !cache);

	/* Create stage-2 page table mapping - Levels 0 and 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/*
	 * While dirty page logging is active, dissolve a huge PMD mapping
	 * first, then continue on to allocate a regular page.
	 */
	if (logging_active)
		stage2_dissolve_pmd(kvm, addr, pmd);

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:	Whether to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

		if (writable)
			kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
					     KVM_NR_MEM_OBJS);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte,
				     KVM_S2PTE_FLAG_IS_IOMAP);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}
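
/*
 * Example (hypothetical values; "dev_pa" is a placeholder for the device's
 * physical address): mapping a two-page device region read-write into the
 * guest at IPA 0x2c02f000:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, 0x2c02f000, dev_pa,
 *				    2 * PAGE_SIZE, true);
 */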

static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *ipap >> PAGE_SHIFT;

	if (PageTransCompound(pfn_to_page(pfn))) {
		unsigned long mask;
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		mask = PTRS_PER_PMD - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			*ipap &= PMD_MASK;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}

		return true;
	}

	return false;
}
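
/*
 * Worked example of the adjustment above (illustrative numbers, assuming 4K
 * pages and 2M PMDs, so mask == PTRS_PER_PMD - 1 == 0x1ff): a fault on
 * pfn 0x12345 lands on a tail page of a THP, since 0x12345 & 0x1ff is
 * non-zero. The head pfn is 0x12345 & ~0x1ff == 0x12200, and the faulting
 * IPA is rounded down to the matching 2M boundary via *ipap &= PMD_MASK,
 * so the whole huge page can be mapped with a single block descriptor.
 */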

static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}

/**
 * stage2_wp_ptes - write protect PMD range
 * @pmd:	pointer to pmd entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			if (!kvm_s2pte_readonly(pte))
				kvm_set_s2pte_readonly(pte);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

/**
 * stage2_wp_pmds - write protect PUD range
 * @pud:	pointer to pud entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);

	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				if (!kvm_s2pmd_readonly(pmd))
					kvm_set_s2pmd_readonly(pmd);
			} else {
				stage2_wp_ptes(pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}

/**
 * stage2_wp_puds - write protect PGD range
 * @pgd:	pointer to pgd entry
 * @addr:	range start address
 * @end:	range end address
 *
 * Process PUD entries; huge PUDs are not supported, so we BUG on them.
 */
static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			/* TODO: PUD not supported, revisit later if supported */
			BUG_ON(kvm_pud_huge(*pud));
			stage2_wp_pmds(pud, addr, next);
		}
	} while (pud++, addr = next, addr != end);
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @kvm:	The KVM pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	pgd_t *pgd;
	phys_addr_t next;

	pgd = kvm->arch.pgd + pgd_index(addr);
	do {
		/*
		 * Release kvm_mmu_lock periodically if the memory region is
		 * large. Otherwise, we may see kernel panics with
		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
		 * will also starve other vCPUs.
		 */
		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
			cond_resched_lock(&kvm->mmu_lock);

		next = kvm_pgd_addr_end(addr, end);
		if (pgd_present(*pgd))
			stage2_wp_puds(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
 * operation is called on a memory region. After this function returns, all
 * present PMDs and PTEs in the memory region are write protected, and the
 * dirty page log can then be read.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must acquire kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(kvm, start, end);
}
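
/*
 * Worked example (illustrative numbers): with mask == 0x30, __ffs(mask) == 4
 * and __fls(mask) == 5, so the write-protected range spans gfns base_gfn + 4
 * through base_gfn + 5 inclusive. Note that the computed range covers the
 * whole span from the lowest to the highest set bit; write protecting any
 * already read-only pages in between is harmless.
 */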

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false, force_pte = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;
	pgprot_t mem_type = PAGE_S2;
	bool fault_ipa_uncached;
	bool logging_active = memslot_is_logging(memslot);
	unsigned long flags = 0;

	write_fault = kvm_is_write_fault(vcpu);
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma) && !logging_active) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to memslots that don't have the same
		 * alignment for userspace and IPA cannot be mapped using
		 * block descriptors even if the pages belong to a THP for
		 * the process, because the stage-2 block descriptor will
		 * cover more than a single THP and we lose atomicity for
		 * unmapping, updates, and splits of the THP or other pages
		 * in the stage-2 block range.
		 */
		if ((memslot->userspace_addr & ~PMD_MASK) !=
		    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
			force_pte = true;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		mem_type = PAGE_S2_DEVICE;
		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
	} else if (logging_active) {
		/*
		 * Faults on pages in a memslot with logging enabled
		 * should not be mapped with huge pages (it introduces churn
		 * and performance degradation), so force a pte mapping.
		 */
		force_pte = true;
		flags |= KVM_S2_FLAG_LOGGING_ACTIVE;

		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		if (!write_fault)
			writable = false;
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (!hugetlb && !force_pte)
		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
					  fault_ipa_uncached);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, mem_type);

		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
			mark_page_dirty(kvm, gfn);
		}
		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
					  fault_ipa_uncached);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean one of two
 * things: either the guest simply needs more memory and we must allocate an
 * appropriate page, or the guest tried to access I/O memory, which is
 * emulated by user space. The distinction is based on the IPA causing the
 * fault and whether this memory region has been registered as standard RAM
 * by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check that the stage-2 fault is a translation or permission fault */
	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_hsr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	/*
	 * We can always call stage2_set_pte with the
	 * KVM_S2_FLAG_LOGGING_ACTIVE flag clear, because MMU notifiers will
	 * have unmapped a huge PMD before calling ->change_pte() (which in
	 * turn calls kvm_set_spte_hva()) and therefore stage2_set_pte()
	 * never needs to clear out a huge PMD through this calling path.
	 */
	stage2_set_pte(kvm, NULL, gpa, pte, 0);
}
1402
1403
1404void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1405{
1406 unsigned long end = hva + PAGE_SIZE;
1407 pte_t stage2_pte;
1408
1409 if (!kvm->arch.pgd)
1410 return;
1411
1412 trace_kvm_set_spte_hva(hva);
1413 stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1414 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1415}
1416
1417void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1418{
1419 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1420}
1421
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001422phys_addr_t kvm_mmu_get_httbr(void)
1423{
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001424 return virt_to_phys(hyp_pgd);
1425}
1426
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001427phys_addr_t kvm_mmu_get_boot_httbr(void)
1428{
1429 return virt_to_phys(boot_hyp_pgd);
1430}
1431
1432phys_addr_t kvm_get_idmap_vector(void)
1433{
1434 return hyp_idmap_vector;
1435}
1436
int kvm_mmu_init(void)
{
        int err;

        hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
        hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
        hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

        if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
                /*
                 * Our init code is crossing a page boundary. Allocate
                 * a bounce page, copy the code over and use that.
                 */
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;

                init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
                        goto out;
                }

                memcpy(init_bounce_page, __hyp_idmap_text_start, len);
                /*
                 * Warning: the code we just copied to the bounce page
                 * must be flushed to the point of coherency.
                 * Otherwise, the data may be sitting in L2, and HYP
                 * mode won't be able to observe it as it runs with
                 * caches off at that point.
                 */
                kvm_flush_dcache_to_poc(init_bounce_page, len);

                phys_base = kvm_virt_to_phys(init_bounce_page);
                hyp_idmap_vector += phys_base - hyp_idmap_start;
                hyp_idmap_start = phys_base;
                hyp_idmap_end = phys_base + len;

                kvm_info("Using HYP init bounce page @%lx\n",
                         (unsigned long)phys_base);
        }

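        /*
         * For illustration (hypothetical addresses): if the init code
         * spanned 0x80301ff0-0x80302010 and the bounce page were
         * allocated at 0x81000000, hyp_idmap_vector would be rebased
         * by (0x81000000 - 0x80301ff0) to point into the copy, and the
         * idmap would then cover 0x81000000-0x81000020, safely within
         * a single page.
         */
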
        hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
        boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);

        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
                goto out;
        }

        /* Create the idmap in the boot page tables */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    hyp_idmap_start, hyp_idmap_end,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);

        if (err) {
                kvm_err("Failed to idmap %lx-%lx\n",
                        hyp_idmap_start, hyp_idmap_end);
                goto out;
        }

        /* Map the very same page at the trampoline VA */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        /* Map the same page again into the runtime page tables */
        err = __create_hyp_mappings(hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        return 0;
out:
        free_hyp_pgds();
        return err;
}

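/*
 * The same physical page is mapped at TRAMPOLINE_VA in both the boot
 * and the runtime PGDs, which is what allows the HYP init code to
 * switch HTTBR from boot_hyp_pgd to hyp_pgd while executing from an
 * address that is valid in both address spaces. Any failure above
 * unwinds through free_hyp_pgds().
 */
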
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        /*
         * At this point the memslot has been committed and there is an
         * allocated dirty_bitmap[]; dirty pages will be tracked while the
         * memory slot is write protected.
         */
        if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
                kvm_mmu_wp_memory_region(kvm, mem->slot);
}

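/*
 * Write-protecting the whole slot here means the first guest write to
 * each page after logging is enabled takes a permission fault, and the
 * fault path then sets the corresponding bit in the slot's
 * dirty_bitmap[] for userspace to harvest via KVM_GET_DIRTY_LOG.
 */
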
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;
        bool writable = !(mem->flags & KVM_MEM_READONLY);
        int ret = 0;

        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
            change != KVM_MR_FLAGS_ONLY)
                return 0;

        /*
         * Prevent userspace from creating a memory region outside of the
         * IPA space addressable by the guest.
         */
        if (memslot->base_gfn + memslot->npages >=
            (KVM_PHYS_SIZE >> PAGE_SHIFT))
                return -EFAULT;

        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we can map
         * any of them right now.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);
                hva_t vm_start, vm_end;

                if (!vma || vma->vm_start >= reg_end)
                        break;

                /*
                 * Mapping a read-only VMA is only allowed if the
                 * memory region is configured as read-only.
                 */
                if (writable && !(vma->vm_flags & VM_WRITE)) {
                        ret = -EPERM;
                        break;
                }

                /*
                 * Take the intersection of this VMA with the memory region
                 */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (vma->vm_flags & VM_PFNMAP) {
                        gpa_t gpa = mem->guest_phys_addr +
                                    (vm_start - mem->userspace_addr);
                        phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
                                         vm_start - vma->vm_start;

                        /* IO region dirty page logging not allowed */
                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
                                return -EINVAL;

                        ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                                                    vm_end - vm_start,
                                                    writable);
                        if (ret)
                                break;
                }
                hva = vm_end;
        } while (hva < reg_end);

        if (change == KVM_MR_FLAGS_ONLY)
                return ret;

        spin_lock(&kvm->mmu_lock);
        if (ret)
                unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
        else
                stage2_flush_memslot(kvm, memslot);
        spin_unlock(&kvm->mmu_lock);
        return ret;
}

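/*
 * For illustration (hypothetical values): a VM_PFNMAP VMA starting at
 * hva 0x7f0000000000 with vm_pgoff == 0x80000 describes pa 0x80000000;
 * a region mapping it at guest_phys_addr 0x10000000 resolves as
 *
 *      pa  = (vm_pgoff << PAGE_SHIFT) + (vm_start - vma->vm_start)
 *      gpa = guest_phys_addr + (vm_start - mem->userspace_addr)
 *
 * and is mapped up front as device memory via kvm_phys_addr_ioremap()
 * rather than being demand-faulted like ordinary RAM slots.
 */
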
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        /*
         * Readonly memslots are not incoherent with the caches by definition,
         * but in practice, they are used mostly to emulate ROMs or NOR flashes
         * that the guest may consider devices and hence map as uncached.
         * To prevent incoherency issues in these cases, tag all readonly
         * regions as incoherent.
         */
        if (slot->flags & KVM_MEM_READONLY)
                slot->flags |= KVM_MEMSLOT_INCOHERENT;
        return 0;
}

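/*
 * KVM_MEMSLOT_INCOHERENT is consumed by the stage-2 fault path earlier
 * in this file, which forces a dcache clean to the point of coherency
 * when mapping pages of such slots, so their contents stay visible to
 * a guest that maps them with the caches disabled.
 */
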
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(kvm, gpa, size);
        spin_unlock(&kvm->mmu_lock);
}
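
/*
 * kvm_arch_flush_shadow_all() can remain empty here because the stage-2
 * tables are freed when the VM itself is destroyed; deleting or moving
 * an individual slot funnels through kvm_arch_flush_shadow_memslot()
 * above, which unmaps just that IPA range.
 */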