/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. No need to mess around with different clusters
	 * but we read the 'U' bit from the underlying hardware directly.
	 */
	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & MPIDR_SMP_BITMASK)
				    | vcpu->vcpu_id;
}

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
	return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
	return true;
}

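/*
 * Note: bits [25:24] of L2CTLR encode the number of CPUs minus one, so the
 * reset hook below clears that field in the value read from the host and
 * substitutes the guest's VCPU count.
 */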
static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	l2ctlr |= (ncores & 3) << 24;

	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu->arch.cp15[c1_ACTLR] = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt1);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		break;

	case 10:		/* DCCSW */
		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
		break;
	}

done:
	put_cpu();

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct coproc_params *p,
		    const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

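/* All of the PM register accessors below alias the same RAZ/WI handler. */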
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
	{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },

	/* TTBCR: swapped by interrupt.S. */
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c2_TTBCR, 0x00000000 },

	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c6_IFAR },

	/* PAR: swapped by interrupt.S. */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c10_NMRR},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	unsigned int i;

	for (i = 1; i < table->num; i++)
		BUG_ON(cmp_reg(&table->table[i-1],
			       &table->table[i]) >= 0);

	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct coproc_reg *r = &table[i];

		if (params->is_64bit != r->is_64)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08lx\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_cp15_64 -- handles an MRRC/MCRR trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

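	/*
	 * Decode the MCRR/MRRC trap from the HSR ISS field, as extracted
	 * below: bit 0 is the direction (0 = write), bits [4:1] hold CRm,
	 * [9:5] Rt1, [14:10] Rt2 and [19:16] Opc1.
	 */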
	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRn = 0;

	return emulate_cp15(vcpu, &params);
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_cp15_32 -- handles an MRC/MCR trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

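	/*
	 * Decode the MCR/MRC trap from the HSR ISS field, as extracted below:
	 * bit 0 is the direction (0 = write), bits [4:1] hold CRm, [9:5] Rt,
	 * [13:10] CRn, [16:14] Opc1 and [19:17] Opc2.
	 */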
	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = false;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
	params.Rt2 = 0;

	return emulate_cp15(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

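/*
 * A minimal userspace sketch of how this API is driven (assuming a vcpu fd
 * obtained via KVM_CREATE_VCPU; CRn/CRm/Op1/Op2 below are illustrative
 * placeholders, everything else is the real KVM one-reg interface):
 *
 *	__u32 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM | KVM_REG_SIZE_U32 |
 *			(15 << KVM_REG_ARM_COPROC_SHIFT) |
 *			(CRn << KVM_REG_ARM_32_CRN_SHIFT) |
 *			(CRm << KVM_REG_ARM_CRM_SHIFT) |
 *			(Op1 << KVM_REG_ARM_OPC1_SHIFT) |
 *			(Op2 << KVM_REG_ARM_32_OPC2_SHIFT),
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * index_to_params() below performs the inverse decoding of such an id.
 */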
static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}

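/*
 * For example, FUNCTION_FOR32(0, 0, 0, 0, MIDR) below expands to a
 * get_MIDR() helper that executes "mrc p15, 0, <reg>, c0, c0, 0" and
 * stores the host's MIDR value in the table entry.
 */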
FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
};

static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	/* Returning -ENOENT here would be truthy; this is a bool. */
	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };

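/*
 * num_fp_regs() below treats an MVFR0.A_SIMD field of 2 as 32 double
 * registers (VFPv3-D32) and anything else as 16.
 */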
static unsigned int num_fp_regs(void)
{
	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
		return 32;
	else
		return 16;
}

static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_get_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	/* Note: copies two regs if size is 64 bit. */
	return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_set_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	/* Note: copies two regs if size is 64 bit. */
	return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coproc. reg. nr. for the
		 * in-kernel representation, but the user space API uses the
		 * CRm for the encoding, because it is modelled after the
		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
		 * B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}

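/*
 * A NULL uind means the caller only wants to count registers; see
 * kvm_arm_num_coproc_regs(), which passes a NULL pointer to walk_cp15().
 */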
static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
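	/*
	 * Once a table runs dry its cursor goes NULL; cmp_reg() is expected
	 * to order a NULL entry after any real one, so the loop keeps
	 * draining whichever table still has entries.
	 */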
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in a reset entry. */
	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu->arch.cp15[num] == 0x42424242)
			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}