/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

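/*
 * Build the MPIDR mask selecting the affinity fields at and above
 * @affinity_level, as needed for AFFINITY_INFO lookups. Levels above 3
 * are invalid, in which case an empty mask is returned.
 */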
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

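/*
 * CPU_OFF: mark the calling vCPU as paused. A paused vCPU stays out of
 * the guest until a matching CPU_ON wakes it up again (see
 * kvm_psci_vcpu_on() below).
 */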
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = true;
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL, *tmp;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	unsigned long mpidr;
	phys_addr_t target_pc;
	int i;

	cpu_id = *vcpu_reg(source_vcpu, 1);
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr(tmp);
		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
			vcpu = tmp;
			break;
		}
	}

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu || !vcpu->arch.pause)
		return PSCI_RET_INVALID_PARAMS;

	target_pc = *vcpu_reg(source_vcpu, 2);

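	/*
	 * Reset the target vCPU to its architectural boot state before
	 * pointing it at the entry point requested in r2.
	 */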
	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */

	wq = kvm_arch_vcpu_wq(vcpu);
	wake_up_interruptible(wq);

	return PSCI_RET_SUCCESS;
}

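/*
 * AFFINITY_INFO (PSCI v0.2): r1 carries the target affinity value and
 * r2 the lowest affinity level to match on. Report ON if any vCPU
 * within the requested affinity group is currently running.
 */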
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = *vcpu_reg(vcpu, 1);
	lowest_affinity_level = *vcpu_reg(vcpu, 2);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * Report ON if one or more vCPUs matching the target affinity
	 * are running, otherwise OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr(tmp);
		if (((mpidr & target_affinity_mask) == target_affinity) &&
		    !tmp->arch.pause) {
			return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

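/*
 * Forward a shutdown or reset request to user space as a
 * KVM_EXIT_SYSTEM_EVENT, letting the VMM decide how to act on it.
 */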
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

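/*
 * PSCI v0.2 support is opt-in: user space must set the
 * KVM_ARM_VCPU_PSCI_0_2 feature bit (via KVM_ARM_VCPU_INIT) for the
 * vCPU, otherwise the guest is limited to the legacy KVM-specific
 * v0.1 interface.
 */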
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
		return KVM_ARM_PSCI_0_2;

	return KVM_ARM_PSCI_0_1;
}

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	int ret = 1;
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

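	/*
	 * The function ID arrives in r0 (w0 for AArch64 guests); PSCI
	 * function IDs are 32-bit values, hence the truncation above.
	 */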
	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits [31:16] = Major Version = 0
		 * Bits [15:0]  = Minor Version = 2
		 */
		val = 2;
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE:
	case PSCI_0_2_FN64_MIGRATE:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * The trusted OS is either multicore-capable (and hence
		 * does not require migration) or simply not present.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally or deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, the guest VCPU
		 * should see an internal failure from the PSCI return
		 * value. To achieve this, we preload r0 (or x0) with the
		 * PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	default:
		return -EINVAL;
	}

	*vcpu_reg(vcpu, 0) = val;
	return ret;
}

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case KVM_PSCI_FN_CPU_SUSPEND:
	case KVM_PSCI_FN_MIGRATE:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	default:
		return -EINVAL;
	}

	*vcpu_reg(vcpu, 0) = val;
	return 1;
}

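/*
 * For illustration, a 32-bit guest would typically issue one of these
 * calls with something like the (hypothetical) sequence below, placing
 * the function ID in r0 and the arguments in r1-r3 before trapping to
 * the hypervisor:
 *
 *	ldr	r0, =0x84000003		@ PSCI_0_2_FN_CPU_ON
 *	mov	r1, #1			@ target MPIDR
 *	ldr	r2, =secondary_entry	@ entry point for the new CPU
 *	mov	r3, #0			@ context ID
 *	hvc	#0
 *
 * The HVC trap handler then routes the call to kvm_psci_call() below.
 */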
/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success, but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 *   -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu)) {
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}