/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by: Nicolas Pitre, October 2012
 * Copyright:  (C) 2012  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:  (C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <mach/motherboard.h>

#include <linux/vexpress.h>
#include <linux/arm-cci.h>

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

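/*
 * Usage counts, indexed as [cpu][cluster]. TC2 has two clusters
 * (a dual Cortex-A15 and a triple Cortex-A7), hence at most 3 CPUs
 * per cluster. A non-zero count means the CPU is up or coming up.
 */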
static int tc2_pm_use_count[3][2];

static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster))
                return -EINVAL;

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&tc2_pm_lock);

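        /*
         * First man up in this cluster: make sure the SPC will not
         * power the cluster down while we are bringing this CPU in.
         */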
        if (!tc2_pm_use_count[0][cluster] &&
            !tc2_pm_use_count[1][cluster] &&
            !tc2_pm_use_count[2][cluster])
                vexpress_spc_powerdown_enable(cluster, 0);

        tc2_pm_use_count[cpu][cluster]++;
        if (tc2_pm_use_count[cpu][cluster] == 1) {
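                /*
                 * The CPU was down: program its re-entry address and
                 * let the wakeup IRQ bring it up through
                 * mcpm_entry_point.
                 */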
                vexpress_spc_write_bxaddr_reg(cluster, cpu,
                                              virt_to_phys(mcpm_entry_point));
                vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
        } else if (tc2_pm_use_count[cpu][cluster] != 2) {
                /*
                 * The only possible values are:
                 * 0 = CPU down
                 * 1 = CPU (still) up
                 * 2 = CPU requested to be up before it had a chance
                 *     to actually make itself down.
                 * Any other value is a bug.
                 */
                BUG();
        }

        arch_spin_unlock(&tc2_pm_lock);
        local_irq_enable();

        return 0;
}

static void tc2_pm_down(u64 residency)
{
        unsigned int mpidr, cpu, cluster;
        bool last_man = false, skip_wfi = false;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));

        __mcpm_cpu_going_down(cpu, cluster);

        arch_spin_lock(&tc2_pm_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
        tc2_pm_use_count[cpu][cluster]--;
        if (tc2_pm_use_count[cpu][cluster] == 0) {
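                /*
                 * Going down for real: arm the wakeup IRQ so a later
                 * power_up request can revive this CPU.
                 */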
                vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
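                /*
                 * No CPU left up in the cluster, and the expected
                 * residency (in microseconds per the MCPM convention,
                 * 0 meaning indefinite) makes a full cluster shutdown
                 * worthwhile.
                 */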
                if (!tc2_pm_use_count[0][cluster] &&
                    !tc2_pm_use_count[1][cluster] &&
                    !tc2_pm_use_count[2][cluster] &&
                    (!residency || residency > 5000)) {
                        vexpress_spc_powerdown_enable(cluster, 1);
                        vexpress_spc_set_global_wakeup_intr(1);
                        last_man = true;
                }
        } else if (tc2_pm_use_count[cpu][cluster] == 1) {
                /*
                 * A power_up request went ahead of us.
                 * Even if we do not want to shut this CPU down,
                 * the caller expects a certain state as if the WFI
                 * was aborted. So let's continue with cache cleaning.
                 */
                skip_wfi = true;
        } else
                BUG();

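        /*
         * Shut the GIC CPU interface down so interrupts no longer
         * reach this core; from here on the wakeup event is expected
         * to be caught by the power controller instead.
         */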
        gic_cpu_if_down();

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&tc2_pm_lock);

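                /*
                 * Take this CPU out of coherency:
                 *  - turn the D-cache off so no new dirty lines appear,
                 *  - flush all cache levels (as last man we own the
                 *    whole cluster's caches),
                 *  - clear the local exclusive monitor,
                 *  - clear the ACTLR SMP bit to leave the coherency
                 *    domain.
                 */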
                set_cr(get_cr() & ~CR_C);
                flush_cache_all();
                asm volatile ("clrex");
                set_auxcr(get_auxcr() & ~(1 << 6));

                disable_cci(cluster);

                /*
                 * Ensure that both C & I bits are disabled in the SCTLR
                 * before disabling ACE snoops. This ensures that no
                 * coherency traffic will originate from this cpu after
                 * ACE snoops are turned off.
                 */
                cpu_proc_fin();

                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                /*
                 * If we were the last man but could not enter the
                 * outbound critical section, undo the cluster shutdown
                 * setup done above.
                 */
                if (last_man) {
                        vexpress_spc_powerdown_enable(cluster, 0);
                        vexpress_spc_set_global_wakeup_intr(0);
                }

                arch_spin_unlock(&tc2_pm_lock);

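                /*
                 * Not the last man: clean only this CPU's own levels
                 * of cache (to the Level of Unification Inner
                 * Shareable) before leaving coherency.
                 */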
                set_cr(get_cr() & ~CR_C);
                flush_cache_louis();
                asm volatile ("clrex");
                set_auxcr(get_auxcr() & ~(1 << 6));
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        if (!skip_wfi)
                wfi();

        /* Not dead at this point? Let our caller cope. */
}

static void tc2_pm_power_down(void)
{
        tc2_pm_down(0);
}

static void tc2_pm_suspend(u64 residency)
{
        extern void tc2_resume(void);
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
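        /*
         * On wakeup from suspend the CPU re-enters the kernel through
         * tc2_resume rather than the generic mcpm_entry_point.
         */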
        vexpress_spc_write_bxaddr_reg(cluster, cpu,
                                      virt_to_phys(tc2_resume));

        tc2_pm_down(residency);
}

static void tc2_pm_powered_up(void)
{
        unsigned int mpidr, cpu, cluster;
        unsigned long flags;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));

        local_irq_save(flags);
        arch_spin_lock(&tc2_pm_lock);

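        /*
         * First man up: the cluster is back, so cancel the cluster
         * powerdown and global wakeup that the last man may have
         * armed on the way down.
         */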
        if (!tc2_pm_use_count[0][cluster] &&
            !tc2_pm_use_count[1][cluster] &&
            !tc2_pm_use_count[2][cluster]) {
                vexpress_spc_powerdown_enable(cluster, 0);
                vexpress_spc_set_global_wakeup_intr(0);
        }

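        /*
         * If we went down through power_down/suspend our count was
         * dropped to 0; now that we are alive again set it back to 1.
         */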
        if (!tc2_pm_use_count[cpu][cluster])
                tc2_pm_use_count[cpu][cluster] = 1;

        vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 0);
        vexpress_spc_write_bxaddr_reg(cluster, cpu, 0);

        arch_spin_unlock(&tc2_pm_lock);
        local_irq_restore(flags);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
        .power_up       = tc2_pm_power_up,
        .power_down     = tc2_pm_power_down,
        .suspend        = tc2_pm_suspend,
        .powered_up     = tc2_pm_powered_up,
};
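
/*
 * These callbacks are not called directly but through the MCPM core.
 * A rough sketch of the mapping, per the MCPM API of this era:
 *
 *      mcpm_cpu_power_up(cpu, cluster)       -> .power_up
 *      mcpm_cpu_power_down()                 -> .power_down
 *                                               (normally does not return)
 *      mcpm_cpu_suspend(expected_residency)  -> .suspend
 */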
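/*
 * At init time the boot CPU is the only one running: seed its usage
 * count so the bookkeeping matches reality from the start.
 */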
static void __init tc2_pm_usage_count_init(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= 3 || cluster >= 2);
        tc2_pm_use_count[cpu][cluster] = 1;
}

extern void tc2_pm_power_up_setup(unsigned int affinity_level);

static int __init tc2_pm_init(void)
{
        int ret;

        if (!vexpress_spc_check_loaded())
                return -ENODEV;

        tc2_pm_usage_count_init();

        ret = mcpm_platform_register(&tc2_pm_power_ops);
        if (!ret)
                ret = mcpm_sync_init(tc2_pm_power_up_setup);
        if (!ret)
                pr_info("TC2 power management initialized\n");
        return ret;
}

early_initcall(tc2_pm_init);