path: root/big-little/secure_world/secure_resets.c
/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *       
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *     
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the 
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and 
 * the following disclaimer in the documentation 
 * and/or other materials provided with the distribution.
 *      
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.                        
 */

#include "secure_world.h"
#include "events.h"
#include "bakery.h"

/* Bakery lock to serialize access to the tube. */
bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };

/*
 * Compile time switch to decide whether the outbound
 * L2 will always be kept on for inbound cache warming
 * or will be flushed and reset after the BL context
 * has been picked up.
 */
static unsigned flush_ob_l2 = FLUSH_OB_L2;

#if FLUSH_L2_FIX
/*
 * TODO:
 * Dirty hack for backward compatibility. This
 * variable helps determine whether this is the
 * first switch.
 */
static unsigned switch_count = 0;
#endif

#if FM_BETA
/*
 * Variable in the secure world to indicate the
 * reset type, i.e. cold (0) or warm (!0) reset.
 */
unsigned ve_reset_type[NUM_CPUS];
#endif

/*
 * Allocate secure events in our device page
 */
unsigned event[MAX_CORES][MAX_EVENTS]
    __attribute__ ((section("BL_SEC_DV_PAGE")));

/*
 * Normal spinlock to guard inbound cluster registers
 * in the KFSCB. It will always be used when the MMU
 * is on. In any case, each cluster uses it sequentially.
 */
static unsigned lock_ib_kfscb;

/*
 * Bakery lock to guard outbound cluster registers in the
 * KFSCB. It will always be used when the MMU is off.
 * In any case, each cluster uses it sequentially.
 */
static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };

/*
 * Small stacks for after we have turned our caches off.
 */
static unsigned long long powerdown_stacks[NUM_CPUS][32]
    __attribute__ ((section("BL_SEC_DV_PAGE")));

unsigned long long *get_powerdown_stack(unsigned cpu_id)
{
	/*
	 * ARM stacks are full-descending: return the address just past
	 * the end of this cpu's stack as the initial stack pointer.
	 */
	return &powerdown_stacks[cpu_id + 1][0];
}

static void (*get_reset_handler(unsigned cluster_id, unsigned cpu_id)) (void) {
#if FM_BETA
	/* Under FM_BETA the "handler" is really just the cold/warm flag */
	return (void (*)(void))ve_reset_type[cpu_id];
#else
	return (void (*)(void))read32(KFSCB_BASE + RST_HANDLER0 +
				      ((cpu_id + (cluster_id << 2)) << 3));
#endif
}

/*
 * The way a warm reset is detected has changed in the post-beta Fast
 * Models. The following workarounds let the earlier approach coexist
 * with the new one: instead of dealing with a function pointer, they
 * manipulate a variable.
 */
void set_reset_handler(unsigned cluster_id, unsigned cpu_id,
		       void (*handler) (void))
{
	void (*prev_reset_handler) (void) =
	    get_reset_handler(cluster_id, cpu_id);

	if (prev_reset_handler != handler) {
#if FM_BETA
		ve_reset_type[cpu_id]++;
		cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
#else
		write32(KFSCB_BASE + RST_HANDLER0 +
			((cpu_id + (cluster_id << 2)) << 3), (unsigned)handler);
		dsb();
#endif
	}

	return;
}

unsigned get_inbound(void)
{
	/*
	 * With two clusters, the inbound cluster id is simply the
	 * complement of the current (outbound) cluster id.
	 */
	return !read_clusterid();
}
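
/*
 * Illustrative only: a hedged sketch (kept out of the build by the
 * #if 0 guard) of installing a warm reset entry point for this cpu's
 * inbound counterpart. 'warm_reset_entry' and the wrapper below are
 * hypothetical and not part of this file.
 */
#if 0
extern void warm_reset_entry(void);

static void example_install_reset_handler(void)
{
	set_reset_handler(get_inbound(), read_cpuid(), warm_reset_entry);
}
#endif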

/*
 * Simple function to bring the corresponding core in the inbound
 * cluster out of reset.
 */
void powerup_ib_core(unsigned cluster_id, unsigned cpu_id)
{
	unsigned rst_stat_reg = 0x0;
	unsigned cpu_mask = 0x0;

#if FLUSH_L2_FIX
	if (0 == switch_count) {
		set_event(FLUSH_L2, cpu_id);
		/*
		 * We really do not care about a race to update
		 * this variable as long as it has a non-zero value
		 * after a switch.
		 */
		switch_count++;
	}
#endif

	write_trace(&lock_tube0, SEC_TUBE0, "Powerup Inbound", read_cntpct(),
		    0x0, 0x0);

	spin_lock(&lock_ib_kfscb);
	rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));
	/* Clear the cluster reset, core poreset & core reset bits */
	cpu_mask = (1 << 8) | ((1 << 4) << cpu_id) | (1 << cpu_id);
	rst_stat_reg &= ~cpu_mask;
	write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), rst_stat_reg);
	spin_unlock(&lock_ib_kfscb);

	return;
}

/*
 * Simple function to place a core in the outbound cluster
 * in reset.
 */
void powerdown_ob_core(unsigned cluster_id, unsigned cpu_id)
{
	unsigned val = 0x0;
	unsigned mask = 0x0;

	get_bakery_spinlock(cpu_id, &lock_ob_kfscb);

	val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
	mask = (1 << cpu_id) << 4;
	val |= mask;
	write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);

	release_bakery_spinlock(cpu_id, &lock_ob_kfscb);

	return;
}

/*
 * Simple function to place the outbound cluster in reset.
 */
void powerdown_ob_cluster(unsigned cluster_id, unsigned cpu_id)
{
	unsigned val = 0x0;
	unsigned mask = 0x0;

	get_bakery_spinlock(cpu_id, &lock_ob_kfscb);

	val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
	mask = 1 << 8;
	val |= mask;
	write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);

	release_bakery_spinlock(cpu_id, &lock_ob_kfscb);

	return;
}
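
/*
 * Layout of the per-cluster RST_HOLD/RST_STAT registers as used in
 * this file, inferred from the masks above rather than from a TRM:
 *
 *   bit  8   - cluster reset
 *   bits 7:4 - per-core powered-on reset (poreset), one bit per cpu
 *   bits 3:0 - per-core reset, one bit per cpu
 */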

/*
 * Do not use this function for Read-Modify-Write of KFSCB registers
 * as it does not hold a lock.
 */
unsigned reset_status(unsigned cluster_id, unsigned rst_level,
		      unsigned cpu_mask)
{
	unsigned rst_stat_reg = 0x0;

	rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));

	switch (rst_level) {
	case CLUSTER_RESET:
		return rst_stat_reg >> 8;
	case CORE_PORESET:
		return ((rst_stat_reg >> 4) & 0xf) & cpu_mask;
	case CORE_RESET:
		return (rst_stat_reg & 0xf) & cpu_mask;
	default:
		return 0;
	}
}
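
/*
 * Power down the calling cpu of the outbound cluster. In summary:
 * every cpu cleans its L1 and leaves coherency; the 'first_cpu'
 * additionally waits for the rest, optionally cleans the L2, disables
 * CCI snoops & DVM messages for its cluster and asserts the cluster
 * or core reset; the other cpus simply place themselves in reset.
 */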

void do_power_op(unsigned cpu_mask, unsigned op_type)
{
	unsigned cpu_id = read_cpuid();
	unsigned cluster_id = read_clusterid();
	unsigned secondary_mask = 0x0;
	unsigned first_cpu = find_first_cpu();

	/*
	 * Brute force way of cleaning the L1 and L2 caches of the outbound
	 * cluster. All cpus flush their L1 caches. The 'first_cpu' waits for
	 * the others to finish this operation before flushing the L2.
	 */
	write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush Begin", read_cntpct(),
		    0x0, 0x0);
	write_sctlr(read_sctlr() & ~CR_C & ~CR_M);
	dsb();
	isb();
	inv_icache_all();
	cache_maint_op(L1, CLN_INV);
	disable_coherency();
	write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush End", read_cntpct(), 0x0,
		    0x0);
	if (OP_TYPE_HP != op_type)
		set_event(SEC_L1_DONE, cpu_id);

	/* This code will never be executed for hotplug */
	if (cpu_id == first_cpu) {

		wait_for_events(SEC_L1_DONE, cpu_mask);

		if (flush_ob_l2) {
#if FLUSH_L2_FIX
			wait_for_event(FLUSH_L2, cpu_id);
			reset_event(FLUSH_L2, cpu_id);
#endif
			write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush Begin",
				    read_cntpct(), 0x0, 0x0);
			cache_maint_op(L2, CLN_INV);
			write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush End",
				    read_cntpct(), 0x0, 0x0);

			/* Turn off CCI snoops & DVM messages */
			if (cluster_id)
				write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
			else
				write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG,
					0x0);

			dsb();

			/* Wait for the dust to settle down */
			while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
		}

		/********************** RESET HANDLING *************************
		 * Secondaries place themselves in reset while the 'first_cpu'
		 * waits for them to do so.
		 ***************************************************************/

		secondary_mask = cpu_mask & ~(1 << cpu_id);

		/* Wait for other cpus to enter reset */
		while (secondary_mask !=
		       reset_status(cluster_id, CORE_PORESET, secondary_mask)) ;

		if (flush_ob_l2)
			powerdown_ob_cluster(cluster_id, cpu_id);
		else
			powerdown_ob_core(cluster_id, cpu_id);

	} else {
		switch (op_type) {
		case (OP_TYPE_HP):
			get_bakery_spinlock(cpu_id, &lock_ob_kfscb);
			write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2),
				cpu_mask);
			release_bakery_spinlock(cpu_id, &lock_ob_kfscb);
			break;
		case (OP_TYPE_SWITCH):
			powerdown_ob_core(cluster_id, cpu_id);
			break;
		default:
			panic();
		}
	}

	write_trace(&lock_tube0, SEC_TUBE0, "Reset Initiated", read_cntpct(),
		    0x0, 0x0);
	return;
}
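
/*
 * Illustrative only: a minimal, hypothetical sketch (kept out of the
 * build by the #if 0 guard) of invoking do_power_op() on the outbound
 * cluster during a switch. 'switch_cpu_mask' is an assumed mask of the
 * participating cpus; every one of them calls in with the same mask.
 */
#if 0
static void example_outbound_switch(unsigned switch_cpu_mask)
{
	do_power_op(switch_cpu_mask, OP_TYPE_SWITCH);
}
#endif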