summaryrefslogtreecommitdiff
path: root/big-little/switcher/trigger/async_switchover.c
blob: b8585e715eb8676dc6fd23b2586e247e8d9dc9ed (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *       
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *     
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the 
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and 
 * the following disclaimer in the documentation 
 * and/or other materials provided with the distribution.
 *      
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.                        
 */

#include "ipi.h"
#include "virt_helpers.h"
#include "misc.h"
#include "stdlib.h"
#include "gic_registers.h"

extern void gic_enable_int(unsigned);
extern void gic_disable_int(unsigned);
extern void gic_eoi_int(unsigned);
extern void gic_deactivate_int(unsigned);
extern int __rand_r(struct _rand_state *);

static unsigned timer_count;
/* Support for the switchover interval randomly but sanely  */
static unsigned rand_async_switches = RAND_ASYNC;
/* Use HYP timer for async switches  */
unsigned hyp_timer_trigger = USE_HYP_TIMERS;
/*
 * Mask that is populated at the beginning of each
 * switch to indicate which cpus will take part in
 * switching. Its derived from the RST_HLDx register
 * and effects the event mechanism.
 */
unsigned switchable_cpus_mask = 0;

/*
 * Acknowledge the HYP timer interrupt that triggered a switchover.
 * The timer must actually be asserting its interrupt; anything else
 * is a spurious wakeup and is treated as fatal.
 */
static void ack_trigger(void)
{
	unsigned ctl = read_cnthp_ctl();

	if (ctl & TIMER_IRQ_STAT) {
		/* Disable the timer and mask its interrupt */
		write_cnthp_ctl(TIMER_MASK_IRQ);
		return;
	}

	printf("Spurious HYP timer irq \n");
	panic();
}

/*
 * Report whether the given cluster is held in reset.
 * Returns non-zero when the cluster is in reset, zero otherwise.
 */
unsigned cluster_reset_status(unsigned cluster_id)
{
	unsigned rst_stat;

	/* Each cluster has its own RST_STATx word, 4 bytes apart. */
	rst_stat = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));

	/* Bit 8 carries the cluster-level reset state. */
	return rst_stat & (1 << 8);
}

/*
 * Broadcast first available IPI so that all cpus can start switching to 
 * the other cluster.
 */
void signal_switchover(void)
{
	unsigned cluster_id = read_clusterid();
	unsigned rst_hold;
	unsigned cpuif_mask;

	/*
	 * Read the RST_HLDx register of this cluster to get the latest
	 * cpu reset status and send the IPI to only those cpus that are
	 * active.
	 *
	 * NOTE:
	 * 1. Reading the RST_HLDx instead of RST_STATx to allow any
	 *    last cpus to power down if they are in the process.
	 * 2. We do not need locks around this variable as only one cpu
	 *    will change it during a switch and always after the previous
	 *    switch has completed.
	 */
	rst_hold = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));

	/*
	 * Bits [7:4] hold the per-cpu reset state; invert them against the
	 * full-cluster mask so a set bit means "cpu is online/switchable".
	 */
	switchable_cpus_mask = ((rst_hold >> 4) & 0xf)
	    ^ ((1 << CLUSTER_CPU_COUNT(cluster_id)) - 1);

	/*
	 * Map the target cpuids to their cpu interfaces as the 1:1 mapping
	 * no longer exists with the external vGIC.
	 */
	cpuif_mask = get_cpuif_mask(switchable_cpus_mask);

	/*
	 * Send an ipi to all the online cpus in the cluster including ourselves
	 * to start a switch to the inbound cluster.
	 */
	send_hyp_ipi(cpuif_mask, IPI_CLUSTER_SWITCH);
}

/*
 * Check whether the given hyp IPI on this cpu interface is a
 * cluster-switch request. Returns TRUE if so, FALSE otherwise.
 */
unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
{
	return (get_hyp_ipi(cpu_if, ipi_no) == IPI_CLUSTER_SWITCH)
	    ? TRUE : FALSE;
}

/*
 * Decide whether the interrupt just taken should trigger a cluster
 * switchover and, if so, initiate it.
 *
 * int_id  - the interrupt number being handled
 * int_ack - the value read from the GIC acknowledge register for this
 *           interrupt (used for EOI/deactivation)
 *
 * Returns TRUE when a switchover was started, FALSE when the interrupt
 * is not a switch trigger.
 */
unsigned check_trigger(unsigned int_id, unsigned int_ack)
{
	unsigned cpuid = read_cpuid(), cluster_id = read_clusterid();
	/* Platform field of KFS_ID; 0x1 apparently means FastModel (see below). */
	unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;

	/*
	 * If we are not using HYP mode timers for triggering a switchover
	 * then check whether this is a suitable local timer interrupt to
	 * switch
	 */
	if (hyp_timer_trigger == FALSE) {
		/*
		 * We need to hijack every 128th timer interrupt on cpu0 and
		 * use it as a stimulus to switchover
		 */
		if (cpuid == 0 && int_id == LCL_TIMER_IRQ)
			timer_count++;

		/*
		 * Proceed only when (timer_count & LCL_TIMER_FREQ) == 0;
		 * assumes LCL_TIMER_FREQ is a power-of-two-minus-one mask
		 * (e.g. 127 for "every 128th") — TODO confirm the constant.
		 *
		 * NOTE(review): interrupts other than cpu0's local timer also
		 * fall through here whenever the masked count is zero —
		 * confirm callers only invoke this for trigger candidates.
		 */
		if (timer_count & LCL_TIMER_FREQ)
			return FALSE;
	}
	/*
	 * Trigger a switchover upon getting a HYP timer IRQ. Its
	 * targetted only to cpu0.
	 */
	else if (int_id != HYP_TIMER_IRQ)
		return FALSE;

	/*
	 * Do the needful now that it is confirmed that we need to move
	 * to the other cluster
	 */

	/* Indicator on emulation that switches are actually taking place */
	if (platform != 0x1)
		printf("%d", cluster_id);

	/*
	 * Spin until the other (inbound) cluster reports that it is in
	 * reset, i.e. until the previous switchover has fully completed.
	 */
	while (FALSE == cluster_reset_status(!cluster_id)) ;

	/*
	 * Send an IPI to all the cores in this cluster to start
	 * a switchover.
	 */
	signal_switchover();

	if (hyp_timer_trigger)
		ack_trigger();
	else
		/* 
		 * Complete handling of the local timer interrupt at the physical gic
		 * level. Its disabled as its level triggerred and will reassert as 
		 * soon as we leave this function since its not been cleared at the 
		 * peripheral just yet. The local timer context is saved and this irq
		 * cleared while saving the context. The interrupt is enabled then.
		 */
		gic_disable_int(int_id);

	/* Finish handling this interrupt */
	gic_eoi_int(int_ack);
	/*
	 * Bit 9 of GICC_CTL presumably selects split EOI/deactivate mode,
	 * in which case EOI only drops priority and an explicit
	 * deactivation is needed — TODO confirm against the GIC spec.
	 */
	if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
		gic_deactivate_int(int_ack);

	return TRUE;
}

/*
 * Re-arm the switchover trigger if the OS has disabled it.
 *
 * The OS might have disabled the HYP timer interrupt while setting up
 * its view of the vGIC. So re-enable it upon receiving any other
 * interrupt if it is found disabled. Better than virtualising vGIC
 * accesses on the TARGET CPU.
 */
void keep_trigger_alive(void)
{
	unsigned enabled;

	/* Only relevant when HYP timers drive the switchover. */
	if (!hyp_timer_trigger)
		return;

	enabled = read32(GIC_ID_PHY_BASE + GICD_ENABLESET)
	    & (1 << HYP_TIMER_IRQ);
	if (!enabled)
		gic_enable_int(HYP_TIMER_IRQ);
}

/*
 * Program the HYP timer to fire the next switchover trigger.
 *
 * tval - baseline timer downcount value (nominally 12000000 on the
 *        FastModel; scaled down by 8 on the emulator).
 *
 * No-op when physical local timers (hyp_timer_trigger == FALSE) are
 * being used for switchovers.
 */
void enable_trigger(unsigned tval)
{
	unsigned ctl = TIMER_ENABLE;
	/*
	 * BUG FIX: the ">> 20 & 0xf" platform-field extraction must apply
	 * to the VALUE read from the KFS_ID register, not to its address.
	 * The original read32((KFSCB_BASE + KFS_ID) >> 20) read from a
	 * bogus address; check_trigger() has the correct form.
	 */
	unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;

	/*
	 * No need to lock these as they are accessed by only one cpu
	 * per cluster and that too one at a time.
	 */
	static unsigned int rand_no = 0xdeadbeef;
	static struct _rand_state buffer;

	/*
	 * Nothing needs to be done if physical local timers
	 * are being used for doing a switchover.
	 */
	if (hyp_timer_trigger == TRUE) {
		if (rand_async_switches) {
			_srand_r(&buffer, rand_no);
			rand_no = (unsigned)_rand_r(&buffer);
		}

		/* Enable timer and unmask interrupt */
		write_cnthp_ctl(ctl);

		if (rand_async_switches) {
			unsigned interval;
			unsigned factor = rand_no % 10;

			/*
			 * BUG FIX: when rand_no is even and ends in 0,
			 * rand_no % 10 == 0 and the division below would
			 * divide by zero (and the multiplication would
			 * program a zero interval). Clamp to 1.
			 */
			if (factor == 0)
				factor = 1;

			/*
			 * TODO: Assuming that the tval is always 12000000
			 * Increment or decrement the timer value randomly
			 * but never by more than a factor of 10
			 */
			if (rand_no % 2)
				interval = tval * factor;
			else
				interval = tval / factor;

			write_cnthp_tval(interval);

		} else {
			/*
			 * Program the timer to fire every 12000000 instructions
			 * on the FastModel while 1500000 cycles on the Emulator
			 */
			if (platform == 0x1)
				write_cnthp_tval(tval);
			else
				write_cnthp_tval(tval >> 3);
		}

		gic_enable_int(HYP_TIMER_IRQ);
	}

	return;
}