path: root/big-little/lib/virt_events.c
/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *       
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *     
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the 
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and 
 * the following disclaimer in the documentation 
 * and/or other materials provided with the distribution.
 *      
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.                        
 */

#include "events.h"
#include "misc.h"
#include "virt_helpers.h"

/*
 * Cpu ids are used as-is when "switcher" is true. In the
 * "always on" case absolute cpu ids are used, i.e. 0-7 for
 * an MPx4+MPx4 configuration.
 */
/*
 * Pick up the event definitions from the world that wants
 * to use them.
 */
extern unsigned event[][MAX_EVENTS];
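
/*
 * Illustrative sketch only (kept out of the build): one way the owning
 * world could define the array declared above. "NUM_CPUS" is a
 * placeholder; the real definition and its first dimension live outside
 * this library and are not visible here.
 */
#if 0
unsigned event[NUM_CPUS][MAX_EVENTS];
#endif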

/*
 * Set the specified event for that cpu and wake any cores
 * waiting in wfe().
 */
void set_event(unsigned event_id, unsigned cpu_id)
{
	event[cpu_id][event_id] = TRUE;
	/* Ensure the flag update is visible before raising the event. */
	dsb();
	sev();
	return;
}

/*
 * Return the current state of the specified event for that cpu.
 */
inline unsigned get_event(unsigned event_id, unsigned cpu_id)
{
	return event[cpu_id][event_id];
}

/*
 * Clear the specified event for that cpu.
 */
void reset_event(unsigned event_id, unsigned cpu_id)
{
	event[cpu_id][event_id] = FALSE;
	return;
}

/*
 * Block until the specified event has been set for this cpu. wfe()
 * can wake up for unrelated reasons, so the flag is re-checked in
 * a loop.
 */
void wait_for_event(unsigned event_id, unsigned cpu_id)
{
	while (FALSE == get_event(event_id, cpu_id)) {
		wfe();
	}

	return;
}
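
/*
 * Usage sketch (illustrative only, kept out of the build): one cpu flags
 * another and the target blocks until the flag appears. "WAKEUP_EVENT"
 * is a hypothetical event id assumed to be a valid index below
 * MAX_EVENTS; events.h does not define it.
 */
#if 0
static void signal_cpu_example(unsigned target_cpu)
{
	/* Flag the event for the target and wake it out of wfe(). */
	set_event(WAKEUP_EVENT, target_cpu);
}

static void await_signal_example(unsigned own_cpu)
{
	/* Block until the flag is seen, then clear it for reuse. */
	wait_for_event(WAKEUP_EVENT, own_cpu);
	reset_event(WAKEUP_EVENT, own_cpu);
}
#endif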

/*
 * Wait for events from each core. This is a little trickier than
 * waiting for a single event: the architectural event register is
 * a single bit that flags "an event occurred" rather than counting
 * how many occurred. If several cores signal before we enter wfe(),
 * only one wakeup is seen, so every per-cpu flag variable has to be
 * checked on each pass.
 */
void wait_for_events(unsigned event_id)
{
	unsigned ctr, event_count = 0, num_cpus = 0;

	if (switcher) {
		num_cpus = num_secondaries() + 1;
	} else {
		num_cpus = CLUSTER_CPU_COUNT(host_cluster)
		    + CLUSTER_CPU_COUNT(!host_cluster);
	}

	do {
		/* Harvest and clear any flags that have been set so far. */
		for (ctr = 0; ctr < num_cpus; ctr++) {
			if (TRUE == get_event(event_id, ctr)) {
				event_count++;
				reset_event(event_id, ctr);
			}
		}

		/* Stop once every cpu has reported in; otherwise sleep
		   until more events arrive. */
		if (event_count != num_cpus)
			wfe();
		else
			break;
	} while (1);

	return;
}

/*
 * Set the specified event for every cpu in the system.
 */
void set_events(unsigned event_id)
{
	unsigned ctr, num_cpus = 0;

	if (switcher) {
		num_cpus = num_secondaries() + 1;
	} else {
		num_cpus = CLUSTER_CPU_COUNT(host_cluster)
		    + CLUSTER_CPU_COUNT(!host_cluster);
	}

	for (ctr = 0; ctr < num_cpus; ctr++) {
		set_event(event_id, ctr);
	}
	return;
}
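
/*
 * Usage sketch (illustrative only, kept out of the build): a rendezvous
 * built from the helpers above. The primary releases every core with
 * set_events() and then collects acknowledgements with
 * wait_for_events(). Since wait_for_events() scans all cpus, including
 * the caller, the primary sets its own ack flag before waiting. The
 * event ids and the is_primary flag are assumptions made for the
 * example, not ids defined by events.h.
 */
#if 0
#define EXAMPLE_REQ_EVENT	0
#define EXAMPLE_ACK_EVENT	1

static void rendezvous_example(unsigned cpu_id, unsigned is_primary)
{
	if (is_primary) {
		/* Release every core, then discard our own request flag. */
		set_events(EXAMPLE_REQ_EVENT);
		reset_event(EXAMPLE_REQ_EVENT, cpu_id);

		/* Acknowledge on our own behalf, then wait for the rest. */
		set_event(EXAMPLE_ACK_EVENT, cpu_id);
		wait_for_events(EXAMPLE_ACK_EVENT);
	} else {
		wait_for_event(EXAMPLE_REQ_EVENT, cpu_id);
		reset_event(EXAMPLE_REQ_EVENT, cpu_id);
		set_event(EXAMPLE_ACK_EVENT, cpu_id);
	}
}
#endif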