path: root/arch/powerpc/platforms/cell/cbe_regs.c
/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>

/*
 * The current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than calling of_get_cpu_node().
 * If we implement cpu hotplug, we'll have to install an appropriate
 * notifier in order to release references to the cpu going away.
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_regs_map *regs;
	unsigned int thread_id;
	unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];

static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };

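/*
 * Resolve a device tree node to its cbe_regs_map.  Nodes other than SPEs
 * are matched directly against the map array; for SPE nodes we walk up
 * the tree to the owning cpu or be node and cache the result in np->data
 * so subsequent lookups are cheap.
 */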
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (strcasecmp(np->type, "spe")) {
		for (i = 0; i < cbe_regs_map_count; i++)
			if (cbe_regs_maps[i].cpu_node == np ||
			    cbe_regs_maps[i].be_node == np)
				return &cbe_regs_maps[i];
		return NULL;
	}

	if (np->data)
		return np->data;

	/* walk up the tree until a cpu or be node is found */
	tmp_np = np;
	do {
		tmp_np = tmp_np->parent;
		/* on a correct device tree we won't walk up to the root */
		BUG_ON(!tmp_np);
	} while (strcasecmp(tmp_np->type, "cpu") &&
		 strcasecmp(tmp_np->type, "be"));

	np->data = cbe_find_map(tmp_np);

	return np->data;
}

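/*
 * The accessors below come in pairs: one keyed by device tree node and
 * one keyed by logical cpu number.  All of them return NULL if no map
 * was found for the given node or cpu.
 */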
struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);

struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);

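/*
 * Simple helpers on top of the thread map: hardware thread id within a
 * CBE, logical cpu to CBE (node) id, and the first cpu of a given CBE.
 */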
u32 cbe_get_hw_thread_id(int cpu)
{
	return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);

u32 cbe_cpu_to_node(int cpu)
{
	return cbe_thread_map[cpu].cbe_id;
}
EXPORT_SYMBOL_GPL(cbe_cpu_to_node);

u32 cbe_node_to_cpu(int node)
{
	return cpumask_first(&cbe_local_mask[node]);
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);

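/*
 * Find the "be" node that owns a given logical cpu by matching the
 * phandles listed in its "cpus" property against the cpu's device node.
 */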
static struct device_node *cbe_get_be_node(int cpu_id)
{
	struct device_node *np;

	for_each_node_by_type(np, "be") {
		int len, i;
		const phandle *cpu_handle;

		cpu_handle = of_get_property(np, "cpus", &len);

		/*
		 * the CAB SLOF tree is non-compliant, so we just assume
		 * there is only one node
		 */
		if (WARN_ON_ONCE(!cpu_handle))
			return np;

		/* len is in bytes; the property is an array of phandles */
		for (i = 0; i < len / sizeof(*cpu_handle); i++)
			if (of_find_node_by_phandle(cpu_handle[i]) ==
			    of_get_cpu_node(cpu_id, NULL))
				return np;
	}

	return NULL;
}

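/*
 * Map the MMIO register blocks for one CBE.  On device trees with a
 * "be" node the pervasive, internal interrupt controller and mic-tm
 * register blocks are found in its children; on older trees the
 * addresses are carried directly in properties of the cpu node.
 */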
void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
	if (map->be_node) {
		struct device_node *be, *np;

		be = map->be_node;

		for_each_node_by_type(np, "pervasive")
			if (of_get_parent(np) == be)
				map->pmd_regs = of_iomap(np, 0);

		for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
			if (of_get_parent(np) == be)
				map->iic_regs = of_iomap(np, 2);

		for_each_node_by_type(np, "mic-tm")
			if (of_get_parent(np) == be)
				map->mic_tm_regs = of_iomap(np, 0);
	} else {
		struct device_node *cpu;
		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		cpu = map->cpu_node;

		prop = of_get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}


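/*
 * Build the cpu -> CBE maps at boot: record each possible cpu's device
 * node, hardware thread id and owning "be" node, then walk the "cpu"
 * nodes to allocate one cbe_regs_map per CBE and map its registers.
 */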
void __init cbe_regs_init(void)
{
	int i;
	unsigned int thread_id;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i) {
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
		cbe_thread_map[i].be_node = cbe_get_be_node(i);
		cbe_thread_map[i].thread_id = thread_id;
	}

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map;
		unsigned int cbe_id;

		cbe_id = cbe_regs_map_count++;
		map = &cbe_regs_maps[cbe_id];

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR
			       "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			of_node_put(cpu);
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i) {
			struct cbe_thread_map *thread = &cbe_thread_map[i];

			if (thread->cpu_node == cpu) {
				thread->regs = map;
				thread->cbe_id = cbe_id;
				map->be_node = thread->be_node;
				cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
				if (thread->thread_id == 0)
					cpumask_set_cpu(i, &cbe_first_online_cpu);
			}
		}

		cbe_fill_regs_map(map);
	}
}