/*
 * Common CPM code
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright 2007 Freescale Semiconductor, Inc.
 *
 * Some parts derived from commproc.c/cpm2_common.c, which is:
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
 * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
 * 2006 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/rheap.h>
#include <asm/cpm.h>

#include <mm/mmu_decl.h>

#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
#include <linux/of_gpio.h>
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
static u32 __iomem *cpm_udbg_txdesc =
	(u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;

static void udbg_putc_cpm(char c)
{
	u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);

	if (c == '\n')
		udbg_putc_cpm('\r');

	/* Spin until the CPM clears the ready bit of the single TX descriptor */
	while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
		;

	out_8(txbuf, c);
	/* Hand the byte to the CPM: mark the BD ready and wrap, length 1 */
	out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
}

void __init udbg_init_cpm(void)
{
	if (cpm_udbg_txdesc) {
#ifdef CONFIG_CPM2
		setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);
#endif
		udbg_putc = udbg_putc_cpm;
	}
}
#endif

static spinlock_t cpm_muram_lock;
static rh_block_t cpm_boot_muram_rh_block[16];
static rh_info_t cpm_muram_info;
static u8 __iomem *muram_vbase;
static phys_addr_t muram_pbase;

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4

/*
 * cpm_muram_init - find the MURAM node in the device tree, attach its
 * regions to the rheap allocator and map the whole area.  Safe to call
 * more than once; later calls return 0 immediately.
 */
int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	u32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	spin_lock_init(&cpm_muram_lock);
	/* initialize the info header */
	rh_init(&cpm_muram_info, 1,
	        sizeof(cpm_boot_muram_rh_block) /
	        sizeof(cpm_boot_muram_rh_block[0]),
	        cpm_boot_muram_rh_block);

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			printk(KERN_ERR "Cannot find CPM muram data node\n");
			ret = -ENODEV;
			goto out;
		}
	}

	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		printk(KERN_ERR "Cannot translate zero through CPM muram node\n");
		ret = -ENODEV;
		goto out;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;

		rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
		                 r.end - r.start + 1);
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		printk(KERN_ERR "Cannot map CPM muram\n");
		ret = -ENOMEM;
	}

out:
	of_node_put(np);
	return ret;
}

/**
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns an offset into the muram area, or -ENOMEM
 * (cast to unsigned long by the rheap allocator) on failure; check the
 * result with IS_ERR_VALUE().
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	cpm_muram_info.alignment = align;
	start = rh_alloc(&cpm_muram_info, size, "commproc");
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);
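
/*
 * Illustrative sketch (not part of this file): a typical caller carves out
 * a small, aligned region of MURAM, maps it, and frees it on teardown.
 * The IS_ERR_VALUE() check on the rheap return value follows the pattern
 * used by existing CPM drivers; the names and sizes below are hypothetical.
 *
 *	unsigned long bd_offset;
 *	cbd_t __iomem *bd_base;
 *
 *	bd_offset = cpm_muram_alloc(8 * sizeof(cbd_t), 8);
 *	if (IS_ERR_VALUE(bd_offset))
 *		return -ENOMEM;
 *
 *	bd_base = cpm_muram_addr(bd_offset);
 *	memset_io(bd_base, 0, 8 * sizeof(cbd_t));
 *
 *	cpm_muram_free(bd_offset);
 */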

/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 */
int cpm_muram_free(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	ret = rh_free(&cpm_muram_info, offset);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_muram_free);

/**
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: the offset into the muram area to reserve
 * @size: the number of bytes to reserve
 *
 * This function returns the reserved offset (i.e. @offset) on success,
 * and -ENOMEM on failure.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	cpm_muram_info.alignment = 1;
	start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
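
/*
 * Illustrative sketch (not part of this file): some CPM blocks expect their
 * parameter RAM at a hardware-fixed MURAM offset, so a driver reserves that
 * exact region instead of letting the allocator choose one.  The offset and
 * size used here are hypothetical placeholders.
 *
 *	unsigned long pram_offset;
 *	void __iomem *pram;
 *
 *	pram_offset = cpm_muram_alloc_fixed(0x1f00, 64);
 *	if (IS_ERR_VALUE(pram_offset))
 *		return -ENOMEM;
 *
 *	pram = cpm_muram_addr(pram_offset);
 */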

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);

/**
 * cpm_muram_offset - turn a muram virtual address back into a muram offset
 * @addr: virtual address obtained from cpm_muram_addr()
 */
unsigned long cpm_muram_offset(void __iomem *addr)
{
	return addr - (void __iomem *)muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
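
/*
 * Illustrative sketch (not part of this file): buffer descriptors handed to
 * the CPM must contain bus addresses, so a driver converts a mapped MURAM
 * pointer back with cpm_muram_dma() before programming the controller.
 * "bd_offset" and "buf_offset" are hypothetical offsets obtained from
 * cpm_muram_alloc().
 *
 *	cbd_t __iomem *bd = cpm_muram_addr(bd_offset);
 *	u8 __iomem *buf = cpm_muram_addr(buf_offset);
 *
 *	out_be32(&bd->cbd_bufaddr, cpm_muram_dma(buf));
 */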

#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)

struct cpm2_ioports {
	u32 dir, par, sor, odr, dat;
	u32 res[3];
};

struct cpm2_gpio32_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;

	/* shadowed data register to clear/set bits safely */
	u32 cpdata;
};

static inline struct cpm2_gpio32_chip *
to_cpm2_gpio32_chip(struct of_mm_gpio_chip *mm_gc)
{
	return container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
}

static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;

	cpm2_gc->cpdata = in_be32(&iop->dat);
}

static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	u32 pin_mask;

	pin_mask = 1 << (31 - gpio);

	return !!(in_be32(&iop->dat) & pin_mask);
}

static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
	int value)
{
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;

	if (value)
		cpm2_gc->cpdata |= pin_mask;
	else
		cpm2_gc->cpdata &= ~pin_mask;

	out_be32(&iop->dat, cpm2_gc->cpdata);
}

static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	__cpm2_gpio32_set(mm_gc, pin_mask, value);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);
}

static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	setbits32(&iop->dir, pin_mask);
	__cpm2_gpio32_set(mm_gc, pin_mask, val);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);

	return 0;
}

static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	clrbits32(&iop->dir, pin_mask);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);

	return 0;
}

int cpm2_gpiochip_add32(struct device_node *np)
{
	struct cpm2_gpio32_chip *cpm2_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct of_gpio_chip *of_gc;
	struct gpio_chip *gc;

	cpm2_gc = kzalloc(sizeof(*cpm2_gc), GFP_KERNEL);
	if (!cpm2_gc)
		return -ENOMEM;

	spin_lock_init(&cpm2_gc->lock);

	mm_gc = &cpm2_gc->mm_gc;
	of_gc = &mm_gc->of_gc;
	gc = &of_gc->gc;

	mm_gc->save_regs = cpm2_gpio32_save_regs;
	of_gc->gpio_cells = 2;
	gc->ngpio = 32;
	gc->direction_input = cpm2_gpio32_dir_in;
	gc->direction_output = cpm2_gpio32_dir_out;
	gc->get = cpm2_gpio32_get;
	gc->set = cpm2_gpio32_set;

	return of_mm_gpiochip_add(np, mm_gc);
}
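
/*
 * Illustrative sketch (not part of this file): a platform or SoC setup file
 * would typically register one 32-bit port bank per matching device-tree
 * node, e.g. from an arch_initcall.  The compatible string is an assumption
 * and depends on the board's bindings.
 *
 *	struct device_node *np;
 *
 *	for_each_compatible_node(np, NULL, "fsl,cpm2-pario-bank")
 *		cpm2_gpiochip_add32(np);
 */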
#endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */