arch/powerpc/lib/dma-noncoherent.c
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The functions return
 * the virtual address and set 'dma_handle' to the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	unsigned long order;
	int i;
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int array_size = nr_pages * sizeof(struct page *);
	struct page **pages;
	struct page *end;
	u64 mask = 0x00ffffff, limit; /* ISA default */
	struct vm_struct *area;

	BUG_ON(!mem_init_done);
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		printk(KERN_WARNING "coherent allocation too big (requested "
				"%#x mask %#Lx)\n", size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	end = page + (1 << order);

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	split_page(page, order);

	/*
	 * Set the "dma handle"
	 */
	*handle = page_to_phys(page);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(1));
	if (!area)
		goto out_free_pages;

	if (array_size > PAGE_SIZE) {
		pages = vmalloc(array_size);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc(array_size, GFP_KERNEL);
	}
	if (!pages)
		goto out_free_area;

	area->pages = pages;
	area->nr_pages = nr_pages;

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
		goto out_unmap;

	/*
	 * Free the otherwise unused pages: the allocation was rounded up
	 * to a power-of-two order (e.g. a 3-page request allocates an
	 * order-2 block of 4 pages), so release the tail pages here.
	 */
	page += nr_pages;
	while (page < end) {
		__free_page(page);
		page++;
	}

	return area->addr;
out_unmap:
	vunmap(area->addr);
	if (array_size > PAGE_SIZE)
		vfree(pages);
	else
		kfree(pages);
	goto out_free_pages;
out_free_area:
	free_vm_area(area);
out_free_pages:
	if (page)
		__free_pages(page, order);
no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
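
/*
 * Example usage (a minimal sketch; the size and the error handling are
 * hypothetical, and drivers normally reach this code through the generic
 * dma_alloc_coherent() wrapper rather than calling it directly):
 *
 *	dma_addr_t handle;
 *	void *vaddr = __dma_alloc_coherent(PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... program the device with the bus address in "handle" and
 *	    touch the buffer through the uncached mapping "vaddr" ...
 *	__dma_free_coherent(PAGE_SIZE, vaddr);
 */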

/*
 * Free a mapping previously set up by __dma_alloc_coherent() above.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	vfree(vaddr);
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * Make an area of memory consistent for the given DMA direction.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();	/* BUG() does not return; a direction is required */
	case DMA_FROM_DEVICE:
		/*
		 * Invalidate only when cache-line aligned, otherwise an
		 * unaligned buffer may share a line with unrelated dirty
		 * data, and invalidating would discard that uncommitted
		 * data; unaligned buffers get a writeback + invalidate.
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
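
/*
 * Example (a minimal sketch; "buf" and "len" are hypothetical driver
 * state): a buffer about to be handed to a device is synced with the
 * direction of the transfer, typically at map time:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	writeback, device will read
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	invalidate, device will write
 */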

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
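
/*
 * Worked example of the segment arithmetic above, assuming 4K pages:
 * offset = 0x800 and size = 0x1800 give a first-page seg_size of
 * min(0x1000 - 0x800, 0x1800) = 0x800, and nr_segs = 1 +
 * (0x1800 - 0x800 + 0xfff) / 0x1000 = 2, so the loop kmaps two pages
 * and syncs 0x800 then 0x1000 bytes, 0x1800 bytes in total.
 */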
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent; identical to __dma_sync(),
 * but takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
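
/*
 * Example (a sketch; "sg" is a hypothetical scatterlist entry): streaming
 * mappings built from pages are synced per entry, which on highmem
 * kernels avoids needing a permanent kernel mapping for each page:
 *
 *	__dma_sync_page(sg_page(sg), sg->offset, sg->length, DMA_TO_DEVICE);
 */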