/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <linux/log2.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
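
/*
 * The parser above accepts "cma=size[@base[-limit]]"; a sketch of the
 * accepted forms, derived from early_cma() itself (example addresses
 * are illustrative only):
 *
 *	cma=64M				64 MiB area, any base
 *	cma=64M@0x80000000		64 MiB fixed at 0x80000000
 *					(limit defaults to base + size)
 *	cma=64M@0x80000000-0xa0000000	64 MiB placed somewhere in
 *					[0x80000000, 0xa0000000)
 */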

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
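
/*
 * Illustrative only: the typical caller is architecture early-boot code,
 * e.g. on ARM something like
 *
 *	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
 *
 * from the memblock init path, after the architecture's own memory
 * reservations are done. The exact call site and limit vary by
 * architecture and kernel version.
 */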

static DEFINE_MUTEX(cma_mutex);

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	/* The mask is in units of bitmap bits, not pages. */
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
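
/*
 * Worked example (numbers illustrative): with order_per_bit = 2, one
 * bitmap bit covers 2^2 = 4 pages. A 64-page area then needs
 * cma_bitmap_maxno() = 64 >> 2 = 16 bits, a request for 9 pages is
 * rounded up by cma_bitmap_pages_to_bits() to ALIGN(9, 4) >> 2 = 3 bits,
 * and cma_bitmap_aligned_mask() for align_order = 3 yields
 * (1 << (3 - 2)) - 1 = 1.
 */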

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

static int __init __dma_contiguous_reserve_area(phys_addr_t size,
				phys_addr_t base, phys_addr_t limit,
				phys_addr_t alignment, unsigned int order_per_bit,
				struct cma **res_cma, bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		 __func__, (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit, (unsigned long)alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy
	 * algorithm. In that case a request could fail to find contiguous
	 * memory within the area, which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
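
	/*
	 * Worked example (typical config, illustrative): with 4 KiB pages
	 * and MAX_ORDER = 11, MAX_ORDER - 1 = 10, so the alignment enforced
	 * above is at least PAGE_SIZE << 10 = 4 MiB (assuming
	 * pageblock_order <= 10).
	 */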

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	return 0;

err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: if true, reserve the area at exactly @base.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = __dma_contiguous_reserve_area(size, base, limit, 0, 0,
					    res_cma, fixed);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
				   (*res_cma)->count << PAGE_SHIFT);

	return 0;
}
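
/*
 * Usage sketch (illustrative; "camera_dev" is a hypothetical device
 * pointer): platform code could reserve a dedicated 16 MiB area during
 * early boot and attach it to a device with dev_set_cma_area():
 *
 *	struct cma *camera_cma;
 *
 *	if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &camera_cma,
 *					false) == 0)
 *		dev_set_cma_area(camera_dev, camera_cma);
 */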

static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
						unsigned int align)
{
	unsigned long mask, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			cma_clear_bitmap(cma, pfn, count);
			break;
		}
		cma_clear_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires an architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	struct cma *cma = dev_get_cma_area(dev);

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return __dma_alloc_from_contiguous(cma, count, align);
}

static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
					  int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);

	return __dma_release_from_contiguous(cma, pages, count);
}
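
/*
 * Usage sketch (illustrative; "dev" is a hypothetical struct device *):
 * a driver needing a physically contiguous 1 MiB buffer, i.e. 256 pages
 * with 4 KiB pages, aligned to 2^8 pages:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, 256, 8);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	if (!dma_release_from_contiguous(dev, page, 256))
 *		pr_warn("buffer was not in a CMA area\n");
 */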