author     Mark Brown <broonie@linaro.org>        2013-12-13 18:50:12 +0000
committer  Mark Brown <broonie@linaro.org>        2013-12-13 18:50:12 +0000
commit     679160a872dfe88bf34e50f1939e190ba59e534e (patch)
tree       fe342d02e224d006a23745a657148ce62b54fee4
parent     eed4bafd63b5584adb832a582a931ecba41dec55 (diff)
parent     8f1c42284159ebd6851e1e75507f2ad6fba87d0f (diff)

Merge branch 'android-3.10' of https://android.googlesource.com/kernel/common into lsk-v3.10-aosp (v3.10/topic/aosp)
 android/configs/README                           |   2
 android/configs/android-base.cfg                 |   1
 android/configs/android-recommended.cfg          |   9
 arch/arm/common/fiq_glue.S                       |  25
 arch/arm/common/fiq_glue_setup.c                 |  53
 arch/arm/include/asm/fiq_glue.h                  |   3
 drivers/staging/android/ion/Kconfig              |  14
 drivers/staging/android/ion/Makefile             |   1
 drivers/staging/android/ion/compat_ion.c         |   4
 drivers/staging/android/ion/ion.c                | 220
 drivers/staging/android/ion/ion.h                |   2
 drivers/staging/android/ion/ion_carveout_heap.c  | 119
 drivers/staging/android/ion/ion_chunk_heap.c     |  59
 drivers/staging/android/ion/ion_cma_heap.c       |  34
 drivers/staging/android/ion/ion_heap.c           | 100
 drivers/staging/android/ion/ion_page_pool.c      |  10
 drivers/staging/android/ion/ion_priv.h           |  16
 drivers/staging/android/ion/ion_system_heap.c    | 134
 drivers/staging/android/ion/ion_test.c           | 281
 drivers/staging/android/uapi/ion_test.h          |  71
 20 files changed, 797 insertions(+), 361 deletions(-)
diff --git a/android/configs/README b/android/configs/README
index 391dffa6f85..8798731f890 100644
--- a/android/configs/README
+++ b/android/configs/README
@@ -11,3 +11,5 @@ way to enable these options would be:
This will generate a .config that can then be used to save a new defconfig or
compile a new kernel with Android features enabled.
+Because there is no tool to consistently generate these config fragments,
+let's keep them alphabetically sorted rather than randomly ordered.
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index 2b98436550c..25162024f88 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -1,3 +1,4 @@
+# KEEP ALPHABETICALLY SORTED
# CONFIG_INET_LRO is not set
# CONFIG_MODULES is not set
# CONFIG_OABI_COMPAT is not set
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
index 546c37f552b..b0120f678cc 100644
--- a/android/configs/android-recommended.cfg
+++ b/android/configs/android-recommended.cfg
@@ -1,7 +1,4 @@
-CONFIG_PANIC_TIMEOUT=5
-CONFIG_KALLSYMS_ALL=y
-CONFIG_PERF_EVENTS=y
-CONFIG_COMPACTION=y
+# KEEP ALPHABETICALLY SORTED
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_LEGACY_PTYS is not set
@@ -14,6 +11,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_COMPACTION=y
CONFIG_DM_UEVENT=y
CONFIG_DRAGONRISE_FF=y
CONFIG_EXT4_FS=y
@@ -81,6 +79,7 @@ CONFIG_ION=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_KALLSYMS_ALL=y
CONFIG_KSM=y
CONFIG_LOGIG940_FF=y
CONFIG_LOGIRUMBLEPAD2_FF=y
@@ -88,7 +87,9 @@ CONFIG_LOGITECH_FF=y
CONFIG_MD=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MSDOS_FS=y
+CONFIG_PANIC_TIMEOUT=5
CONFIG_PANTHERLORD_FF=y
+CONFIG_PERF_EVENTS=y
CONFIG_PM_DEBUG=y
CONFIG_PM_RUNTIME=y
CONFIG_PM_WAKELOCKS_LIMIT=0
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
index 9e3455a09f8..24b42cec481 100644
--- a/arch/arm/common/fiq_glue.S
+++ b/arch/arm/common/fiq_glue.S
@@ -22,13 +22,14 @@
/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
ENTRY(fiq_glue)
- /* store pc, cpsr from previous mode */
+ /* store pc, cpsr from previous mode, reserve space for spsr */
mrs r12, spsr
- sub r11, lr, #4
+ sub lr, lr, #4
subs r10, #1
bne nested_fiq
- stmfd sp!, {r11-r12, lr}
+ str r12, [sp, #-8]!
+ str lr, [sp, #-4]!
/* store r8-r14 from previous mode */
sub sp, sp, #(7 * 4)
@@ -85,12 +86,15 @@ fiq_from_usr_mode_exit:
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
ldmfd sp!, {r0-r7}
- add sp, sp, #(7 * 4)
- ldmfd sp!, {r11-r12, lr}
+ ldr lr, [sp, #(4 * 7)]
+ ldr r12, [sp, #(4 * 8)]
+ add sp, sp, #(10 * 4)
exit_fiq:
msr spsr_cxsf, r12
add r10, #1
- movs pc, r11
+ cmp r11, #0
+ moveqs pc, lr
+ bx r11 /* jump to custom fiq return function */
nested_fiq:
orr r12, r12, #(PSR_F_BIT)
@@ -98,14 +102,17 @@ nested_fiq:
fiq_glue_end:
-ENTRY(fiq_glue_setup) /* func, data, sp */
- mrs r3, cpsr
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+ stmfd sp!, {r4}
+ mrs r4, cpsr
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
movs r8, r0
mov r9, r1
mov sp, r2
+ mov r11, r3
moveq r10, #0
movne r10, #1
- msr cpsr_c, r3
+ msr cpsr_c, r4
+ ldmfd sp!, {r4}
bx lr
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
index 4044c7db95c..8cb1b611c6d 100644
--- a/arch/arm/common/fiq_glue_setup.c
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -18,20 +18,23 @@
#include <asm/fiq_glue.h>
extern unsigned char fiq_glue, fiq_glue_end;
-extern void fiq_glue_setup(void *func, void *data, void *sp);
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+ fiq_return_handler_t fiq_return_handler);
static struct fiq_handler fiq_debbuger_fiq_handler = {
.name = "fiq_glue",
};
DEFINE_PER_CPU(void *, fiq_stack);
static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
static DEFINE_MUTEX(fiq_glue_lock);
static void fiq_glue_setup_helper(void *info)
{
struct fiq_glue_handler *handler = info;
fiq_glue_setup(handler->fiq, handler,
- __get_cpu_var(fiq_stack) + THREAD_START_SP);
+ __get_cpu_var(fiq_stack) + THREAD_START_SP,
+ fiq_return_handler);
}
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
@@ -80,6 +83,49 @@ err_busy:
return ret;
}
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+ fiq_return_handler = fiq_return;
+ if (current_handler)
+ on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+ int ret;
+
+ mutex_lock(&fiq_glue_lock);
+ if (fiq_return_handler) {
+ ret = -EBUSY;
+ goto err_busy;
+ }
+ fiq_glue_update_return_handler(fiq_return);
+ ret = 0;
+err_busy:
+ mutex_unlock(&fiq_glue_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+ int ret;
+
+ mutex_lock(&fiq_glue_lock);
+ if (WARN_ON(fiq_return_handler != fiq_return)) {
+ ret = -EINVAL;
+ goto err_inval;
+ }
+ fiq_glue_update_return_handler(NULL);
+ ret = 0;
+err_inval:
+ mutex_unlock(&fiq_glue_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
/**
* fiq_glue_resume - Restore fiqs after suspend or low power idle states
*
@@ -93,7 +139,8 @@ void fiq_glue_resume(void)
if (!current_handler)
return;
fiq_glue_setup(current_handler->fiq, current_handler,
- __get_cpu_var(fiq_stack) + THREAD_START_SP);
+ __get_cpu_var(fiq_stack) + THREAD_START_SP,
+ fiq_return_handler);
if (current_handler->resume)
current_handler->resume(current_handler);
}
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
index d54c29db97a..a9e244f9f19 100644
--- a/arch/arm/include/asm/fiq_glue.h
+++ b/arch/arm/include/asm/fiq_glue.h
@@ -18,8 +18,11 @@ struct fiq_glue_handler {
void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
void (*resume)(struct fiq_glue_handler *h);
};
+typedef void (*fiq_return_handler_t)(void);
int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
+int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
#ifdef CONFIG_FIQ_GLUE
void fiq_glue_resume(void);
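Taken together, the three fiq_glue changes above add an optional custom return path: the assembly branches through r11 when it is non-zero, fiq_glue_setup() plumbs the handler to each CPU, and the header exposes set/clear entry points. A minimal sketch of a client follows; it is not part of this patch, my_fiq_return and my_client_* are hypothetical names, and the return handler itself must be assembly that performs the FIQ exception return, since the glue reaches it with a plain bx r11:

/* Hedged sketch, not from this patch: illustrative use of the new API. */
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/fiq_glue.h>

extern void my_fiq_return(void);	/* asm stub; must do the exception return itself */

static int __init my_client_init(void)
{
	/* Fails with -EBUSY if another return handler is already installed. */
	return fiq_glue_set_return_handler(my_fiq_return);
}

static void __exit my_client_exit(void)
{
	/* WARNs and returns -EINVAL if the installed handler does not match. */
	fiq_glue_clear_return_handler(my_fiq_return);
}

module_init(my_client_init);
module_exit(my_client_exit);
MODULE_LICENSE("GPL");
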
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c62f2cbb9e8..a342d96ee4f 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -1,10 +1,20 @@
menuconfig ION
tristate "Ion Memory Manager"
- depends on ARM
select GENERIC_ALLOCATOR
select DMA_SHARED_BUFFER
+ ---help---
+ Choose this option to enable the ION Memory Manager,
+ used by Android to efficiently allocate buffers
+ from userspace that can be shared between drivers.
+ If you're not using Android it's probably safe to
+ say N here.
+
+config ION_TEST
+ tristate "Ion Test Device"
+ depends on ION
help
- Chose this option to enable the ION Memory Manager.
+ Choose this option to create a device that can be used to test the
+ kernel and device side ION functions.
config ION_TEGRA
tristate "Ion for Tegra"
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 9c956659124..75039b98eeb 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
ifdef CONFIG_COMPAT
obj-$(CONFIG_ION) += compat_ion.o
endif
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index a89b067ec05..e9a8132cd56 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -91,8 +91,8 @@ static int compat_get_ion_custom_data(
struct compat_ion_custom_data __user *data32,
struct ion_custom_data __user *data)
{
- compat_uint_t cmd;
- compat_ulong_t arg;
+ compat_uint_t cmd;
+ compat_ulong_t arg;
int err;
err = get_user(cmd, &data32->cmd);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 4d19716cabd..7522b0be174 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -110,8 +110,8 @@ struct ion_handle {
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
- return ((buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+ return (buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
bool ion_buffer_cached(struct ion_buffer *buffer)
@@ -202,7 +202,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->size = len;
table = heap->ops->map_dma(heap, buffer);
- if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
+ if (WARN_ONCE(table == NULL,
+ "heap->ops->map_dma should return ERR_PTR on error"))
table = ERR_PTR(-EINVAL);
if (IS_ERR(table)) {
heap->ops->free(buffer);
@@ -424,7 +425,8 @@ static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
return handle ? handle : ERR_PTR(-EINVAL);
}
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
{
WARN_ON(!mutex_is_locked(&client->lock));
return (idr_find(&client->idr, handle->id) == handle);
@@ -432,22 +434,16 @@ static bool ion_handle_validate(struct ion_client *client, struct ion_handle *ha
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
- int rc;
+ int id;
struct rb_node **p = &client->handles.rb_node;
struct rb_node *parent = NULL;
struct ion_handle *entry;
- do {
- int id;
- rc = idr_pre_get(&client->idr, GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
- rc = idr_get_new_above(&client->idr, handle, 1, &id);
- handle->id = id;
- } while (rc == -EAGAIN);
+ id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
- if (rc < 0)
- return rc;
+ handle->id = id;
while (*p) {
parent = *p;
@@ -477,7 +473,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
struct ion_heap *heap;
int ret;
- pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
+ pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
len, align, heap_id_mask, flags);
/*
* traverse the list of heaps available in this system in priority
@@ -584,7 +580,8 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
return buffer->vaddr;
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
- if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
+ if (WARN_ONCE(vaddr == NULL,
+ "heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr))
return vaddr;
@@ -675,7 +672,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {0};
+ const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
mutex_lock(&client->lock);
@@ -694,7 +691,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
if (!names[i])
continue;
- seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
+ seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
}
return 0;
}
@@ -786,7 +783,6 @@ void ion_client_destroy(struct ion_client *client)
ion_handle_destroy(&handle->ref);
}
- idr_remove_all(&client->idr);
idr_destroy(&client->idr);
down_write(&dev->lock);
@@ -894,17 +890,18 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
mutex_unlock(&buffer->lock);
}
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
+ unsigned long pfn;
int ret;
mutex_lock(&buffer->lock);
ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
-
BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- ion_buffer_page(buffer->pages[vmf->pgoff]));
+
+ pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
mutex_unlock(&buffer->lock);
if (ret)
return VM_FAULT_ERROR;
@@ -945,7 +942,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
mutex_unlock(&buffer->lock);
}
-struct vm_operations_struct ion_vma_ops = {
+static struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
@@ -963,6 +960,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
}
if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+ VM_DONTDUMP;
vma->vm_private_data = buffer;
vma->vm_ops = &ion_vma_ops;
ion_vm_open(vma);
@@ -1034,7 +1033,7 @@ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
mutex_unlock(&buffer->lock);
}
-struct dma_buf_ops dma_buf_ops = {
+static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
@@ -1164,41 +1163,65 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
return 0;
}
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ } data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
switch (cmd) {
case ION_IOC_ALLOC:
{
- struct ion_allocation_data data;
struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- handle = ion_alloc(client, data.len, data.align,
- data.heap_id_mask, data.flags);
-
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
if (IS_ERR(handle))
return PTR_ERR(handle);
- data.handle = handle->id;
+ data.allocation.handle = handle->id;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- ion_free(client, handle);
- return -EFAULT;
- }
+ cleanup_handle = handle;
break;
}
case ION_IOC_FREE:
{
- struct ion_handle_data data;
struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_handle_data)))
- return -EFAULT;
- handle = ion_handle_get_by_id(client, data.handle);
+ handle = ion_handle_get_by_id(client, data.handle.handle);
if (IS_ERR(handle))
return PTR_ERR(handle);
ion_free(client, handle);
@@ -1208,68 +1231,52 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case ION_IOC_SHARE:
case ION_IOC_MAP:
{
- struct ion_fd_data data;
struct ion_handle *handle;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
- handle = ion_handle_get_by_id(client, data.handle);
+ handle = ion_handle_get_by_id(client, data.handle.handle);
if (IS_ERR(handle))
return PTR_ERR(handle);
- data.fd = ion_share_dma_buf_fd(client, handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
ion_handle_put(handle);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
- return -EFAULT;
- if (data.fd < 0)
- return data.fd;
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
break;
}
case ION_IOC_IMPORT:
{
- struct ion_fd_data data;
struct ion_handle *handle;
- int ret = 0;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- handle = ion_import_dma_buf(client, data.fd);
+ handle = ion_import_dma_buf(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
else
- data.handle = handle->id;
-
- if (copy_to_user((void __user *)arg, &data,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- if (ret < 0)
- return ret;
+ data.handle.handle = handle->id;
break;
}
case ION_IOC_SYNC:
{
- struct ion_fd_data data;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_fd_data)))
- return -EFAULT;
- ion_sync_for_device(client, data.fd);
+ ret = ion_sync_for_device(client, data.fd.fd);
break;
}
case ION_IOC_CUSTOM:
{
- struct ion_device *dev = client->dev;
- struct ion_custom_data data;
-
if (!dev->custom_ioctl)
return -ENOTTY;
- if (copy_from_user(&data, (void __user *)arg,
- sizeof(struct ion_custom_data)))
- return -EFAULT;
- return dev->custom_ioctl(client, data.cmd, data.arg);
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
}
default:
return -ENOTTY;
}
- return 0;
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
}
static int ion_release(struct inode *inode, struct file *file)
@@ -1343,10 +1350,10 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
char task_comm[TASK_COMM_LEN];
get_task_comm(task_comm, client->task);
- seq_printf(s, "%16.s %16u %16u\n", task_comm,
+ seq_printf(s, "%16s %16u %16zu\n", task_comm,
client->pid, size);
} else {
- seq_printf(s, "%16.s %16u %16u\n", client->name,
+ seq_printf(s, "%16s %16u %16zu\n", client->name,
client->pid, size);
}
}
@@ -1361,19 +1368,20 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
continue;
total_size += buffer->size;
if (!buffer->handle_count) {
- seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
- buffer->pid, buffer->size, buffer->kmap_cnt,
+ seq_printf(s, "%16s %16u %16zu %d %d\n",
+ buffer->task_comm, buffer->pid,
+ buffer->size, buffer->kmap_cnt,
atomic_read(&buffer->ref.refcount));
total_orphaned_size += buffer->size;
}
}
mutex_unlock(&dev->buffer_lock);
seq_printf(s, "----------------------------------------------------\n");
- seq_printf(s, "%16.s %16u\n", "total orphaned",
+ seq_printf(s, "%16s %16zu\n", "total orphaned",
total_orphaned_size);
- seq_printf(s, "%16.s %16u\n", "total ", total_size);
+ seq_printf(s, "%16s %16zu\n", "total ", total_size);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- seq_printf(s, "%16.s %16u\n", "deferred free",
+ seq_printf(s, "%16s %16zu\n", "deferred free",
heap->free_list_size);
seq_printf(s, "----------------------------------------------------\n");
@@ -1398,39 +1406,39 @@ static const struct file_operations debug_heap_fops = {
#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
- if (!val)
- return 0;
+ if (!val)
+ return 0;
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- sc.nr_to_scan = objs;
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
- heap->shrinker.shrink(&heap->shrinker, &sc);
- return 0;
+ heap->shrinker.shrink(&heap->shrinker, &sc);
+ return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
- struct ion_heap *heap = data;
- struct shrink_control sc;
- int objs;
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
- sc.gfp_mask = -1;
- sc.nr_to_scan = 0;
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
- objs = heap->shrinker.shrink(&heap->shrinker, &sc);
- *val = objs;
- return 0;
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ *val = objs;
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
- debug_shrink_set, "%llu\n");
+ debug_shrink_set, "%llu\n");
#endif
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
@@ -1529,11 +1537,11 @@ void __init ion_reserve(struct ion_platform_data *data)
int ret = memblock_reserve(data->heaps[i].base,
data->heaps[i].size);
if (ret)
- pr_err("memblock reserve of %x@%lx failed\n",
+ pr_err("memblock reserve of %zx@%lx failed\n",
data->heaps[i].size,
data->heaps[i].base);
}
- pr_info("%s: %s reserved base %lx size %d\n", __func__,
+ pr_info("%s: %s reserved base %lx size %zu\n", __func__,
data->heaps[i].name,
data->heaps[i].base,
data->heaps[i].size);
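The ion_ioctl() rewrite above replaces the per-command copy_from_user()/copy_to_user() pairs with one copy in each direction, sized by _IOC_SIZE(cmd) and steered by ion_ioctl_dir(), with cleanup_handle undoing ION_IOC_ALLOC if the final copy-out fails. The userspace ABI is unchanged; here is a hedged sketch of it, assuming the ion uapi header is reachable as "ion.h", that the device node is /dev/ion, and that heap id 0 exists on the target:

/* Hedged userspace sketch, not part of this patch. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"	/* struct ion_allocation_data, ION_IOC_* */

int main(void)
{
	struct ion_allocation_data alloc;
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	int fd = open("/dev/ion", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&alloc, 0, sizeof(alloc));
	alloc.len = 4096;
	alloc.align = 4096;
	alloc.heap_id_mask = 1 << 0;	/* hypothetical heap id */
	if (ioctl(fd, ION_IOC_ALLOC, &alloc) < 0) {	/* _IOWR: handle copied back */
		close(fd);
		return 1;
	}

	share.handle = alloc.handle;
	if (ioctl(fd, ION_IOC_SHARE, &share) == 0)	/* share.fd is a dma-buf fd */
		close(share.fd);

	free_data.handle = alloc.handle;
	ioctl(fd, ION_IOC_FREE, &free_data);	/* direction fixed up to write-only */
	close(fd);
	return 0;
}
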
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 962e1c58d36..dcd2a0cdb19 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -38,7 +38,7 @@ struct ion_buffer;
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
* @id: unique identifier for heap. When allocating higher numbers
- * will be allocated from first. At allocation these are passed
+ * will be allocated from first. At allocation these are passed
* as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 4a94b17da67..5165de2ce34 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -14,7 +14,7 @@
*
*/
#include <linux/spinlock.h>
-
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
@@ -25,8 +25,6 @@
#include "ion.h"
#include "ion_priv.h"
-#include <asm/mach/map.h>
-
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
@@ -62,7 +60,11 @@ static int ion_carveout_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
- *addr = buffer->priv_phys;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
*len = buffer->size;
return 0;
}
@@ -72,75 +74,66 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
unsigned long size, unsigned long align,
unsigned long flags)
{
- buffer->priv_phys = ion_carveout_allocate(heap, size, align);
- return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_carveout_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
-
- ion_carveout_free(heap, buffer->priv_phys, buffer->size);
- buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
struct sg_table *table;
+ ion_phys_addr_t paddr;
int ret;
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ERR_PTR(ret);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_carveout_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
}
- sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
- 0);
- return table;
-}
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- sg_free_table(buffer->sg_table);
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
}
-void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
- void *ret;
- int mtype = MT_MEMORY_NONCACHED;
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
- if (buffer->flags & ION_FLAG_CACHED)
- mtype = MT_MEMORY;
+ ion_heap_buffer_zero(buffer);
- ret = __arm_ioremap(buffer->priv_phys, buffer->size,
- mtype);
- if (ret == NULL)
- return ERR_PTR(-ENOMEM);
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
- return ret;
+ ion_carveout_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
}
-void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- __arm_iounmap(buffer->vaddr);
- buffer->vaddr = NULL;
- return;
+ return buffer->priv_virt;
}
-int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- return remap_pfn_range(vma, vma->vm_start,
- __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- pgprot_noncached(vma->vm_page_prot));
+ return;
}
static struct ion_heap_ops carveout_heap_ops = {
@@ -149,14 +142,27 @@ static struct ion_heap_ops carveout_heap_ops = {
.phys = ion_carveout_heap_phys,
.map_dma = ion_carveout_heap_map_dma,
.unmap_dma = ion_carveout_heap_unmap_dma,
- .map_user = ion_carveout_heap_map_user,
- .map_kernel = ion_carveout_heap_map_kernel,
- .unmap_kernel = ion_carveout_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_carveout_heap *carveout_heap;
+ int ret;
+
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
if (!carveout_heap)
@@ -172,6 +178,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
-1);
carveout_heap->heap.ops = &carveout_heap_ops;
carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+ carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
return &carveout_heap->heap;
}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 19e13ecaa1c..ca20d627960 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -13,7 +13,6 @@
* GNU General Public License for more details.
*
*/
-//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
@@ -25,8 +24,6 @@
#include "ion.h"
#include "ion_priv.h"
-#include <asm/mach/map.h>
-
struct ion_chunk_heap {
struct ion_heap heap;
struct gen_pool *pool;
@@ -49,8 +46,8 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
unsigned long num_chunks;
unsigned long allocated_size;
- if (ion_buffer_fault_user_mappings(buffer))
- return -ENOMEM;
+ if (align > chunk_heap->chunk_size)
+ return -EINVAL;
allocated_size = ALIGN(size, chunk_heap->chunk_size);
num_chunks = allocated_size / chunk_heap->chunk_size;
@@ -73,7 +70,8 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
chunk_heap->chunk_size);
if (!paddr)
goto err;
- sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
+ sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+ chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
@@ -119,14 +117,14 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
@@ -144,9 +142,18 @@ static struct ion_heap_ops chunk_heap_ops = {
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_chunk_heap *chunk_heap;
- struct vm_struct *vm_struct;
- pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
- int i, ret;
+ int ret;
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
if (!chunk_heap)
@@ -163,39 +170,15 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->size = heap_data->size;
chunk_heap->allocated = 0;
- vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!vm_struct) {
- ret = -ENOMEM;
- goto error;
- }
- for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
- struct page *page = phys_to_page(chunk_heap->base + i);
- struct page **pages = &page;
-
- ret = map_vm_area(vm_struct, pgprot, &pages);
- if (ret)
- goto error_map_vm_area;
- memset(vm_struct->addr, 0, PAGE_SIZE);
- unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
- }
- free_vm_area(vm_struct);
-
- ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
- heap_data->size, DMA_BIDIRECTIONAL);
-
gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_info("%s: base %lu size %u align %ld\n", __func__, chunk_heap->base,
+ pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
heap_data->size, heap_data->align);
return &chunk_heap->heap;
-error_map_vm_area:
- free_vm_area(vm_struct);
-error:
- gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
kfree(chunk_heap);
return ERR_PTR(ret);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 55d6003f546..4418bda7647 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -44,8 +44,8 @@ struct ion_cma_buffer_info {
* This function could be replaced by dma_common_get_sgtable
* as soon as it becomes available.
*/
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
{
struct page *page = virt_to_page(cpu_addr);
int ret;
@@ -69,13 +69,20 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+ if (buffer->flags & ION_FLAG_CACHED)
+ return -EINVAL;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info) {
dev_err(dev, "Can't allocate buffer info\n");
return ION_CMA_ALLOCATE_FAILED;
}
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 0);
+ info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
@@ -128,8 +135,8 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
- info->handle);
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
*addr = info->handle;
*len = buffer->size;
@@ -137,16 +144,16 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
return 0;
}
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
return info->table;
}
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
@@ -162,13 +169,19 @@ static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
buffer->size);
}
-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
/* kernel memory mapping has been done at allocation time */
return info->cpu_addr;
}
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
@@ -177,6 +190,7 @@ static struct ion_heap_ops ion_cma_ops = {
.phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
+ .unmap_kernel = ion_cma_unmap_kernel,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index f32f4e69765..5b01e9e30a4 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
struct page **tmp = pages;
if (!pages)
- return 0;
+ return NULL;
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
@@ -49,9 +49,8 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
struct page *page = sg_page(sg);
BUG_ON(i >= npages);
- for (j = 0; j < npages_this_entry; j++) {
+ for (j = 0; j < npages_this_entry; j++)
*(tmp++) = page++;
- }
}
vaddr = vmap(pages, npages, VM_MAP, pgprot);
vfree(pages);
@@ -76,6 +75,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
struct scatterlist *sg;
int i;
+ int ret;
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
@@ -91,8 +91,10 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
offset = 0;
}
len = min(len, remainder);
- remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
vma->vm_page_prot);
+ if (ret)
+ return ret;
addr += len;
if (addr >= vma->vm_end)
return 0;
@@ -100,71 +102,63 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
return 0;
}
-int ion_heap_buffer_zero(struct ion_buffer *buffer)
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
- struct sg_table *table = buffer->sg_table;
- pgprot_t pgprot;
- struct scatterlist *sg;
- struct vm_struct *vm_struct;
- int i, j, ret = 0;
-
- if (buffer->flags & ION_FLAG_CACHED)
- pgprot = PAGE_KERNEL;
- else
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!vm_struct)
+ void *addr = vm_map_ram(pages, num, -1, pgprot);
+ if (!addr)
return -ENOMEM;
+ memset(addr, 0, PAGE_SIZE * num);
+ vm_unmap_ram(addr, num);
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
- unsigned long len = sg->length;
+ return 0;
+}
- for (j = 0; j < len / PAGE_SIZE; j++) {
- struct page *sub_page = page + j;
- struct page **pages = &sub_page;
- ret = map_vm_area(vm_struct, pgprot, &pages);
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int p = 0;
+ int ret = 0;
+ struct sg_page_iter piter;
+ struct page *pages[32];
+
+ for_each_sg_page(sgl, &piter, nents, 0) {
+ pages[p++] = sg_page_iter_page(&piter);
+ if (p == ARRAY_SIZE(pages)) {
+ ret = ion_heap_clear_pages(pages, p, pgprot);
if (ret)
- goto end;
- memset(vm_struct->addr, 0, PAGE_SIZE);
- unmap_kernel_range((unsigned long)vm_struct->addr,
- PAGE_SIZE);
+ return ret;
+ p = 0;
}
}
-end:
- free_vm_area(vm_struct);
+ if (p)
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+
return ret;
}
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
- unsigned int order)
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
- struct page *page = alloc_pages(gfp_flags, order);
-
- if (!page)
- return page;
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
- if (ion_buffer_fault_user_mappings(buffer))
- split_page(page, order);
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
- return page;
+ return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}
-void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
- unsigned int order)
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
- int i;
+ struct scatterlist sg;
- if (!ion_buffer_fault_user_mappings(buffer)) {
- __free_pages(page, order);
- return;
- }
- for (i = 0; i < (1 << order); i++)
- __free_page(page + i);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ return ion_heap_sglist_zero(&sg, 1, pgprot);
}
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
rt_mutex_lock(&heap->lock);
list_add(&buffer->list, &heap->free_list);
@@ -200,16 +194,16 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
if (total_drained >= size)
break;
list_del(&buffer->list);
- ion_buffer_destroy(buffer);
heap->free_list_size -= buffer->size;
total_drained += buffer->size;
+ ion_buffer_destroy(buffer);
}
rt_mutex_unlock(&heap->lock);
return total_drained;
}
-int ion_heap_deferred_free(void *data)
+static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
@@ -281,7 +275,7 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
}
if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %lu size %u\n",
+ pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
__func__, heap_data->name, heap_data->type,
heap_data->base, heap_data->size);
return ERR_PTR(-EINVAL);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index b052ff6bf38..f087a02770a 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -108,7 +108,7 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool)
return page;
}
-void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
int ret;
@@ -134,7 +134,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int i;
bool high;
- high = gfp_mask & __GFP_HIGHMEM;
+ high = !!(gfp_mask & __GFP_HIGHMEM);
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
@@ -143,10 +143,10 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
struct page *page;
mutex_lock(&pool->mutex);
- if (high && pool->high_count) {
- page = ion_page_pool_remove(pool, true);
- } else if (pool->low_count) {
+ if (pool->low_count) {
page = ion_page_pool_remove(pool, false);
+ } else if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
} else {
mutex_unlock(&pool->mutex);
break;
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index ea87b54987e..19691c0d24c 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -215,19 +215,7 @@ void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
-
-/**
- * ion_heap_alloc_pages - allocate pages from alloc_pages
- * @buffer: the buffer to allocate for, used to extract the flags
- * @gfp_flags: the gfp_t for the allocation
- * @order: the order of the allocatoin
- *
- * This funciton allocations from alloc pages and also does any other
- * necessary operations based on the buffer->flags. For buffers which
- * will be faulted in the pages are split using split_page
- */
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
- unsigned int order);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
@@ -242,7 +230,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap);
/**
* ion_heap_freelist_add - add a buffer to the deferred free list
* @heap: the heap
- * @buffer: the buffer
+ * @buffer: the buffer
*
* Adds an item to the deferred freelist.
*/
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index ecae16f2109..05e9dc93980 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -26,11 +26,9 @@
#include "ion.h"
#include "ion_priv.h"
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN | __GFP_NORETRY) &
- ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN);
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
@@ -74,14 +72,14 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
if (order > 4)
gfp_flags = high_order_gfp_flags;
- page = ion_heap_alloc_pages(buffer, gfp_flags, order);
+ page = alloc_pages(gfp_flags, order);
if (!page)
- return 0;
+ return NULL;
ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
}
if (!page)
- return 0;
+ return NULL;
return page;
}
@@ -91,15 +89,10 @@ static void free_buffer_page(struct ion_system_heap *heap,
unsigned int order)
{
bool cached = ion_buffer_cached(buffer);
- bool split_pages = ion_buffer_fault_user_mappings(buffer);
- int i;
if (!cached) {
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
ion_page_pool_free(pool, page);
- } else if (split_pages) {
- for (i = 0; i < (1 << order); i++)
- __free_page(page + i);
} else {
__free_pages(page, order);
}
@@ -150,9 +143,13 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
- info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
+ max_order);
if (!info)
goto err;
list_add_tail(&info->list, &pages);
@@ -160,8 +157,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
max_order = info->order;
i++;
}
-
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
goto err;
@@ -183,14 +179,14 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
err1:
kfree(table);
err:
- list_for_each_entry(info, &pages, list) {
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
free_buffer_page(sys_heap, buffer, info->page, info->order);
kfree(info);
}
return -ENOMEM;
}
-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct ion_system_heap *sys_heap = container_of(heap,
@@ -214,14 +210,14 @@ void ion_system_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
return;
}
@@ -362,61 +358,83 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
unsigned long align,
unsigned long flags)
{
- buffer->priv_virt = kzalloc(len, GFP_KERNEL);
- if (!buffer->priv_virt)
+ int order = get_order(len);
+ struct page *page;
+ struct sg_table *table;
+ unsigned long i;
+ int ret;
+
+ if (align > (PAGE_SIZE << order))
+ return -EINVAL;
+
+ page = alloc_pages(low_order_gfp_flags, order);
+ if (!page)
return -ENOMEM;
+
+ split_page(page, order);
+
+ len = PAGE_ALIGN(len);
+ for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+ __free_page(page + i);
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_set_page(table->sgl, page, len, 0);
+
+ buffer->priv_virt = table;
+
+ ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
return 0;
+
+out:
+ for (i = 0; i < len >> PAGE_SHIFT; i++)
+ __free_page(page + i);
+ kfree(table);
+ return ret;
}
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
- kfree(buffer->priv_virt);
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+ unsigned long i;
+
+ for (i = 0; i < pages; i++)
+ __free_page(page + i);
+ sg_free_table(table);
+ kfree(table);
}
static int ion_system_contig_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
- *addr = virt_to_phys(buffer->priv_virt);
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
*len = buffer->size;
return 0;
}
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct sg_table *table;
- int ret;
-
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ERR_PTR(ret);
- }
- sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
- 0);
- return table;
-}
-
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
+ return buffer->priv_virt;
}
-int ion_system_contig_heap_map_user(struct ion_heap *heap,
- struct ion_buffer *buffer,
- struct vm_area_struct *vma)
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
- return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-
}
static struct ion_heap_ops kmalloc_ops = {
@@ -427,7 +445,7 @@ static struct ion_heap_ops kmalloc_ops = {
.unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
- .map_user = ion_system_contig_heap_map_user,
+ .map_user = ion_heap_map_user,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
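The reworked contiguous allocation above must take a power-of-two block from alloc_pages() even though only PAGE_ALIGN(len) bytes are needed, so it split_page()s the block and immediately returns the tail pages. A standalone illustration of that trim arithmetic, hedged as a userspace re-implementation with PAGE_SHIFT assumed to be 12:

/* Hedged illustration, not part of this patch: the tail-trim math used
 * by ion_system_contig_heap_allocate() above, in userspace form.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Same contract as the kernel's get_order(): smallest order that fits. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long len = 5 * PAGE_SIZE;		/* a 20 KiB request */
	int order = get_order(len);			/* 3, i.e. 8 pages */
	unsigned long used = PAGE_ALIGN(len) >> PAGE_SHIFT;
	unsigned long trimmed = (1UL << order) - used;

	/* After split_page(), pages [used, 1 << order) are freed at once. */
	printf("order %d: %lu pages kept, %lu pages trimmed\n",
	       order, used, trimmed);
	return 0;
}
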
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644
index 00000000000..3e20349baf7
--- /dev/null
+++ b/drivers/staging/android/ion/ion_test.c
@@ -0,0 +1,281 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+ struct miscdevice misc;
+};
+
+struct ion_test_data {
+ struct dma_buf *dma_buf;
+ struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+ void __user *ptr, size_t offset, size_t size, bool write)
+{
+ int ret = 0;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct sg_page_iter sg_iter;
+ unsigned long offset_page;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach))
+ return PTR_ERR(attach);
+
+ table = dma_buf_map_attachment(attach, dir);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ offset_page = offset >> PAGE_SHIFT;
+ offset %= PAGE_SIZE;
+
+ for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+ size_t to_copy = PAGE_SIZE - offset;
+
+ to_copy = min(to_copy, size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (write)
+ ret = copy_from_user(vaddr + offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+ vunmap(vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ size -= to_copy;
+ if (!size)
+ break;
+ ptr += to_copy;
+ offset = 0;
+ }
+
+err:
+ dma_buf_unmap_attachment(attach, table, dir);
+ dma_buf_detach(dma_buf, attach);
+ return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+ size_t offset, size_t size, bool write)
+{
+ int ret;
+ unsigned long page_offset = offset >> PAGE_SHIFT;
+ size_t copy_offset = offset % PAGE_SIZE;
+ size_t copy_size = size;
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (offset > dma_buf->size || size > dma_buf->size - offset)
+ return -EINVAL;
+
+ ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+ if (ret)
+ return ret;
+
+ while (copy_size > 0) {
+ size_t to_copy;
+ void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+ if (!vaddr)
+ goto err;
+
+ to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+ if (write)
+ ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+ dma_buf_kunmap(dma_buf, page_offset, vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ copy_size -= to_copy;
+ ptr += to_copy;
+ page_offset++;
+ copy_offset = 0;
+ }
+err:
+ dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+ return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ion_test_data *test_data = filp->private_data;
+ int ret = 0;
+
+ union {
+ struct ion_test_rw_data test_rw;
+ } data;
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_TEST_SET_FD:
+ {
+ struct dma_buf *dma_buf = NULL;
+ int fd = arg;
+
+ if (fd >= 0) {
+ dma_buf = dma_buf_get((int)arg);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+ }
+ if (test_data->dma_buf)
+ dma_buf_put(test_data->dma_buf);
+ test_data->dma_buf = dma_buf;
+ break;
+ }
+ case ION_IOC_TEST_DMA_MAPPING:
+ {
+ ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ case ION_IOC_TEST_KERNEL_MAPPING:
+ {
+ ret = ion_handle_test_kernel(test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data;
+ struct miscdevice *miscdev = file->private_data;
+
+ data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = miscdev->parent;
+
+ file->private_data = data;
+
+ return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data = file->private_data;
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ion_test_ioctl,
+ .open = ion_test_open,
+ .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ion_test_device *testdev;
+
+ testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+ GFP_KERNEL);
+ if (!testdev)
+ return -ENOMEM;
+
+ testdev->misc.minor = MISC_DYNAMIC_MINOR;
+ testdev->misc.name = "ion-test";
+ testdev->misc.fops = &ion_test_fops;
+ testdev->misc.parent = &pdev->dev;
+ ret = misc_register(&testdev->misc);
+ if (ret) {
+ pr_err("failed to register misc device.\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, testdev);
+
+ return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+ .driver = {
+ .name = "ion-test",
+ },
+};
+
+static int __init ion_test_init(void)
+{
+ platform_device_register_simple("ion-test", -1, NULL, 0);
+ return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+ platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 00000000000..352379a0269
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read or write a buffer
+ * @ptr: a pointer to an area at least as large as size
+ * @offset: offset into the ion buffer at which to start reading or writing
+ * @size: size to read or write
+ * @write: 1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+ __u64 ptr;
+ __u64 offset;
+ __u64 size;
+ int write;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma-buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+ _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as close as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
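
Finally, a hedged userspace sketch of driving the test device defined above; it assumes the caller already holds a dma-buf fd for an ion buffer (for example, one returned by ION_IOC_SHARE) and that the misc device appears as /dev/ion-test:

/* Hedged userspace sketch, not part of this patch. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion_test.h"

int fill_via_kernel_mapping(int buf_fd, size_t size)
{
	struct ion_test_rw_data rw;
	char pattern[4096];
	int fd = open("/dev/ion-test", O_RDWR);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (ioctl(fd, ION_IOC_TEST_SET_FD, buf_fd))	/* attach the dma-buf */
		goto out;

	memset(pattern, 0xaa, sizeof(pattern));
	memset(&rw, 0, sizeof(rw));
	rw.ptr = (uint64_t)(unsigned long)pattern;
	rw.offset = 0;
	rw.size = size < sizeof(pattern) ? size : sizeof(pattern);
	rw.write = 1;				/* copy pattern into the buffer */
	ret = ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw);

	ioctl(fd, ION_IOC_TEST_SET_FD, -1);	/* detach */
out:
	close(fd);
	return ret;
}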