aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorjoakim <joakim.landberg@stericsson.com>2010-08-27 10:04:23 +0200
committerJohn Rigby <john.rigby@linaro.org>2010-09-02 22:45:53 -0600
commitd6af4c608223547b5e0ecfcacfbc4c2fa7b3a5ae (patch)
tree78afaa7c8a09d8c9852530bf4c4ab8ad88548d53
parent9276a2f05cd460d05c25729021ec71b9ad6eeee9 (diff)
downloadlinux-2.6.34-ux500-d6af4c608223547b5e0ecfcacfbc4c2fa7b3a5ae.tar.gz
Add hardware memory driver to kernel
This driver provides a way to allocate contiguous system memory which can be used by hardware. Mcde has been modified in order to take advantage of hwmem. ST-Ericsson ID: WP270489 Signed-off-by: Joakim Landberg <joakim.landberg@stericsson.com> Change-Id: I5bf22754a343f8ba3dceb6305ce780f6e1a8b379 Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/4205 Reviewed-by: Linus WALLEIJ <linus.walleij@stericsson.com>
-rwxr-xr-xarch/arm/mach-ux500/board-mop500.c1
-rwxr-xr-xarch/arm/mach-ux500/devices.c31
-rw-r--r--arch/arm/mach-ux500/include/mach/devices.h1
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/hwmem/Makefile3
-rw-r--r--drivers/misc/hwmem/hwmem-ioctl.c431
-rw-r--r--drivers/misc/hwmem/hwmem-main.c493
-rw-r--r--drivers/video/mcde/mcde_fb.c40
-rw-r--r--include/linux/hwmem.h528
-rw-r--r--include/video/mcde_fb.h5
-rw-r--r--kernel.spec6
12 files changed, 1545 insertions, 4 deletions
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index ce173ed335e..fe33cf5d5ba 100755
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -1316,6 +1316,7 @@ static struct platform_device *platform_board_devs[] __initdata = {
&ab8500_bm_device,
&ste_ff_vibra_device,
&ux500_musb_device,
+ &ux500_hwmem_device,
&ux500_mcde_device,
&ux500_b2r2_device,
#ifdef CONFIG_ANDROID_PMEM
diff --git a/arch/arm/mach-ux500/devices.c b/arch/arm/mach-ux500/devices.c
index e962102e51d..6b9cc39ebc6 100755
--- a/arch/arm/mach-ux500/devices.c
+++ b/arch/arm/mach-ux500/devices.c
@@ -45,6 +45,7 @@
#include <mach/uart.h>
#include <mach/setup.h>
#include <mach/kpd.h>
+#include <linux/hwmem.h>
void __init u8500_register_device(struct platform_device *dev, void *data)
{
@@ -327,6 +328,36 @@ struct platform_device ux500_b2r2_device = {
.resource = b2r2_resources,
};
+static struct hwmem_platform_data hwmem_pdata = {
+ .start = 0,
+ .size = 0,
+};
+
+static int __init early_hwmem(char *p)
+{
+ hwmem_pdata.size = memparse(p, &p);
+
+ if (*p != '@')
+ goto no_at;
+
+ hwmem_pdata.start = memparse(p + 1, &p);
+
+ return 0;
+
+no_at:
+ hwmem_pdata.size = 0;
+
+ return -EINVAL;
+}
+early_param("hwmem", early_hwmem);
+
+struct platform_device ux500_hwmem_device = {
+ .name = "hwmem",
+ .dev = {
+ .platform_data = &hwmem_pdata,
+ },
+};
+
#ifdef CONFIG_ANDROID_PMEM
static int __init early_pmem_generic_parse(char *p, struct android_pmem_platform_data * data)
{
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index d334ef897c4..0d16c7f1a29 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -32,6 +32,7 @@ extern struct platform_device u8500_hsit_device;
extern struct platform_device u8500_hsir_device;
extern struct platform_device u8500_shrm_device;
extern struct platform_device ux500_b2r2_device;
+extern struct platform_device ux500_hwmem_device;
extern struct platform_device u8500_pmem_device;
extern struct platform_device u8500_pmem_mio_device;
extern struct platform_device u8500_pmem_hwb_device;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 7b0f51f11eb..5deb058567e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -393,6 +393,15 @@ config STE_IRRC
bool "STEricsson Infra Red Remote Control Driver"
default n
+config HWMEM
+ bool "Hardware memory driver"
+ default n
+ help
+ This driver provides a way to allocate contiguous system memory which
+ can be used by hardware. It also enables accessing hwmem allocated
+ memory buffers through a secure id which can be shared across processes.
+
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8c0bbe0323c..2db9cda9f6e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_U8500_SHRM) += shrm/
obj-$(CONFIG_STE_IRRC) += irrc.o
obj-$(CONFIG_AB8500_DENC) += ab8500_denc/
obj-$(CONFIG_STE_AUDIO_IO_DEV) += audio_io_dev/
+obj-$(CONFIG_HWMEM) += hwmem/
diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile
new file mode 100644
index 00000000000..a248fa33b4e
--- /dev/null
+++ b/drivers/misc/hwmem/Makefile
@@ -0,0 +1,3 @@
+hwmem-objs := hwmem-main.o hwmem-ioctl.o
+
+obj-$(CONFIG_HWMEM) += hwmem.o
diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c
new file mode 100644
index 00000000000..28e92cfd8d5
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-ioctl.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/mm_types.h>
+#include <linux/hwmem.h>
+
+/*
+ * TODO:
+ * Make sure ids can double as mmap offsets. Check how the kernel handles
+ * offsets and make sure our ids fulfill all the requirements.
+ *
+ * Count pin unpin at this level to ensure applications can't interfere
+ * with each other.
+ */
+
+struct hwmem_file {
+ struct mutex lock;
+ struct idr idr; /* id -> struct hwmem_alloc*, ref counted */
+ struct hwmem_alloc *fd_alloc; /* Ref counted */
+};
+
+static int create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
+{
+ int id, ret;
+
+ while (true) {
+ if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0) {
+ return -ENOMEM;
+ }
+
+ ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ return -ENOMEM;
+ }
+
+ return (id << PAGE_SHIFT); /* TODO: Probably not OK but works for now. */
+}
+
+static void remove_id(struct hwmem_file *hwfile, int id)
+{
+ idr_remove(&hwfile->idr, id >> PAGE_SHIFT);
+}
+
+static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, int id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) : hwfile->fd_alloc;
+ if (alloc == NULL)
+ alloc = ERR_PTR(-EINVAL);
+
+ return alloc;
+}
+
+static int alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ if (hwfile->fd_alloc)
+ return -EBUSY;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+}
+
+static int release(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ remove_id(hwfile, id);
+ hwmem_release(alloc);
+
+ return 0;
+}
+
+static int hwmem_ioctl_set_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, req->domain, &req->region);
+}
+
+static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_pin(alloc, &req->phys_addr, req->scattered_addrs);
+}
+
+static int unpin(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_unpin(alloc);
+
+ return 0;
+}
+
+static int set_access(struct hwmem_file *hwfile,
+ struct hwmem_set_access_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_access(alloc, req->access, req->pid);
+}
+
+static int get_info(struct hwmem_file *hwfile,
+ struct hwmem_get_info_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access);
+
+ return 0;
+}
+
+static int export(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /*
+ * TODO: The user could be about to send the buffer to a driver but
+ * there is a chance the current thread group doesn't have import rights
+ * if it gained access to the buffer via an inter-process fd transfer
+ * (fork, Android binder), if this is the case the driver will not be
+ * able to resolve the buffer name. To avoid this situation we give the
+ * current thread group import rights. This will not breach the
+ * security as the process already has access to the buffer (otherwise
+ * it would not be able to get here). This is not a problem right now
+ * as access control is not yet implemented.
+ */
+
+ return hwmem_get_name(alloc);
+}
+
+static int import(struct hwmem_file *hwfile, struct hwmem_import_request *req)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+
+ alloc = hwmem_resolve_by_name(req->name, &req->size, &req->mem_type,
+ &req->access);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int import_fd(struct hwmem_file *hwfile, struct hwmem_import_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ if (hwfile->fd_alloc)
+ return -EBUSY;
+
+ alloc = hwmem_resolve_by_name(req->name, &req->size, &req->mem_type,
+ &req->access);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+}
+
+static int hwmem_open(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile;
+
+ hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL);
+ if (hwfile == NULL)
+ return -ENOMEM;
+
+ idr_init(&hwfile->idr);
+ mutex_init(&hwfile->lock);
+ file->private_data = hwfile;
+
+ return 0;
+}
+
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct hwmem_file *hwfile = (struct hwmem_file*)file->private_data;
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, vma->vm_pgoff << PAGE_SHIFT);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_mmap(alloc, vma);
+}
+
+static int hwmem_release_idr_for_each_wrapper(int id, void* ptr, void* data)
+{
+ hwmem_release((struct hwmem_alloc*)ptr);
+
+ return 0;
+}
+
+int hwmem_release_fop(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile = (struct hwmem_file*)file->private_data;
+
+ idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL);
+ idr_destroy(&hwfile->idr);
+
+ if (hwfile->fd_alloc)
+ hwmem_release(hwfile->fd_alloc);
+
+ mutex_destroy(&hwfile->lock);
+
+ kfree(hwfile);
+
+ return 0;
+}
+
+long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOSYS;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ mutex_lock(&hwfile->lock);
+
+ switch (cmd) {
+ case HWMEM_ALLOC_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc(hwfile, &req);
+ }
+ break;
+ case HWMEM_ALLOC_FD_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc_fd(hwfile, &req);
+ }
+ break;
+ case HWMEM_RELEASE_IOC:
+ ret = release(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = hwmem_ioctl_set_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_PIN_IOC:
+ {
+ struct hwmem_pin_request req;
+ /*
+ * TODO: Validate and copy scattered_addrs. Not a
+ * problem right now as it's never used.
+ */
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ else
+ ret = pin(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_UNPIN_IOC:
+ ret = unpin(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_ACCESS_IOC:
+ {
+ struct hwmem_set_access_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_access_request)))
+ ret = -EFAULT;
+ else
+ ret = set_access(hwfile, &req);
+ }
+ break;
+ case HWMEM_GET_INFO_IOC:
+ {
+ struct hwmem_get_info_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ else
+ ret = get_info(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_EXPORT_IOC:
+ ret = export(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_IOC:
+ {
+ struct hwmem_import_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_import_request)))
+ ret = -EFAULT;
+ else
+ ret = import(hwfile, &req);
+ if (ret >= 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_import_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_IMPORT_FD_IOC:
+ {
+ struct hwmem_import_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_import_request)))
+ ret = -EFAULT;
+ else
+ ret = import_fd(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_import_request)))
+ ret = -EFAULT;
+ }
+ break;
+ }
+
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static struct file_operations hwmem_fops = {
+ .open = hwmem_open,
+ .mmap = hwmem_ioctl_mmap,
+ .unlocked_ioctl = hwmem_ioctl,
+ .release = hwmem_release_fop,
+};
+
+static struct miscdevice hwmem_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hwmem",
+ .fops = &hwmem_fops,
+};
+
+int __init hwmem_ioctl_init(void)
+{
+ return misc_register(&hwmem_device);
+}
+
+void __exit hwmem_ioctl_exit(void)
+{
+ misc_deregister(&hwmem_device);
+}
diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c
new file mode 100644
index 00000000000..303b0bcf2ec
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-main.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pid.h>
+#include <linux/list.h>
+#include <linux/hwmem.h>
+
+/*
+ * TODO:
+ * Investigate startup and shutdown, what requirements are there and do we
+ * fulfill them?
+ */
+
+struct alloc_threadg_info {
+ struct list_head list;
+ struct pid *threadg_pid; /* Ref counted */
+};
+
+struct hwmem_alloc {
+ struct list_head list;
+ atomic_t ref_cnt;
+ enum hwmem_alloc_flags flags;
+ u32 start;
+ u32 size;
+ u32 name;
+ struct list_head threadg_info_list;
+};
+
+static struct platform_device *hwdev;
+
+static u32 hwmem_start = 0;
+static u32 hwmem_size = 0;
+
+static LIST_HEAD(alloc_list);
+static DEFINE_IDR(global_idr);
+static DEFINE_MUTEX(lock);
+
+static void vm_open(struct vm_area_struct *vma);
+static void vm_close(struct vm_area_struct *vma);
+static struct vm_operations_struct vm_ops = {
+ .open = vm_open,
+ .close = vm_close,
+};
+
+/* Helpers */
+
+static void destroy_alloc_threadg_info(
+ struct alloc_threadg_info *alloc_threadg_info)
+{
+ kfree(alloc_threadg_info);
+}
+
+static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc)
+{
+ while (list_empty(&alloc->threadg_info_list) == 0) {
+ struct alloc_threadg_info *i = list_first_entry(
+ &alloc->threadg_info_list,
+ struct alloc_threadg_info, list);
+
+ list_del(&i->list);
+
+ destroy_alloc_threadg_info(i);
+ }
+}
+
+static void clean_alloc(struct hwmem_alloc *alloc)
+{
+ if (alloc->name) {
+ idr_remove(&global_idr, alloc->name);
+ alloc->name = 0;
+ }
+
+ clean_alloc_threadg_info_list(alloc);
+}
+
+static void destroy_alloc(struct hwmem_alloc *alloc)
+{
+ clean_alloc(alloc);
+
+ kfree(alloc);
+}
+
+static void __hwmem_release(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc *other;
+
+ clean_alloc(alloc);
+
+ other = list_entry(alloc->list.prev, struct hwmem_alloc, list);
+ if (alloc->list.prev != &alloc_list && atomic_read(&other->ref_cnt) == 0) {
+ other->size += alloc->size;
+ list_del(&alloc->list);
+ destroy_alloc(alloc);
+ alloc = other;
+ }
+ other = list_entry(alloc->list.next, struct hwmem_alloc, list);
+ if (alloc->list.next != &alloc_list && atomic_read(&other->ref_cnt) == 0) {
+ alloc->size += other->size;
+ list_del(&other->list);
+ destroy_alloc(other);
+ }
+}
+
+static struct hwmem_alloc *find_free_alloc_bestfit(u32 size)
+{
+ u32 best_diff = ~0;
+ struct hwmem_alloc *alloc = NULL, *i;
+
+ list_for_each_entry(i, &alloc_list, list)
+ {
+ u32 diff = i->size - size;
+ if (atomic_read(&i->ref_cnt) > 0 || i->size < size)
+ continue;
+ if (diff < best_diff) {
+ alloc = i;
+ best_diff = diff;
+ }
+ }
+
+ return alloc != NULL ? alloc : ERR_PTR(-ENOMEM);
+}
+
+static struct hwmem_alloc *split_allocation(struct hwmem_alloc *alloc,
+ u32 new_alloc_size)
+{
+ struct hwmem_alloc *new_alloc;
+
+ new_alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
+ if (new_alloc == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ atomic_inc(&new_alloc->ref_cnt);
+ INIT_LIST_HEAD(&new_alloc->threadg_info_list);
+ new_alloc->start = alloc->start;
+ new_alloc->size = new_alloc_size;
+ alloc->size -= new_alloc_size;
+ alloc->start += new_alloc_size;
+
+ list_add_tail(&new_alloc->list, &alloc->list);
+
+ return new_alloc;
+}
+
+static int init_alloc_list(void)
+{
+ struct hwmem_alloc *first_alloc;
+
+ first_alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
+ if (first_alloc == NULL)
+ return -ENOMEM;
+
+ first_alloc->start = hwmem_start;
+ first_alloc->size = hwmem_size;
+ INIT_LIST_HEAD(&first_alloc->threadg_info_list);
+
+ list_add_tail(&first_alloc->list, &alloc_list);
+
+ return 0;
+}
+
+static void clean_alloc_list(void)
+{
+ while (list_empty(&alloc_list) == 0) {
+ struct hwmem_alloc *i = list_first_entry(&alloc_list,
+ struct hwmem_alloc, list);
+
+ list_del(&i->list);
+
+ destroy_alloc(i);
+ }
+}
+
+/* HWMEM API */
+
+struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type)
+{
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&lock);
+
+ size = PAGE_ALIGN(size);
+
+ alloc = find_free_alloc_bestfit(size);
+ if (IS_ERR(alloc)) {
+ dev_info(&hwdev->dev, "Allocation failed, no free slot\n");
+ goto no_slot;
+ }
+
+ if (size < alloc->size) {
+ alloc = split_allocation(alloc, size);
+ if (IS_ERR(alloc))
+ goto split_alloc_failed;
+ }
+ else
+ atomic_inc(&alloc->ref_cnt);
+
+ alloc->flags = flags;
+
+ goto out;
+
+split_alloc_failed:
+no_slot:
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_alloc);
+
+void hwmem_release(struct hwmem_alloc *alloc)
+{
+ mutex_lock(&lock);
+
+ if (atomic_dec_and_test(&alloc->ref_cnt))
+ __hwmem_release(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_release);
+
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ mutex_lock(&lock);
+
+ if (domain == HWMEM_DOMAIN_SYNC)
+ /*
+ * TODO: Here we want to drain write buffers and wait for
+ * completion but there is no linux function that does that.
+ * On ARMv7 wmb() is implemented in a way that fullfills our
+ * requirements so this should be fine for now.
+ */
+ wmb();
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_set_domain);
+
+int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr,
+ uint32_t *scattered_phys_addrs)
+{
+ mutex_lock(&lock);
+
+ *phys_addr = alloc->start;
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_pin);
+
+void hwmem_unpin(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_unpin);
+
+static void vm_open(struct vm_area_struct *vma)
+{
+ atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ hwmem_release((struct hwmem_alloc *)vma->vm_private_data);
+}
+
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma)
+{
+ int ret = 0;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+
+ mutex_lock(&lock);
+
+ if (vma_size > (unsigned long)alloc->size) {
+ ret = -EINVAL;
+ goto illegal_size;
+ }
+
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_private_data = (void *)alloc;
+ atomic_inc(&alloc->ref_cnt);
+ vma->vm_ops = &vm_ops;
+
+ ret = remap_pfn_range(vma, vma->vm_start, alloc->start >> PAGE_SHIFT,
+ min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot);
+ if (ret < 0)
+ goto map_failed;
+
+ goto out;
+
+map_failed:
+ atomic_dec(&alloc->ref_cnt);
+illegal_size:
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_mmap);
+
+int hwmem_set_access(struct hwmem_alloc *alloc, enum hwmem_access access,
+ pid_t pid)
+{
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_set_access);
+
+void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access)
+{
+ mutex_lock(&lock);
+
+ *size = alloc->size;
+ *mem_type = HWMEM_MEM_CONTIGUOUS_SYS;
+ *access = HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT;
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_get_info);
+
+int hwmem_get_name(struct hwmem_alloc *alloc)
+{
+ int ret = 0, name;
+
+ mutex_lock(&lock);
+
+ if (alloc->name != 0) {
+ ret = alloc->name;
+ goto out;
+ }
+
+ while (true) {
+ if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto pre_get_id_failed;
+ }
+
+ ret = idr_get_new_above(&global_idr, alloc, 1, &name);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ goto get_id_failed;
+ }
+
+ alloc->name = name;
+
+ ret = name;
+ goto out;
+
+get_id_failed:
+pre_get_id_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_get_name);
+
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name, u32 *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access)
+{
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&lock);
+
+ alloc = idr_find(&global_idr, name);
+ if (alloc == NULL) {
+ alloc = ERR_PTR(-EINVAL);
+ goto find_failed;
+ }
+ atomic_inc(&alloc->ref_cnt);
+ *size = alloc->size;
+ *mem_type = HWMEM_MEM_CONTIGUOUS_SYS;
+ *access = HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT;
+
+ goto out;
+
+find_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_resolve_by_name);
+
+/* Module */
+
+extern int hwmem_ioctl_init(void);
+extern void hwmem_ioctl_exit(void);
+
+static int __devinit hwmem_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct hwmem_platform_data *platform_data = pdev->dev.platform_data;
+
+ if (hwdev || platform_data->size == 0) {
+ dev_info(&pdev->dev, "hwdev || platform_data->size == 0\n");
+ return -EINVAL;
+ }
+
+ hwdev = pdev;
+ hwmem_start = platform_data->start;
+ hwmem_size = platform_data->size;
+
+ ret = init_alloc_list();
+ if (ret < 0)
+ goto init_alloc_list_failed;
+
+ ret = hwmem_ioctl_init();
+ if (ret)
+ goto ioctl_init_failed;
+
+ dev_info(&pdev->dev, "Hwmem probed, device contains %#x bytes\n", hwmem_size);
+
+ goto out;
+
+ioctl_init_failed:
+ clean_alloc_list();
+init_alloc_list_failed:
+ hwdev = NULL;
+out:
+ return ret;
+}
+
+static int __devexit hwmem_remove(struct platform_device *pdev)
+{
+ /*
+ * TODO: This should never happen but if it does we risk crashing the system.
+ * After this call I assume pdev, i.e. hwdev, is invalid and must not be used but
+ * we will nonetheless use it when printing in the log.
+ */
+ printk(KERN_ERR "Hwmem device removed. Hwmem driver can not yet handle this.\n Any usage of hwmem beyond this point may cause the system to crash.\n");
+
+ return 0;
+}
+
+static struct platform_driver hwmem_driver = {
+ .probe = hwmem_probe,
+ .remove = hwmem_remove,
+ .driver = {
+ .name = "hwmem",
+ },
+};
+
+static int __init hwmem_init(void)
+{
+ return platform_driver_register(&hwmem_driver);
+}
+subsys_initcall(hwmem_init);
+
+static void __exit hwmem_exit(void)
+{
+ hwmem_ioctl_exit();
+
+ platform_driver_unregister(&hwmem_driver);
+
+ /*
+ * TODO: Release allocated resources! Not a big issue right now as
+ * this code is always built into the kernel and thus this function
+ * is never called.
+ */
+}
+module_exit(hwmem_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hardware memory driver");
+
diff --git a/drivers/video/mcde/mcde_fb.c b/drivers/video/mcde/mcde_fb.c
index b5515b85a18..e103085aee1 100644
--- a/drivers/video/mcde/mcde_fb.c
+++ b/drivers/video/mcde/mcde_fb.c
@@ -16,6 +16,9 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/hwmem.h>
+#include <linux/io.h>
+
#include <video/mcde_fb.h>
#define MCDE_FB_BPP_MAX 16
@@ -189,15 +192,32 @@ static int reallocate_fb_mem(struct fb_info *fbi, u32 size)
/* TODO: hwmem */
#ifdef CONFIG_MCDE_FB_AVOID_REALLOC
if (!fbi->screen_base) {
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ struct hwmem_alloc *alloc;
+ uint32_t phys_addr;
+ int name;
size_max = MCDE_FB_BPP_MAX / 8 * MCDE_FB_VXRES_MAX *
MCDE_FB_VYRES_MAX;
- vaddr = dma_alloc_coherent(fbi->dev, size_max, &paddr,
- GFP_KERNEL|GFP_DMA);
- if (!vaddr)
- return -ENOMEM;
+ alloc = hwmem_alloc(size_max, HWMEM_ALLOC_BUFFERED,
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+ name = hwmem_get_name(alloc);
+ if (name < 0) {
+ hwmem_release(alloc);
+ return name;
+ }
+
+ (void)hwmem_pin(alloc, &phys_addr, NULL);
+ paddr = phys_addr;
+ vaddr = ioremap(phys_addr, size_max);
fbi->screen_base = vaddr;
fbi->fix.smem_start = paddr;
+ mfb->alloc = alloc;
+ mfb->alloc_name = name;
}
#else
vaddr = dma_alloc_coherent(fbi->dev, size, &paddr, GFP_KERNEL|GFP_DMA);
@@ -510,6 +530,17 @@ static void mcde_fb_rotate(struct fb_info *fbi, int rotate)
dev_vdbg(fbi->dev, "%s\n", __func__);
}
+static int mcde_fb_ioctl(struct fb_info *fbi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ if (cmd == MCDE_GET_BUFFER_NAME_IOC)
+ return mfb->alloc_name;
+
+ return -EINVAL;
+}
+
static struct fb_ops fb_ops = {
/* creg, cmap */
.owner = THIS_MODULE,
@@ -525,6 +556,7 @@ static struct fb_ops fb_ops = {
.fb_blank = mcde_fb_blank,
.fb_pan_display = mcde_fb_pan_display,
.fb_rotate = mcde_fb_rotate,
+ .fb_ioctl = mcde_fb_ioctl,
};
/* FB driver */
diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h
new file mode 100644
index 00000000000..2fe63f6fd05
--- /dev/null
+++ b/include/linux/hwmem.h
@@ -0,0 +1,528 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson HW memory driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _HWMEM_H_
+#define _HWMEM_H_
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#endif
+
+#define HWMEM_DEFAULT_DEVICE_NAME "hwmem"
+
+/**
+ * @brief Flags defining behavior of allocation
+ */
+enum hwmem_alloc_flags {
+ /**
+ * @brief Buffer will not be cached and not buffered
+ */
+ HWMEM_ALLOC_UNCACHED = (0 << 0),
+ /**
+ * @brief Buffer will be buffered, but not cached
+ */
+ HWMEM_ALLOC_BUFFERED = (1 << 0),
+ /**
+ * @brief Buffer will be cached and buffered, use cache hints to be
+ * more specific
+ */
+ HWMEM_ALLOC_CACHED = (3 << 0),
+ /**
+ * @brief Buffer should be cached write-back in both level 1 and 2 cache
+ */
+ HWMEM_ALLOC_CACHE_HINT_WB = (1 << 2),
+ /**
+ * @brief Buffer should be cached write-through in both level 1 and
+ * 2 cache
+ */
+ HWMEM_ALLOC_CACHE_HINT_WT = (2 << 2),
+ /**
+ * @brief Buffer should be cached write-back in level 1 cache
+ */
+ HWMEM_ALLOC_CACHE_HINT_WB_INNER = (3 << 2),
+ /**
+ * @brief Buffer should be cached write-through in level 1 cache
+ */
+ HWMEM_ALLOC_CACHE_HINT_WT_INNER = (4 << 2),
+ HWMEM_ALLOC_CACHE_HINT_MASK = 0x1C,
+};
+
+/**
+ * @brief Flags defining buffer access mode.
+ */
+enum hwmem_access {
+ /**
+ * @brief Buffer will be read from.
+ */
+ HWMEM_ACCESS_READ = (1 << 0),
+ /**
+ * @brief Buffer will be written to.
+ */
+ HWMEM_ACCESS_WRITE = (1 << 1),
+ /**
+ * @brief Buffer will be imported.
+ */
+ HWMEM_ACCESS_IMPORT = (1 << 2),
+};
+
+/**
+ * @brief Flags defining memory type.
+ */
+enum hwmem_mem_type {
+ /**
+ * @brief Scattered system memory. Currently not supported!
+ */
+ HWMEM_MEM_SCATTERED_SYS = (1 << 0),
+ /**
+ * @brief Contiguous system memory.
+ */
+ HWMEM_MEM_CONTIGUOUS_SYS = (1 << 1),
+};
+
+/**
+ * @brief Values defining memory domain.
+ */
+enum hwmem_domain {
+ /**
+ * @brief This value specifies the neutral memory domain. Setting this
+ * domain will synchronize all supported memory domains (currently CPU).
+ */
+ HWMEM_DOMAIN_SYNC = 0,
+ /**
+ * @brief This value specifies the CPU memory domain.
+ */
+ HWMEM_DOMAIN_CPU = 1,
+};
+
+/**
+ * @brief Structure defining a region of a memory buffer.
+ *
+ * A buffer is defined to contain a number of equally sized blocks. Each block
+ * has a part of it included in the region [<start>-<end>). That is
+ * <end>-<start> bytes. Each block is <size> bytes long. Total number of bytes
+ * in the region is (<end> - <start>) * <count>. First byte of the region is
+ * <skip> * <size> + <start> bytes into the buffer.
+ *
+ * Here's an example of a region in a graphics buffer (X = buffer, R = region):
+ *
+ * XXXXXXXXXXXXXXXXXXXX \
+ * XXXXXXXXXXXXXXXXXXXX |-- skip = 3
+ * XXXXXXXXXXXXXXXXXXXX /
+ * XXRRRRRRRRXXXXXXXXXX \
+ * XXRRRRRRRRXXXXXXXXXX |-- count = 4
+ * XXRRRRRRRRXXXXXXXXXX |
+ * XXRRRRRRRRXXXXXXXXXX /
+ * XXXXXXXXXXXXXXXXXXXX
+ * --| start = 2
+ * ----------| end = 10
+ * --------------------| size = 20
+ */
+struct hwmem_region {
+ /**
+ * @brief Indicates that region starts skip * size bytes from beginning
+ * of buffer.
+ */
+ uint32_t skip;
+ /**
+ * @brief The number of blocks included in this region.
+ */
+ uint32_t count;
+ /**
+ * @brief The index of the first byte included in this block.
+ */
+ uint32_t start;
+ /**
+ * @brief The index of the last byte included in this block plus one.
+ */
+ uint32_t end;
+ /**
+ * @brief The size in bytes of each block.
+ */
+ uint32_t size;
+};
+
+/* User space API */
+
+/**
+ * @brief Alloc request data.
+ */
+struct hwmem_alloc_request {
+ /**
+ * @brief [in] Size of requested allocation in bytes. Size will be
+ * aligned to PAGE_SIZE bytes.
+ */
+ uint32_t size;
+ /**
+ * @brief [in] Flags describing requested allocation options.
+ */
+ uint32_t flags; /* enum hwmem_alloc_flags */
+ /**
+ * @brief [in] Default access rights for buffer.
+ */
+ uint32_t default_access; /* enum hwmem_access */
+ /**
+ * @brief [in] Memory type of the buffer.
+ */
+ uint32_t mem_type; /* enum hwmem_mem_type */
+};
+
+/**
+ * @brief Set domain request data.
+ */
+struct hwmem_set_domain_request {
+ /**
+ * @brief [in] Identifier of buffer to be prepared. If 0 is specified
+ * the buffer associated with the current file instance will be used.
+ */
+ int32_t id;
+ /**
+ * @brief [in] Value specifying the new memory domain.
+ */
+ uint32_t domain; /* enum hwmem_domain */
+ /**
+ * @brief [in] Flags specifying access mode of the operation.
+ *
+ * One of HWMEM_ACCESS_READ and HWMEM_ACCESS_WRITE is required.
+ * For details, @see enum hwmem_access.
+ */
+ uint32_t access; /* enum hwmem_access */
+ /**
+ * @brief [in] The region of bytes to be prepared.
+ *
+ * For details, @see struct hwmem_region.
+ */
+ struct hwmem_region region;
+};
+
+/**
+ * @brief Pin request data.
+ */
+struct hwmem_pin_request {
+ /**
+ * @brief [in] Identifier of buffer to be pinned. If 0 is specified,
+ * the buffer associated with the current file instance will be used.
+ */
+ int32_t id;
+ /**
+ * @brief [out] Physical address of first word in buffer.
+ */
+ uint32_t phys_addr;
+ /**
+ * @brief [in] Pointer to buffer for physical addresses of pinned
+ * scattered buffer. Buffer must be (buffer_size / page_size) *
+ * sizeof(uint32_t) bytes.
+ * This field can be NULL for physically contiguous buffers.
+ */
+ uint32_t* scattered_addrs;
+};
+
+/**
+ * @brief Set access rights request data.
+ */
+struct hwmem_set_access_request {
+ /**
+ * @brief [in] Identifier of buffer to set access rights for. If 0 is
+ * the buffer associated with the current file instance will be used.
+ */
+ int32_t id;
+ /**
+ * @param access Access value indicating what is allowed.
+ */
+ uint32_t access; /* enum hwmem_access */
+ /**
+ * @param pid Process ID to set rights for.
+ */
+ pid_t pid;
+};
+
+/**
+ * @brief Import request data.
+ */
+struct hwmem_import_request {
+ /**
+ * @brief [in] Global name of buffer to be imported.
+ */
+ int32_t name;
+ /**
+ * @brief [out] Size in bytes of imported buffer.
+ */
+ uint32_t size;
+ /**
+ * @brief [out] Memory type of the imported buffer.
+ */
+ uint32_t mem_type; /* enum hwmem_mem_type */
+ /**
+ * @brief [out] Access rights for the imported buffer.
+ */
+ uint32_t access; /* enum hwmem_access */
+};
+
+/**
+ * @brief Get info request data.
+ */
+struct hwmem_get_info_request {
+ /**
+ * @brief [in] Identifier of buffer to get info about. If 0 is specified,
+ * the buffer associated with the current file instance will be used.
+ */
+ int32_t id;
+ /**
+ * @brief [out] Size in bytes of buffer.
+ */
+ uint32_t size;
+ /**
+ * @brief [out] Memory type of buffer.
+ */
+ uint32_t mem_type; /* enum hwmem_mem_type */
+ /**
+ * @brief [out] Access rights for buffer.
+ */
+ uint32_t access; /* enum hwmem_access */
+};
+
+/**
+ * @brief Allocates <size> number of bytes and returns a buffer identifier.
+ *
+ * Input is a pointer to a hwmem_alloc_request struct.
+ *
+ * @return A buffer identifier on success, or a negative error code.
+ */
+#define HWMEM_ALLOC_IOC _IOW('W', 1, struct hwmem_alloc_request)
+
+/**
+ * @brief Allocates <size> number of bytes and associates the created buffer
+ * with the current file instance.
+ *
+ * If the current file instance is already associated with a buffer the call
+ * will fail. Buffers referenced through files instances shall not be released
+ * with HWMEM_RELEASE_IOC, instead the file instance shall be closed.
+ *
+ * Input is a pointer to a hwmem_alloc_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_ALLOC_FD_IOC _IOW('W', 2, struct hwmem_alloc_request)
+
+/**
+ * @brief Releases buffer.
+ *
+ * Buffers are reference counted and will not be destroyed until the last
+ * reference is released. Buffers allocated with HWMEM_ALLOC_FD_IOC are not allowed.
+ *
+ * Input is the buffer identifier.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_RELEASE_IOC _IO('W', 3)
+
+/**
+ * @brief Set the buffer's memory domain and prepares it for access.
+ *
+ * Input is a pointer to a hwmem_set_domain_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_DOMAIN_IOC _IOR('W', 4, struct hwmem_set_domain_request)
+
+/**
+ * @brief Pins the buffer and returns the physical address of the buffer.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_PIN_IOC _IOWR('W', 5, struct hwmem_pin_request)
+
+/**
+ * @brief Unpins the buffer.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_UNPIN_IOC _IO('W', 6)
+
+/**
+ * @brief Set access rights for buffer.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_ACCESS_IOC _IOW('W', 7, struct hwmem_set_access_request)
+
+/**
+ * @brief Get buffer information.
+ *
+ * Input is the buffer identifier. If 0 is specified the buffer associated
+ * with the current file instance will be used.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_GET_INFO_IOC _IOWR('W', 8, struct hwmem_get_info_request)
+
+/**
+ * @brief Export the buffer identifier for use in another process.
+ *
+ * The global name will not increase the buffers reference count and will
+ * therefore not keep the buffer alive.
+ *
+ * Input is the buffer identifier. If 0 is specified the buffer associated with
+ * the current file instance will be exported.
+ *
+ * @return A global buffer name on success, or a negative error code.
+ */
+#define HWMEM_EXPORT_IOC _IO('W', 9)
+
+/**
+ * @brief Import a buffer to allow local access to the buffer.
+ *
+ * Input is a pointer to a hwmem_import_request struct. The name must be
+ * set to a valid global name retrieved by a previous call to HWMEM_EXPORT_IOC.
+ *
+ * @return The imported buffer's identifier on success, or a negative error code.
+ */
+#define HWMEM_IMPORT_IOC _IOWR('W', 10, struct hwmem_import_request)
+
+/**
+ * @brief Import a buffer to allow local access to the buffer using fd.
+ *
+ * Input is a pointer to a hwmem_import_request struct. The name must be
+ * set to a valid global name retrieved by a previous call to HWMEM_EXPORT_IOC.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_IMPORT_FD_IOC _IOWR('W', 11, struct hwmem_import_request)
+
+#ifdef __KERNEL__
+
+/* Kernel API */
+
+struct hwmem_alloc;
+
+/**
+ * @brief Allocates <size> number of bytes.
+ *
+ * @param size Number of bytes to allocate. All allocations are page aligned.
+ * @param flags Allocation options.
+ * @param def_access Default buffer access rights.
+ * @param mem_type Memory type.
+ *
+ * @return Pointer to allocation, or a negative error code.
+ */
+struct hwmem_alloc *hwmem_alloc(u32 size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type);
+
+/**
+ * @brief Release a previously allocated buffer.
+ * When last reference is released, the buffer will be freed.
+ *
+ * @param alloc Buffer to be released.
+ */
+void hwmem_release(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Set the buffer domain and prepare it for access.
+ *
+ * @param alloc Buffer to be prepared.
+ * @param access Flags defining memory access mode of the call.
+ * @param domain Value specifying the memory domain.
+ * @param region Structure defining the minimum area of the buffer to be
+ * prepared.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region);
+
+/**
+ * @brief Pins the buffer.
+ *
+ * @param alloc Buffer to be pinned.
+ * @param phys_addr Reference to variable to receive physical address.
+ * @param scattered_phys_addrs Pointer to buffer to receive physical addresses
+ * of all pages in the scattered buffer. Can be NULL if buffer is contiguous.
+ * Buffer size must be (buffer_size / page_size) * sizeof(uint32_t) bytes.
+ */
+int hwmem_pin(struct hwmem_alloc *alloc, uint32_t *phys_addr,
+ uint32_t *scattered_phys_addrs);
+
+/**
+ * @brief Unpins the buffer.
+ *
+ * @param alloc Buffer to be unpinned.
+ */
+void hwmem_unpin(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Map the buffer to user space.
+ *
+ * @param alloc Buffer to be mapped.
+ */
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma);
+
+/**
+ * @brief Set access rights for buffer.
+ *
+ * @param alloc Buffer to set rights for.
+ * @param access Access value indicating what is allowed.
+ * @param pid Process ID to set rights for.
+ */
+int hwmem_set_access(struct hwmem_alloc *alloc, enum hwmem_access access,
+ pid_t pid);
+
+/**
+ * @brief Get buffer information.
+ *
+ * @param alloc Buffer to get information about.
+ * @param size Pointer to size output variable.
+ * @param mem_type Pointer to memory type output variable.
+ * @param access Pointer to access rights output variable.
+ */
+void hwmem_get_info(struct hwmem_alloc *alloc, uint32_t *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access);
+
+/**
+ * @brief Allocate a global buffer name.
+ * Generated buffer name is valid in all processes. Consecutive calls will get
+ * the same name for the same buffer.
+ *
+ * @param alloc Buffer to be made public.
+ *
+ * @return Positive global name on success, or a negative error code.
+ */
+int hwmem_get_name(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Import the global buffer name to allow local access to the buffer.
+ * This call will add a buffer reference. Resulting buffer should be
+ * released with a call to hwmem_release.
+ *
+ * @param name A valid global buffer name.
+ * @param size Pointer to size output variable.
+ * @param mem_type Pointer to memory type output variable.
+ * @param access Pointer to access rights output variable.
+ *
+ * @return Pointer to allocation, or a negative error code.
+ */
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name, u32 *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access);
+
+/* Internal */
+
+struct hwmem_platform_data
+{
+ /* Starting physical address of memory region */
+ unsigned long start;
+ /* Size of memory region */
+ unsigned long size;
+};
+
+#endif
+
+#endif /* _HWMEM_H_ */
diff --git a/include/video/mcde_fb.h b/include/video/mcde_fb.h
index 8aa3d134e74..1ef1c7175e5 100644
--- a/include/video/mcde_fb.h
+++ b/include/video/mcde_fb.h
@@ -17,12 +17,15 @@
#include <stdint.h>
#else
#include <linux/types.h>
+#include <linux/hwmem.h>
#endif
#ifdef __KERNEL__
#include "mcde_dss.h"
#endif
+#define MCDE_GET_BUFFER_NAME_IOC _IO('M', 1)
+
#ifdef __KERNEL__
#define to_mcde_fb(x) ((struct mcde_fb *)(x)->par)
@@ -34,6 +37,8 @@ struct mcde_fb {
u32 pseudo_palette[17];
enum mcde_ovly_pix_fmt pix_fmt;
int id;
+ struct hwmem_alloc *alloc;
+ int alloc_name;
};
/* MCDE fbdev API */
diff --git a/kernel.spec b/kernel.spec
index f5cf7a09a18..ab96429c43d 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -384,10 +384,16 @@ BuildKernel() {
# Enable pmem
scripts/config --file .config --enable CONFIG_ANDROID_PMEM
+ # Enable hwmem
+ scripts/config --file .config --enable CONFIG_HWMEM
+
# STE: Enable conf for external sd-cards.
scripts/config --file .config --enable LEVELSHIFTER_HREF_V1_PLUS
# STE: Enable g_multi USB gadget with RNDIS, CDC Serial and Storage configuration.
scripts/config --file .config --module CONFIG_USB_G_MULTI --enable CONFIG_USB_G_MULTI_RNDIS --disable USB_G_MULTI_CDC
+ # STE: Enable CONFIG_MCDE_FB_AVOID_REALLOC to avoid reallocations.
+ scripts/config --file .config --enable CONFIG_MCDE_FB_AVOID_REALLOC
+ scripts/config --file .config --enable CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_AUTO_SYNC
Arch="x86"
%ifarch %{all_arm}