Diffstat (limited to 'drivers/gpu/drm/exynos/exynos_drm_gem.c')
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 435
1 file changed, 219 insertions(+), 216 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664..d48183e7e05 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- if (!IS_NONCONTIG_BUFFER(flags)) {
- if (size >= SZ_1M)
- return roundup(size, SECTION_SIZE);
- else if (size >= SZ_64K)
- return roundup(size, SZ_64K);
- else
- goto out;
- }
-out:
- return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct page *p, **pages;
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < npages; i++) {
- p = alloc_page(gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
- }
-
- return pages;
-
-fail:
- while (--i)
- __free_page(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages)
-{
- int npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- while (--npages >= 0)
- __free_page(pages[npages]);
+ /* TODO */
- drm_free_large(pages);
+ return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
unsigned long pfn;
+ int i;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->pages)
- return -EINTR;
-
- pfn = page_to_pfn(buf->pages[page_offset++]);
- } else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
- return vm_insert_mixed(vma, f_vaddr, pfn);
-}
+ if (!buf->sgt)
+ return -EINTR;
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- struct page **pages;
- unsigned int npages, i = 0;
- int ret;
-
- if (buf->pages) {
- DRM_DEBUG_KMS("already allocated.\n");
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
- if (IS_ERR(pages)) {
- DRM_ERROR("failed to get pages.\n");
- return PTR_ERR(pages);
- }
-
- npages = obj->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
-
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- ret = -EFAULT;
- goto err1;
- }
-
sgl = buf->sgt->sgl;
-
- /* set all pages to sg list. */
- while (i < npages) {
- sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
- sg_dma_address(sgl) = page_to_phys(pages[i]);
- i++;
- sgl = sg_next(sgl);
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
}
- /* add some codes for UNCACHED type here. TODO */
-
- buf->pages = pages;
- return ret;
-err1:
- kfree(buf->sgt);
- buf->sgt = NULL;
-err:
- exynos_gem_put_pages(obj, pages);
- return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
- /*
- * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
- * allocated at gem fault handler.
- */
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- exynos_gem_put_pages(obj, buf->pages);
- buf->pages = NULL;
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
- /* add some codes for UNCACHED type here. TODO */
+ return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
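
The rewritten fault path above no longer keeps a struct page array; it
resolves the faulting page offset by walking the buffer's sg_table. A
standalone sketch of the same lookup, with a hypothetical helper name
that is not part of the patch:

	/* Hypothetical helper mirroring exynos_drm_gem_map_buf():
	 * walk the scatter-gather list until the page offset falls
	 * inside an entry, then add the remainder to that entry's pfn.
	 * Needs <linux/scatterlist.h>.
	 */
	static unsigned long sg_offset_to_pfn(struct sg_table *sgt,
					      pgoff_t page_offset)
	{
		struct scatterlist *sgl;
		int i;

		for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
			if (page_offset < (sgl->length >> PAGE_SHIFT))
				break;
			page_offset -= sgl->length >> PAGE_SHIFT;
		}

		return __phys_to_pfn(sg_phys(sgl)) + page_offset;
	}
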
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if (!buf->pages)
- return;
-
/*
* do not release memory region from exporter.
*
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
- exynos_drm_gem_put_pages(obj);
- else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
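
With the NONCONTIG branch gone, every allocation funnels through
exynos_drm_alloc_buf(), which lives in exynos_drm_buf.c and is outside
this diff. A hedged sketch of the general pattern it follows (the
attribute choices are assumptions inferred from the dma_attrs and
pages fields used later in this patch, not the driver's exact code):

	init_dma_attrs(&buf->dma_attrs);

	/* fully contiguous memory only when the user asked for it */
	if (!(flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/* no kernel mapping: dma_alloc_attrs() then returns an opaque
	 * cookie rather than a kernel virtual address, which the
	 * driver can keep in buf->pages for dma_mmap_attrs() later.
	 */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	buf->pages = dma_alloc_attrs(dev->dev, buf->size, &buf->dma_addr,
				     GFP_KERNEL, &buf->dma_attrs);
	if (!buf->pages)
		return -ENOMEM;

For non-contiguous buffers the IOMMU behind the DMA-mapping backend
still hands back a device-contiguous dma_addr, which is what makes the
simplifications in the following hunks possible.
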
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
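
Dropping the NONCONTIG rejection here is the payoff of the allocation
change: buffer->dma_addr is now valid for both buffer types. A hedged
usage sketch of the accessor pair (caller context and names assumed):

	dma_addr_t *addr;

	addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_handle, filp);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* ... program the device with *addr ... */

	/* releases the reference taken by the lookup above */
	exynos_drm_gem_put_dma_addr(drm_dev, gem_handle, filp);
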
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return;
- }
-
drm_gem_object_unreference_unlocked(obj);
/*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+ struct file *filp)
+{
+ struct drm_file *file_priv;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ /* find current process's drm_file from filelist. */
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+ if (file_priv->filp == filp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return file_priv;
+ }
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ WARN_ON(1);
+
+ return ERR_PTR(-EFAULT);
+}
+
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+ struct drm_file *file_priv;
+ unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = obj;
+ vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+ /* restore it to driver's fops. */
+ filp->f_op = fops_get(drm_dev->driver->fops);
+
+ file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+ if (IS_ERR(file_priv))
+ return PTR_ERR(file_priv);
+
+ /* restore it to drm_file. */
+ filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = usize = vma->vm_end - vma->vm_start;
+ vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- int i = 0;
-
- if (!buffer->pages)
- return -EINVAL;
+ ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
- vma->vm_flags |= VM_MIXEDMAP;
+	/*
+	 * Take a reference to this mapping of the object; it is
+	 * dropped by the corresponding vm_close call.
+	 */
+ drm_gem_object_reference(obj);
- do {
- ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
- if (ret) {
- DRM_ERROR("failed to remap user space.\n");
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- } else {
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
- }
- }
+ mutex_lock(&drm_dev->struct_mutex);
+ drm_vm_open_locked(drm_dev, vma);
+ mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
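
A single dma_mmap_attrs() call replaces both removed branches, the
vm_insert_page() loop for non-contiguous buffers and the
remap_pfn_range() path for contiguous ones, because the DMA-mapping
backend that performed the allocation knows the buffer's layout. Note
that buffer->pages is passed as the cpu_addr argument; assuming
DMA_ATTR_NO_KERNEL_MAPPING was set at allocation time (see the sketch
above), it is the opaque cookie dma_alloc_attrs() returned, not a real
kernel address. The signature of this era, for reference:

	int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size,
			   struct dma_attrs *attrs);

The drm_gem_object_reference()/drm_vm_open_locked() pair then makes
the mapping itself hold a reference on the object, dropped by the DRM
core's vm_close handler.
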
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj->filp->f_op = &exynos_drm_gem_fops;
- obj->filp->private_data = obj;
+ /*
+ * Set the specific mapper's fops; it will be restored by
+ * exynos_drm_gem_mmap_buffer to dev->driver->fops.
+ * This is used to call the specific mapper temporarily.
+ */
+ file_priv->filp->f_op = &exynos_drm_gem_fops;
- addr = vm_mmap(obj->filp, 0, args->size,
+ /*
+ * Set the gem object to private_data so that the specific mapper
+ * can get the gem object; it will be restored by
+ * exynos_drm_gem_mmap_buffer to drm_file.
+ */
+ file_priv->filp->private_data = obj;
+
+ addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
- if (IS_ERR((void *)addr))
+ if (IS_ERR((void *)addr)) {
+ file_priv->filp->private_data = file_priv;
return PTR_ERR((void *)addr);
+ }
args->mapped = addr;
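
The ioctl temporarily swaps the DRM file's f_op and private_data so
that vm_mmap() is routed into exynos_drm_gem_mmap_buffer(), which
restores both (see the mmap_buffer hunk above). The temporary fops is
presumably defined earlier in this file; a minimal sketch of what it
has to contain:

	/* Sketch; only .mmap matters, since vm_mmap() is the sole
	 * entry point while the swap is in effect. The file's actual
	 * definition is not shown in this diff.
	 */
	static const struct file_operations exynos_drm_gem_fops = {
		.mmap = exynos_drm_gem_mmap_buffer,
	};

One caveat visible in the error path: if vm_mmap() fails before the
mmap handler runs, only private_data is restored here, while f_op is
restored inside exynos_drm_gem_mmap_buffer().
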
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (!vma_copy)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma)
+{
+ int get_npages;
+
+ /* the memory region mmaped with VM_PFNMAP. */
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ pages[i] = pfn_to_page(pfn);
+ }
+
+ if (i != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ get_npages = get_user_pages(current, current->mm, start,
+ npages, 1, 1, pages, NULL);
+ get_npages = max(get_npages, 0);
+ if (get_npages != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ while (get_npages)
+ put_page(pages[--get_npages]);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ if (!vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(pages[i]);
+
+ /*
+ * undo the reference we took when populating
+ * the table.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ int nents;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with dma.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return nents;
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
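
The userptr helpers above pin user pages for DMA: a VM_PFNMAP region
(vma_is_io()) has no struct pages to pin, so pfns are resolved with
follow_pfn(); otherwise get_user_pages() takes one reference per page,
undone later by exynos_gem_put_pages_to_userptr(). A hedged usage
sketch (variable names assumed; get_user_pages() of this era requires
mmap_sem to be held):

	/* pages: caller-allocated array with room for npages entries */
	struct vm_area_struct *vma;
	int ret;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, userptr);
	if (!vma) {
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}
	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
	up_read(&current->mm->mmap_sem);
	if (ret)
		return ret;
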
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map pages.\n");
+ DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
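
For context, the caller exynos_drm_gem_fault() derives both arguments
from the fault itself; the surrounding lines are outside this hunk,
but presumably read:

	/* from exynos_drm_gem_fault(), not shown in this diff */
	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

so exynos_drm_gem_map_buf() receives a page offset relative to the
start of the mapping, matching the sg_table walk in the first hunk.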