/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"


/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

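/* Undoes get_pages(): for non-cached buffers the DMA mapping is torn down,
 * then the sg table and the shmem-backed pages are released.  Called with
 * dev->struct_mutex held (from the free path).
 */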
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}

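/* Pin the backing pages, taking struct_mutex around get_pages(); the
 * msm_gem_put_pages() counterpart below is a no-op until pin-count
 * tracking is added.
 */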
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

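/* Set up a userspace mapping of the object according to its cache flags:
 * write-combined and uncached mappings just adjust vm_page_prot, while
 * cached objects are shunted to the object's own shmem file (see the
 * comment below).
 */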
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

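/* Page-fault handler for the CPU mmap path: under struct_mutex, make sure
 * the backing pages exist and insert the pfn for the faulting address into
 * the VMA, one page per fault.
 */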
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		uint32_t offset = (uint32_t)mmap_offset(obj);
		struct page **pages;
		pages = get_pages(obj);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		// XXX ideally we would not map buffers writable when not needed...
		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
				obj->size, IOMMU_READ | IOMMU_WRITE);
		msm_obj->domain[id].iova = offset;
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

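/* Dumb-buffer support for KMS: derive pitch and size from width/bpp and
 * allocate a write-combined, scanout-capable buffer plus a handle for it.
 */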
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

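/* Report the fake mmap offset for a dumb-buffer handle.  Roughly, userspace
 * obtains this offset via DRM_IOCTL_MODE_MAP_DUMB and passes it to mmap()
 * on the drm fd; a hypothetical sketch (error handling omitted):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, req.offset);
 */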
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

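/* Return (and cache) a kernel virtual mapping of the object, created with
 * vmap() and write-combined page protection.  Caller must hold struct_mutex;
 * msm_gem_vaddr() below is the locking wrapper.
 */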
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

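/* Queue work to run once the object is no longer active on the GPU; if it
 * is already inactive the work is scheduled immediately.  The pending work
 * list is flushed from msm_gem_move_to_inactive().
 */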
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
		struct work_struct *work)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&work->entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		list_add_tail(&work->entry, &msm_obj->inactive_work);
	} else {
		queue_work(priv->wq, work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

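/* Called on submit to track GPU usage: remember the fence that retires this
 * access (write vs. read) and move the object onto the GPU's active list.
 * msm_gem_move_to_inactive() below undoes this once the fence has passed.
 */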
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	while (!list_empty(&msm_obj->inactive_work)) {
		struct work_struct *work;

		work = list_first_entry(&msm_obj->inactive_work,
				struct work_struct, entry);

		list_del_init(&work->entry);
		queue_work(priv->wq, work);
	}
}

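/* Prepare for CPU access: if the object is active on the GPU, wait for the
 * write fence before CPU reads (and additionally the read fence before CPU
 * writes), unless MSM_PREP_NOSYNC is set, in which case the timeout is
 * dropped.
 */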
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = 0;

		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

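/* Final teardown, called with struct_mutex held once the last reference is
 * dropped: unmap any per-id iommu ranges, drop the mmap offset, release the
 * kernel mapping and backing pages (or just the page array for imported
 * dmabufs), and free the object.
 */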
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

	} else {
		if (msm_obj->vaddr)
			vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

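/* Common initialization shared by msm_gem_new() and msm_gem_import():
 * validate the cache flags, allocate and initialize the msm_gem_object,
 * and park it on the inactive list.
 */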
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->inactive_work);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;	/* NULL so the fail: path is safe if _new_impl() fails */
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}

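/* Construct a GEM object around an imported dma-buf's sg_table.  The pages
 * array is rebuilt from the sgt so the fault and vmap paths above keep
 * working; the pages themselves stay owned by the exporter.
 */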
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;	/* NULL so the fail: path is safe if _new_impl() fails */
	int ret, npages;

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}