/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

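/* physical address of a VRAM-carveout backed object (non-IOMMU path) */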
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (iommu_present(&platform_bus_type))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (iommu_present(&platform_bus_type))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

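/* like get_pages(), but takes struct_mutex for callers that do not hold it */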
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

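/* set up vma flags and page protection based on the buffer's cache flags */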
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

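/* fault handler: make sure backing pages exist and map the faulting page into the vma */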
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

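/* like msm_gem_get_iova_locked(), but takes struct_mutex if the mapping doesn't exist yet */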
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

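/* allocate a dumb (scanout-capable, write-combined) buffer for KMS */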
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

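/* map the object into kernel address space; caller must hold struct_mutex */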
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* setup callback for when bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint32_t fence = msm_gem_fence(msm_obj,
			MSM_PREP_READ | MSM_PREP_WRITE);
	return msm_queue_fence_cb(obj->dev, cb, fence);
}

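/* move bo to the gpu's active list and record the fence it is busy until */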
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

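/* wait for any pending gpu access (per op) before the cpu touches the buffer */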
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = msm_gem_fence(msm_obj, op);

		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

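/* called with struct_mutex held: tear down iova mappings, backing pages and the object */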
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = (uint32_t)mmap_offset(obj);
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

	} else {
		if (msm_obj->vaddr)
			vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

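/* common object setup shared by msm_gem_new() and msm_gem_import() */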
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	sz = sizeof(*msm_obj);
	if (!iommu_present(&platform_bus_type))
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (!iommu_present(&platform_bus_type))
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	if (iommu_present(&platform_bus_type)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

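/* wrap an imported dma-buf's sg_table in a GEM object (IOMMU only) */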
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;	/* initialized so the fail path is safe */
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}