/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

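/* GEM object teardown, called when the last reference to an object is
 * dropped: release any pin reference still held, drop the TTM buffer
 * reference, and free the GEM wrapper itself.
 */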
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;
	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

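/* Allocate a new buffer object and its GEM wrapper.  The GEM domain
 * flags requested by userspace are translated into TTM placement
 * flags; a request naming no recognised domain (or explicitly naming
 * the CPU domain) falls back to system memory.
 */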
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, NULL, size, align, flags, tile_mode,
			     tile_flags, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

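/* Fill in the userspace-visible info block for a buffer object: the
 * domain it currently lives in, its size, GPU offset, mmap handle and
 * tiling state.
 */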
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

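/* DRM_IOCTL_NOUVEAU_GEM_NEW: allocate a buffer object on behalf of a
 * client and return a GEM handle plus the filled-in info block.  A
 * rough userspace sketch (hypothetical code, via libdrm's generic
 * command wrapper):
 *
 *	struct drm_nouveau_gem_new req = { };
 *	req.info.size   = 4096;
 *	req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 *	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_NEW,
 *				  &req, sizeof(req));
 *	on success, req.info.handle names the new object
 */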
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
out:
	return ret;
}

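/* Work out a TTM placement for a buffer from the domains userspace
 * asked for.  Preference goes to wherever the buffer already resides,
 * to avoid needless migration; failing that, VRAM wins over GART.
 */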
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

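/* Buffers being validated are kept on one of three lists, depending
 * on which domains they are allowed to end up in.
 */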
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

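/* Unwind one validation list: fence each buffer (fence may be NULL on
 * error paths), drop any kmap set up for relocations, and release the
 * reservation and GEM reference taken in validate_init().
 */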
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

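/* Look up and reserve every buffer on the pushbuf's buffer list.  If
 * a reservation would deadlock (-EAGAIN), everything reserved so far
 * is backed off, we block until the contended buffer becomes free,
 * and the whole list is retried from scratch.
 */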
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}

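/* Validate every buffer on one list: sync with its previous user,
 * place it according to the requested domains, and copy the new
 * "presumed" offset/domain back to userspace whenever it changed so
 * that future submits can skip relocations.  Returns the number of
 * buffers whose presumed state was invalidated, or a negative error.
 */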
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
		ret = nouveau_bo_validate(nvbo, true, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (nvbo->bo.offset == b->presumed.offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed.offset = nvbo->bo.offset;
		b->presumed.valid = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
				     &b->presumed, sizeof(b->presumed)))
			return -EFAULT;
	}

	return relocs;
}

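/* Top-level validation: reserve the full buffer list, then validate
 * the VRAM-only, GART-only and either-domain lists in turn.
 * *apply_relocs is set to the number of buffers that need their
 * relocations re-applied.
 */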
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

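/* Copy a userspace array into a freshly kmalloc'd kernel buffer.  The
 * nmemb * size multiplication is not overflow-checked here, so callers
 * must bound nmemb first (as the pushbuf ioctl does with the
 * NOUVEAU_GEM_MAX_* limits).
 */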
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

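/* Apply userspace-supplied relocations to any buffer whose presumed
 * offset turned out to be wrong after validation.  Each reloc patches
 * one 32-bit word (low half, high half, or raw data, optionally OR'd
 * with a domain-dependent value) into a kmapped buffer, after waiting
 * for the GPU to be done with it.
 */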
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

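/* DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: the main command submission path.
 * After bounds-checking the request, copying in the push/buffer
 * arrays, validating the buffer list and applying relocations, the
 * push segments are submitted one of three ways: through the IB ring
 * (nv50_dma_push) when the channel has one, with CALL/RETURN-style
 * entries on >= nv25 chipsets, or by patching a JMP back into the
 * pushbuf on older parts.  A fence is then emitted to track
 * completion.
 */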
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Mark push buffers as being used on PFIFO, the validation code
	 * will then make sure that if the pushbuf bo moves, that they
	 * happen on the kernel channel, which will in turn cause a sync
	 * to happen before we try and submit the push buffer.
	 */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}

		bo[push[i].bo_index].read_domains |= (1 << 31);
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}

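/* Translate GEM domain flags into their TTM placement equivalents.
 * No caller remains in this file (and it is static), so this appears
 * to be dead code.
 */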
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

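/* DRM_IOCTL_NOUVEAU_GEM_CPU_PREP: wait (or poll, with the NOWAIT
 * flag) for the GPU to finish with a buffer before the CPU touches
 * it.
 */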
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

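/* DRM_IOCTL_NOUVEAU_GEM_CPU_FINI: nothing to do in this
 * implementation; the ioctl is retained as part of the ABI.
 */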
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}