/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct change_domains {
        uint32_t invalidate_domains;
        uint32_t flush_domains;
        uint32_t flush_rings;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Mapped to GTT
 *      4. Read by GPU
 *      5. Unmapped from GTT
 *      6. Freed
 *
 *      Let's take these a step at a time
 *
 *      1. Allocated
 *              Pages allocated from the kernel may still have
 *              cache contents, so we set them to (CPU, CPU) always.
 *      2. Written by CPU (using pwrite)
 *              The pwrite function calls set_domain (CPU, CPU) and
 *              this function does nothing (as nothing changes)
 *      3. Mapped to GTT
 *              This function asserts that the object is not
 *              currently in any GPU-based read or write domains
 *      4. Read by GPU
 *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *              As write_domain is zero, this function adds in the
 *              current read domains (CPU+COMMAND, 0).
 *              flush_domains is set to CPU.
 *              invalidate_domains is set to COMMAND
 *              clflush is run to get data out of the CPU caches
 *              then i915_dev_set_domain calls i915_gem_flush to
 *              emit an MI_FLUSH and drm_agp_chipset_flush
 *      5. Unmapped from GTT
 *              i915_gem_object_unbind calls set_domain (CPU, CPU)
 *              flush_domains and invalidate_domains end up both zero
 *              so no flushing/invalidating happens
 *      6. Freed
 *              yay, done
 *
 * Case 2: The shared render buffer
 *
 *      1. Allocated
 *      2. Mapped to GTT
 *      3. Read/written by GPU
 *      4. set_domain to (CPU,CPU)
 *      5. Read/written by CPU
 *      6. Read/written by GPU
 *
 *      1. Allocated
 *              Same as last example, (CPU, CPU)
 *      2. Mapped to GTT
 *              Nothing changes (assertions find that it is not in the GPU)
 *      3. Read/written by GPU
 *              execbuffer calls set_domain (RENDER, RENDER)
 *              flush_domains gets CPU
 *              invalidate_domains gets GPU
 *              clflush (obj)
 *              MI_FLUSH and drm_agp_chipset_flush
 *      4. set_domain (CPU, CPU)
 *              flush_domains gets GPU
 *              invalidate_domains gets CPU
 *              wait_rendering (obj) to make sure all drawing is complete.
 *              This will include an MI_FLUSH to get the data from GPU
 *              to memory
 *              clflush (obj) to invalidate the CPU cache
 *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *      5. Read/written by CPU
 *              cache lines are loaded and dirtied
 *      6. Read/written by GPU
 *              Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Read by GPU
 *      4. Updated (written) by CPU again
 *      5. Read by GPU
 *
 *      1. Allocated
 *              (CPU, CPU)
 *      2. Written by CPU
 *              (CPU, CPU)
 *      3. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 *      4. Updated (written) by CPU again
 *              (CPU, CPU)
 *              flush_domains = 0 (no previous write domain)
 *              invalidate_domains = 0 (no new read domains)
 *      5. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
                                  struct intel_ring_buffer *ring,
                                  struct change_domains *cd)
{
        uint32_t invalidate_domains = 0, flush_domains = 0;

        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
        if (obj->base.pending_write_domain == 0)
                obj->base.pending_read_domains |= obj->base.read_domains;

        /*
         * Flush the current write domain if
         * the new read domains don't match. Invalidate
         * any read domains which differ from the old
         * write domain
         */
        if (obj->base.write_domain &&
            ((obj->base.write_domain != obj->base.pending_read_domains ||
              obj->ring != ring) ||
             (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
                flush_domains |= obj->base.write_domain;
                invalidate_domains |=
                        obj->base.pending_read_domains & ~obj->base.write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);

        /* blow away mappings if mapped through GTT */
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
                i915_gem_release_mmap(obj);

        /* The actual obj->write_domain will be updated with
         * pending_write_domain after we emit the accumulated flush for all
         * of our domain changes in execbuffers (which clears objects'
         * write_domains). So if we have a current write domain that we
         * aren't changing, set pending_write_domain to that.
         */
        if (flush_domains == 0 && obj->base.pending_write_domain == 0)
                obj->base.pending_write_domain = obj->base.write_domain;

        cd->invalidate_domains |= invalidate_domains;
        cd->flush_domains |= flush_domains;
        if (flush_domains & I915_GEM_GPU_DOMAINS)
                cd->flush_rings |= obj->ring->id;
        if (invalidate_domains & I915_GEM_GPU_DOMAINS)
                cd->flush_rings |= ring->id;
}

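/*
 * eb_objects is a small power-of-two sized hash table mapping execbuffer
 * handles to their objects, so that relocation processing can find a
 * target buffer without another drm_gem_object_lookup (and hence without
 * taking an extra reference). "and" holds the bucket mask (count - 1).
 */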
struct eb_objects {
        int and;
        struct hlist_head buckets[0];
};

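/*
 * Size the table to the largest power of two that does not exceed the
 * number of buffers, starting from a table occupying half a page.
 */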
static struct eb_objects *
eb_create(int size)
{
        struct eb_objects *eb;
        int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
        while (count > size)
                count >>= 1;
        eb = kzalloc(count*sizeof(struct hlist_head) +
                     sizeof(struct eb_objects),
                     GFP_KERNEL);
        if (eb == NULL)
                return eb;

        eb->and = count - 1;
        return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
        memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
        hlist_add_head(&obj->exec_node,
                       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct drm_i915_gem_object *obj;

        head = &eb->buckets[handle & eb->and];
        hlist_for_each(node, head) {
                obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
                if (obj->exec_handle == handle)
                        return obj;
        }

        return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
        kfree(eb);
}

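/*
 * Apply a single relocation: validate the target object and the requested
 * domains, then rewrite the dword at reloc->offset within @obj to hold the
 * target's GTT address plus reloc->delta, writing through either the CPU
 * page or the GTT aperture depending on the object's current write domain.
 */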
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_objects *eb,
                                   struct drm_i915_gem_exec_object2 *entry,
                                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        uint32_t target_offset;
        int ret = -EINVAL;

        /* we already hold a reference to all valid objects */
        target_obj = &eb_get_object(eb, reloc->target_handle)->base;
        if (unlikely(target_obj == NULL))
                return -ENOENT;

        target_offset = to_intel_bo(target_obj)->gtt_offset;

#if WATCH_RELOC
        DRM_INFO("%s: obj %p offset %08x target %d "
                 "read %08x write %08x gtt %08x "
                 "presumed %08x delta %08x\n",
                 __func__,
                 obj,
                 (int) reloc->offset,
                 (int) reloc->target_handle,
                 (int) reloc->read_domains,
                 (int) reloc->write_domain,
                 (int) target_offset,
                 (int) reloc->presumed_offset,
                 reloc->delta);
#endif

        /* The target buffer should have appeared before us in the
         * exec_object list, so it should have a GTT space bound by now.
         */
        if (target_offset == 0) {
                DRM_ERROR("No GTT space found for object %d\n",
                          reloc->target_handle);
                return ret;
        }

        /* Validate that the target is in a valid r/w GPU domain */
        if (reloc->write_domain & (reloc->write_domain - 1)) {
                DRM_ERROR("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }
        if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
            reloc->read_domains & I915_GEM_DOMAIN_CPU) {
                DRM_ERROR("reloc with read/write CPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }
        if (reloc->write_domain && target_obj->pending_write_domain &&
            reloc->write_domain != target_obj->pending_write_domain) {
                DRM_ERROR("Write domain conflict: "
                          "obj %p target %d offset %d "
                          "new %08x old %08x\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->write_domain,
                          target_obj->pending_write_domain);
                return ret;
        }

        target_obj->pending_read_domains |= reloc->read_domains;
        target_obj->pending_write_domain |= reloc->write_domain;

        /* If the relocation already has the right value in it, no
         * more work needs to be done.
         */
        if (target_offset == reloc->presumed_offset)
                return 0;

        /* Check that the relocation address is valid... */
        if (reloc->offset > obj->base.size - 4) {
                DRM_ERROR("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          (int) obj->base.size);
                return ret;
        }
        if (reloc->offset & 3) {
                DRM_ERROR("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
                return ret;
        }

        /* and points to somewhere within the target object. */
        if (reloc->delta >= target_obj->size) {
                DRM_ERROR("Relocation beyond target object bounds: "
                          "obj %p target %d delta %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->delta,
                          (int) target_obj->size);
                return ret;
        }

        reloc->delta += target_offset;
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
                uint32_t page_offset = reloc->offset & ~PAGE_MASK;
                char *vaddr;

                vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
                *(uint32_t *)(vaddr + page_offset) = reloc->delta;
                kunmap_atomic(vaddr);
        } else {
                struct drm_i915_private *dev_priv = dev->dev_private;
                uint32_t __iomem *reloc_entry;
                void __iomem *reloc_page;

                ret = i915_gem_object_set_to_gtt_domain(obj, 1);
                if (ret)
                        return ret;

                /* Map the page containing the relocation we're going to perform. */
                reloc->offset += obj->gtt_offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                      reloc->offset & PAGE_MASK);
                reloc_entry = (uint32_t __iomem *)
                        (reloc_page + (reloc->offset & ~PAGE_MASK));
                iowrite32(reloc->delta, reloc_entry);
                io_mapping_unmap_atomic(reloc_page);
        }

        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;

        return 0;
}

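/*
 * Fast path: walk an object's relocation list straight from the user
 * pointer using non-faulting copies. If the user pages are not resident
 * this returns -EFAULT and the caller falls back to the slow path.
 */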
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                                    struct eb_objects *eb,
                                    struct drm_i915_gem_exec_object2 *entry)
{
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        int i, ret;

        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
        for (i = 0; i < entry->relocation_count; i++) {
                struct drm_i915_gem_relocation_entry reloc;

                if (__copy_from_user_inatomic(&reloc,
                                              user_relocs+i,
                                              sizeof(reloc)))
                        return -EFAULT;

                ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
                if (ret)
                        return ret;

                if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
                                            &reloc.presumed_offset,
                                            sizeof(reloc.presumed_offset)))
                        return -EFAULT;
        }

        return 0;
}

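/*
 * Slow path variant: the relocation entries have already been copied
 * into kernel memory, so no user access (and hence no page fault) can
 * occur while struct_mutex is held.
 */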
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
                                         struct eb_objects *eb,
                                         struct drm_i915_gem_exec_object2 *entry,
                                         struct drm_i915_gem_relocation_entry *relocs)
{
        int i, ret;

        for (i = 0; i < entry->relocation_count; i++) {
                ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
                             struct eb_objects *eb,
                             struct list_head *objects,
                             struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_gem_object *obj;
        int ret;

        list_for_each_entry(obj, objects, exec_list) {
                obj->base.pending_read_domains = 0;
                obj->base.pending_write_domain = 0;
                ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct drm_file *file,
                            struct list_head *objects,
                            struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_gem_object *obj;
        struct drm_i915_gem_exec_object2 *entry;
        int ret, retry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;

        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
         *
         * 1a. Unbind all objects that do not match the GTT constraints for
         *     the execbuffer (fenceable, mappable, alignment etc).
         * 1b. Increment pin count for already bound objects.
         * 2.  Bind new objects.
         * 3.  Decrement pin count.
         *
         * This avoids unnecessary unbinding of later objects in order to make
         * room for the earlier objects *unless* we need to defragment.
         */
        retry = 0;
        do {
                ret = 0;

                /* Unbind any ill-fitting objects or pin. */
                entry = exec;
                list_for_each_entry(obj, objects, exec_list) {
                        bool need_fence, need_mappable;

                        if (!obj->gtt_space) {
                                entry++;
                                continue;
                        }

                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
                        need_mappable =
                                entry->relocation_count ? true : need_fence;

                        if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_gem_object_unbind(obj);
                        else
                                ret = i915_gem_object_pin(obj,
                                                          entry->alignment,
                                                          need_mappable);
                        if (ret)
                                goto err;

                        entry++;
                }

                /* Bind fresh objects */
                entry = exec;
                list_for_each_entry(obj, objects, exec_list) {
                        bool need_fence;

                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;

                        if (!obj->gtt_space) {
                                bool need_mappable =
                                        entry->relocation_count ? true : need_fence;

                                ret = i915_gem_object_pin(obj,
                                                          entry->alignment,
                                                          need_mappable);
                                if (ret)
                                        break;
                        }

                        if (has_fenced_gpu_access) {
                                if (need_fence) {
                                        ret = i915_gem_object_get_fence(obj, ring, 1);
                                        if (ret)
                                                break;
                                } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                           obj->tiling_mode == I915_TILING_NONE) {
                                        /* XXX pipelined! */
                                        ret = i915_gem_object_put_fence(obj);
                                        if (ret)
                                                break;
                                }
                                obj->pending_fenced_gpu_access = need_fence;
                        }

                        entry->offset = obj->gtt_offset;
                        entry++;
                }

                /* Decrement pin count for bound objects */
                list_for_each_entry(obj, objects, exec_list) {
                        if (obj->gtt_space)
                                i915_gem_object_unpin(obj);
                }

                if (ret != -ENOSPC || retry > 1)
                        return ret;

                /* First attempt, just clear anything that is purgeable.
                 * Second attempt, clear the entire GTT.
                 */
                ret = i915_gem_evict_everything(ring->dev, retry == 0);
                if (ret)
                        return ret;

                retry++;
        } while (1);

err:
        obj = list_entry(obj->exec_list.prev,
                         struct drm_i915_gem_object,
                         exec_list);
        while (objects != &obj->exec_list) {
                if (obj->gtt_space)
                        i915_gem_object_unpin(obj);

                obj = list_entry(obj->exec_list.prev,
                                 struct drm_i915_gem_object,
                                 exec_list);
        }

        return ret;
}

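/*
 * Fallback when the non-faulting relocation copies hit an unpopulated
 * user page: drop every object reference and struct_mutex, copy all
 * relocation lists into kernel memory (taking faults as needed), then
 * relock, re-look up and re-reserve the objects, and apply the
 * relocations from the kernel copy.
 */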
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
                                  struct list_head *objects,
                                  struct eb_objects *eb,
                                  struct drm_i915_gem_exec_object2 *exec,
                                  int count)
{
        struct drm_i915_gem_relocation_entry *reloc;
        struct drm_i915_gem_object *obj;
        int i, total, ret;

        /* We may process another execbuffer during the unlock... */
        while (!list_empty(objects)) {
                obj = list_first_entry(objects,
                                       struct drm_i915_gem_object,
                                       exec_list);
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

        total = 0;
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;

        reloc = drm_malloc_ab(total, sizeof(*reloc));
        if (reloc == NULL) {
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }

        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;

                user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
                        ret = -EFAULT;
                        mutex_lock(&dev->struct_mutex);
                        goto err;
                }

                total += exec[i].relocation_count;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                mutex_lock(&dev->struct_mutex);
                goto err;
        }

        /* reacquire the objects */
        INIT_LIST_HEAD(objects);
        eb_reset(eb);
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_object *obj;

                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (obj == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                  exec[i].handle, i);
                        ret = -ENOENT;
                        goto err;
                }

                list_add_tail(&obj->exec_list, objects);
                obj->exec_handle = exec[i].handle;
                eb_add_object(eb, obj);
        }

        ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
        if (ret)
                goto err;

        total = 0;
        list_for_each_entry(obj, objects, exec_list) {
                obj->base.pending_read_domains = 0;
                obj->base.pending_write_domain = 0;
                ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
                                                               exec,
                                                               reloc + total);
                if (ret)
                        goto err;

                total += exec->relocation_count;
                exec++;
        }

        /* Leave the user relocations as are, this is the painfully slow path,
         * and we want to avoid the complication of dropping the lock whilst
         * having buffers reserved in the aperture and so causing spurious
         * ENOSPC for random operations.
         */

err:
        drm_free_large(reloc);
        return ret;
}

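/*
 * Emit the flushes accumulated in change_domains: a chipset flush for
 * writes leaving the CPU domain, and a ring flush on each ring marked
 * in @flush_rings for GPU-domain transitions.
 */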
static void
i915_gem_execbuffer_flush(struct drm_device *dev,
                          uint32_t invalidate_domains,
                          uint32_t flush_domains,
                          uint32_t flush_rings)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                intel_gtt_chipset_flush();

        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                for (i = 0; i < I915_NUM_RINGS; i++)
                        if (flush_rings & (1 << i))
                                i915_gem_flush_ring(dev, &dev_priv->ring[i],
                                                    invalidate_domains,
                                                    flush_domains);
        }
}

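/*
 * Make sure rendering to @obj on its current ring completes before the
 * object is consumed by @to. On gen6+ this inserts a semaphore wait
 * against the object's last rendering seqno (emitting a request first if
 * that seqno is still only a lazy request); older generations have no
 * semaphores and simply block until rendering finishes.
 */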
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *to)
{
        struct intel_ring_buffer *from = obj->ring;
        u32 seqno;
        int ret, idx;

        if (from == NULL || to == from)
                return 0;

        if (INTEL_INFO(obj->base.dev)->gen < 6)
                return i915_gem_object_wait_rendering(obj, true);

        idx = intel_ring_sync_index(from, to);

        seqno = obj->last_rendering_seqno;
        if (seqno <= from->sync_seqno[idx])
                return 0;

        if (seqno == from->outstanding_lazy_request) {
                struct drm_i915_gem_request *request;

                request = kzalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;

                ret = i915_add_request(obj->base.dev, NULL, request, from);
                if (ret) {
                        kfree(request);
                        return ret;
                }

                seqno = request->seqno;
        }

        from->sync_seqno[idx] = seqno;
        return intel_ring_sync(to, from, seqno - 1);
}

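/*
 * Move every object on the execbuffer list into its pending GPU domains,
 * batching the required cache flushes and invalidations into a single
 * i915_gem_execbuffer_flush, then synchronise with any other rings still
 * rendering to the objects.
 */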
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *objects)
{
        struct drm_i915_gem_object *obj;
        struct change_domains cd;
        int ret;

        cd.invalidate_domains = 0;
        cd.flush_domains = 0;
        cd.flush_rings = 0;
        list_for_each_entry(obj, objects, exec_list)
                i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

        if (cd.invalidate_domains | cd.flush_domains) {
#if WATCH_EXEC
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
                         __func__,
                         cd.invalidate_domains,
                         cd.flush_domains);
#endif
                i915_gem_execbuffer_flush(ring->dev,
                                          cd.invalidate_domains,
                                          cd.flush_domains,
                                          cd.flush_rings);
        }

        list_for_each_entry(obj, objects, exec_list) {
                ret = i915_gem_execbuffer_sync_rings(obj, ring);
                if (ret)
                        return ret;
        }

        return 0;
}

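/* The batch start offset and length must both be 8-byte aligned. */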
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

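/*
 * Sanity-check the relocation lists up front: reject counts that would
 * overflow the length calculation, verify the user memory is readable
 * and writable (presumed offsets are written back), and prefault the
 * pages so the non-faulting fast path is likely to succeed.
 */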
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
{
        int i;

        for (i = 0; i < count; i++) {
                char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
                int length; /* limited by fault_in_pages_readable() */

                /* First check for malicious input causing overflow */
                if (exec[i].relocation_count >
                    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
                        return -EINVAL;

                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
                if (!access_ok(VERIFY_READ, ptr, length))
                        return -EFAULT;

                /* we may also need to update the presumed offsets */
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;

                if (fault_in_pages_readable(ptr, length))
                        return -EFAULT;
        }

        return 0;
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
                                   struct list_head *objects)
{
        struct drm_i915_gem_object *obj;
        int flips;

        /* Check for any pending flips. As we only maintain a flip queue depth
         * of 1, we can simply insert a WAIT for the next display flip prior
         * to executing the batch and avoid stalling the CPU.
         */
        flips = 0;
        list_for_each_entry(obj, objects, exec_list) {
                if (obj->base.write_domain)
                        flips |= atomic_read(&obj->pending_flip);
        }
        if (flips) {
                int plane, flip_mask, ret;

                for (plane = 0; flips >> plane; plane++) {
                        if (((flips >> plane) & 1) == 0)
                                continue;

                        if (plane)
                                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
                        else
                                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

                        ret = intel_ring_begin(ring, 2);
                        if (ret)
                                return ret;

                        intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
                        intel_ring_emit(ring, MI_NOOP);
                        intel_ring_advance(ring);
                }
        }

        return 0;
}

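/*
 * Commit the pending domains computed during relocation and mark every
 * object as active on @ring with the new request's seqno. Objects with a
 * GPU write domain are flagged dirty and queued on the ring's
 * gpu_write_list so the write is flushed before retirement.
 */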
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
                                   struct intel_ring_buffer *ring,
                                   u32 seqno)
{
        struct drm_i915_gem_object *obj;

        list_for_each_entry(obj, objects, exec_list) {
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->base.write_domain = obj->base.pending_write_domain;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

                i915_gem_object_move_to_active(obj, ring, seqno);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->pending_gpu_write = true;
                        list_move_tail(&obj->gpu_write_list,
                                       &ring->gpu_write_list);
                        intel_mark_busy(ring->dev, obj);
                }

                trace_i915_gem_object_change_domain(obj,
                                                    obj->base.read_domains,
                                                    obj->base.write_domain);
        }
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_request *request;
        u32 flush_domains;

        /*
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires.
         *
         * The sampler always gets flushed on i965 (sigh).
         */
        flush_domains = 0;
        if (INTEL_INFO(dev)->gen >= 4)
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;

        ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);

        /* Add a breadcrumb for the completion of the batch buffer */
        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL || i915_add_request(dev, file, request, ring)) {
                i915_gem_next_request_seqno(dev, ring);
                kfree(request);
        }
}

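/*
 * The core execbuffer path: validate the arguments, select a ring, look
 * up and reserve every object in the GTT, apply relocations (falling
 * back to the slow path on -EFAULT), flush caches and sync rings, then
 * dispatch the batch and queue a request to retire it.
 */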
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head objects;
        struct eb_objects *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
        u32 exec_start, exec_len;
        u32 seqno;
        int ret, i;

        if (!i915_gem_check_execbuffer(args)) {
                DRM_ERROR("execbuf with invalid offset/length\n");
                return -EINVAL;
        }

        ret = validate_exec_list(exec, args->buffer_count);
        if (ret)
                return ret;

#if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
                ring = &dev_priv->ring[RCS];
                break;
        case I915_EXEC_BSD:
                if (!HAS_BSD(dev)) {
                        DRM_ERROR("execbuf with invalid ring (BSD)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[VCS];
                break;
        case I915_EXEC_BLT:
                if (!HAS_BLT(dev)) {
                        DRM_ERROR("execbuf with invalid ring (BLT)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->ring[BCS];
                break;
        default:
                DRM_ERROR("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
                        DRM_ERROR("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }

                cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto pre_mutex_err;
                }

                if (copy_from_user(cliprects,
                                   (struct drm_clip_rect __user *)(uintptr_t)
                                   args->cliprects_ptr,
                                   sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;

        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }

        eb = eb_create(args->buffer_count);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        INIT_LIST_HEAD(&objects);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_i915_gem_object *obj;

                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (obj == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                  exec[i].handle, i);
                        /* prevent error path from reading uninitialized data */
                        ret = -ENOENT;
                        goto err;
                }

                if (!list_empty(&obj->exec_list)) {
                        DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
                                  obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto err;
                }

                list_add_tail(&obj->exec_list, &objects);
                obj->exec_handle = exec[i].handle;
                eb_add_object(eb, obj);
        }

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
                                                                &objects, eb,
                                                                exec,
                                                                args->buffer_count);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

        /* Set the pending read domains for the batch buffer to COMMAND */
        batch_obj = list_entry(objects.prev,
                               struct drm_i915_gem_object,
                               exec_list);
        if (batch_obj->base.pending_write_domain) {
                DRM_ERROR("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
        if (ret)
                goto err;

        ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
        if (ret)
                goto err;

        seqno = i915_gem_next_request_seqno(dev, ring);
        for (i = 0; i < I915_NUM_RINGS-1; i++) {
                if (seqno < ring->sync_seqno[i]) {
                        /* The GPU can not handle its semaphore value wrapping,
                         * so every billion or so execbuffers, we need to stall
                         * the GPU in order to reset the counters.
                         */
                        ret = i915_gpu_idle(dev);
                        if (ret)
                                goto err;

                        BUG_ON(ring->sync_seqno[i]);
                }
        }

        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto err;

                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len);
                        if (ret)
                                goto err;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
                if (ret)
                        goto err;
        }

        i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
        i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
        eb_destroy(eb);
        while (!list_empty(&objects)) {
                struct drm_i915_gem_object *obj;

                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       exec_list);
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        kfree(cliprects);
        return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;

#if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        /* Copy in the exec list from userland */
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_ERROR("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
                             (struct drm_i915_relocation_entry __user *)
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_ERROR("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        for (i = 0; i < args->buffer_count; i++) {
                exec2_list[i].handle = exec_list[i].handle;
                exec2_list[i].relocation_count = exec_list[i].relocation_count;
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
                if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
        }

        exec2.buffers_ptr = args->buffers_ptr;
        exec2.buffer_count = args->buffer_count;
        exec2.batch_start_offset = args->batch_start_offset;
        exec2.batch_len = args->batch_len;
        exec2.DR1 = args->DR1;
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;

        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
                        exec_list[i].offset = exec2_list[i].offset;
                /* ... and back out to userspace */
                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
                                   (uintptr_t) args->buffers_ptr,
                                   exec_list,
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_ERROR("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;

#if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec2_list == NULL) {
                DRM_ERROR("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
                             (struct drm_i915_relocation_entry __user *)
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_ERROR("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
                                   (uintptr_t) args->buffers_ptr,
                                   exec2_list,
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_ERROR("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec2_list);
        return ret;
}