/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

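/*
 * Editorial sketch (not from the original source): i915_gem_do_init()
 * rejects any GTT range that is not page-aligned.  With a 4 KiB
 * PAGE_SIZE, a hypothetical caller would see:
 *
 *	i915_gem_do_init(dev, 0x100000, 0x8000000);	returns 0 (1 MiB..128 MiB)
 *	i915_gem_do_init(dev, 0x100800, 0x8000000);	returns -EINVAL (unaligned start)
 */
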
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_handle_unreference_unlocked(obj);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

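/*
 * Illustrative userspace sketch (editorial, not part of the driver):
 * creating a 4 KiB buffer through the ioctl above.  The struct and
 * request names come from i915_drm.h; "fd" is a hypothetical open DRM fd
 * and "use_handle" a hypothetical consumer.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 */
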
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}

static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}

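/*
 * Worked example (editorial, derived from the loop above): with
 * gpu_offset == 0 and length == 256, the copy walks 64-byte cachelines
 * and XORs each GPU-side cacheline address with 64 (bit 6), so the
 * pairs are 0<->64, 64<->0, 128<->192 and 192<->128.  Combined with the
 * A9/A10 swizzling userspace already applies, this undoes the A6^A17
 * term for pages whose physical bit 17 is set.
 */
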
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages, so we can copy out of the object's backing pages while
 * holding the struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = to_intel_bo(obj);

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

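/*
 * Illustrative userspace sketch (editorial, not part of the driver):
 * reading the first page of an object through the pread ioctl above.
 * "fd", "handle" and "buf" are hypothetical.
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uint64_t)(uintptr_t)buf,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */
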
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

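/*
 * Illustrative userspace sketch (editorial, not part of the driver): the
 * write mirror of the pread example above.  Whether the phys, GTT or
 * shmem path services the call is decided entirely by the handler above;
 * userspace sees a single interface.  "fd", "handle" and "buf" are
 * hypothetical.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uint64_t)(uintptr_t)buf,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */
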
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj_priv->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

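/*
 * Illustrative userspace sketch (editorial, not part of the driver):
 * moving an object to the CPU domain for reading and writing before
 * touching it through an mmap ioctl mapping.  "fd" and "handle" are
 * hypothetical; the domain flags are the ones from i915_drm.h.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */
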
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = to_intel_bo(obj);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

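/*
 * Illustrative userspace sketch (editorial, not part of the driver):
 * mapping an object's shmem backing through the ioctl above.  "fd",
 * "handle" and "size" are hypothetical.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = size,
 *	};
 *	void *ptr = NULL;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */
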
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			goto unlock;
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

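/*
 * Editorial note on the remap above: the inserted PTE points into the
 * GTT aperture, not at the shmem pages.  With a hypothetical AGP base of
 * 0xd0000000, a buffer bound at GTT offset 0x20000, and a fault on the
 * buffer's third page (page_offset == 2), the pfn handed to
 * vm_insert_pfn() is ((0xd0000000 + 0x20000) >> PAGE_SHIFT) + 2,
 * mirroring the arithmetic in i915_gem_fault().
 */
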
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}

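/*
 * Worked example (editorial): for a 3 MiB tiled object on an i9xx chip,
 * the loop above starts at 1 MiB and doubles to 4 MiB, the smallest
 * power-of-two fence size that covers the object; an i8xx chip would
 * start the search at 512 KiB instead, while a 965 or any untiled
 * object always gets plain 4 KiB page alignment.
 */
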
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

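/*
 * Illustrative userspace sketch (editorial, not part of the driver): the
 * fake offset returned above is passed to a regular mmap(2) on the DRM
 * fd, which lands in drm_gem_mmap() and ultimately i915_gem_fault().
 * "fd", "handle" and "size" are hypothetical.
 *
 *	struct drm_i915_gem_mmap_gtt map = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, map.offset);
 */
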
Ben Gamari6911a9b2009-04-02 11:24:54 -07001450void
Eric Anholt856fa192009-03-19 14:10:50 -07001451i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001452{
Daniel Vetter23010e42010-03-08 13:35:02 +01001453 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001454 int page_count = obj->size / PAGE_SIZE;
1455 int i;
1456
Eric Anholt856fa192009-03-19 14:10:50 -07001457 BUG_ON(obj_priv->pages_refcount == 0);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001458 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001459
1460 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001461 return;
1462
Eric Anholt280b7132009-03-12 16:56:27 -07001463 if (obj_priv->tiling_mode != I915_TILING_NONE)
1464 i915_gem_object_save_bit_17_swizzle(obj);
1465
Chris Wilson3ef94da2009-09-14 16:50:29 +01001466 if (obj_priv->madv == I915_MADV_DONTNEED)
Chris Wilson13a05fd2009-09-20 23:03:19 +01001467 obj_priv->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001468
1469 for (i = 0; i < page_count; i++) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01001470 if (obj_priv->dirty)
1471 set_page_dirty(obj_priv->pages[i]);
1472
1473 if (obj_priv->madv == I915_MADV_WILLNEED)
Eric Anholt856fa192009-03-19 14:10:50 -07001474 mark_page_accessed(obj_priv->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001475
1476 page_cache_release(obj_priv->pages[i]);
1477 }
Eric Anholt673a3942008-07-30 12:06:12 -07001478 obj_priv->dirty = 0;
1479
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001480 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001481 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001482}
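
/*
 * Added note: this is the release pair of i915_gem_object_get_pages()
 * below; the page array and its page references live until the last
 * holder drops pages_refcount.  A minimal sketch of the pairing:
 *
 *	ret = i915_gem_object_get_pages(obj, 0);	pin the page list
 *	... use obj_priv->pages[i] ...
 *	i915_gem_object_put_pages(obj);			unpin
 */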

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct inode *inode;

	inode = obj->filp->f_path.dentry->d_inode;
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	obj_priv->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj_priv->gpu_write_list));

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

static void
i915_gem_process_flushing_list(struct drm_device *dev,
			       uint32_t flush_domains, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv, *next;

	list_for_each_entry_safe(obj_priv, next,
				 &dev_priv->mm.gpu_write_list,
				 gpu_write_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if ((obj->write_domain & flush_domains) ==
		    obj->write_domain) {
			uint32_t old_write_domain = obj->write_domain;

			obj->write_domain = 0;
			list_del_init(&obj_priv->gpu_write_list);
			i915_gem_object_move_to_active(obj, seqno);

			/* update the fence lru list */
			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
				struct drm_i915_fence_reg *reg =
					&dev_priv->fence_regs[obj_priv->fence_reg];
				list_move_tail(&reg->lru_list,
					       &dev_priv->mm.fence_list);
			}

			trace_i915_gem_object_change_domain(obj,
							    obj->read_domains,
							    old_write_domain);
		}
	}
}

uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	seqno = i915_ring_add_request(dev);

	DRM_DEBUG_DRIVER("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);
	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0)
		i915_gem_process_flushing_list(dev, flush_domains, seqno);

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		if (was_empty)
			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}
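
/*
 * Hedged sketch (illustrative, mirrors callers elsewhere in this file):
 * a request is normally emitted right after a flush so that the objects
 * being flushed can be tied to the new seqno, e.g.
 *
 *	i915_gem_flush(dev, obj->write_domain, obj->write_domain);
 *	seqno = i915_add_request(dev, NULL, obj->write_domain);
 *
 * A zero return means the request could not be allocated, not a seqno.
 */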

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_request_retire(dev, request->seqno);

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = &obj_priv->base;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held.  The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}

/**
 * Returns true if seq1 is later than seq2.
 */
bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
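
/*
 * Worked example (added note): the cast makes the comparison safe across
 * 32-bit wraparound, e.g. i915_seqno_passed(0x00000002, 0xfffffffe)
 * computes (int32_t)0x4 >= 0 and correctly reports that seq1 is later,
 * even though it is numerically smaller.
 */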

uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    atomic_read(&dev_priv->mm.wedged)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}

	if (unlikely(dev_priv->trace_irq_seqno &&
		     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		i915_user_irq_put(dev);
		dev_priv->trace_irq_seqno = 0;
	}
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		if (HAS_PCH_SPLIT(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		if (interruptible)
			ret = wait_event_interruptible(dev_priv->irq_queue,
				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
				atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(dev_priv->irq_queue,
				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
				atomic_read(&dev_priv->mm.wedged));

		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
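
/*
 * Added note (assumption based on the code above): interruptible waits are
 * for user-triggered paths, where -ERESTARTSYS lets the syscall be
 * restarted after a signal; modesetting paths pass interruptible=0 (see
 * i915_gem_object_set_to_display_plane() below) because they cannot back
 * out halfway through.
 */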

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	return i915_do_wait_request(dev, seqno, 1);
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it.  This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	BUG_ON(obj_priv->active);

	/* release the fence reg _after_ flushing */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	i915_gem_object_put_pages(obj);
	BUG_ON(obj_priv->pages_refcount);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	spin_lock(&dev_priv->mm.active_list_lock);
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (i915_gem_object_is_purgeable(obj_priv))
		i915_gem_object_truncate(obj);

	trace_i915_gem_object_unbind(obj);

	return 0;
}
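
/*
 * Added summary comment (derived from the code above): teardown ordering
 * matters here; the CPU-domain flush comes first (it also waits for the
 * GPU), then the fence register is dropped, then the AGP binding, then the
 * page list, and only then the GTT node, so no stage can observe state a
 * later stage has already destroyed.
 */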

static struct drm_gem_object *
i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *best = NULL;
	struct drm_gem_object *first = NULL;

	/* Try to find the smallest clean object */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		struct drm_gem_object *obj = &obj_priv->base;
		if (obj->size >= min_size) {
			if ((!obj_priv->dirty ||
			     i915_gem_object_is_purgeable(obj_priv)) &&
			    (!best || obj->size < best->size)) {
				best = obj;
				if (best->size == min_size)
					return best;
			}
			if (!first)
				first = obj;
		}
	}

	return best ? best : first;
}

static int
i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	uint32_t seqno;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return 0;

	/* Flush everything onto the inactive list. */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
	if (seqno == 0)
		return -ENOMEM;

	return i915_wait_request(dev, seqno);
}
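
/*
 * Illustrative note (not in the original): i915_gpu_idle() is the
 * flush + request + wait pattern in its purest form; callers such as
 * i915_gem_evict_everything() below rely on it leaving the flushing and
 * active lists empty so that everything can be evicted from inactive.
 */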

static int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	ret = i915_gem_evict_from_inactive_list(dev);
	if (ret)
		return ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

	return 0;
}

static int
i915_gem_evict_something(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	for (;;) {
		i915_gem_retire_requests(dev);

		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		obj = i915_gem_find_inactive_object(dev, min_size);
		if (obj) {
			struct drm_i915_gem_object *obj_priv;

#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			obj_priv = to_intel_bo(obj);
			BUG_ON(obj_priv->pin_count != 0);
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			return i915_gem_object_unbind(obj);
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for the next request to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				return ret;

			continue;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			struct drm_i915_gem_object *obj_priv;

			/* Find an object that we can immediately reuse */
			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
				obj = &obj_priv->base;
				if (obj->size >= min_size)
					break;

				obj = NULL;
			}

			if (obj != NULL) {
				uint32_t seqno;

				i915_gem_flush(dev,
					       obj->write_domain,
					       obj->write_domain);
				seqno = i915_add_request(dev, NULL, obj->write_domain);
				if (seqno == 0)
					return -ENOMEM;
				continue;
			}
		}

		/* If we didn't do any of the above, there's no single buffer
		 * large enough to swap out for the new one, so just evict
		 * everything and start again.  (This should be rare.)
		 */
		if (!list_empty(&dev_priv->mm.inactive_list))
			return i915_gem_evict_from_inactive_list(dev);
		else
			return i915_gem_evict_everything(dev);
	}
}

int
i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   mapping_gfp_mask(mapping) |
					   __GFP_COLD |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj_priv->pages[i]);

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
	obj_priv->pages_refcount--;
	return PTR_ERR(page);
}
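
/*
 * Hedged note on the gfpmask parameter (inferred from the caller below):
 * i915_gem_object_bind_to_gtt() first tries
 *
 *	i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 *
 * so that allocation failure comes back quietly as -ENOMEM, evicts
 * something, and only retries with gfpmask == 0 (letting the allocator
 * try harder) once the GTT has been thinned out.
 */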

static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
		SANDYBRIDGE_FENCE_PITCH_SHIFT;

	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}

static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
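
/*
 * Worked example (added note, values hypothetical): for a 1M object at
 * GTT offset 0x00200000 with a 512-byte X-tiled stride, the high dword
 * gets the last fenced page,
 *	(0x00200000 + 0x100000 - 4096) & 0xfffff000 = 0x002ff000,
 * the low dword gets the start 0x00200000, and the pitch field is
 * 512/128 - 1 = 3 shifted into place, ORed with the valid bit.
 */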

static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
	else
		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

static int i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *obj_priv = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int i, avail, ret;

	/* First try to find a free reg */
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return i;

		obj_priv = to_intel_bo(reg->obj);
		if (!obj_priv->pin_count)
			avail++;
	}

	if (avail == 0)
		return -ENOSPC;

	/* None available, try to steal one or wait for a user to finish */
	i = I915_FENCE_REG_NONE;
	list_for_each_entry(reg, &dev_priv->mm.fence_list,
			    lru_list) {
		obj = reg->obj;
		obj_priv = to_intel_bo(obj);

		if (obj_priv->pin_count)
			continue;

		/* found one! */
		i = obj_priv->fence_reg;
		break;
	}

	BUG_ON(i == I915_FENCE_REG_NONE);

	/* We only have a reference on obj from the active list. put_fence_reg
	 * might drop that one, causing a use-after-free in it. So hold a
	 * private reference to obj like the other callers of put_fence_reg
	 * (set_tiling ioctl) do. */
	drm_gem_object_reference(obj);
	ret = i915_gem_object_put_fence_reg(obj);
	drm_gem_object_unreference(obj);
	if (ret != 0)
		return ret;

	return i;
}

/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg = NULL;
	int ret;

	/* Just update our place in the LRU if our fence is getting used. */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
		return 0;
	}

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	ret = i915_find_fence_reg(dev);
	if (ret < 0)
		return ret;

	obj_priv->fence_reg = ret;
	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);

	reg->obj = obj;

	if (IS_GEN6(dev))
		sandybridge_write_fence_reg(reg);
	else if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
					obj_priv->tiling_mode);

	return 0;
}
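
/*
 * Added note (assumption about the platform macros): the else-if chain
 * above tests the newest generation first -- GEN6, then 965, then 9xx,
 * then 830 -- because the older checks also match newer hardware (e.g.
 * IS_I965G() is true on GEN6 parts), so the dispatch order is
 * load-bearing.
 */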

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg =
		&dev_priv->fence_regs[obj_priv->fence_reg];

	if (IS_GEN6(dev)) {
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
			     (obj_priv->fence_reg * 8), 0);
	} else if (IS_I965G(dev)) {
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	} else {
		uint32_t fence_reg;

		if (obj_priv->fence_reg < 8)
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
		else
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
						       8) * 4;

		I915_WRITE(fence_reg, 0);
	}

	reg->obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&reg->lru_list);
}

/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* If we've changed tiling, GTT-mappings of the object
	 * need to re-fault to ensure that the correct fence register
	 * setup is in place.
	 */
	i915_gem_release_mmap(obj);

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	if (!IS_I965G(dev)) {
		int ret;

		i915_gem_object_flush_gpu_write_domain(obj);
		ret = i915_gem_object_wait_rendering(obj);
		if (ret != 0)
			return ret;
	}

	i915_gem_object_flush_gtt_write_domain(obj);
	i915_gem_clear_fence_reg(obj);

	return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	int ret;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %zd at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_pages(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to clear up some space from the GTT */
			ret = i915_gem_evict_something(dev, obj->size);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return ret;
			}

			goto search_free;
		}

		return ret;
	}

	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       obj->size >> PAGE_SHIFT,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		goto search_free;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);

	return 0;
}
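
/*
 * Added note (derived from the retry labels above): the search_free loop
 * has three back-edges -- no GTT hole, page allocation failure, and AGP
 * bind failure -- and each one evicts before retrying, so forward progress
 * relies on i915_gem_evict_something() either freeing space or failing.
 */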

void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	old_write_domain = obj->write_domain;
	i915_gem_flush(dev, 0, obj->write_domain);
	(void) i915_add_request(dev, NULL, obj->write_domain);
	BUG_ON(obj->write_domain);

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

void
i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
{
	switch (obj->write_domain) {
	case I915_GEM_DOMAIN_GTT:
		i915_gem_object_flush_gtt_write_domain(obj);
		break;
	case I915_GEM_DOMAIN_CPU:
		i915_gem_object_flush_cpu_write_domain(obj);
		break;
	default:
		i915_gem_object_flush_gpu_write_domain(obj);
		break;
	}
}
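
/*
 * Added summary (derived from the three helpers above): the flush paths
 * differ in cost -- GPU domains need a ring flush plus a request, the CPU
 * domain needs a clflush plus chipset flush, and the GTT domain needs only
 * bookkeeping because GTT writes go straight to memory.
 */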

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
2793
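/*
 * Illustrative sketch (compiled out, not part of the driver): the typical
 * calling pattern for i915_gem_object_set_to_gtt_domain() above.  A write
 * path pins the buffer so it is bound to the GTT, moves it to the GTT
 * domain with write=1 so stale CPU/GPU caches are dealt with, performs the
 * write, then unpins.  The function name and alignment are hypothetical.
 */
#if 0
static int example_write_through_gtt(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret == 0) {
		/* ... write through the GTT aperture mapping here ... */
	}

	i915_gem_object_unpin(obj);
	return ret;
}
#endif
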
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002794/*
 2795 * Prepare buffer for display plane. Use an uninterruptible wait for any
 2796 * flush, as in the modesetting process we're not supposed to be interrupted.
2797 */
2798int
2799i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2800{
2801 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002802 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002803 uint32_t old_write_domain, old_read_domains;
2804 int ret;
2805
2806 /* Not valid to be called on unbound objects. */
2807 if (obj_priv->gtt_space == NULL)
2808 return -EINVAL;
2809
2810 i915_gem_object_flush_gpu_write_domain(obj);
2811
2812 /* Wait on any GPU rendering and flushing to occur. */
2813 if (obj_priv->active) {
2814#if WATCH_BUF
2815 DRM_INFO("%s: object %p wait for seqno %08x\n",
2816 __func__, obj, obj_priv->last_rendering_seqno);
2817#endif
2818 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
2819 if (ret != 0)
2820 return ret;
2821 }
2822
2823 old_write_domain = obj->write_domain;
2824 old_read_domains = obj->read_domains;
2825
2826 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2827
2828 i915_gem_object_flush_cpu_write_domain(obj);
2829
2830 /* It should now be out of any other write domains, and we can update
2831 * the domain values for our changes.
2832 */
2833 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2834 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2835 obj->write_domain = I915_GEM_DOMAIN_GTT;
2836 obj_priv->dirty = 1;
2837
2838 trace_i915_gem_object_change_domain(obj,
2839 old_read_domains,
2840 old_write_domain);
2841
2842 return 0;
2843}
2844
Eric Anholte47c68e2008-11-14 13:35:19 -08002845/**
2846 * Moves a single object to the CPU read, and possibly write domain.
2847 *
2848 * This function returns when the move is complete, including waiting on
2849 * flushes to occur.
2850 */
2851static int
2852i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2853{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002854 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002855 int ret;
2856
2857 i915_gem_object_flush_gpu_write_domain(obj);
2858 /* Wait on any GPU rendering and flushing to occur. */
2859 ret = i915_gem_object_wait_rendering(obj);
2860 if (ret != 0)
2861 return ret;
2862
2863 i915_gem_object_flush_gtt_write_domain(obj);
2864
2865 /* If we have a partially-valid cache of the object in the CPU,
2866 * finish invalidating it and free the per-page flags.
2867 */
2868 i915_gem_object_set_to_full_cpu_read_domain(obj);
2869
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002870 old_write_domain = obj->write_domain;
2871 old_read_domains = obj->read_domains;
2872
Eric Anholte47c68e2008-11-14 13:35:19 -08002873 /* Flush the CPU cache if it's still invalid. */
2874 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2875 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002876
2877 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2878 }
2879
2880 /* It should now be out of any other write domains, and we can update
2881 * the domain values for our changes.
2882 */
2883 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2884
2885 /* If we're writing through the CPU, then the GPU read domains will
2886 * need to be invalidated at next use.
2887 */
2888 if (write) {
2889 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2890 obj->write_domain = I915_GEM_DOMAIN_CPU;
2891 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002892
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002893 trace_i915_gem_object_change_domain(obj,
2894 old_read_domains,
2895 old_write_domain);
2896
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002897 return 0;
2898}
2899
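/*
 * Illustrative sketch (compiled out): reading an object's backing pages
 * with the CPU.  Moving the object to the CPU read domain first waits on
 * any GPU rendering and invalidates the CPU cache if it was stale, so a
 * subsequent read through a kernel mapping sees current data.  The
 * function name is hypothetical.
 */
#if 0
static int example_cpu_read(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, 0);
	if (ret)
		return ret;

	/* obj->read_domains now includes I915_GEM_DOMAIN_CPU; reading
	 * the pages via kmap() is coherent from here on.
	 */
	return 0;
}
#endif
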
Eric Anholt673a3942008-07-30 12:06:12 -07002900/*
2901 * Set the next domain for the specified object. This
 2902 * may not actually perform the necessary flushing/invalidating though,
 2903 * as that may want to be batched with other set_domain operations.
2904 *
2905 * This is (we hope) the only really tricky part of gem. The goal
2906 * is fairly simple -- track which caches hold bits of the object
2907 * and make sure they remain coherent. A few concrete examples may
2908 * help to explain how it works. For shorthand, we use the notation
 2909 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 2910 * a pair of read and write domain masks.
2911 *
2912 * Case 1: the batch buffer
2913 *
2914 * 1. Allocated
2915 * 2. Written by CPU
2916 * 3. Mapped to GTT
2917 * 4. Read by GPU
2918 * 5. Unmapped from GTT
2919 * 6. Freed
2920 *
2921 * Let's take these a step at a time
2922 *
2923 * 1. Allocated
2924 * Pages allocated from the kernel may still have
2925 * cache contents, so we set them to (CPU, CPU) always.
2926 * 2. Written by CPU (using pwrite)
2927 * The pwrite function calls set_domain (CPU, CPU) and
2928 * this function does nothing (as nothing changes)
 2929 * 3. Mapped to GTT
2930 * This function asserts that the object is not
2931 * currently in any GPU-based read or write domains
2932 * 4. Read by GPU
2933 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2934 * As write_domain is zero, this function adds in the
2935 * current read domains (CPU+COMMAND, 0).
2936 * flush_domains is set to CPU.
2937 * invalidate_domains is set to COMMAND
2938 * clflush is run to get data out of the CPU caches
2939 * then i915_dev_set_domain calls i915_gem_flush to
2940 * emit an MI_FLUSH and drm_agp_chipset_flush
2941 * 5. Unmapped from GTT
2942 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2943 * flush_domains and invalidate_domains end up both zero
2944 * so no flushing/invalidating happens
2945 * 6. Freed
2946 * yay, done
2947 *
2948 * Case 2: The shared render buffer
2949 *
2950 * 1. Allocated
2951 * 2. Mapped to GTT
2952 * 3. Read/written by GPU
2953 * 4. set_domain to (CPU,CPU)
2954 * 5. Read/written by CPU
2955 * 6. Read/written by GPU
2956 *
2957 * 1. Allocated
2958 * Same as last example, (CPU, CPU)
2959 * 2. Mapped to GTT
2960 * Nothing changes (assertions find that it is not in the GPU)
2961 * 3. Read/written by GPU
2962 * execbuffer calls set_domain (RENDER, RENDER)
2963 * flush_domains gets CPU
2964 * invalidate_domains gets GPU
2965 * clflush (obj)
2966 * MI_FLUSH and drm_agp_chipset_flush
2967 * 4. set_domain (CPU, CPU)
2968 * flush_domains gets GPU
2969 * invalidate_domains gets CPU
2970 * wait_rendering (obj) to make sure all drawing is complete.
2971 * This will include an MI_FLUSH to get the data from GPU
2972 * to memory
2973 * clflush (obj) to invalidate the CPU cache
2974 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2975 * 5. Read/written by CPU
2976 * cache lines are loaded and dirtied
 2977 * 6. Read/written by GPU
2978 * Same as last GPU access
2979 *
2980 * Case 3: The constant buffer
2981 *
2982 * 1. Allocated
2983 * 2. Written by CPU
2984 * 3. Read by GPU
2985 * 4. Updated (written) by CPU again
2986 * 5. Read by GPU
2987 *
2988 * 1. Allocated
2989 * (CPU, CPU)
2990 * 2. Written by CPU
2991 * (CPU, CPU)
2992 * 3. Read by GPU
2993 * (CPU+RENDER, 0)
2994 * flush_domains = CPU
2995 * invalidate_domains = RENDER
2996 * clflush (obj)
2997 * MI_FLUSH
2998 * drm_agp_chipset_flush
2999 * 4. Updated (written) by CPU again
3000 * (CPU, CPU)
3001 * flush_domains = 0 (no previous write domain)
3002 * invalidate_domains = 0 (no new read domains)
3003 * 5. Read by GPU
3004 * (CPU+RENDER, 0)
3005 * flush_domains = CPU
3006 * invalidate_domains = RENDER
3007 * clflush (obj)
3008 * MI_FLUSH
3009 * drm_agp_chipset_flush
3010 */
Keith Packardc0d90822008-11-20 23:11:08 -08003011static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08003012i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003013{
3014 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01003015 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003016 uint32_t invalidate_domains = 0;
3017 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003018 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003019
Eric Anholt8b0e3782009-02-19 14:40:50 -08003020 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3021 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07003022
Jesse Barnes652c3932009-08-17 13:31:43 -07003023 intel_mark_busy(dev, obj);
3024
Eric Anholt673a3942008-07-30 12:06:12 -07003025#if WATCH_BUF
3026 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3027 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08003028 obj->read_domains, obj->pending_read_domains,
3029 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003030#endif
3031 /*
3032 * If the object isn't moving to a new write domain,
3033 * let the object stay in multiple read domains
3034 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003035 if (obj->pending_write_domain == 0)
3036 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003037 else
3038 obj_priv->dirty = 1;
3039
3040 /*
3041 * Flush the current write domain if
3042 * the new read domains don't match. Invalidate
3043 * any read domains which differ from the old
3044 * write domain
3045 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003046 if (obj->write_domain &&
3047 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07003048 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003049 invalidate_domains |=
3050 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003051 }
3052 /*
3053 * Invalidate any read caches which may have
3054 * stale data. That is, any new read domains.
3055 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003056 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003057 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3058#if WATCH_BUF
3059 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3060 __func__, flush_domains, invalidate_domains);
3061#endif
Eric Anholt673a3942008-07-30 12:06:12 -07003062 i915_gem_clflush_object(obj);
3063 }
3064
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003065 old_read_domains = obj->read_domains;
3066
Eric Anholtefbeed92009-02-19 14:54:51 -08003067 /* The actual obj->write_domain will be updated with
3068 * pending_write_domain after we emit the accumulated flush for all
3069 * of our domain changes in execbuffers (which clears objects'
3070 * write_domains). So if we have a current write domain that we
3071 * aren't changing, set pending_write_domain to that.
3072 */
3073 if (flush_domains == 0 && obj->pending_write_domain == 0)
3074 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003075 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003076
3077 dev->invalidate_domains |= invalidate_domains;
3078 dev->flush_domains |= flush_domains;
3079#if WATCH_BUF
3080 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3081 __func__,
3082 obj->read_domains, obj->write_domain,
3083 dev->invalidate_domains, dev->flush_domains);
3084#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003085
3086 trace_i915_gem_object_change_domain(obj,
3087 old_read_domains,
3088 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003089}
3090
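/*
 * Illustrative sketch (compiled out): the flush/invalidate arithmetic
 * performed above, worked for step 3 of "Case 3: The constant buffer"
 * from the comment -- an object sitting in (CPU, CPU) about to be read
 * by the render engine.
 */
#if 0
static void example_domain_arithmetic(void)
{
	uint32_t read_domains = I915_GEM_DOMAIN_CPU;
	uint32_t write_domain = I915_GEM_DOMAIN_CPU;
	uint32_t pending_read = I915_GEM_DOMAIN_RENDER;
	uint32_t invalidate = 0, flush = 0;

	/* No new write domain, so the old read domains are kept. */
	pending_read |= read_domains;		/* RENDER | CPU */

	/* The old write domain differs from the new read set: flush it. */
	if (write_domain && write_domain != pending_read) {
		flush |= write_domain;				/* CPU */
		invalidate |= pending_read & ~write_domain;	/* RENDER */
	}

	/* Any read domain we did not already hold must be invalidated. */
	invalidate |= pending_read & ~read_domains;		/* RENDER */

	/* flush == CPU leads to clflush; invalidate == RENDER leads to
	 * MI_FLUSH and a chipset flush, exactly as the case walkthrough
	 * describes.
	 */
	(void)invalidate;
	(void)flush;
}
#endif
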
3091/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003092 * Moves the object from a partially valid CPU read domain to a fully valid one.
Eric Anholt673a3942008-07-30 12:06:12 -07003093 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003094 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3095 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3096 */
3097static void
3098i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3099{
Daniel Vetter23010e42010-03-08 13:35:02 +01003100 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003101
3102 if (!obj_priv->page_cpu_valid)
3103 return;
3104
3105 /* If we're partially in the CPU read domain, finish moving it in.
3106 */
3107 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3108 int i;
3109
3110 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3111 if (obj_priv->page_cpu_valid[i])
3112 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003113 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003114 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003115 }
3116
3117 /* Free the page_cpu_valid mappings which are now stale, whether
3118 * or not we've got I915_GEM_DOMAIN_CPU.
3119 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003120 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003121 obj_priv->page_cpu_valid = NULL;
3122}
3123
3124/**
3125 * Set the CPU read domain on a range of the object.
3126 *
3127 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3128 * not entirely valid. The page_cpu_valid member of the object flags which
3129 * pages have been flushed, and will be respected by
3130 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3131 * of the whole object.
3132 *
3133 * This function returns when the move is complete, including waiting on
3134 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003135 */
3136static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003137i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3138 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003139{
Daniel Vetter23010e42010-03-08 13:35:02 +01003140 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003141 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003142 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003143
Eric Anholte47c68e2008-11-14 13:35:19 -08003144 if (offset == 0 && size == obj->size)
3145 return i915_gem_object_set_to_cpu_domain(obj, 0);
3146
3147 i915_gem_object_flush_gpu_write_domain(obj);
3148 /* Wait on any GPU rendering and flushing to occur. */
3149 ret = i915_gem_object_wait_rendering(obj);
3150 if (ret != 0)
3151 return ret;
3152 i915_gem_object_flush_gtt_write_domain(obj);
3153
3154 /* If we're already fully in the CPU read domain, we're done. */
3155 if (obj_priv->page_cpu_valid == NULL &&
3156 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003157 return 0;
3158
Eric Anholte47c68e2008-11-14 13:35:19 -08003159 /* Otherwise, create/clear the per-page CPU read domain flag if we're
 3160 * newly adding I915_GEM_DOMAIN_CPU.
3161 */
Eric Anholt673a3942008-07-30 12:06:12 -07003162 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003163 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3164 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003165 if (obj_priv->page_cpu_valid == NULL)
3166 return -ENOMEM;
3167 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3168 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003169
3170 /* Flush the cache on any pages that are still invalid from the CPU's
3171 * perspective.
3172 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003173 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3174 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003175 if (obj_priv->page_cpu_valid[i])
3176 continue;
3177
Eric Anholt856fa192009-03-19 14:10:50 -07003178 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003179
3180 obj_priv->page_cpu_valid[i] = 1;
3181 }
3182
Eric Anholte47c68e2008-11-14 13:35:19 -08003183 /* It should now be out of any other write domains, and we can update
3184 * the domain values for our changes.
3185 */
3186 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3187
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003188 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003189 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3190
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003191 trace_i915_gem_object_change_domain(obj,
3192 old_read_domains,
3193 obj->write_domain);
3194
Eric Anholt673a3942008-07-30 12:06:12 -07003195 return 0;
3196}
3197
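/*
 * Illustrative sketch (compiled out): how a partial read path (such as a
 * small pread) avoids clflushing the whole object by validating only the
 * pages it touches through the range interface above.  The function name
 * is hypothetical.
 */
#if 0
static int example_partial_cpu_read(struct drm_gem_object *obj,
				    uint64_t offset, uint64_t size)
{
	int ret;

	/* Marks just the pages covering [offset, offset + size) as valid
	 * in page_cpu_valid, clflushing only those pages.
	 */
	ret = i915_gem_object_set_cpu_read_domain_range(obj, offset, size);
	if (ret)
		return ret;

	/* ... copy the range out page by page via kmap() ... */
	return 0;
}
#endif
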
3198/**
Eric Anholt673a3942008-07-30 12:06:12 -07003199 * Pin an object to the GTT and evaluate the relocations landing in it.
3200 */
3201static int
3202i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3203 struct drm_file *file_priv,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003204 struct drm_i915_gem_exec_object2 *entry,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003205 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07003206{
3207 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07003208 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003209 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003210 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07003211 void __iomem *reloc_page;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003212 bool need_fence;
3213
3214 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3215 obj_priv->tiling_mode != I915_TILING_NONE;
3216
3217 /* Check fence reg constraints and rebind if necessary */
Owain Ainsworthf590d272010-02-18 15:33:00 +00003218 if (need_fence && !i915_gem_object_fence_offset_ok(obj,
3219 obj_priv->tiling_mode))
Jesse Barnes76446ca2009-12-17 22:05:42 -05003220 i915_gem_object_unbind(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003221
3222 /* Choose the GTT offset for our buffer and put it there. */
3223 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3224 if (ret)
3225 return ret;
3226
Jesse Barnes76446ca2009-12-17 22:05:42 -05003227 /*
3228 * Pre-965 chips need a fence register set up in order to
3229 * properly handle blits to/from tiled surfaces.
3230 */
3231 if (need_fence) {
3232 ret = i915_gem_object_get_fence_reg(obj);
3233 if (ret != 0) {
3234 if (ret != -EBUSY && ret != -ERESTARTSYS)
3235 DRM_ERROR("Failure to install fence: %d\n",
3236 ret);
3237 i915_gem_object_unpin(obj);
3238 return ret;
3239 }
3240 }
3241
Eric Anholt673a3942008-07-30 12:06:12 -07003242 entry->offset = obj_priv->gtt_offset;
3243
Eric Anholt673a3942008-07-30 12:06:12 -07003244 /* Apply the relocations, using the GTT aperture to avoid cache
3245 * flushing requirements.
3246 */
3247 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003248		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003249 struct drm_gem_object *target_obj;
3250 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07003251 uint32_t reloc_val, reloc_offset;
3252 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07003253
Eric Anholt673a3942008-07-30 12:06:12 -07003254 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003255 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003256 if (target_obj == NULL) {
3257 i915_gem_object_unpin(obj);
3258 return -EBADF;
3259 }
Daniel Vetter23010e42010-03-08 13:35:02 +01003260 target_obj_priv = to_intel_bo(target_obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003261
Chris Wilson8542a0b2009-09-09 21:15:15 +01003262#if WATCH_RELOC
3263 DRM_INFO("%s: obj %p offset %08x target %d "
3264 "read %08x write %08x gtt %08x "
3265 "presumed %08x delta %08x\n",
3266 __func__,
3267 obj,
3268 (int) reloc->offset,
3269 (int) reloc->target_handle,
3270 (int) reloc->read_domains,
3271 (int) reloc->write_domain,
3272 (int) target_obj_priv->gtt_offset,
3273 (int) reloc->presumed_offset,
3274 reloc->delta);
3275#endif
3276
Eric Anholt673a3942008-07-30 12:06:12 -07003277 /* The target buffer should have appeared before us in the
3278 * exec_object list, so it should have a GTT space bound by now.
3279 */
3280 if (target_obj_priv->gtt_space == NULL) {
3281 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003282 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003283 drm_gem_object_unreference(target_obj);
3284 i915_gem_object_unpin(obj);
3285 return -EINVAL;
3286 }
3287
Chris Wilson8542a0b2009-09-09 21:15:15 +01003288 /* Validate that the target is in a valid r/w GPU domain */
Daniel Vetter16edd552010-02-19 11:52:02 +01003289 if (reloc->write_domain & (reloc->write_domain - 1)) {
3290 DRM_ERROR("reloc with multiple write domains: "
3291 "obj %p target %d offset %d "
3292 "read %08x write %08x",
3293 obj, reloc->target_handle,
3294 (int) reloc->offset,
3295 reloc->read_domains,
3296 reloc->write_domain);
 3297			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
3298 }
Chris Wilson8542a0b2009-09-09 21:15:15 +01003299 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3300 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3301 DRM_ERROR("reloc with read/write CPU domains: "
3302 "obj %p target %d offset %d "
3303 "read %08x write %08x",
3304 obj, reloc->target_handle,
3305 (int) reloc->offset,
3306 reloc->read_domains,
3307 reloc->write_domain);
3308 drm_gem_object_unreference(target_obj);
3309 i915_gem_object_unpin(obj);
3310 return -EINVAL;
3311 }
3312 if (reloc->write_domain && target_obj->pending_write_domain &&
3313 reloc->write_domain != target_obj->pending_write_domain) {
3314 DRM_ERROR("Write domain conflict: "
3315 "obj %p target %d offset %d "
3316 "new %08x old %08x\n",
3317 obj, reloc->target_handle,
3318 (int) reloc->offset,
3319 reloc->write_domain,
3320 target_obj->pending_write_domain);
3321 drm_gem_object_unreference(target_obj);
3322 i915_gem_object_unpin(obj);
3323 return -EINVAL;
3324 }
3325
3326 target_obj->pending_read_domains |= reloc->read_domains;
3327 target_obj->pending_write_domain |= reloc->write_domain;
3328
3329 /* If the relocation already has the right value in it, no
3330 * more work needs to be done.
3331 */
3332 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3333 drm_gem_object_unreference(target_obj);
3334 continue;
3335 }
3336
3337 /* Check that the relocation address is valid... */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003338 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07003339 DRM_ERROR("Relocation beyond object bounds: "
3340 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003341 obj, reloc->target_handle,
3342 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07003343 drm_gem_object_unreference(target_obj);
3344 i915_gem_object_unpin(obj);
3345 return -EINVAL;
3346 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003347 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07003348 DRM_ERROR("Relocation not 4-byte aligned: "
3349 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003350 obj, reloc->target_handle,
3351 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003352 drm_gem_object_unreference(target_obj);
3353 i915_gem_object_unpin(obj);
3354 return -EINVAL;
3355 }
3356
Chris Wilson8542a0b2009-09-09 21:15:15 +01003357 /* and points to somewhere within the target object. */
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003358 if (reloc->delta >= target_obj->size) {
3359 DRM_ERROR("Relocation beyond target object bounds: "
3360 "obj %p target %d delta %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003361 obj, reloc->target_handle,
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003362 (int) reloc->delta, (int) target_obj->size);
Chris Wilson491152b2009-02-11 14:26:32 +00003363 drm_gem_object_unreference(target_obj);
3364 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003365 return -EINVAL;
3366 }
3367
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003368 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3369 if (ret != 0) {
3370 drm_gem_object_unreference(target_obj);
3371 i915_gem_object_unpin(obj);
 3372			return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003373 }
3374
3375 /* Map the page containing the relocation we're going to
3376 * perform.
3377 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003378 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003379 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3380 (reloc_offset &
3381 ~(PAGE_SIZE - 1)));
Eric Anholt3043c602008-10-02 12:24:47 -07003382 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003383 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003384 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003385
3386#if WATCH_BUF
3387 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003388 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003389 readl(reloc_entry), reloc_val);
3390#endif
3391 writel(reloc_val, reloc_entry);
Keith Packard0839ccb2008-10-30 19:38:48 -07003392 io_mapping_unmap_atomic(reloc_page);
Eric Anholt673a3942008-07-30 12:06:12 -07003393
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003394 /* The updated presumed offset for this entry will be
3395 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003396 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003397 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003398
3399 drm_gem_object_unreference(target_obj);
3400 }
3401
Eric Anholt673a3942008-07-30 12:06:12 -07003402#if WATCH_BUF
3403 if (0)
3404 i915_gem_dump_object(obj, 128, __func__, ~0);
3405#endif
3406 return 0;
3407}
3408
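/*
 * Illustrative sketch (compiled out): the core arithmetic of the
 * relocation loop above.  The value patched into the batch is the
 * target's final GTT offset plus the constant delta, and it is written
 * reloc->offset bytes into the object being relocated.
 */
#if 0
static uint32_t example_reloc_value(struct drm_i915_gem_object *target_obj_priv,
				    struct drm_i915_gem_relocation_entry *reloc)
{
	/* Written through the GTT at obj_priv->gtt_offset + reloc->offset. */
	return target_obj_priv->gtt_offset + reloc->delta;
}
#endif
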
Eric Anholt673a3942008-07-30 12:06:12 -07003409/* Throttle our rendering by waiting until the ring has completed our requests
3410 * emitted over 20 msec ago.
3411 *
Eric Anholtb9624422009-06-03 07:27:35 +00003412 * Note that if we were to use the current jiffies each time around the loop,
3413 * we wouldn't escape the function with any frames outstanding if the time to
3414 * render a frame was over 20ms.
3415 *
Eric Anholt673a3942008-07-30 12:06:12 -07003416 * This should get us reasonable parallelism between CPU and GPU but also
3417 * relatively low latency when blocking on a particular request to finish.
3418 */
3419static int
3420i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3421{
3422 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3423 int ret = 0;
Eric Anholtb9624422009-06-03 07:27:35 +00003424 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Eric Anholt673a3942008-07-30 12:06:12 -07003425
3426 mutex_lock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003427 while (!list_empty(&i915_file_priv->mm.request_list)) {
3428 struct drm_i915_gem_request *request;
3429
3430 request = list_first_entry(&i915_file_priv->mm.request_list,
3431 struct drm_i915_gem_request,
3432 client_list);
3433
3434 if (time_after_eq(request->emitted_jiffies, recent_enough))
3435 break;
3436
3437 ret = i915_wait_request(dev, request->seqno);
3438 if (ret != 0)
3439 break;
3440 }
Eric Anholt673a3942008-07-30 12:06:12 -07003441 mutex_unlock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003442
Eric Anholt673a3942008-07-30 12:06:12 -07003443 return ret;
3444}
3445
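/*
 * Illustrative sketch (compiled out): the fixed cut-off used by the
 * throttle above.  recent_enough is computed once, so requests emitted
 * after the cut-off are left outstanding instead of being chased with a
 * fresh jiffies read on every loop iteration.
 */
#if 0
static int example_request_is_recent(struct drm_i915_gem_request *request,
				     unsigned long recent_enough)
{
	return time_after_eq(request->emitted_jiffies, recent_enough);
}
#endif
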
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003446static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003447i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003448 uint32_t buffer_count,
3449 struct drm_i915_gem_relocation_entry **relocs)
3450{
3451 uint32_t reloc_count = 0, reloc_index = 0, i;
3452 int ret;
3453
3454 *relocs = NULL;
3455 for (i = 0; i < buffer_count; i++) {
3456 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3457 return -EINVAL;
3458 reloc_count += exec_list[i].relocation_count;
3459 }
3460
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003461 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Jesse Barnes76446ca2009-12-17 22:05:42 -05003462 if (*relocs == NULL) {
3463 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003464 return -ENOMEM;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003465 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003466
3467 for (i = 0; i < buffer_count; i++) {
3468 struct drm_i915_gem_relocation_entry __user *user_relocs;
3469
3470 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3471
3472 ret = copy_from_user(&(*relocs)[reloc_index],
3473 user_relocs,
3474 exec_list[i].relocation_count *
3475 sizeof(**relocs));
3476 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003477 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003478 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003479 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003480 }
3481
3482 reloc_index += exec_list[i].relocation_count;
3483 }
3484
Florian Mickler2bc43b52009-04-06 22:55:41 +02003485 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003486}
3487
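/*
 * Illustrative sketch (compiled out): the unsigned-overflow guard used
 * when totalling the relocation counts above.  If adding a count wraps
 * the 32-bit sum, the result compares smaller than the running total.
 */
#if 0
static int example_sum_would_overflow(uint32_t total, uint32_t count)
{
	return total + count < total;
}
#endif
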
3488static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003489i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003490 uint32_t buffer_count,
3491 struct drm_i915_gem_relocation_entry *relocs)
3492{
3493 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003494 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003495
Chris Wilson93533c22010-01-31 10:40:48 +00003496 if (relocs == NULL)
3497 return 0;
3498
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003499 for (i = 0; i < buffer_count; i++) {
3500 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003501 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003502
3503 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3504
Florian Mickler2bc43b52009-04-06 22:55:41 +02003505 unwritten = copy_to_user(user_relocs,
3506 &relocs[reloc_count],
3507 exec_list[i].relocation_count *
3508 sizeof(*relocs));
3509
3510 if (unwritten) {
3511 ret = -EFAULT;
3512 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003513 }
3514
3515 reloc_count += exec_list[i].relocation_count;
3516 }
3517
Florian Mickler2bc43b52009-04-06 22:55:41 +02003518err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003519 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003520
3521 return ret;
3522}
3523
Chris Wilson83d60792009-06-06 09:45:57 +01003524static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003525i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
Chris Wilson83d60792009-06-06 09:45:57 +01003526 uint64_t exec_offset)
3527{
3528 uint32_t exec_start, exec_len;
3529
3530 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3531 exec_len = (uint32_t) exec->batch_len;
3532
3533 if ((exec_start | exec_len) & 0x7)
3534 return -EINVAL;
3535
3536 if (!exec_start)
3537 return -EINVAL;
3538
3539 return 0;
3540}
3541
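/*
 * Illustrative sketch (compiled out): values that pass the batch sanity
 * check above.  Both the start and length must be multiples of 8 and the
 * start must be non-zero; the numbers here are made up.
 */
#if 0
static void example_batch_check(void)
{
	uint32_t exec_start = 0x10000 + 0x40; /* offset + batch_start_offset */
	uint32_t exec_len = 0x1000;

	BUG_ON((exec_start | exec_len) & 0x7);
	BUG_ON(!exec_start);
}
#endif
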
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003542static int
3543i915_gem_wait_for_pending_flip(struct drm_device *dev,
3544 struct drm_gem_object **object_list,
3545 int count)
3546{
3547 drm_i915_private_t *dev_priv = dev->dev_private;
3548 struct drm_i915_gem_object *obj_priv;
3549 DEFINE_WAIT(wait);
3550 int i, ret = 0;
3551
3552 for (;;) {
3553 prepare_to_wait(&dev_priv->pending_flip_queue,
3554 &wait, TASK_INTERRUPTIBLE);
3555 for (i = 0; i < count; i++) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003556 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003557 if (atomic_read(&obj_priv->pending_flip) > 0)
3558 break;
3559 }
3560 if (i == count)
3561 break;
3562
3563 if (!signal_pending(current)) {
3564 mutex_unlock(&dev->struct_mutex);
3565 schedule();
3566 mutex_lock(&dev->struct_mutex);
3567 continue;
3568 }
3569 ret = -ERESTARTSYS;
3570 break;
3571 }
3572 finish_wait(&dev_priv->pending_flip_queue, &wait);
3573
3574 return ret;
3575}
3576
Eric Anholt673a3942008-07-30 12:06:12 -07003577int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003578i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3579 struct drm_file *file_priv,
3580 struct drm_i915_gem_execbuffer2 *args,
3581 struct drm_i915_gem_exec_object2 *exec_list)
Eric Anholt673a3942008-07-30 12:06:12 -07003582{
3583 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003584 struct drm_gem_object **object_list = NULL;
3585 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003586 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003587 struct drm_clip_rect *cliprects = NULL;
Chris Wilson93533c22010-01-31 10:40:48 +00003588 struct drm_i915_gem_relocation_entry *relocs = NULL;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003589 int ret = 0, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003590 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003591 uint32_t seqno, flush_domains, reloc_index;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003592 int pin_tries, flips;
Eric Anholt673a3942008-07-30 12:06:12 -07003593
3594#if WATCH_EXEC
3595 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3596 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3597#endif
3598
Eric Anholt4f481ed2008-09-10 14:22:49 -07003599 if (args->buffer_count < 1) {
3600 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3601 return -EINVAL;
3602 }
Eric Anholtc8e0f932009-11-22 03:49:37 +01003603 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003604 if (object_list == NULL) {
3605 DRM_ERROR("Failed to allocate object list for %d buffers\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003606 args->buffer_count);
3607 ret = -ENOMEM;
3608 goto pre_mutex_err;
3609 }
Eric Anholt673a3942008-07-30 12:06:12 -07003610
Eric Anholt201361a2009-03-11 12:30:04 -07003611 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003612 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3613 GFP_KERNEL);
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003614 if (cliprects == NULL) {
3615 ret = -ENOMEM;
Eric Anholt201361a2009-03-11 12:30:04 -07003616 goto pre_mutex_err;
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003617 }
Eric Anholt201361a2009-03-11 12:30:04 -07003618
3619 ret = copy_from_user(cliprects,
3620 (struct drm_clip_rect __user *)
3621 (uintptr_t) args->cliprects_ptr,
3622 sizeof(*cliprects) * args->num_cliprects);
3623 if (ret != 0) {
3624 DRM_ERROR("copy %d cliprects failed: %d\n",
3625 args->num_cliprects, ret);
3626 goto pre_mutex_err;
3627 }
3628 }
3629
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003630 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3631 &relocs);
3632 if (ret != 0)
3633 goto pre_mutex_err;
3634
Eric Anholt673a3942008-07-30 12:06:12 -07003635 mutex_lock(&dev->struct_mutex);
3636
3637 i915_verify_inactive(dev, __FILE__, __LINE__);
3638
Ben Gamariba1234d2009-09-14 17:48:47 -04003639 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003640 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003641 ret = -EIO;
3642 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003643 }
3644
3645 if (dev_priv->mm.suspended) {
Eric Anholt673a3942008-07-30 12:06:12 -07003646 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003647 ret = -EBUSY;
3648 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003649 }
3650
Keith Packardac94a962008-11-20 23:30:27 -08003651 /* Look up object handles */
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003652 flips = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003653 for (i = 0; i < args->buffer_count; i++) {
3654 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3655 exec_list[i].handle);
3656 if (object_list[i] == NULL) {
3657 DRM_ERROR("Invalid object handle %d at index %d\n",
3658 exec_list[i].handle, i);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003659 /* prevent error path from reading uninitialized data */
3660 args->buffer_count = i + 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003661 ret = -EBADF;
3662 goto err;
3663 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003664
Daniel Vetter23010e42010-03-08 13:35:02 +01003665 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003666 if (obj_priv->in_execbuffer) {
3667 DRM_ERROR("Object %p appears more than once in object list\n",
3668 object_list[i]);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003669 /* prevent error path from reading uninitialized data */
3670 args->buffer_count = i + 1;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003671 ret = -EBADF;
3672 goto err;
3673 }
3674 obj_priv->in_execbuffer = true;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003675 flips += atomic_read(&obj_priv->pending_flip);
3676 }
3677
3678 if (flips > 0) {
3679 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3680 args->buffer_count);
3681 if (ret)
3682 goto err;
Keith Packardac94a962008-11-20 23:30:27 -08003683 }
Eric Anholt673a3942008-07-30 12:06:12 -07003684
Keith Packardac94a962008-11-20 23:30:27 -08003685 /* Pin and relocate */
3686 for (pin_tries = 0; ; pin_tries++) {
3687 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003688 reloc_index = 0;
3689
Keith Packardac94a962008-11-20 23:30:27 -08003690 for (i = 0; i < args->buffer_count; i++) {
3691 object_list[i]->pending_read_domains = 0;
3692 object_list[i]->pending_write_domain = 0;
3693 ret = i915_gem_object_pin_and_relocate(object_list[i],
3694 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003695 &exec_list[i],
3696 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003697 if (ret)
3698 break;
3699 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003700 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003701 }
3702 /* success */
3703 if (ret == 0)
3704 break;
3705
3706 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003707 if (ret != -ENOSPC || pin_tries >= 1) {
Chris Wilson07f73f62009-09-14 16:50:30 +01003708 if (ret != -ERESTARTSYS) {
3709 unsigned long long total_size = 0;
3710 for (i = 0; i < args->buffer_count; i++)
3711 total_size += object_list[i]->size;
3712 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3713 pinned+1, args->buffer_count,
3714 total_size, ret);
3715 DRM_ERROR("%d objects [%d pinned], "
3716 "%d object bytes [%d pinned], "
3717 "%d/%d gtt bytes\n",
3718 atomic_read(&dev->object_count),
3719 atomic_read(&dev->pin_count),
3720 atomic_read(&dev->object_memory),
3721 atomic_read(&dev->pin_memory),
3722 atomic_read(&dev->gtt_memory),
3723 dev->gtt_total);
3724 }
Eric Anholt673a3942008-07-30 12:06:12 -07003725 goto err;
3726 }
Keith Packardac94a962008-11-20 23:30:27 -08003727
3728 /* unpin all of our buffers */
3729 for (i = 0; i < pinned; i++)
3730 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003731 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003732
3733 /* evict everyone we can from the aperture */
3734 ret = i915_gem_evict_everything(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01003735 if (ret && ret != -ENOSPC)
Keith Packardac94a962008-11-20 23:30:27 -08003736 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003737 }
3738
3739 /* Set the pending read domains for the batch buffer to COMMAND */
3740 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003741 if (batch_obj->pending_write_domain) {
3742 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3743 ret = -EINVAL;
3744 goto err;
3745 }
3746 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003747
Chris Wilson83d60792009-06-06 09:45:57 +01003748 /* Sanity check the batch buffer, prior to moving objects */
3749 exec_offset = exec_list[args->buffer_count - 1].offset;
 3750	ret = i915_gem_check_execbuffer(args, exec_offset);
3751 if (ret != 0) {
3752 DRM_ERROR("execbuf with invalid offset/length\n");
3753 goto err;
3754 }
3755
Eric Anholt673a3942008-07-30 12:06:12 -07003756 i915_verify_inactive(dev, __FILE__, __LINE__);
3757
Keith Packard646f0f62008-11-20 23:23:03 -08003758 /* Zero the global flush/invalidate flags. These
3759 * will be modified as new domains are computed
3760 * for each object
3761 */
3762 dev->invalidate_domains = 0;
3763 dev->flush_domains = 0;
3764
Eric Anholt673a3942008-07-30 12:06:12 -07003765 for (i = 0; i < args->buffer_count; i++) {
3766 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003767
Keith Packard646f0f62008-11-20 23:23:03 -08003768 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003769 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003770 }
3771
3772 i915_verify_inactive(dev, __FILE__, __LINE__);
3773
Keith Packard646f0f62008-11-20 23:23:03 -08003774 if (dev->invalidate_domains | dev->flush_domains) {
3775#if WATCH_EXEC
3776 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3777 __func__,
3778 dev->invalidate_domains,
3779 dev->flush_domains);
3780#endif
3781 i915_gem_flush(dev,
3782 dev->invalidate_domains,
3783 dev->flush_domains);
Daniel Vetter99fcb762010-02-07 16:20:18 +01003784 if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
Eric Anholtb9624422009-06-03 07:27:35 +00003785 (void)i915_add_request(dev, file_priv,
3786 dev->flush_domains);
Keith Packard646f0f62008-11-20 23:23:03 -08003787 }
Eric Anholt673a3942008-07-30 12:06:12 -07003788
Eric Anholtefbeed92009-02-19 14:54:51 -08003789 for (i = 0; i < args->buffer_count; i++) {
3790 struct drm_gem_object *obj = object_list[i];
Daniel Vetter23010e42010-03-08 13:35:02 +01003791 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003792 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003793
3794 obj->write_domain = obj->pending_write_domain;
Daniel Vetter99fcb762010-02-07 16:20:18 +01003795 if (obj->write_domain)
3796 list_move_tail(&obj_priv->gpu_write_list,
3797 &dev_priv->mm.gpu_write_list);
3798 else
3799 list_del_init(&obj_priv->gpu_write_list);
3800
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003801 trace_i915_gem_object_change_domain(obj,
3802 obj->read_domains,
3803 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003804 }
3805
Eric Anholt673a3942008-07-30 12:06:12 -07003806 i915_verify_inactive(dev, __FILE__, __LINE__);
3807
3808#if WATCH_COHERENCY
3809 for (i = 0; i < args->buffer_count; i++) {
3810 i915_gem_object_check_coherency(object_list[i],
3811 exec_list[i].handle);
3812 }
3813#endif
3814
Eric Anholt673a3942008-07-30 12:06:12 -07003815#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003816 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003817 args->batch_len,
3818 __func__,
3819 ~0);
3820#endif
3821
Eric Anholt673a3942008-07-30 12:06:12 -07003822 /* Exec the batchbuffer */
Eric Anholt201361a2009-03-11 12:30:04 -07003823 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003824 if (ret) {
3825 DRM_ERROR("dispatch failed %d\n", ret);
3826 goto err;
3827 }
3828
3829 /*
3830 * Ensure that the commands in the batch buffer are
3831 * finished before the interrupt fires
3832 */
3833 flush_domains = i915_retire_commands(dev);
3834
3835 i915_verify_inactive(dev, __FILE__, __LINE__);
3836
3837 /*
3838 * Get a seqno representing the execution of the current buffer,
3839 * which we can wait on. We would like to mitigate these interrupts,
3840 * likely by only creating seqnos occasionally (so that we have
3841 * *some* interrupts representing completion of buffers that we can
3842 * wait on when trying to clear up gtt space).
3843 */
Eric Anholtb9624422009-06-03 07:27:35 +00003844 seqno = i915_add_request(dev, file_priv, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07003845 BUG_ON(seqno == 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003846 for (i = 0; i < args->buffer_count; i++) {
3847 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003848
Eric Anholtce44b0e2008-11-06 16:00:31 -08003849 i915_gem_object_move_to_active(obj, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07003850#if WATCH_LRU
3851 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3852#endif
3853 }
3854#if WATCH_LRU
3855 i915_dump_lru(dev, __func__);
3856#endif
3857
3858 i915_verify_inactive(dev, __FILE__, __LINE__);
3859
Eric Anholt673a3942008-07-30 12:06:12 -07003860err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003861 for (i = 0; i < pinned; i++)
3862 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003863
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003864 for (i = 0; i < args->buffer_count; i++) {
3865 if (object_list[i]) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003866 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003867 obj_priv->in_execbuffer = false;
3868 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003869 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003870 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003871
Eric Anholt673a3942008-07-30 12:06:12 -07003872 mutex_unlock(&dev->struct_mutex);
3873
Chris Wilson93533c22010-01-31 10:40:48 +00003874pre_mutex_err:
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003875 /* Copy the updated relocations out regardless of current error
3876 * state. Failure to update the relocs would mean that the next
3877 * time userland calls execbuf, it would do so with presumed offset
3878 * state that didn't match the actual object state.
3879 */
3880 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3881 relocs);
3882 if (ret2 != 0) {
3883 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3884
3885 if (ret == 0)
3886 ret = ret2;
3887 }
3888
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003889 drm_free_large(object_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07003890 kfree(cliprects);
Eric Anholt673a3942008-07-30 12:06:12 -07003891
3892 return ret;
3893}
3894
Jesse Barnes76446ca2009-12-17 22:05:42 -05003895/*
3896 * Legacy execbuffer just creates an exec2 list from the original exec object
3897 * list array and passes it to the real function.
3898 */
3899int
3900i915_gem_execbuffer(struct drm_device *dev, void *data,
3901 struct drm_file *file_priv)
3902{
3903 struct drm_i915_gem_execbuffer *args = data;
3904 struct drm_i915_gem_execbuffer2 exec2;
3905 struct drm_i915_gem_exec_object *exec_list = NULL;
3906 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3907 int ret, i;
3908
3909#if WATCH_EXEC
3910 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3911 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3912#endif
3913
3914 if (args->buffer_count < 1) {
3915 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3916 return -EINVAL;
3917 }
3918
3919 /* Copy in the exec list from userland */
3920 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3921 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3922 if (exec_list == NULL || exec2_list == NULL) {
3923 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3924 args->buffer_count);
3925 drm_free_large(exec_list);
3926 drm_free_large(exec2_list);
3927 return -ENOMEM;
3928 }
3929 ret = copy_from_user(exec_list,
 3930			     (struct drm_i915_gem_exec_object __user *)
3931 (uintptr_t) args->buffers_ptr,
3932 sizeof(*exec_list) * args->buffer_count);
3933 if (ret != 0) {
3934 DRM_ERROR("copy %d exec entries failed %d\n",
3935 args->buffer_count, ret);
3936 drm_free_large(exec_list);
3937 drm_free_large(exec2_list);
3938 return -EFAULT;
3939 }
3940
3941 for (i = 0; i < args->buffer_count; i++) {
3942 exec2_list[i].handle = exec_list[i].handle;
3943 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3944 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3945 exec2_list[i].alignment = exec_list[i].alignment;
3946 exec2_list[i].offset = exec_list[i].offset;
3947 if (!IS_I965G(dev))
3948 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3949 else
3950 exec2_list[i].flags = 0;
3951 }
3952
3953 exec2.buffers_ptr = args->buffers_ptr;
3954 exec2.buffer_count = args->buffer_count;
3955 exec2.batch_start_offset = args->batch_start_offset;
3956 exec2.batch_len = args->batch_len;
3957 exec2.DR1 = args->DR1;
3958 exec2.DR4 = args->DR4;
3959 exec2.num_cliprects = args->num_cliprects;
3960 exec2.cliprects_ptr = args->cliprects_ptr;
3961 exec2.flags = 0;
3962
3963 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3964 if (!ret) {
3965 /* Copy the new buffer offsets back to the user's exec list. */
3966 for (i = 0; i < args->buffer_count; i++)
3967 exec_list[i].offset = exec2_list[i].offset;
3968 /* ... and back out to userspace */
 3969		ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
3970 (uintptr_t) args->buffers_ptr,
3971 exec_list,
3972 sizeof(*exec_list) * args->buffer_count);
3973 if (ret) {
3974 ret = -EFAULT;
3975 DRM_ERROR("failed to copy %d exec entries "
3976 "back to user (%d)\n",
3977 args->buffer_count, ret);
3978 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003979 }
3980
3981 drm_free_large(exec_list);
3982 drm_free_large(exec2_list);
3983 return ret;
3984}
3985
3986int
3987i915_gem_execbuffer2(struct drm_device *dev, void *data,
3988 struct drm_file *file_priv)
3989{
3990 struct drm_i915_gem_execbuffer2 *args = data;
3991 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3992 int ret;
3993
3994#if WATCH_EXEC
3995 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3996 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3997#endif
3998
3999 if (args->buffer_count < 1) {
4000 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4001 return -EINVAL;
4002 }
4003
4004 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4005 if (exec2_list == NULL) {
4006 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4007 args->buffer_count);
4008 return -ENOMEM;
4009 }
4010 ret = copy_from_user(exec2_list,
 4011			     (struct drm_i915_gem_exec_object2 __user *)
4012 (uintptr_t) args->buffers_ptr,
4013 sizeof(*exec2_list) * args->buffer_count);
4014 if (ret != 0) {
4015 DRM_ERROR("copy %d exec entries failed %d\n",
4016 args->buffer_count, ret);
4017 drm_free_large(exec2_list);
4018 return -EFAULT;
4019 }
4020
4021 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4022 if (!ret) {
4023 /* Copy the new buffer offsets back to the user's exec list. */
 4024		ret = copy_to_user((struct drm_i915_gem_exec_object2 __user *)
4025 (uintptr_t) args->buffers_ptr,
4026 exec2_list,
4027 sizeof(*exec2_list) * args->buffer_count);
4028 if (ret) {
4029 ret = -EFAULT;
4030 DRM_ERROR("failed to copy %d exec entries "
4031 "back to user (%d)\n",
4032 args->buffer_count, ret);
4033 }
4034 }
4035
4036 drm_free_large(exec2_list);
4037 return ret;
4038}
4039
Eric Anholt673a3942008-07-30 12:06:12 -07004040int
4041i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4042{
4043 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01004044 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004045 int ret;
4046
4047 i915_verify_inactive(dev, __FILE__, __LINE__);
4048 if (obj_priv->gtt_space == NULL) {
4049 ret = i915_gem_object_bind_to_gtt(obj, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01004050 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07004051 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00004052 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004053
Eric Anholt673a3942008-07-30 12:06:12 -07004054 obj_priv->pin_count++;
4055
4056 /* If the object is not active and not pending a flush,
4057 * remove it from the inactive list
4058 */
4059 if (obj_priv->pin_count == 1) {
4060 atomic_inc(&dev->pin_count);
4061 atomic_add(obj->size, &dev->pin_memory);
4062 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01004063 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
Eric Anholt673a3942008-07-30 12:06:12 -07004064 !list_empty(&obj_priv->list))
4065 list_del_init(&obj_priv->list);
4066 }
4067 i915_verify_inactive(dev, __FILE__, __LINE__);
4068
4069 return 0;
4070}
4071
4072void
4073i915_gem_object_unpin(struct drm_gem_object *obj)
4074{
4075 struct drm_device *dev = obj->dev;
4076 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01004077 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004078
4079 i915_verify_inactive(dev, __FILE__, __LINE__);
4080 obj_priv->pin_count--;
4081 BUG_ON(obj_priv->pin_count < 0);
4082 BUG_ON(obj_priv->gtt_space == NULL);
4083
4084 /* If the object is no longer pinned, and is
4085 * neither active nor being flushed, then stick it on
4086 * the inactive list
4087 */
4088 if (obj_priv->pin_count == 0) {
4089 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01004090 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Eric Anholt673a3942008-07-30 12:06:12 -07004091 list_move_tail(&obj_priv->list,
4092 &dev_priv->mm.inactive_list);
4093 atomic_dec(&dev->pin_count);
4094 atomic_sub(obj->size, &dev->pin_memory);
4095 }
4096 i915_verify_inactive(dev, __FILE__, __LINE__);
4097}
4098
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

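/** Drops the userspace pin taken in i915_gem_pin_ioctl(). Only the file
 * handle that pinned the object may unpin it.
 */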
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = to_intel_bo(obj);
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

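/** Reports whether an object is still in use by the GPU, so that
 * userspace (e.g. libdrm's buffer cache) can decide when a buffer may
 * be reused.
 */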
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs are
	 * actually unmasked, and our working set ends up being larger than
	 * required.
	 */
	i915_gem_retire_requests(dev);

	obj_priv = to_intel_bo(obj);
	/* Don't count being on the flushing list against the object being
	 * done. Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache. The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

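/** Applies userspace advice about an object's backing storage:
 * I915_MADV_DONTNEED marks it purgeable, I915_MADV_WILLNEED marks it
 * needed again. An unbound purgeable object has its backing storage
 * discarded immediately.
 */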
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);

		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
		return -EINVAL;
	}

	if (obj_priv->madv != __I915_MADV_PURGED)
		obj_priv->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj_priv) &&
	    obj_priv->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj_priv->madv != __I915_MADV_PURGED;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

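/** Allocates and initialises a new GEM object, starting out in the CPU
 * read/write domains and unbound from the GTT.
 */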
struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
					     size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;

	trace_i915_gem_object_create(&obj->base);

	return &obj->base;
}

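/** GEM objects are allocated through i915_gem_alloc_object(), so this
 * DRM callback must never be reached.
 */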
int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

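/** Final unreference for a GEM object: unpins and unbinds it, detaches
 * any physically contiguous backing, and frees the per-object state.
 */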
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);

	drm_gem_object_release(obj);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj_priv);
}

/** Unbinds all inactive objects. */
static int
i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

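/** Quiesces the GPU: waits for outstanding rendering, evicts inactive
 * buffers when running under UMS, and tears down the ring buffer and
 * retire work handler. Called from the VT-leave and lastclose paths.
 */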
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_from_inactive_list(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		goto err_unref;

	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
	if (dev_priv->seqno_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	dev_priv->seqno_obj = obj;
	memset(dev_priv->seqno_page, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

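/** Releases the seqno page set up by i915_gem_init_pipe_control(). */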
void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = dev_priv->seqno_obj;
	obj_priv = to_intel_bo(obj);
	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->seqno_obj = NULL;

	dev_priv->seqno_page = NULL;
}

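/** Re-enables GEM when userspace acquires the VT (UMS only): clears any
 * wedged state, reinitialises the ring buffer and installs the IRQ
 * handler.
 */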
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}

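/** Counterpart to i915_gem_entervt_ioctl(): uninstalls the IRQ handler
 * and idles the GPU when userspace releases the VT (UMS only).
 */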
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

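/** One-time GEM setup at driver load: initialises the memory-manager
 * lists, the retire work handler, fence register state and the global
 * shrinker list entry.
 */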
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
	} else {
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

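/** Frees a physically contiguous object, detaching any GEM object that
 * is currently bound to it.
 */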
void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

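/** Copies the contents of a physically contiguous object back into the
 * GEM object's pages and breaks the association between the two.
 */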
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}

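/** Binds a GEM object to the physically contiguous object identified by
 * @id, allocating it on first use, and copies the object's current
 * contents into the contiguous memory.
 */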
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id, obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}

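/** Handles pwrite for objects backed by physically contiguous memory:
 * copies the user data straight into the contiguous allocation and
 * flushes the chipset cache.
 */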
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}

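/** Returns true while any object is on the active or flushing lists,
 * i.e. while the GPU still has outstanding rendering.
 */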
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	return !lists_empty;
}

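/** Memory-shrinker callback. With nr_to_scan == 0 it only reports how
 * many inactive objects could be freed. Otherwise it unbinds purgeable
 * buffers first, then any other inactive buffers, and as a last resort
 * idles the GPU and rescans. Returns -1 if no device lock could be
 * taken without risking deadlock.
 */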
static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}

static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}