/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;
	int ret;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
		    (IS_G4X(dev) || IS_GEN5(dev)))
			cmd |= MI_INVALIDATE_ISP;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, cmd);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;	/* was previously left at 0 on this path */
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
	} else if (IS_GEN5(dev)) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

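/*
 * Write this ring's new seqno into one of its two semaphore mailboxes,
 * which live in a companion ring's register space (i selects which of
 * the two other rings), so that ring can later wait on it with
 * MI_SEMAPHORE_COMPARE via intel_ring_sync().
 */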
static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}

static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

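/*
 * Stall @ring on the GPU (the CPU is not blocked) until @to has passed
 * @seqno, by waiting with MI_SEMAPHORE_COMPARE on the mailbox that @to
 * updates in gen6_add_request().
 */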
int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

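/*
 * Emit a depth-stalling PIPE_CONTROL that performs a qword write to the
 * given scratch address; pc_render_add_request() uses a series of these
 * to flush writes out to memory before raising the PIPE_NOTIFY interrupt.
 */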
#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (!dev->irq_enabled)
		return false;

	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv,
						     GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (atomic_dec_and_test(&ring->irq_refcount)) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv,
						      GT_USER_INTERRUPT |
						      GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = IS_GEN6(ring->dev) ?
		RING_HWS_PGA_GEN6(ring->mmio_base) :
		RING_HWS_PGA(ring->mmio_base);
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

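/*
 * A plain MI_FLUSH suffices on the BSD ring; the flush is skipped
 * entirely unless the caller is actually flushing the render domain.
 */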
static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return 0;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
	*result = seqno;
	return 0;
}

static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (!dev->irq_enabled)
		return false;

	if (atomic_inc_return(&ring->irq_refcount) == 1) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_graphics_irq(dev_priv, flag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return true;
}

static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;

	if (atomic_dec_and_test(&ring->irq_refcount)) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_disable_graphics_irq(dev_priv, flag);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}

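/*
 * Dispatch a batch buffer via MI_BATCH_BUFFER_START using the 965-style
 * (non-secure) addressing mode.
 */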
static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;	/* was previously left at 0 on this path */
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

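/*
 * Common ring setup: allocate the hardware status page if this platform
 * needs one, allocate and pin the ring object itself, map it through the
 * GTT aperture, and hand off to the ring's own init hook.
 */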
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_buffer(ring, ring->size - 8);
	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		/* If the reported head position has wrapped or hasn't advanced,
		 * fallback to the slow and accurate path.
		 */
		head = intel_read_status_page(ring, 4);
		if (head < ring->actual_head)
			head = I915_READ_HEAD(ring);
		ring->actual_head = head;
		ring->head = head & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

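/*
 * Reserve space for num_dwords commands, wrapping to the start of the
 * ring (and waiting for it to drain) as necessary. Must be paired with
 * intel_ring_advance() once the commands have been emitted.
 */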
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	int n = 4*num_dwords;
	int ret;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

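/* Publish the emitted commands by updating the hardware tail pointer;
 * the mask relies on the ring size being a power of two.
 */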
void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= bsd_ring_get_irq,
	.irq_put		= bsd_ring_put_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

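/* Gen6+ rings flush with MI_FLUSH_DW instead of the legacy MI_FLUSH;
 * as above, the flush is skipped unless the render domain is being
 * flushed.
 */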
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate_domains,
			   u32 flush_domains)
{
	int ret;

	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return 0;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH_DW);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= gen6_bsd_ring_get_irq,
	.irq_put		= gen6_bsd_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: each time the BLT engine's ring
 * tail is moved, the first command in the ring to be parsed should be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}

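/* On the workaround path, reserve two extra dwords so that every tail
 * move begins with a MI_BATCH_BUFFER_START of the dummy batch set up in
 * blt_ring_init() above.
 */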
static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, num_dwords);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate_domains,
			  u32 flush_domains)
{
	int ret;

	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return 0;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH_DW);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);
	return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= blt_ring_get_irq,
	.irq_put		= blt_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}