/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

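/*
 * radeon_get_ib_value - fetch one dword from the IB chunk being parsed.
 *
 * Looks up the dword at index @idx using the two kernel page slots cached
 * in the chunk; if the needed page isn't mapped yet, radeon_cs_update_pages()
 * is asked to bring it in. On failure the parser error is recorded and 0 is
 * returned.
 */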
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

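/*
 * radeon_ring_write - copy one dword into the ring at the current write
 * pointer, wrapping through ptr_mask. With DRM_DEBUG_CODE enabled it warns
 * when the caller writes more dwords than were reserved.
 */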
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */
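/*
 * radeon_ib_try_free - release an IB's SA buffer and fence if the IB has
 * been emitted and its fence has already signaled. Returns true when the
 * IB was actually freed.
 */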
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have been emitted */
	if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
			done = true;
		}
	}
	return done;
}

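/*
 * radeon_ib_get - grab a free IB from the pool for the given ring.
 *
 * Allocates @size bytes (rounded up to 256) from the SA manager and binds a
 * freshly created fence to the IB. The pool is scanned starting at head_id,
 * reclaiming signaled IBs along the way; if everything is still in flight we
 * wait on the oldest fences and retry, giving up after 5 attempts.
 */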
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size on 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256, false);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
				(*ib)->gpu_addr = radeon_sa_bo_gpu_addr((*ib)->sa_bo);
				(*ib)->fence = fence;
				(*ib)->vm_id = 0;
				(*ib)->is_const_ib = false;
				/* IBs are most likely allocated in a ring fashion,
				 * thus rdev->ib_pool.head_id should be the id of
				 * the oldest IB
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* this should be a rare event, i.e. all IBs are scheduled but none
	 * has signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		struct radeon_fence *fence = rdev->ib_pool.ibs[idx].fence;
		if (fence && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			r = radeon_fence_wait(fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

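/*
 * radeon_ib_free - return an IB to the pool. If the IB was never emitted
 * (its fence sequence is still RADEON_FENCE_NOTEMITED_SEQ) the SA buffer
 * and fence are released right away; otherwise radeon_ib_try_free() will
 * reclaim it later, once the fence signals.
 */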
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo, NULL);
		radeon_fence_unref(&tmp->fence);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

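/*
 * radeon_ib_schedule - hand an IB over to the hardware.
 *
 * Locks the ring the IB's fence belongs to, reserves 64 dwords (enough for
 * the indirect buffer packet plus the fence), asks the ASIC specific code to
 * execute the IB, emits the fence and commits the ring.
 */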
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the ib; we should report this. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

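/*
 * radeon_ib_pool_init - one-time setup of the IB pool: a single GTT
 * suballocator backing RADEON_IB_POOL_SIZE IBs of up to 64KB each, plus the
 * debugfs entries. Calling it again once the pool is ready just returns 0.
 */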
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int i, r;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		return 0;
	}

	r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		return r;
	}

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].sa_bo = NULL;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

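/*
 * radeon_ib_pool_fini - tear down the IB pool: free every IB's SA buffer
 * and fence, then shut down the suballocator.
 */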
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo, NULL);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

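/*
 * radeon_ib_ring_tests - run an IB test on every ready ring. A failure on
 * the GFX ring disables acceleration entirely; a failure on any other ring
 * only marks that ring as not ready.
 */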
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Ring.
 */
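/*
 * radeon_ring_index - map a ring structure back to its index in rdev->ring.
 * Pre-R600 parts only have the CP (GFX) ring; Cayman and newer also expose
 * the CP1 and CP2 rings.
 */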
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

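/*
 * radeon_ring_free_size - refresh ring->rptr from the writeback page (or
 * from the register when writeback is disabled) and recompute how many
 * dwords are free between the read and write pointers.
 */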
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

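/*
 * radeon_ring_alloc - reserve @ndw dwords in the ring, rounded up to the
 * alignment mask so the commit padding always fits. If the ring is too full
 * it waits for the next fence on this ring to retire and re-evaluates the
 * free space. Callers normally go through radeon_ring_lock(), which takes
 * rdev->ring_lock around this.
 */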
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

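/*
 * radeon_ring_lock - take the ring lock and reserve @ndw dwords. The usual
 * pattern is radeon_ring_lock(), a series of radeon_ring_write() calls, then
 * radeon_ring_unlock_commit() (or radeon_ring_unlock_undo() on error), as
 * radeon_ib_schedule() above does.
 */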
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

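/*
 * radeon_ring_commit - pad the ring with NOPs up to the fetch alignment,
 * then publish the new write pointer to the hardware; the trailing register
 * read is there to flush the posted write.
 */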
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}

void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

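/*
 * radeon_ring_force_activity - if the CP has caught up with the write
 * pointer, push a single NOP so the lockup detection has some activity to
 * observe (see radeon_ring_test_lockup() below).
 */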
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}

void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up by recording information
 * @rdev:	radeon device structure
 * @ring:	radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information: either the CP
 * rptr differs from the recorded value or jiffies wraps around, and either
 * case forces (re)initialization of the tracking information.
 *
 * A possible false positive is a call made after a long while with last_rptr
 * still equal to the current CP rptr; it is unlikely, but it can happen. To
 * avoid this, if the time elapsed since the last call is bigger than the
 * lockup timeout we return false and update the tracking information. As a
 * consequence the caller must call radeon_ring_test_lockup several times
 * within the timeout window for a lockup to be reported; the fencing code
 * should be cautious about that.
 *
 * The caller should write to the ring to force the CP to do something, so we
 * don't get a false positive when the CP simply has nothing to do.
 **/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}

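/*
 * radeon_ring_init - record the ring's register layout, allocate and pin the
 * ring buffer in GTT, map it for CPU access and register the matching
 * debugfs file. The buffer object is only created when none exists yet, so
 * a later call (e.g. on resume) reuses the existing one.
 */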
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return 0;
}

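/*
 * radeon_ring_fini - detach the ring buffer under the ring lock, then
 * unmap, unpin and drop the last reference on the buffer object.
 */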
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ib_pool.sa_manager, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	r = radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
	if (r)
		return r;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_idx[i] = i;
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}