/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

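/**
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Program the MC io debug registers and load the MC ucode into the
 * memory controller, but only if the board uses GDDR5 and the MC
 * engine is not already running (BARTS-cayman).
 * Returns 0 on success, -EINVAL if the MC firmware is not loaded.
 */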
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

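/**
 * ni_init_microcode - fetch the required firmware images
 *
 * @rdev: radeon_device pointer
 *
 * Request the PFP, ME, RLC and (on dGPUs) MC firmware images from
 * userspace and validate their sizes for the asic (BARTS-ARUBA).
 * On failure, any firmware already fetched is released.
 * Returns 0 on success, error on failure.
 */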
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
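/**
 * cayman_gpu_init - set up the asic-specific GPU configuration
 *
 * @rdev: radeon_device pointer
 *
 * Determine the shader engine/SIMD/backend configuration for the chip
 * family (and, on ARUBA, the PCI device id), then program the tiling,
 * render backend map, TCC disable masks and 3D engine defaults.
 */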
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled rbs are just the ones not disabled :) */
	disabled_rb_mask = tmp;

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);

	tmp = gb_addr_config & NUM_PIPES_MASK;
	tmp = r6xx_remap_render_backend(rdev, tmp,
					rdev->config.cayman.max_backends_per_se *
					rdev->config.cayman.max_shader_engines,
					CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
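/**
 * cayman_pcie_gart_tlb_flush - flush the GART TLBs
 *
 * @rdev: radeon_device pointer
 *
 * Flush the HDP cache and request a TLB invalidate for
 * VM context 0 (cayman).
 */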
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

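/**
 * cayman_pcie_gart_enable - set up and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART table in vram, program the TLB control and L2 cache
 * registers, enable VM context 0 for system pages, and point
 * contexts 1-7 at a valid page table base until real per-VM tables
 * are assigned (cayman).
 * Returns 0 on success, error on failure.
 */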
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->gart.table_addr >> 12);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
	       RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
	       VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
	       READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       READ_PROTECTION_FAULT_ENABLE_DEFAULT |
	       WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

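/**
 * cayman_cp_int_cntl_setup - set CP_INT_CNTL for a given ring
 *
 * @rdev: radeon_device pointer
 * @ring: CP ring index (0-2)
 * @cp_int_cntl: interrupt control value to program
 *
 * Select the requested CP ring via SRBM_GFX_CNTL and write its
 * CP_INT_CNTL register (cayman).
 */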
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
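/**
 * cayman_fence_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Flush the read caches over the GART for the fence's vmid, then
 * emit an EVENT_WRITE_EOP packet to flush caches, write the fence
 * value and raise an interrupt (cayman).
 */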
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

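/**
 * cayman_ring_ib_execute - emit an IB on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Switch the CP to DX10/11 mode, emit the INDIRECT_BUFFER packet for
 * the IB (tagged with its vmid) and flush the read caches for that
 * vmid (cayman).
 */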
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw |
			  (ib->vm ? (ib->vm->id << 24) : 0));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
}

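/**
 * cayman_cp_enable - enable/disable the CP MEs
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable the MEs
 *
 * Halt or unhalt the PFP and ME.  When disabling, the active vram
 * size is reset and the gfx ring is marked not ready (cayman).
 */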
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}

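/**
 * cayman_cp_load_microcode - load the CP ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Halt the CP, then load the PFP and ME ucode (cayman).
 * Returns 0 on success, -EINVAL if the firmware is not loaded.
 */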
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

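/**
 * cayman_cp_start - initialize the CP
 *
 * @rdev: radeon_device pointer
 *
 * Emit ME_INITIALIZE on the gfx ring, enable the CP, then emit the
 * default clear state and initial register values (cayman).
 * Returns 0 on success, error on failure.
 */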
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

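/**
 * cayman_cp_resume - reset and start the CP ring buffers
 *
 * @rdev: radeon_device pointer
 *
 * Soft reset the CP block, program the three CP ring buffers and
 * their rptr writeback addresses, then start the rings and test
 * the gfx ring (cayman).
 * Returns 0 on success, error on failure.
 */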
static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = drm_order(ring->ring_size / 8);
		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE / 8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->rptr = ring->wptr = 0;
		WREG32(ring->rptr_reg, ring->rptr);
		WREG32(ring->wptr_reg, ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */
/**
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (cayman-SI).
 */
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
				struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * cayman_dma_stop - stop the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines (cayman-SI).
 */
void cayman_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	/* dma0 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

	/* dma1 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}

/**
 * cayman_dma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffers and enable them (cayman-SI).
 * Returns 0 for success, error for failure.
 */
int cayman_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = DMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = DMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = drm_order(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + reg_offset, 0);
		WREG32(DMA_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + reg_offset);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + reg_offset, dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;

		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cayman_dma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (cayman-SI).
 */
void cayman_dma_fini(struct radeon_device *rdev)
{
	cayman_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}

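/**
 * cayman_gpu_soft_reset - soft reset GPU blocks
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of blocks to reset (GFX, COMPUTE, CP, DMA)
 *
 * Drop reset requests for blocks that are already idle, stop the MC,
 * halt the CP and DMA engines, then pulse the GRBM/SRBM soft reset
 * bits for the remaining blocks (cayman).
 * Returns 0 on success.
 */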
Alex Deucher271d6fed2013-01-03 12:48:05 -05001314static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1315{
1316 struct evergreen_mc_save save;
Alex Deucher187e3592013-01-18 14:51:38 -05001317 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1318 u32 tmp;
1319 int ret = 0;
Alex Deucher271d6fed2013-01-03 12:48:05 -05001320
Alex Deucher19fc42e2013-01-14 11:04:39 -05001321 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
Alex Deucher187e3592013-01-18 14:51:38 -05001322 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP);
Alex Deucher19fc42e2013-01-14 11:04:39 -05001323
1324 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1325 reset_mask &= ~RADEON_RESET_DMA;
1326
Alex Deucher271d6fed2013-01-03 12:48:05 -05001327 if (reset_mask == 0)
1328 return 0;
1329
1330 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1331
Alex Deucher187e3592013-01-18 14:51:38 -05001332 evergreen_print_gpu_status_regs(rdev);
Alex Deucher271d6fed2013-01-03 12:48:05 -05001333 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1334 RREG32(0x14F8));
1335 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1336 RREG32(0x14D8));
1337 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1338 RREG32(0x14FC));
1339 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1340 RREG32(0x14DC));
1341
Alex Deucher187e3592013-01-18 14:51:38 -05001342 r600_set_bios_scratch_engine_hung(rdev, true);
1343
Alex Deucher271d6fed2013-01-03 12:48:05 -05001344 evergreen_mc_stop(rdev, &save);
1345 if (evergreen_mc_wait_for_idle(rdev)) {
1346 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1347 }
1348
Alex Deucher187e3592013-01-18 14:51:38 -05001349 /* Disable CP parsing/prefetching */
1350 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1351
1352 if (reset_mask & RADEON_RESET_DMA) {
1353 /* dma0 */
1354 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1355 tmp &= ~DMA_RB_ENABLE;
1356 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
1357
1358 /* dma1 */
1359 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1360 tmp &= ~DMA_RB_ENABLE;
1361 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1362 }
1363
1364 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1365 grbm_soft_reset = SOFT_RESET_CB |
1366 SOFT_RESET_DB |
1367 SOFT_RESET_GDS |
1368 SOFT_RESET_PA |
1369 SOFT_RESET_SC |
1370 SOFT_RESET_SPI |
1371 SOFT_RESET_SH |
1372 SOFT_RESET_SX |
1373 SOFT_RESET_TC |
1374 SOFT_RESET_TA |
1375 SOFT_RESET_VGT |
1376 SOFT_RESET_IA;
1377 }
1378
1379 if (reset_mask & RADEON_RESET_CP) {
1380 grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
1381
1382 srbm_soft_reset |= SOFT_RESET_GRBM;
1383 }
Alex Deucher271d6fed2013-01-03 12:48:05 -05001384
1385 if (reset_mask & RADEON_RESET_DMA)
Alex Deucher187e3592013-01-18 14:51:38 -05001386 srbm_soft_reset |= SOFT_RESET_DMA | SOFT_RESET_DMA1;
1387
1388 if (grbm_soft_reset) {
1389 tmp = RREG32(GRBM_SOFT_RESET);
1390 tmp |= grbm_soft_reset;
1391 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
1392 WREG32(GRBM_SOFT_RESET, tmp);
1393 tmp = RREG32(GRBM_SOFT_RESET);
1394
1395 udelay(50);
1396
1397 tmp &= ~grbm_soft_reset;
1398 WREG32(GRBM_SOFT_RESET, tmp);
1399 tmp = RREG32(GRBM_SOFT_RESET);
1400 }
1401
1402 if (srbm_soft_reset) {
1403 tmp = RREG32(SRBM_SOFT_RESET);
1404 tmp |= srbm_soft_reset;
1405 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1406 WREG32(SRBM_SOFT_RESET, tmp);
1407 tmp = RREG32(SRBM_SOFT_RESET);
1408
1409 udelay(50);
1410
1411 tmp &= ~srbm_soft_reset;
1412 WREG32(SRBM_SOFT_RESET, tmp);
1413 tmp = RREG32(SRBM_SOFT_RESET);
1414 }
Alex Deucher271d6fed2013-01-03 12:48:05 -05001415
1416 /* Wait a little for things to settle down */
1417 udelay(50);
1418
Alex Deucherb9952a82011-03-02 20:07:33 -05001419 evergreen_mc_resume(rdev, &save);
Alex Deucher187e3592013-01-18 14:51:38 -05001420 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05001421
Alex Deucher187e3592013-01-18 14:51:38 -05001422#if 0
1423 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
1424 if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
1425 ret = -EAGAIN;
1426 }
1427
1428 if (reset_mask & RADEON_RESET_DMA) {
1429 if (!(RREG32(DMA_STATUS_REG) & DMA_IDLE))
1430 ret = -EAGAIN;
1431 }
1432#endif
1433
1434 if (!ret)
1435 r600_set_bios_scratch_engine_hung(rdev, false);
1436
1437 evergreen_print_gpu_status_regs(rdev);
Alex Deucher410a3412013-01-18 13:05:39 -05001438
Alex Deucherb9952a82011-03-02 20:07:33 -05001439 return 0;
1440}
1441
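/**
 * cayman_asic_reset - attempt a full soft reset of the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Soft reset the GFX, compute, DMA and CP blocks (cayman-SI).
 * Returns 0 for success.
 */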
1442int cayman_asic_reset(struct radeon_device *rdev)
1443{
Alex Deucher271d6fed2013-01-03 12:48:05 -05001444 return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
1445 RADEON_RESET_COMPUTE |
Alex Deucher187e3592013-01-18 14:51:38 -05001446 RADEON_RESET_DMA |
1447 RADEON_RESET_CP));
Alex Deucherb9952a82011-03-02 20:07:33 -05001448}
1449
Alex Deucherf60cbd12012-12-04 15:27:33 -05001450/**
1451 * cayman_dma_is_lockup - Check if the DMA engine is locked up
1452 *
1453 * @rdev: radeon_device pointer
1454 * @ring: radeon_ring structure holding ring information
1455 *
1456 * Check if the async DMA engine is locked up (cayman-SI).
1457 * Returns true if the engine appears to be locked up, false if not.
1458 */
1459bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1460{
1461 u32 dma_status_reg;
1462
1463 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
1464 dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1465 else
1466 dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1467 if (dma_status_reg & DMA_IDLE) {
1468 radeon_ring_lockup_update(ring);
1469 return false;
1470 }
 1471	/* force ring activity */
1472 radeon_ring_force_activity(rdev, ring);
1473 return radeon_ring_test_lockup(rdev, ring);
1474}
1475
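/**
 * cayman_startup - program the hardware for acceleration
 *
 * @rdev: radeon_device pointer
 *
 * Load the microcode, program the MC, enable the PCIE GART, set up the
 * blitter and RLC, allocate writeback and fence buffers, enable
 * interrupts, bring up the CP and DMA rings, and initialize the IB pool,
 * VM manager and audio (cayman-SI).
 * Returns 0 for success, error for failure.
 */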
Alex Deucher755d8192011-03-02 20:07:34 -05001476static int cayman_startup(struct radeon_device *rdev)
1477{
Christian Könige32eb502011-10-23 12:56:27 +02001478 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher755d8192011-03-02 20:07:34 -05001479 int r;
1480
Ilija Hadzicb07759b2011-09-20 10:22:58 -04001481 /* enable pcie gen2 link */
1482 evergreen_pcie_gen2_enable(rdev);
1483
Alex Deucherc420c742012-03-20 17:18:39 -04001484 if (rdev->flags & RADEON_IS_IGP) {
1485 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1486 r = ni_init_microcode(rdev);
1487 if (r) {
1488 DRM_ERROR("Failed to load firmware!\n");
1489 return r;
1490 }
1491 }
1492 } else {
1493 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1494 r = ni_init_microcode(rdev);
1495 if (r) {
1496 DRM_ERROR("Failed to load firmware!\n");
1497 return r;
1498 }
1499 }
1500
1501 r = ni_mc_load_microcode(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001502 if (r) {
Alex Deucherc420c742012-03-20 17:18:39 -04001503 DRM_ERROR("Failed to load MC firmware!\n");
Alex Deucher755d8192011-03-02 20:07:34 -05001504 return r;
1505 }
1506 }
Alex Deucher755d8192011-03-02 20:07:34 -05001507
Alex Deucher16cdf042011-10-28 10:30:02 -04001508 r = r600_vram_scratch_init(rdev);
1509 if (r)
1510 return r;
1511
Alex Deucher755d8192011-03-02 20:07:34 -05001512 evergreen_mc_program(rdev);
1513 r = cayman_pcie_gart_enable(rdev);
1514 if (r)
1515 return r;
1516 cayman_gpu_init(rdev);
1517
Alex Deuchercb92d452011-05-25 16:39:00 -04001518 r = evergreen_blit_init(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001519 if (r) {
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04001520 r600_blit_fini(rdev);
Alex Deucher27cd7762012-02-23 17:53:42 -05001521 rdev->asic->copy.copy = NULL;
Alex Deucher755d8192011-03-02 20:07:34 -05001522 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1523 }
Alex Deucher755d8192011-03-02 20:07:34 -05001524
Alex Deucherc420c742012-03-20 17:18:39 -04001525 /* allocate rlc buffers */
1526 if (rdev->flags & RADEON_IS_IGP) {
1527 r = si_rlc_init(rdev);
1528 if (r) {
1529 DRM_ERROR("Failed to init rlc BOs!\n");
1530 return r;
1531 }
1532 }
1533
Alex Deucher755d8192011-03-02 20:07:34 -05001534 /* allocate wb buffer */
1535 r = radeon_wb_init(rdev);
1536 if (r)
1537 return r;
1538
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001539 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1540 if (r) {
1541 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1542 return r;
1543 }
1544
1545 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
1546 if (r) {
1547 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1548 return r;
1549 }
1550
1551 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
1552 if (r) {
1553 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1554 return r;
1555 }
1556
Alex Deucherf60cbd12012-12-04 15:27:33 -05001557 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
1558 if (r) {
1559 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1560 return r;
1561 }
1562
1563 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
1564 if (r) {
1565 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1566 return r;
1567 }
1568
Alex Deucher755d8192011-03-02 20:07:34 -05001569 /* Enable IRQ */
1570 r = r600_irq_init(rdev);
1571 if (r) {
1572 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1573 radeon_irq_kms_fini(rdev);
1574 return r;
1575 }
1576 evergreen_irq_set(rdev);
1577
Christian Könige32eb502011-10-23 12:56:27 +02001578 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
Alex Deucher78c55602011-11-17 14:25:56 -05001579 CP_RB0_RPTR, CP_RB0_WPTR,
1580 0, 0xfffff, RADEON_CP_PACKET2);
Alex Deucher755d8192011-03-02 20:07:34 -05001581 if (r)
1582 return r;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001583
1584 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1585 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1586 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
1587 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
1588 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1589 if (r)
1590 return r;
1591
1592 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1593 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
1594 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
1595 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
1596 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1597 if (r)
1598 return r;
1599
Alex Deucher755d8192011-03-02 20:07:34 -05001600 r = cayman_cp_load_microcode(rdev);
1601 if (r)
1602 return r;
1603 r = cayman_cp_resume(rdev);
1604 if (r)
1605 return r;
1606
Alex Deucherf60cbd12012-12-04 15:27:33 -05001607 r = cayman_dma_resume(rdev);
1608 if (r)
1609 return r;
1610
Christian König2898c342012-07-05 11:55:34 +02001611 r = radeon_ib_pool_init(rdev);
1612 if (r) {
1613 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Jerome Glisseb15ba512011-11-15 11:48:34 -05001614 return r;
Christian König2898c342012-07-05 11:55:34 +02001615 }
Jerome Glisseb15ba512011-11-15 11:48:34 -05001616
Christian Königc6105f22012-07-05 14:32:00 +02001617 r = radeon_vm_manager_init(rdev);
1618 if (r) {
1619 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
Jerome Glisse721604a2012-01-05 22:11:05 -05001620 return r;
Christian Königc6105f22012-07-05 14:32:00 +02001621 }
Jerome Glisse721604a2012-01-05 22:11:05 -05001622
Rafał Miłecki6b53a052012-06-11 12:34:01 +02001623 r = r600_audio_init(rdev);
1624 if (r)
1625 return r;
1626
Alex Deucher755d8192011-03-02 20:07:34 -05001627 return 0;
1628}
1629
1630int cayman_resume(struct radeon_device *rdev)
1631{
1632 int r;
1633
 1634	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
 1635	 * posting performs the tasks necessary to bring the GPU back into good
 1636	 * shape.
1637 */
1638 /* post card */
1639 atom_asic_init(rdev->mode_info.atom_context);
1640
Jerome Glisseb15ba512011-11-15 11:48:34 -05001641 rdev->accel_working = true;
Alex Deucher755d8192011-03-02 20:07:34 -05001642 r = cayman_startup(rdev);
1643 if (r) {
1644 DRM_ERROR("cayman startup failed on resume\n");
Jerome Glisse6b7746e2012-02-20 17:57:20 -05001645 rdev->accel_working = false;
Alex Deucher755d8192011-03-02 20:07:34 -05001646 return r;
1647 }
Alex Deucher755d8192011-03-02 20:07:34 -05001648 return r;
Alex Deucher755d8192011-03-02 20:07:34 -05001649}
1650
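/**
 * cayman_suspend - stop the hardware for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Tear down audio, halt the CP and DMA engines, suspend interrupts, and
 * disable writeback and the PCIE GART (cayman-SI).
 * Returns 0 for success.
 */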
1651int cayman_suspend(struct radeon_device *rdev)
1652{
Rafał Miłecki6b53a052012-06-11 12:34:01 +02001653 r600_audio_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001654 cayman_cp_enable(rdev, false);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001655 cayman_dma_stop(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001656 evergreen_irq_suspend(rdev);
1657 radeon_wb_disable(rdev);
1658 cayman_pcie_gart_disable(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001659 return 0;
1660}
1661
 1662/* The plan is to move initialization into that function and use helper
 1663 * functions so that radeon_device_init does little more than call the
 1664 * ASIC-specific functions. This should also allow us to remove a bunch
 1665 * of callbacks such as vram_info.
1667 */
1668int cayman_init(struct radeon_device *rdev)
1669{
Christian Könige32eb502011-10-23 12:56:27 +02001670 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher755d8192011-03-02 20:07:34 -05001671 int r;
1672
Alex Deucher755d8192011-03-02 20:07:34 -05001673 /* Read BIOS */
1674 if (!radeon_get_bios(rdev)) {
1675 if (ASIC_IS_AVIVO(rdev))
1676 return -EINVAL;
1677 }
1678 /* Must be an ATOMBIOS */
1679 if (!rdev->is_atom_bios) {
1680 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
1681 return -EINVAL;
1682 }
1683 r = radeon_atombios_init(rdev);
1684 if (r)
1685 return r;
1686
1687 /* Post card if necessary */
1688 if (!radeon_card_posted(rdev)) {
1689 if (!rdev->bios) {
1690 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1691 return -EINVAL;
1692 }
1693 DRM_INFO("GPU not posted. posting now...\n");
1694 atom_asic_init(rdev->mode_info.atom_context);
1695 }
1696 /* Initialize scratch registers */
1697 r600_scratch_init(rdev);
1698 /* Initialize surface registers */
1699 radeon_surface_init(rdev);
1700 /* Initialize clocks */
1701 radeon_get_clock_info(rdev->ddev);
1702 /* Fence driver */
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001703 r = radeon_fence_driver_init(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001704 if (r)
1705 return r;
1706 /* initialize memory controller */
1707 r = evergreen_mc_init(rdev);
1708 if (r)
1709 return r;
1710 /* Memory manager */
1711 r = radeon_bo_init(rdev);
1712 if (r)
1713 return r;
1714
1715 r = radeon_irq_kms_init(rdev);
1716 if (r)
1717 return r;
1718
Christian Könige32eb502011-10-23 12:56:27 +02001719 ring->ring_obj = NULL;
1720 r600_ring_init(rdev, ring, 1024 * 1024);
Alex Deucher755d8192011-03-02 20:07:34 -05001721
Alex Deucherf60cbd12012-12-04 15:27:33 -05001722 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1723 ring->ring_obj = NULL;
1724 r600_ring_init(rdev, ring, 64 * 1024);
1725
1726 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1727 ring->ring_obj = NULL;
1728 r600_ring_init(rdev, ring, 64 * 1024);
1729
Alex Deucher755d8192011-03-02 20:07:34 -05001730 rdev->ih.ring_obj = NULL;
1731 r600_ih_ring_init(rdev, 64 * 1024);
1732
1733 r = r600_pcie_gart_init(rdev);
1734 if (r)
1735 return r;
1736
1737 rdev->accel_working = true;
1738 r = cayman_startup(rdev);
1739 if (r) {
1740 dev_err(rdev->dev, "disabling GPU acceleration\n");
1741 cayman_cp_fini(rdev);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001742 cayman_dma_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001743 r600_irq_fini(rdev);
Alex Deucherc420c742012-03-20 17:18:39 -04001744 if (rdev->flags & RADEON_IS_IGP)
1745 si_rlc_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001746 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001747 radeon_ib_pool_fini(rdev);
Jerome Glisse721604a2012-01-05 22:11:05 -05001748 radeon_vm_manager_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001749 radeon_irq_kms_fini(rdev);
1750 cayman_pcie_gart_fini(rdev);
1751 rdev->accel_working = false;
1752 }
Alex Deucher755d8192011-03-02 20:07:34 -05001753
1754 /* Don't start up if the MC ucode is missing.
1755 * The default clocks and voltages before the MC ucode
 1756	 * is loaded are not sufficient for advanced operations.
Alex Deucherc420c742012-03-20 17:18:39 -04001757 *
1758 * We can skip this check for TN, because there is no MC
1759 * ucode.
Alex Deucher755d8192011-03-02 20:07:34 -05001760 */
Alex Deucherc420c742012-03-20 17:18:39 -04001761 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
Alex Deucher755d8192011-03-02 20:07:34 -05001762 DRM_ERROR("radeon: MC ucode required for NI+.\n");
1763 return -EINVAL;
1764 }
1765
1766 return 0;
1767}
1768
1769void cayman_fini(struct radeon_device *rdev)
1770{
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04001771 r600_blit_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001772 cayman_cp_fini(rdev);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001773 cayman_dma_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001774 r600_irq_fini(rdev);
Alex Deucherc420c742012-03-20 17:18:39 -04001775 if (rdev->flags & RADEON_IS_IGP)
1776 si_rlc_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001777 radeon_wb_fini(rdev);
Jerome Glisse721604a2012-01-05 22:11:05 -05001778 radeon_vm_manager_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001779 radeon_ib_pool_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001780 radeon_irq_kms_fini(rdev);
1781 cayman_pcie_gart_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04001782 r600_vram_scratch_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001783 radeon_gem_fini(rdev);
1784 radeon_fence_driver_fini(rdev);
1785 radeon_bo_fini(rdev);
1786 radeon_atombios_fini(rdev);
1787 kfree(rdev->bios);
1788 rdev->bios = NULL;
1789}
1790
Jerome Glisse721604a2012-01-05 22:11:05 -05001791/*
1792 * vm
1793 */
1794int cayman_vm_init(struct radeon_device *rdev)
1795{
1796 /* number of VMs */
1797 rdev->vm_manager.nvm = 8;
1798 /* base offset of vram pages */
Alex Deuchere71270f2012-03-20 17:18:38 -04001799 if (rdev->flags & RADEON_IS_IGP) {
1800 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
1801 tmp <<= 22;
1802 rdev->vm_manager.vram_base_offset = tmp;
1803 } else
1804 rdev->vm_manager.vram_base_offset = 0;
Jerome Glisse721604a2012-01-05 22:11:05 -05001805 return 0;
1806}
1807
1808void cayman_vm_fini(struct radeon_device *rdev)
1809{
1810}
1811
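/* R600-style page table entry flag bits used by the cayman VM */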
Christian Königdce34bf2012-09-17 19:36:18 +02001812#define R600_ENTRY_VALID (1 << 0)
Jerome Glisse721604a2012-01-05 22:11:05 -05001813#define R600_PTE_SYSTEM (1 << 1)
1814#define R600_PTE_SNOOPED (1 << 2)
1815#define R600_PTE_READABLE (1 << 5)
1816#define R600_PTE_WRITEABLE (1 << 6)
1817
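/**
 * cayman_vm_page_flags - convert generic VM page flags to hw PTE bits
 *
 * @rdev: radeon_device pointer
 * @flags: RADEON_VM_PAGE_* flags
 *
 * Translate the generic RADEON_VM_PAGE_VALID/SYSTEM/SNOOPED/READABLE/
 * WRITEABLE flags into the R600-style page table entry bits defined above.
 */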
Christian König089a7862012-08-11 11:54:05 +02001818uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
Jerome Glisse721604a2012-01-05 22:11:05 -05001819{
1820 uint32_t r600_flags = 0;
Christian Königdce34bf2012-09-17 19:36:18 +02001821 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
Jerome Glisse721604a2012-01-05 22:11:05 -05001822 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
1823 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
1824 if (flags & RADEON_VM_PAGE_SYSTEM) {
1825 r600_flags |= R600_PTE_SYSTEM;
1826 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
1827 }
1828 return r600_flags;
1829}
1830
Alex Deucher7a083292012-08-31 13:51:21 -04001831/**
1832 * cayman_vm_set_page - update the page tables using the CP
1833 *
1834 * @rdev: radeon_device pointer
Christian Königdce34bf2012-09-17 19:36:18 +02001835 * @pe: addr of the page entry
1836 * @addr: dst addr to write into pe
1837 * @count: number of page entries to update
1838 * @incr: increase next addr by incr bytes
1839 * @flags: access flags
Alex Deucher7a083292012-08-31 13:51:21 -04001840 *
 1841 * Update the page tables using the CP or the async DMA engine (cayman-si).
1842 */
Christian Königdce34bf2012-09-17 19:36:18 +02001843void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1844 uint64_t addr, unsigned count,
1845 uint32_t incr, uint32_t flags)
Jerome Glisse721604a2012-01-05 22:11:05 -05001846{
Christian König2a6f1ab2012-08-11 15:00:30 +02001847 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
Christian Königdce34bf2012-09-17 19:36:18 +02001848 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001849 uint64_t value;
1850 unsigned ndw;
Jerome Glisse721604a2012-01-05 22:11:05 -05001851
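	/* page table updates go through either the CP (ME_WRITE packets) or
	 * the async DMA engine, depending on which ring owns PT updates */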
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001852 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
1853 while (count) {
1854 ndw = 1 + count * 2;
1855 if (ndw > 0x3FFF)
1856 ndw = 0x3FFF;
Christian König089a7862012-08-11 11:54:05 +02001857
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001858 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
1859 radeon_ring_write(ring, pe);
1860 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1861 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1862 if (flags & RADEON_VM_PAGE_SYSTEM) {
1863 value = radeon_vm_map_gart(rdev, addr);
1864 value &= 0xFFFFFFFFFFFFF000ULL;
1865 } else if (flags & RADEON_VM_PAGE_VALID) {
1866 value = addr;
1867 } else {
1868 value = 0;
1869 }
Christian Königf9fdffa2012-10-22 17:42:36 +02001870 addr += incr;
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001871 value |= r600_flags;
1872 radeon_ring_write(ring, value);
1873 radeon_ring_write(ring, upper_32_bits(value));
Christian Königf9fdffa2012-10-22 17:42:36 +02001874 }
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001875 }
1876 } else {
1877 while (count) {
1878 ndw = count * 2;
1879 if (ndw > 0xFFFFE)
1880 ndw = 0xFFFFE;
Christian Königf9fdffa2012-10-22 17:42:36 +02001881
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001882 /* for non-physically contiguous pages (system) */
1883 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
1884 radeon_ring_write(ring, pe);
1885 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1886 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
1887 if (flags & RADEON_VM_PAGE_SYSTEM) {
1888 value = radeon_vm_map_gart(rdev, addr);
1889 value &= 0xFFFFFFFFFFFFF000ULL;
1890 } else if (flags & RADEON_VM_PAGE_VALID) {
1891 value = addr;
1892 } else {
1893 value = 0;
1894 }
1895 addr += incr;
1896 value |= r600_flags;
1897 radeon_ring_write(ring, value);
1898 radeon_ring_write(ring, upper_32_bits(value));
1899 }
Christian König2a6f1ab2012-08-11 15:00:30 +02001900 }
Christian König2a6f1ab2012-08-11 15:00:30 +02001901 }
Jerome Glisse721604a2012-01-05 22:11:05 -05001902}
Christian König9b40e5d2012-08-08 12:22:43 +02001903
Alex Deucher7a083292012-08-31 13:51:21 -04001904/**
1905 * cayman_vm_flush - vm flush using the CP
1906 *
1907 * @rdev: radeon_device pointer
1908 *
1909 * Update the page table base and flush the VM TLB
1910 * using the CP (cayman-si).
1911 */
Alex Deucher498522b2012-10-02 14:43:38 -04001912void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
Christian König9b40e5d2012-08-08 12:22:43 +02001913{
Alex Deucher498522b2012-10-02 14:43:38 -04001914 struct radeon_ring *ring = &rdev->ring[ridx];
Christian König9b40e5d2012-08-08 12:22:43 +02001915
Christian Königee60e292012-08-09 16:21:08 +02001916 if (vm == NULL)
Christian König9b40e5d2012-08-08 12:22:43 +02001917 return;
1918
Christian Königee60e292012-08-09 16:21:08 +02001919 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
Dmitry Cherkasovfa87e622012-09-17 19:36:19 +02001920 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
Christian Königee60e292012-08-09 16:21:08 +02001921
Christian König9b40e5d2012-08-08 12:22:43 +02001922 /* flush hdp cache */
1923 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1924 radeon_ring_write(ring, 0x1);
1925
1926 /* bits 0-7 are the VM contexts0-7 */
1927 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
Alex Deucher498522b2012-10-02 14:43:38 -04001928 radeon_ring_write(ring, 1 << vm->id);
Christian König58f8cf52012-10-22 17:42:35 +02001929
1930 /* sync PFP to ME, otherwise we might get invalid PFP reads */
1931 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
1932 radeon_ring_write(ring, 0x0);
Alex Deucher0af62b02011-01-06 21:19:31 -05001933}
Alex Deucherf60cbd12012-12-04 15:27:33 -05001934
1935void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1936{
1937 struct radeon_ring *ring = &rdev->ring[ridx];
1938
1939 if (vm == NULL)
1940 return;
1941
1942 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1943 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
1944 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1945
1946 /* flush hdp cache */
1947 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1948 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
1949 radeon_ring_write(ring, 1);
1950
1951 /* bits 0-7 are the VM contexts0-7 */
1952 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1953 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
1954 radeon_ring_write(ring, 1 << vm->id);
1955}
1956