Diffstat (limited to 'drivers/gpu/drm/radeon/cik.c')
-rw-r--r--  drivers/gpu/drm/radeon/cik.c  77
1 file changed, 54 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 89c01fa6dd8e..87073e69fc0d 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3880,7 +3880,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- /* EVENT_WRITE_EOP - flush caches, send int */
+ /* Workaround for cache flush problems: first send a dummy EOP
+ * event down the pipe with a sequence number one below the real one.
+ */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(1) | INT_SEL(0));
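+ /* DATA_SEL(1) selects a 32-bit data write and INT_SEL(0) suppresses
+ * the interrupt, so the dummy event with seq - 1 never signals the
+ * real fence below.
+ */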
+ radeon_ring_write(ring, fence->seq - 1);
+ radeon_ring_write(ring, 0);
+
+ /* Then send the real EOP event down the pipe. */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
@@ -4509,6 +4523,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
WDOORBELL32(ring->doorbell_index, ring->wptr);
}
+static void cik_compute_stop(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 j, tmp;
+
+ cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+ /* Disable wptr polling. */
+ tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+ tmp &= ~WPTR_POLL_EN;
+ WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+ /* Disable HQD. */
+ if (RREG32(CP_HQD_ACTIVE) & 1) {
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
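+ /* Wait up to usec_timeout microseconds for the HQD to drain. */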
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (!(RREG32(CP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
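+ /* Zero the queue pointers so a stale wptr cannot restart the queue. */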
+ WREG32(CP_HQD_PQ_RPTR, 0);
+ WREG32(CP_HQD_PQ_WPTR, 0);
+ }
+ cik_srbm_select(rdev, 0, 0, 0, 0);
+}
+
/**
* cik_cp_compute_enable - enable/disable the compute CP MEs
*
@@ -4522,6 +4561,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_MEC_CNTL, 0);
else {
+ /*
+ * To make hibernation reliable we need to clear the compute ring
+ * configuration before halting the compute rings.
+ */
+ mutex_lock(&rdev->srbm_mutex);
+ cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+ cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+ mutex_unlock(&rdev->srbm_mutex);
+
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
@@ -5750,7 +5798,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* restore context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
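+ /* The END_ADDR bound is inclusive, so program the last valid page
+ * frame number rather than max_pfn.
+ */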
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
@@ -6314,6 +6362,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
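+ /* Keep override bit 0 set while clearing bit 1 below. */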
+ data |= 0x00000001;
data &= 0xfffffffd;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
@@ -6345,7 +6394,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
}
} else {
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
- data |= 0x00000002;
+ data |= 0x00000003;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
@@ -7294,7 +7343,6 @@ int cik_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1;
- u32 thermal_int;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -7331,13 +7379,6 @@ int cik_irq_set(struct radeon_device *rdev)
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
- if (rdev->flags & RADEON_IS_IGP)
- thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
- ~(THERM_INTH_MASK | THERM_INTL_MASK);
- else
- thermal_int = RREG32_SMC(CG_THERMAL_INT) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
-
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -7495,14 +7536,6 @@ int cik_irq_set(struct radeon_device *rdev)
hpd6 |= DC_HPDx_INT_EN;
}
- if (rdev->irq.dpm_thermal) {
- DRM_DEBUG("dpm thermal\n");
- if (rdev->flags & RADEON_IS_IGP)
- thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
- else
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- }
-
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -7556,10 +7589,8 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
- if (rdev->flags & RADEON_IS_IGP)
- WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
- else
- WREG32_SMC(CG_THERMAL_INT, thermal_int);
+ /* Posting read to flush the register writes above to the hardware. */
+ RREG32(SRBM_STATUS);
return 0;
}