Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c  1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c  1
-rw-r--r--  drivers/gpu/drm/drm_edid.c  3
-rw-r--r--  drivers/gpu/drm/drm_gem.c  18
-rw-r--r--  drivers/gpu/drm/drm_irq.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  18
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  57
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  32
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  47
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  47
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  21
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h  3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c  2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c  70
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h  7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c  14
-rw-r--r--  drivers/gpu/drm/radeon/atom.c  5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c  43
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c  7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c  19
-rw-r--r--  drivers/gpu/drm/radeon/ni.c  6
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  33
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c  29
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c  159
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c  9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c  7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c  126
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c  17
-rw-r--r--  drivers/gpu/drm/radeon/si.c  9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c  58
55 files changed, 622 insertions(+), 362 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 09da3393c527..d5902e21d4a3 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -348,6 +348,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
astbo->gem.driver_private = NULL;
astbo->bo.bdev = &ast->ttm.bdev;
+ astbo->bo.bdev->dev_mapping = dev->dev_mapping;
ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ed8cfc740c9..c18faff82651 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -353,6 +353,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
cirrusbo->gem.driver_private = NULL;
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+ cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9e62bbedb5ad..0cb9b5d8e30a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,6 +125,9 @@ static struct edid_quirk {
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
};
/*
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index cf919e36e8ae..239ef30f4a62 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -453,25 +453,21 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
spin_lock(&dev->object_name_lock);
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
- obj->name = ret;
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
- idr_preload_end();
-
if (ret < 0)
goto err;
- ret = 0;
+
+ obj->name = ret;
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
- } else {
- args->name = (uint64_t) obj->name;
- spin_unlock(&dev->object_name_lock);
- idr_preload_end();
- ret = 0;
}
+ args->name = (uint64_t) obj->name;
+ ret = 0;
+
err:
+ spin_unlock(&dev->object_name_lock);
+ idr_preload_end();
drm_gem_object_unreference_unlocked(obj);
return ret;
}
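
The flink rework above moves the unlock and idr_preload_end() onto the single exit path, so every branch, including the idr_alloc() failure, releases them exactly once. A minimal standalone sketch of that single-exit shape, using a hypothetical pthread lock in place of the DRM spinlock:

#include <pthread.h>

static pthread_mutex_t name_lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch only: one cleanup label serves both the error and success paths. */
static int publish_name(int *name_out, int new_name)
{
	int ret = 0;

	pthread_mutex_lock(&name_lock);
	if (new_name < 0) {		/* stand-in for idr_alloc() failing */
		ret = new_name;
		goto err;
	}
	*name_out = new_name;
err:
	pthread_mutex_unlock(&name_lock);	/* one unlock for every branch */
	return ret;
}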
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d36..f92da0a32f0d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- etime = ktime_sub_ns(etime, delta_ns);
+ if (delta_ns < 0)
+ etime = ktime_add_ns(etime, -delta_ns);
+ else
+ etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
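
The drm_irq.c hunk only changes how a negative delta_ns is applied: ktime_sub_ns() takes an unsigned nanosecond count, so a negative correction has to be folded into an addition instead. A minimal sketch of the same guard in plain signed 64-bit arithmetic (illustrative names, not from the patch):

#include <stdint.h>

/* Illustrative only: apply a possibly-negative correction without handing a
 * negative value to an unsigned subtraction helper. */
static int64_t apply_delta_ns(int64_t stamp_ns, int64_t delta_ns)
{
	if (delta_ns < 0)
		return stamp_ns + (-delta_ns);
	return stamp_ns - delta_ns;
}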
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 3b315ba85a3e..17d9b0b6afc5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1511,6 +1511,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->gt_lock);
+ mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+
i915_dump_device_info(dev_priv);
if (i915_get_bridge_dev(dev)) {
@@ -1601,6 +1609,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
intel_irq_init(dev);
+ intel_pm_init(dev);
+ intel_gt_sanitize(dev);
intel_gt_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1626,14 +1636,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
- mutex_init(&dev_priv->dpio_lock);
-
- mutex_init(&dev_priv->rps.hw_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
-
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a2e4953b8e8d..bc6cd3117ac3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -685,7 +685,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -711,7 +711,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1247,21 +1247,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ unsigned long irqflags; \
u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- unsigned long irqflags; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
@@ -1274,8 +1274,10 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ unsigned long irqflags; \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -1287,6 +1289,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9669a0b8b440..47d8b68c5004 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -491,6 +491,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1474,9 +1475,10 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9e35dafc5807..0a30088178b0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1160,7 +1160,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*/
- if (obj->last_write_seqno &&
+ if (ret == 0 &&
+ obj->last_write_seqno &&
i915_seqno_passed(seqno, obj->last_write_seqno)) {
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1880,6 +1881,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
+ if (obj->ring != ring && obj->last_write_seqno) {
+ /* Keep the seqno relative to the current ring */
+ obj->last_write_seqno = seqno;
+ }
obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
@@ -2133,7 +2138,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- i915_gem_write_fence(dev, i, reg->obj);
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
}
}
@@ -2533,7 +2548,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
drm_i915_private_t *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
- uint64_t val;
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2543,8 +2557,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
+ fence_reg += reg * 8;
+
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
if (obj) {
u32 size = obj->gtt_space->size;
+ uint64_t val;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
@@ -2553,12 +2582,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- } else
- val = 0;
- fence_reg += reg * 8;
- I915_WRITE64(fence_reg, val);
- POSTING_READ(fence_reg);
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2653,6 +2686,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
@@ -2712,6 +2749,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
+ obj->fence_dirty = false;
}
static int
@@ -2841,7 +2879,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
i915_gem_object_update_fence(obj, reg, enable);
- obj->fence_dirty = false;
return 0;
}
@@ -4456,7 +4493,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
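
The i965_write_fence_reg() change above replaces one 64-bit fence write with an ordered pair of 32-bit writes, so a partially written value can never be latched as a valid fence. A hedged standalone sketch of that pattern, with wr32()/rd32() standing in for I915_WRITE/POSTING_READ:

#include <stdint.h>

static inline void wr32(volatile uint32_t *reg, uint32_t v) { *reg = v; }
static inline uint32_t rd32(volatile uint32_t *reg) { return *reg; }

/* Sketch only: split a 64-bit fence update into ordered 32-bit writes. */
static void write_fence64(volatile uint32_t *reg, uint64_t val)
{
	wr32(reg, 0);			/* clear the low dword: fence off   */
	(void)rd32(reg);		/* posting read: step has landed    */
	wr32(reg + 1, val >> 32);	/* high dword, VALID still clear    */
	(void)rd32(reg + 1);
	wr32(reg, (uint32_t)val);	/* low dword carries the VALID bit  */
	(void)rd32(reg);
}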
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a1e8ecb6adf6..3bc8a58a8d5f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -113,7 +113,7 @@ static int get_context_size(struct drm_device *dev)
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
- ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+ ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0aa2ef0d2ae0..e5e328691e3a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -70,15 +70,6 @@ static const u32 hpd_status_gen4[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
-static const u32 hpd_status_i965[] = {
- [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
- [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
- [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
- [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
- [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
-};
-
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
@@ -2952,13 +2943,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
HOTPLUG_INT_STATUS_G4X :
- HOTPLUG_INT_STATUS_I965);
+ HOTPLUG_INT_STATUS_I915);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger,
- IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
+ IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d6b62e42daf..a365780aeb1e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -617,6 +617,8 @@
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -1535,14 +1537,13 @@
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
-#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
-#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
-#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
- HSW_CXT_RING_SIZE(ctx_reg) + \
- HSW_CXT_RENDER_SIZE(ctx_reg) + \
- GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-
+/* Haswell does have the CXT_SIZE register however it does not appear to be
+ * valid. Now, docs explain in dwords what is in the context object. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
/*
* Overlay regs
@@ -1691,6 +1692,12 @@
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
@@ -1702,13 +1709,6 @@
PORTC_HOTPLUG_INT_STATUS | \
PORTD_HOTPLUG_INT_STATUS)
-#define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_I965 | \
- SDVOC_HOTPLUG_INT_STATUS_I965 | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
SDVOB_HOTPLUG_INT_STATUS_I915 | \
SDVOC_HOTPLUG_INT_STATUS_I915 | \
@@ -4246,7 +4246,7 @@
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
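
A quick check of the arithmetic in the new HSW_CXT_TOTAL_SIZE comment: 66944 bytes does round up to 17 pages with the usual 4 KiB page size (hard-coded here only for the check):

#include <assert.h>

#define CHECK_PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	assert(DIV_ROUND_UP(66944u, CHECK_PAGE_SIZE) == 17);	/* 16.34 -> 17 */
	return 0;
}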
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fb961bb81903..16e674af4d57 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -684,7 +684,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->port_reversal |
+ intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
@@ -1324,7 +1324,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port),
- intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+ intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1543,8 +1544,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_dig_port->port = port;
- intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
- DDI_BUF_PORT_REVERSAL;
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL |
+ DDI_A_4_LANES);
if (hdmi_connector)
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56746dcac40f..eea5982657a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4333,7 +4333,8 @@ static void vlv_update_pll(struct intel_crtc *crtc)
static void i9xx_update_pll(struct intel_crtc *crtc,
intel_clock_t *reduced_clock,
- int num_connectors)
+ int num_connectors,
+ bool needs_tv_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4391,7 +4392,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
+ if (is_sdvo && needs_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
/* XXX: just matching BIOS for now */
@@ -4716,7 +4717,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
else
i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ num_connectors,
+ is_sdvo && is_tv);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -8146,15 +8148,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
- int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
{
int i;
- for (i = 0; i < num_connectors; i++)
- if (connectors[i].encoder &&
- connectors[i].encoder->crtc == crtc &&
- connectors[i].dpms != DRM_MODE_DPMS_ON)
+ if (set->num_connectors == 0)
+ return false;
+
+ if (WARN_ON(set->connectors == NULL))
+ return false;
+
+ for (i = 0; i < set->num_connectors; i++)
+ if (set->connectors[i]->encoder &&
+ set->connectors[i]->encoder->crtc == set->crtc &&
+ set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;
return false;
@@ -8167,10 +8174,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->connectors != NULL &&
- is_crtc_connector_off(set->crtc, *set->connectors,
- set->num_connectors)) {
- config->mode_changed = true;
+ if (is_crtc_connector_off(set)) {
+ config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
@@ -8914,6 +8919,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pch_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -8983,6 +8999,11 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable },
};
static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 624a9e6b8d71..7cd55843e73e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -426,7 +426,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
- u32 port_reversal;
+ u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index eb5e6e95f3c7..33cb87f7983e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -354,7 +354,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev) &&
+ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa01128ff192..94ad6bc08260 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4486,7 +4486,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-void intel_gt_reset(struct drm_device *dev)
+void intel_gt_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4497,26 +4497,61 @@ void intel_gt_reset(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ if (INTEL_INFO(dev)->gen >= 6)
+ intel_disable_gt_powersave(dev);
}
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->gt_lock);
-
- intel_gt_reset(dev);
-
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
} else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
}
+}
+
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d5d613eb6be..48fe23e8d180 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -490,9 +490,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
struct pipe_control *pc = ring->private;
struct drm_i915_gem_object *obj;
- if (!ring->private)
- return;
-
obj = pc->obj;
kunmap(sg_page(obj->pages->sgl));
@@ -500,7 +497,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
drm_gem_object_unreference(&obj->base);
kfree(pc);
- ring->private = NULL;
}
static int init_render_ring(struct intel_ring_buffer *ring)
@@ -571,7 +567,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
if (HAS_BROKEN_CS_TLB(dev))
drm_gem_object_unreference(to_gem_object(ring->private));
- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5)
+ cleanup_pipe_control(ring);
+
+ ring->private = NULL;
}
static void
@@ -908,6 +907,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
}
static int
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index bf29b2f4d68d..988911afcc8b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -198,7 +198,8 @@ struct mga_device {
struct ttm_bo_device bdev;
} ttm;
- u32 reg_1e24; /* SE model number */
+ /* SE model number stored in reg 0x1e24 */
+ u32 unique_rev_id;
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 99059237da38..dafe049fb1ae 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -176,7 +176,7 @@ static int mgag200_device_init(struct drm_device *dev,
/* stash G200 SE model number for later use */
if (IS_G200_SE(mdev))
- mdev->reg_1e24 = RREG32(0x1e24);
+ mdev->unique_rev_id = RREG32(0x1e24);
ret = mga_vram_init(mdev);
if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ee66badc8bb6..99e07b688ea8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1008,7 +1008,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (IS_G200_SE(mdev)) {
- if (mdev->reg_1e24 >= 0x02) {
+ if (mdev->unique_rev_id >= 0x02) {
u8 hi_pri_lvl;
u32 bpp;
u32 mb;
@@ -1038,7 +1038,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
} else {
WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
- if (mdev->reg_1e24 >= 0x01)
+ if (mdev->unique_rev_id >= 0x01)
WREG8(MGAREG_CRTCEXT_DATA, 0x03);
else
WREG8(MGAREG_CRTCEXT_DATA, 0x04);
@@ -1410,6 +1410,32 @@ static int mga_vga_get_modes(struct drm_connector *connector)
return ret;
}
+static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
+ int bits_per_pixel)
+{
+ uint32_t total_area, divisor;
+ int64_t active_area, pixels_per_second, bandwidth;
+ uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
+
+ divisor = 1024;
+
+ if (!mode->htotal || !mode->vtotal || !mode->clock)
+ return 0;
+
+ active_area = mode->hdisplay * mode->vdisplay;
+ total_area = mode->htotal * mode->vtotal;
+
+ pixels_per_second = active_area * mode->clock * 1000;
+ do_div(pixels_per_second, total_area);
+
+ bandwidth = pixels_per_second * bytes_per_pixel * 100;
+ do_div(bandwidth, divisor);
+
+ return (uint32_t)(bandwidth);
+}
+
+#define MODE_BANDWIDTH MODE_BAD
+
static int mga_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -1421,7 +1447,45 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
int bpp = 32;
int i = 0;
- /* FIXME: Add bandwidth and g200se limitations */
+ if (IS_G200_SE(mdev)) {
+ if (mdev->unique_rev_id == 0x01) {
+ if (mode->hdisplay > 1600)
+ return MODE_VIRTUAL_X;
+ if (mode->vdisplay > 1200)
+ return MODE_VIRTUAL_Y;
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (24400 * 1024))
+ return MODE_BANDWIDTH;
+ } else if (mdev->unique_rev_id >= 0x02) {
+ if (mode->hdisplay > 1920)
+ return MODE_VIRTUAL_X;
+ if (mode->vdisplay > 1200)
+ return MODE_VIRTUAL_Y;
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (30100 * 1024))
+ return MODE_BANDWIDTH;
+ }
+ } else if (mdev->type == G200_WB) {
+ if (mode->hdisplay > 1280)
+ return MODE_VIRTUAL_X;
+ if (mode->vdisplay > 1024)
+ return MODE_VIRTUAL_Y;
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (31877 * 1024))
+ return MODE_BANDWIDTH;
+ } else if (mdev->type == G200_EV &&
+ (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (32700 * 1024))) {
+ return MODE_BANDWIDTH;
+ } else if (mdev->type == G200_EH &&
+ (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (37500 * 1024))) {
+ return MODE_BANDWIDTH;
+ } else if (mdev->type == G200_ER &&
+ (mga_vga_calculate_mode_bandwidth(mode,
+ bpp) > (55000 * 1024))) {
+ return MODE_BANDWIDTH;
+ }
if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
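
The new bandwidth check multiplies the active pixel rate by bytes per pixel and by 100, then divides by 1024, so limits such as 30100 * 1024 are in that same scaled unit. A small userspace restatement for trying out a mode; the 1280x1024@60 timings below are only an example, not values from the patch:

#include <stdio.h>
#include <stdint.h>

static uint32_t mode_bandwidth(uint64_t hdisp, uint64_t vdisp,
			       uint64_t htotal, uint64_t vtotal,
			       uint64_t clock_khz, int bpp)
{
	uint64_t bytes_per_pixel = (bpp + 7) / 8;
	uint64_t pixels_per_second = hdisp * vdisp * clock_khz * 1000
				     / (htotal * vtotal);

	return (uint32_t)(pixels_per_second * bytes_per_pixel * 100 / 1024);
}

int main(void)
{
	/* Sample mode: 1280x1024@60 (108 MHz dot clock), 32 bpp, compared
	 * against the G200 SE rev >= 0x02 limit from mga_vga_mode_valid(). */
	uint32_t bw = mode_bandwidth(1280, 1024, 1688, 1066, 108000, 32);

	printf("bandwidth %u (limit %u)\n", bw, 30100u * 1024u);
	return 0;
}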
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 401c9891d3a8..d2cb32f3c05b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -347,6 +347,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
mgabo->gem.driver_private = NULL;
mgabo->bo.bdev = &mdev->ttm.bdev;
+ mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
index f065fc248adf..db8c6fd46278 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -55,6 +55,10 @@ nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_wr32(priv, 0x61c510 + soff, 0x00000000);
nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
/* ??? */
nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 6a38402fa56c..5680d3eb11ca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1107,6 +1107,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
u32 hval, hreg = 0x614200 + (head * 0x800);
u32 oval, oreg;
+ u32 mask;
u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
if (conf != ~0) {
if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
@@ -1133,6 +1134,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
oval = 0x00000000;
hval = 0x00000000;
+ mask = 0xffffffff;
} else
if (!outp.location) {
if (outp.type == DCB_OUTPUT_DP)
@@ -1140,14 +1142,16 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
hval = 0x00000000;
+ mask = 0x00000707;
} else {
oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
oval = 0x00000001;
hval = 0x00000001;
+ mask = 0x00000707;
}
nv_mask(priv, hreg, 0x0000000f, hval);
- nv_mask(priv, oreg, 0x00000707, oval);
+ nv_mask(priv, oreg, mask, oval);
}
}
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30f..9d2cd2006250 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
}
-#define nouveau_mc_create(p,e,o,d) \
- nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+ nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
#define nouveau_mc_destroy(p) ({ \
struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
})
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
})
int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
+ struct nouveau_oclass *, const struct nouveau_mc_intr *,
+ int, void **);
void _nouveau_mc_dtor(struct nouveau_object *);
int _nouveau_mc_init(struct nouveau_object *);
int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a4..ec9cd6f10f91 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
int
nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+ struct nouveau_oclass *oclass,
+ const struct nouveau_mc_intr *intr_map,
+ int length, void **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ pmc->intr_map = intr_map;
+
ret = request_irq(device->pdev->irq, nouveau_mc_intr,
IRQF_SHARED, "nouveau", pmc);
if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227b..64aa4edb0d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810f..d9891782bf28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv44_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index d796924f9930..732d8100344b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -52,12 +52,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv50_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b5041..0d57b4d3e001 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv98_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 737bd4b682e1..4c97cd2e7b56 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -56,12 +56,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nvc0_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 77c67fc970e6..e66fb77131bc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -362,7 +362,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
vm->fpde = offset >> (vmm->pgt_bits + 12);
vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
- vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+ vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
if (!vm->pgt) {
kfree(vm);
return -ENOMEM;
@@ -371,7 +371,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
block >> 12);
if (ret) {
- kfree(vm->pgt);
+ vfree(vm->pgt);
kfree(vm);
return ret;
}
@@ -446,7 +446,7 @@ nouveau_vm_del(struct nouveau_vm *vm)
}
nouveau_mm_fini(&vm->mm);
- kfree(vm->pgt);
+ vfree(vm->pgt);
kfree(vm);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9bae8c3..22aa9963ea6f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
+ u32 limit = start + mem->size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e567db8..0ee363840035 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = start + mem->size - 1;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
+ NvSema, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = mem->start * PAGE_SIZE,
- .limit = mem->size - 1,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ u32 start = bo->bo.mem.start * PAGE_SIZE;
+ u32 limit = start + bo->bo.mem.size - 1;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvEvoSema0 + i, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a790f3d..15da7ef344a4 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
int r;
mutex_lock(&ctx->mutex);
+ /* reset data block */
+ ctx->data_block = 0;
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
+ /* reset divmul */
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023bed480..32501f6ec991 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
};
/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
+ */
+static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+ u32 *dst32, *src32;
+ int i;
+
+ memcpy(src_tmp, src, num_bytes);
+ src32 = (u32 *)src_tmp;
+ dst32 = (u32 *)dst_tmp;
+ if (to_le) {
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = cpu_to_le32(src32[i]);
+ memcpy(dst, dst_tmp, num_bytes);
+ } else {
+ u8 dws = num_bytes & ~3;
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = le32_to_cpu(src32[i]);
+ memcpy(dst, dst_tmp, dws);
+ if (num_bytes % 4) {
+ for (i = 0; i < (num_bytes % 4); i++)
+ dst[dws+i] = dst_tmp[dws+i];
+ }
+ }
+#else
+ memcpy(dst, src, num_bytes);
+#endif
+}
+
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
- memcpy(base, send, send_bytes);
+ radeon_copy_swap(base, send, send_bytes, true);
- args.v1.lpAuxRequest = 0 + 4;
- args.v1.lpDataOut = 16 + 4;
+ args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+ args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
if (recv && recv_size)
- memcpy(recv, base + 16, recv_bytes);
+ radeon_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
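
On a big-endian host the new radeon_copy_swap() helper byte-reverses each 32-bit dword via cpu_to_le32() (on little-endian it reduces to memcpy()). An endian-independent illustration of that per-dword layout, with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the byte order a big-endian host ends up with after
 * cpu_to_le32() on each dword, written out explicitly. */
static void swap_dwords(uint8_t *dst, const uint8_t *src, int num_bytes)
{
	int i;

	for (i = 0; i + 3 < num_bytes; i += 4) {
		dst[i + 0] = src[i + 3];
		dst[i + 1] = src[i + 2];
		dst[i + 2] = src[i + 1];
		dst[i + 3] = src[i + 0];
	}
}

int main(void)
{
	uint8_t in[4] = { 0x11, 0x22, 0x33, 0x44 }, out[4];

	swap_dwords(out, in, sizeof(in));
	printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
	/* prints: 44 33 22 11 */
	return 0;
}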
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 8406c8251fbf..4120d355cadd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -186,6 +186,13 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
u8 backlight_level;
char bl_name[16];
+ /* Mac laptops with multiple GPUs use the gmux driver for backlight
+ * so don't register a backlight device
+ */
+ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+ (rdev->pdev->device == 0x6741))
+ return;
+
if (!radeon_encoder->enc_priv)
return;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0f89ce3d02b9..687b421f75c3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4681,6 +4681,8 @@ static int evergreen_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
+ evergreen_mc_program(rdev);
+
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
@@ -4708,7 +4710,6 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
@@ -4854,10 +4855,10 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -4988,6 +4989,7 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index ed7c8a768092..bb9ea3641312 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -128,14 +128,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
-
- /* Our header values (type, version, length) should be alright, Intel
- * is using the same. Checksum function also seems to be OK, it works
- * fine for audio infoframe. However calculated value is always lower
- * by 2 in comparison to fglrx. It breaks displaying anything in case
- * of TVs that strictly check the checksum. Hack it manually here to
- * workaround this issue. */
- frame[0x0] += 2;
+ uint8_t *header = buffer;
WREG32(AFMT_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -144,7 +137,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
WREG32(AFMT_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(AFMT_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8));
+ frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}
static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
@@ -164,9 +157,9 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
+ WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
}
@@ -184,6 +177,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
uint32_t offset;
ssize_t err;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -287,6 +283,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 84583302b081..3bf43a16adcc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1929,6 +1929,8 @@ static int cayman_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
+ evergreen_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
@@ -1957,7 +1959,6 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
@@ -2133,7 +2134,7 @@ int cayman_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -2265,6 +2266,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6948eb88c2b7..f19620b472f5 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2675,12 +2675,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
return 0;
}
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
+void r600_uvd_stop(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
/* force RBC into idle state */
WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
ring->ready = false;
}
@@ -2700,6 +2717,11 @@ int r600_uvd_init(struct radeon_device *rdev)
/* disable interupt */
WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
/* put LMI, VCPU, RBC etc... into reset */
WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
@@ -2729,10 +2751,6 @@ int r600_uvd_init(struct radeon_device *rdev)
WREG32(UVD_MPC_SET_ALU, 0);
WREG32(UVD_MPC_SET_MUX, 0x88);
- /* Stall UMC */
- WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
/* take all subblocks out of reset, except VCPU */
WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
mdelay(5);
@@ -2986,7 +3004,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
- uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, fence->seq);
@@ -3206,6 +3224,8 @@ static int r600_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
+ r600_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -3218,7 +3238,6 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 456750a0daa5..b9b1139da356 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -133,14 +133,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
-
- /* Our header values (type, version, length) should be alright, Intel
- * is using the same. Checksum function also seems to be OK, it works
- * fine for audio infoframe. However calculated value is always lower
- * by 2 in comparison to fglrx. It breaks displaying anything in case
- * of TVs that strictly check the checksum. Hack it manually here to
- * workaround this issue. */
- frame[0x0] += 2;
+ uint8_t *header = buffer;
WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -149,7 +142,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(HDMI0_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8));
+ frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}
/*
@@ -249,9 +242,15 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
/* according to the reg specs, this should DCE3.2 only, but in
* practice it seems to cover DCE3.0 as well.
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ if (dig->dig_encoder == 0) {
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
} else {
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
@@ -273,6 +272,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
uint32_t offset;
ssize_t err;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -455,6 +457,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 hdmi = HDMI0_ERROR_ACK;
+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
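The infoframe hunks above drop the manual "+2" checksum hack and instead program the header's version byte into the top byte of HDMI0_AVI_INFO3, so the packet the hardware sees already accounts for that byte. A rough userspace sketch of the little-endian packing those register writes perform (buffer layout assumed: 3-byte header, then checksum plus 13 payload bytes):

#include <stdint.h>
#include <stdio.h>

/* Pack a 17-byte AVI infoframe into four 32-bit words the way the diff lays
 * them out: words 0..2 hold frame[0x0..0xB], word 3 holds frame[0xC..0xD]
 * plus the header version byte in bits 31:24. */
static void pack_avi(const uint8_t *buffer, uint32_t out[4])
{
	const uint8_t *frame  = buffer + 3;  /* skip type/version/length header */
	const uint8_t *header = buffer;

	out[0] = frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | ((uint32_t)frame[0x3] << 24);
	out[1] = frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | ((uint32_t)frame[0x7] << 24);
	out[2] = frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | ((uint32_t)frame[0xB] << 24);
	out[3] = frame[0xC] | (frame[0xD] << 8) | ((uint32_t)header[1] << 24);
}

int main(void)
{
	uint8_t buf[17] = { 0x82, 0x02, 0x0D };  /* type, version, length */
	uint32_t regs[4];

	pack_avi(buf, regs);
	for (int i = 0; i < 4; i++)
		printf("AVI_INFO%d = 0x%08x\n", i, regs[i]);
	return 0;
}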
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 142ce6cc69f5..d4ff48ce1d8b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -408,6 +408,7 @@ struct radeon_sa_manager {
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
+ uint32_t align;
};
struct radeon_sa_bo;
@@ -1144,6 +1145,7 @@ struct radeon_uvd {
struct radeon_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -1762,7 +1764,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
WREG32(reg, tmp_); \
} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
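The extra parentheses added to WREG32_OR matter whenever the "or" argument is itself an expression: ~ binds tighter than |, so ~a | b is not ~(a | b). A small stand-alone sketch of the difference, with illustrative bit values:

#include <stdint.h>
#include <stdio.h>

#define MASK_WRONG(or)  (~or)     /* old expansion: becomes ~a | b when or == a | b */
#define MASK_RIGHT(or)  (~(or))   /* fixed expansion */

int main(void)
{
	uint32_t a = 0x1, b = 0x2;

	printf("wrong: 0x%08x\n", (uint32_t)MASK_WRONG(a | b)); /* ~a | b  = 0xfffffffe */
	printf("right: 0x%08x\n", (uint32_t)MASK_RIGHT(a | b)); /* ~(a|b)  = 0xfffffffc */
	return 0;
}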
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2802b47ee95..de36c4722423 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -986,8 +986,8 @@ static struct radeon_asic r600_asic = {
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_dma,
- .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1074,8 +1074,8 @@ static struct radeon_asic rs780_asic = {
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_dma,
- .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a72759ede753..34223fc3d828 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -399,7 +399,7 @@ uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
/* uvd */
int r600_uvd_init(struct radeon_device *rdev);
int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc9e86b..68ce36056019 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
enum radeon_combios_table_offset table)
{
struct radeon_device *rdev = dev->dev_private;
- int rev;
+ int rev, size;
uint16_t offset = 0, check_offset;
if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0xc;
break;
case COMBIOS_BIOS_SUPPORT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x14;
break;
case COMBIOS_DAC_PROGRAMMING_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2a;
break;
case COMBIOS_MAX_COLOR_DEPTH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2c;
break;
case COMBIOS_CRTC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2e;
break;
case COMBIOS_PLL_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x30;
break;
case COMBIOS_TV_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x32;
break;
case COMBIOS_DFP_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x34;
break;
case COMBIOS_HW_CONFIG_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x36;
break;
case COMBIOS_MULTIMEDIA_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x38;
break;
case COMBIOS_TV_STD_PATCH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x3e;
break;
case COMBIOS_LCD_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x40;
break;
case COMBIOS_MOBILE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x42;
break;
case COMBIOS_PLL_INIT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x46;
break;
case COMBIOS_MEM_CONFIG_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x48;
break;
case COMBIOS_SAVE_MASK_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4a;
break;
case COMBIOS_HARDCODED_EDID_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4c;
break;
case COMBIOS_ASIC_INIT_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4e;
break;
case COMBIOS_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x50;
break;
case COMBIOS_DYN_CLK_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x52;
break;
case COMBIOS_RESERVED_MEM_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x54;
break;
case COMBIOS_EXT_TMDS_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x58;
break;
case COMBIOS_MEM_CLK_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5a;
break;
case COMBIOS_EXT_DAC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5c;
break;
case COMBIOS_MISC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5e;
break;
case COMBIOS_CRT_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x60;
break;
case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x62;
break;
case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x64;
break;
case COMBIOS_FAN_SPEED_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x66;
break;
case COMBIOS_OVERDRIVE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x68;
break;
case COMBIOS_OEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6a;
break;
case COMBIOS_DYN_CLK_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6c;
break;
case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6e;
break;
case COMBIOS_I2C_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x70;
break;
/* relative offset tables */
case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
break;
default:
+ check_offset = 0;
break;
}
- return offset;
+ size = RBIOS8(rdev->bios_header_start + 0x6);
+ /* check absolute offset tables */
+ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+ offset = RBIOS16(rdev->bios_header_start + check_offset);
+ return offset;
}
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
- /* if the values are all zeros, use the table */
- if (p_dac->ps2_pdac_adj)
+ /* if the values are zeros, use the table */
+ if ((dac == 0) || (bg == 0))
+ found = 0;
+ else
found = 1;
}
/* quirks */
+ /* Radeon 7000 (RV100) */
+ if (((dev->pdev->device == 0x5159) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- if ((dev->pdev->device == 0x514D) &&
+ ((dev->pdev->device == 0x514D) &&
(dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149)) {
+ (dev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
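The combios refactor above converges all the absolute-offset cases on one pattern: each case only records its header-relative slot, and a single shared check validates the slot against the header size byte at +0x6 before reading the 16-bit offset. A condensed sketch of that pattern over a fake BIOS image (table names and values hypothetical except the slots visible in the diff):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Read helpers over an in-memory BIOS image; the real driver reads the mapped ROM. */
static uint8_t  rbios8(const uint8_t *bios, size_t off)  { return bios[off]; }
static uint16_t rbios16(const uint8_t *bios, size_t off) { return bios[off] | (bios[off + 1] << 8); }

/* Header-relative slots for a few absolute-offset tables (values from the diff). */
static const uint16_t table_slot[] = {
	[0] = 0x0c,   /* ASIC_INIT_1 */
	[1] = 0x30,   /* PLL_INFO    */
	[2] = 0x40,   /* LCD_INFO    */
};

static uint16_t get_table_offset(const uint8_t *bios, size_t header, unsigned table)
{
	uint16_t slot = table_slot[table];
	uint8_t size = rbios8(bios, header + 0x6);   /* header length in bytes */

	/* Only trust slots that actually fall inside the header. */
	if (slot && slot < size)
		return rbios16(bios, header + slot);
	return 0;
}

int main(void)
{
	uint8_t bios[256] = {0};
	size_t header = 0x48;

	bios[header + 0x6] = 0x50;                                /* header claims 0x50 bytes */
	bios[header + 0x30] = 0x34; bios[header + 0x31] = 0x12;   /* PLL table at 0x1234 */

	printf("PLL info table offset: 0x%04x\n", (unsigned)get_table_offset(bios, header, 1));
	return 0;
}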
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 43ec4a401f07..5ce190b8bd1f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -467,6 +467,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
RADEON_GPU_PAGE_ALIGN(size),
+ RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5a99d433fc35..1fe12ab5c5ea 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -241,9 +241,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
- INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
-
spin_lock_init(&rdev->irq.lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
@@ -265,6 +262,10 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
rdev->irq.installed = false;
return r;
}
+
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
@@ -284,8 +285,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
+ flush_work(&rdev->hotplug_work);
}
- flush_work(&rdev->hotplug_work);
}
/**
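The radeon_irq_kms hunks delay the INIT_WORK calls until vblank and IRQ setup have actually succeeded, and only flush the hotplug work on teardown when an IRQ had been installed, presumably so early-failure paths never touch work items that were never set up. A tiny lifecycle sketch of that ordering (plain C stand-ins, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct dev_like {
	bool irq_installed;
	bool work_initialized;
};

static int irq_init(struct dev_like *d, bool setup_ok)
{
	if (!setup_ok)
		return -1;                 /* bail out before any work item exists */

	d->irq_installed = true;
	d->work_initialized = true;    /* set up work only after setup succeeded */
	return 0;
}

static void irq_fini(struct dev_like *d)
{
	if (d->irq_installed) {
		d->irq_installed = false;
		if (d->work_initialized)
			printf("flush pending hotplug work\n");  /* safe: it was initialized */
	}
}

int main(void)
{
	struct dev_like d = {0};

	if (irq_init(&d, true) == 0)
		irq_fini(&d);
	return 0;
}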
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e2cb80a96b51..294382394608 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -158,7 +158,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain);
+ unsigned size, u32 align, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 82434018cbe8..83f6295a0e5e 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
}
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0abe5a9431bb..f0bac68254b7 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain)
+ unsigned size, u32 align, u32 domain)
{
int i, r;
@@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
+ sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
- r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, size, align, true,
domain, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
@@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
unsigned tries[RADEON_NUM_RINGS];
int i, r;
- BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+ BUG_ON(align > sa_manager->align);
BUG_ON(size > sa_manager->size);
*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
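With the new align argument, callers such as the VM manager and IB pool above can create suballocator managers whose backing BO and per-allocation checks honour a specific alignment, instead of a hard-coded GPU page size. A minimal sketch of the round-up arithmetic such a suballocator relies on (hypothetical helper, not the driver's code):

#include <stdint.h>
#include <stdio.h>

/* Round 'offset' up to the next multiple of 'align' (align must be a power of two). */
static uint64_t align_up(uint64_t offset, uint64_t align)
{
	return (offset + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t hole  = 0x1234;   /* first free byte in the managed BO */
	uint64_t align = 4096;     /* manager-level alignment, e.g. one GPU page */

	printf("next aligned offset: 0x%llx\n",
	       (unsigned long long)align_up(hole, align));   /* 0x2000 */
	return 0;
}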
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index cad735dd02c6..f3ccf6d4addb 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -123,16 +123,29 @@ int radeon_uvd_init(struct radeon_device *rdev)
return r;
}
- r = radeon_uvd_resume(rdev);
- if (r)
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (r) {
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
return r;
+ }
- memset(rdev->uvd.cpu_addr, 0, bo_size);
- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->uvd.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+ return r;
+ }
- r = radeon_uvd_suspend(rdev);
- if (r)
+ r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) UVD map failed\n", r);
return r;
+ }
+
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
@@ -144,70 +157,73 @@ int radeon_uvd_init(struct radeon_device *rdev)
void radeon_uvd_fini(struct radeon_device *rdev)
{
- radeon_uvd_suspend(rdev);
- radeon_bo_unref(&rdev->uvd.vcpu_bo);
-}
-
-int radeon_uvd_suspend(struct radeon_device *rdev)
-{
int r;
if (rdev->uvd.vcpu_bo == NULL)
- return 0;
+ return;
r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
if (!r) {
radeon_bo_kunmap(rdev->uvd.vcpu_bo);
radeon_bo_unpin(rdev->uvd.vcpu_bo);
- rdev->uvd.cpu_addr = NULL;
- if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
- radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
- }
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
-
- if (rdev->uvd.cpu_addr) {
- radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
- } else {
- rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
- }
}
- return r;
+
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+ release_firmware(rdev->uvd_fw);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+ unsigned size;
+ void *ptr;
+ int i;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return 0;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&rdev->uvd.handles[i]))
+ break;
+
+ if (i == RADEON_MAX_UVD_HANDLES)
+ return 0;
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
+ rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+ memcpy(rdev->uvd.saved_bo, ptr, size);
+
+ return 0;
}
int radeon_uvd_resume(struct radeon_device *rdev)
{
- int r;
+ unsigned size;
+ void *ptr;
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
- r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
- if (r) {
- radeon_bo_unref(&rdev->uvd.vcpu_bo);
- dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
- return r;
- }
-
- /* Have been pin in cpu unmap unpin */
- radeon_bo_kunmap(rdev->uvd.vcpu_bo);
- radeon_bo_unpin(rdev->uvd.vcpu_bo);
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
- r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
- &rdev->uvd.gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->uvd.vcpu_bo);
- radeon_bo_unref(&rdev->uvd.vcpu_bo);
- dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
- return r;
- }
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
- r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
- if (r) {
- dev_err(rdev->dev, "(%d) UVD map failed\n", r);
- return r;
- }
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
- radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ if (rdev->uvd.saved_bo != NULL) {
+ memcpy(ptr, rdev->uvd.saved_bo, size);
+ kfree(rdev->uvd.saved_bo);
+ rdev->uvd.saved_bo = NULL;
+ } else
+ memset(ptr, 0, size);
return 0;
}
@@ -222,8 +238,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
int i, r;
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (rdev->uvd.filp[i] == filp) {
- uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
r = radeon_uvd_get_destroy_msg(rdev,
@@ -343,6 +359,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
+ if (bo->tbo.sync_obj) {
+ r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ if (r) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r;
+ }
+ }
+
r = radeon_bo_kmap(bo, &ptr);
if (r)
return r;
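The radeon_uvd.c rework moves the pin/map of the VCPU BO into init, and turns suspend/resume into a snapshot of everything behind the firmware image: suspend copies the session area into saved_bo (only if any handle is live), resume rewrites the firmware from uvd_fw and then restores the snapshot or zeroes the area. A self-contained sketch of that save/restore split, with an invented buffer layout for illustration:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Illustrative layout: the buffer starts with the firmware image, followed by
 * session state that must survive suspend. */
struct uvd_like {
	uint8_t *cpu_addr;   /* mapping of the whole buffer */
	size_t   bo_size;    /* total buffer size */
	size_t   fw_size;    /* firmware portion */
	void    *saved;      /* snapshot taken at suspend */
};

static int uvd_suspend(struct uvd_like *u)
{
	size_t size = u->bo_size - u->fw_size;

	u->saved = malloc(size);
	if (!u->saved)
		return -1;
	memcpy(u->saved, u->cpu_addr + u->fw_size, size);   /* keep session state */
	return 0;
}

static void uvd_resume(struct uvd_like *u, const uint8_t *fw)
{
	size_t size = u->bo_size - u->fw_size;

	memcpy(u->cpu_addr, fw, u->fw_size);                /* firmware is reloaded */
	if (u->saved) {
		memcpy(u->cpu_addr + u->fw_size, u->saved, size);
		free(u->saved);
		u->saved = NULL;
	} else {
		memset(u->cpu_addr + u->fw_size, 0, size);       /* cold start */
	}
}

int main(void)
{
	uint8_t fw[16] = "FWIMG";
	uint8_t buf[64] = {0};
	struct uvd_like u = { .cpu_addr = buf, .bo_size = sizeof(buf), .fw_size = sizeof(fw) };

	memcpy(buf, fw, sizeof(fw));
	buf[32] = 0xAA;                       /* pretend session state */
	if (uvd_suspend(&u) == 0) {
		memset(buf, 0, sizeof(buf));      /* simulate power loss */
		uvd_resume(&u, fw);
	}
	printf("restored byte: 0x%02x\n", buf[32]);   /* 0xaa */
	return 0;
}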
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4a62ad2e5399..f5e92cfcc140 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
break;
default:
break;
@@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
+ rv770_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
@@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
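The rv730/rv710/rv740 hunks are pure copy-paste fixes: the register sequences were right, but their lengths were taken from the rv770 arrays. One way to make that class of bug impossible is a wrapper macro that names the array only once; this is a hypothetical helper, not something the driver defines:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for radeon_program_register_sequence(). */
static void program_sequence(const unsigned *regs, unsigned count)
{
	printf("programming %u register triplets\n", count / 3);
	(void)regs;
}

/* The array is mentioned once, so its length can never come from a different table. */
#define PROGRAM_SEQ(arr) program_sequence((arr), (unsigned)ARRAY_SIZE(arr))

static const unsigned rv730_golden[] = { 0x3f90, 0xffff, 0x0000 };  /* reg, mask, value */

int main(void)
{
	PROGRAM_SEQ(rv730_golden);
	return 0;
}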
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a1b0da6b5808..1a96a16b9996 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5270,6 +5270,8 @@ static int si_startup(struct radeon_device *rdev)
struct radeon_ring *ring;
int r;
+ si_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
@@ -5289,7 +5291,6 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
- si_mc_program(rdev);
r = si_pcie_gart_enable(rdev);
if (r)
return r;
@@ -5473,7 +5474,7 @@ int si_suspend(struct radeon_device *rdev)
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
- r600_uvd_rbc_stop(rdev);
+ r600_uvd_stop(rdev);
radeon_uvd_suspend(rdev);
}
si_irq_suspend(rdev);
@@ -5613,8 +5614,10 @@ void si_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- if (rdev->has_uvd)
+ if (rdev->has_uvd) {
+ r600_uvd_stop(rdev);
radeon_uvd_fini(rdev);
+ }
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 3751730764a5..1a0bf07fe54b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
{
SVGAFifoCmdDefineGMR2 define_cmd;
SVGAFifoCmdRemapGMR2 remap_cmd;
- uint32_t define_size = sizeof(define_cmd) + 4;
- uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
uint32_t *cmd;
uint32_t *cmd_orig;
+ uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+ uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+ uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+ uint32_t remap_pos = 0;
+ uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
define_cmd.gmrId = gmr_id;
define_cmd.numPages = num_pages;
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+ cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+ /*
+ * Need to split the command if there are too many
+ * pages that goes into the gmr.
+ */
+
remap_cmd.gmrId = gmr_id;
remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
- remap_cmd.offsetPages = 0;
- remap_cmd.numPages = num_pages;
- *cmd++ = SVGA_CMD_DEFINE_GMR2;
- memcpy(cmd, &define_cmd, sizeof(define_cmd));
- cmd += sizeof(define_cmd) / sizeof(uint32);
+ while (num_pages > 0) {
+ unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+ remap_cmd.offsetPages = remap_pos;
+ remap_cmd.numPages = nr;
- *cmd++ = SVGA_CMD_REMAP_GMR2;
- memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
- cmd += sizeof(remap_cmd) / sizeof(uint32);
+ *cmd++ = SVGA_CMD_REMAP_GMR2;
+ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+ cmd += sizeof(remap_cmd) / sizeof(*cmd);
- for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE <= 4)
- *cmd = page_to_pfn(*pages++);
- else
- *((uint64_t *)cmd) = page_to_pfn(*pages++);
+ for (i = 0; i < nr; ++i) {
+ if (VMW_PPN_SIZE <= 4)
+ *cmd = page_to_pfn(*pages++);
+ else
+ *((uint64_t *)cmd) = page_to_pfn(*pages++);
- cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ }
+
+ num_pages -= nr;
+ remap_pos += nr;
}
- vmw_fifo_commit(dev_priv, define_size + remap_size);
+ BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+ vmw_fifo_commit(dev_priv, cmd_size);
return 0;
}
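The vmwgfx change caps each REMAP_GMR2 command at VMW_PPN_PER_REMAP page numbers and loops, so a single bind no longer builds one oversized FIFO command for very large GMRs. The core of it is the ceiling division for the command count plus the chunking loop; a compact userspace sketch of just that arithmetic (page count illustrative):

#include <stdio.h>

#define PPN_SIZE        sizeof(unsigned long)
#define PPN_PER_REMAP   ((31 * 1024) / PPN_SIZE)   /* same cap as the diff */

int main(void)
{
	unsigned long num_pages = 10000;   /* pages to bind into the GMR */

	/* ceil(num_pages / PPN_PER_REMAP): one remap command per chunk */
	unsigned long remap_num = num_pages / PPN_PER_REMAP +
	                          ((num_pages % PPN_PER_REMAP) > 0);

	unsigned long pos = 0, left = num_pages;
	while (left > 0) {
		unsigned long nr = left < PPN_PER_REMAP ? left : PPN_PER_REMAP;

		printf("REMAP_GMR2 offsetPages=%lu numPages=%lu\n", pos, nr);
		pos  += nr;
		left -= nr;
	}
	printf("emitted %lu remap commands\n", remap_num);
	return 0;
}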