Author:    Mark Brown <broonie@kernel.org>  2014-10-09 18:02:47 +0100
Committer: Mark Brown <broonie@kernel.org>  2014-10-09 18:02:47 +0100
Commit:    2606d2448703e8995ca39a59d8a1106a1e0f034a
Tree:      588f1b850a7e13d0e2f3461d66909db55c7531fb /drivers/gpu/drm/i915
Parent:    8bb495e3f02401ee6f76d1b1d77f3ac9f079e376
Parent:    b8a669d29702a8fb529f4fae450a86b8676b0e42
Merge tag 'kvm-for-lsk-v3.10-v1' of git://git.linaro.org/people/christoffer.dall/linux-kvm-arm into lsk-v3.10-kvm
KVM/ARM/arm64 Support for LSK v3.10
Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/i915_dma.c         |  18
 drivers/gpu/drm/i915/i915_drv.c         |  13
 drivers/gpu/drm/i915/i915_drv.h         |   4
 drivers/gpu/drm/i915/i915_gem.c         |  57
 drivers/gpu/drm/i915/i915_gem_context.c |   2
 drivers/gpu/drm/i915/i915_irq.c         |  13
 drivers/gpu/drm/i915/i915_reg.h         |  32
 drivers/gpu/drm/i915/intel_ddi.c        |  10
 drivers/gpu/drm/i915/intel_display.c    |  47
 drivers/gpu/drm/i915/intel_drv.h        |   2
 drivers/gpu/drm/i915/intel_panel.c      |   3
 drivers/gpu/drm/i915/intel_pm.c         |  47
 drivers/gpu/drm/i915/intel_ringbuffer.c |  21
 13 files changed, 187 insertions(+), 82 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 3b315ba85a3e..17d9b0b6afc5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1511,6 +1511,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->gt_lock);
+ mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+
i915_dump_device_info(dev_priv);
if (i915_get_bridge_dev(dev)) {
@@ -1601,6 +1609,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
intel_irq_init(dev);
+ intel_pm_init(dev);
+ intel_gt_sanitize(dev);
intel_gt_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1626,14 +1636,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
- mutex_init(&dev_priv->dpio_lock);
-
- mutex_init(&dev_priv->rps.hw_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
-
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a2e4953b8e8d..bc6cd3117ac3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -685,7 +685,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -711,7 +711,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1247,21 +1247,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ unsigned long irqflags; \
u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- unsigned long irqflags; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
@@ -1274,8 +1274,10 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ unsigned long irqflags; \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -1287,6 +1289,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
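
Note on the locking change in the read/write macros above: after this patch dev_priv->gt_lock is held for the entire MMIO access, not just around the forcewake bracket. Hand-expanding __i915_read(32, l) gives roughly the following (a sketch of the post-patch expansion, not literal preprocessor output; all helpers are the ones already declared elsewhere in the driver):

u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
    unsigned long irqflags;
    u32 val = 0;

    /* The spinlock now also covers the Ironlake dummy write and the
     * non-forcewake path, serialising all register access. */
    spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
    if (IS_GEN5(dev_priv->dev))
        ilk_dummy_write(dev_priv);
    if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
        if (dev_priv->forcewake_count == 0)
            dev_priv->gt.force_wake_get(dev_priv);
        val = readl(dev_priv->regs + reg);
        if (dev_priv->forcewake_count == 0)
            dev_priv->gt.force_wake_put(dev_priv);
    } else {
        val = readl(dev_priv->regs + reg);
    }
    spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);

    trace_i915_reg_rw(false, reg, val, sizeof(val));
    return val;
}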
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9669a0b8b440..47d8b68c5004 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -491,6 +491,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1474,9 +1475,10 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9e35dafc5807..0a30088178b0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1160,7 +1160,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*/
- if (obj->last_write_seqno &&
+ if (ret == 0 &&
+ obj->last_write_seqno &&
i915_seqno_passed(seqno, obj->last_write_seqno)) {
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1880,6 +1881,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
+ if (obj->ring != ring && obj->last_write_seqno) {
+ /* Keep the seqno relative to the current ring */
+ obj->last_write_seqno = seqno;
+ }
obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
@@ -2133,7 +2138,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- i915_gem_write_fence(dev, i, reg->obj);
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
}
}
@@ -2533,7 +2548,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
drm_i915_private_t *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
- uint64_t val;
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2543,8 +2557,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
+ fence_reg += reg * 8;
+
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
if (obj) {
u32 size = obj->gtt_space->size;
+ uint64_t val;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
@@ -2553,12 +2582,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- } else
- val = 0;
- fence_reg += reg * 8;
- I915_WRITE64(fence_reg, val);
- POSTING_READ(fence_reg);
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
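
The workaround described in the comment above boils down to a fixed ordering: turn the fence off, land each 32-bit half with a posting read, and only re-enable it with the final low-dword write that carries the valid bit. In isolation the sequence looks like this (a minimal sketch using the driver's I915_WRITE/POSTING_READ helpers; write_fence64_split is an illustrative name, not a function in the driver):

static void write_fence64_split(struct drm_i915_private *dev_priv,
                                u32 fence_reg, u64 val)
{
    /* 1) Disable the fence so no partially-written value can ever be
     *    interpreted as valid by the GPU. */
    I915_WRITE(fence_reg, 0);
    POSTING_READ(fence_reg);

    /* 2) Write the high dword while the fence is off. */
    I915_WRITE(fence_reg + 4, upper_32_bits(val));
    POSTING_READ(fence_reg + 4);

    /* 3) The low dword carries I965_FENCE_REG_VALID, so writing it
     *    last re-enables the fence with a coherent 64-bit value. */
    I915_WRITE(fence_reg + 0, lower_32_bits(val));
    POSTING_READ(fence_reg);
}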
@@ -2653,6 +2686,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
@@ -2712,6 +2749,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
+ obj->fence_dirty = false;
}
static int
@@ -2841,7 +2879,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
i915_gem_object_update_fence(obj, reg, enable);
- obj->fence_dirty = false;
return 0;
}
@@ -4456,7 +4493,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a1e8ecb6adf6..3bc8a58a8d5f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -113,7 +113,7 @@ static int get_context_size(struct drm_device *dev)
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
- ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+ ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0aa2ef0d2ae0..e5e328691e3a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -70,15 +70,6 @@ static const u32 hpd_status_gen4[] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
-static const u32 hpd_status_i965[] = {
- [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
- [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
- [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
- [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
- [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
-};
-
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
@@ -2952,13 +2943,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
HOTPLUG_INT_STATUS_G4X :
- HOTPLUG_INT_STATUS_I965);
+ HOTPLUG_INT_STATUS_I915);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_trigger) {
if (hotplug_irq_storm_detect(dev, hotplug_trigger,
- IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
+ IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
i915_hpd_irq_setup(dev);
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d6b62e42daf..a365780aeb1e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -617,6 +617,8 @@
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -1535,14 +1537,13 @@
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
-#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
-#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
-#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
- HSW_CXT_RING_SIZE(ctx_reg) + \
- HSW_CXT_RENDER_SIZE(ctx_reg) + \
- GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-
+/* Haswell does have the CXT_SIZE register however it does not appear to be
+ * valid. Now, docs explain in dwords what is in the context object. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
/*
* Overlay regs
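
The 17-page figure in the new HSW_CXT_TOTAL_SIZE comment is simply the 66944-byte context size rounded up to whole pages; a quick check, assuming the usual 4 KiB PAGE_SIZE:

/* Page rounding behind HSW_CXT_TOTAL_SIZE, assuming 4 KiB pages. */
#define HSW_CXT_BYTES 66944                              /* size from the comment above */
#define HSW_CXT_PAGES DIV_ROUND_UP(HSW_CXT_BYTES, 4096)  /* (66944 + 4095) / 4096 = 17 */
/* 17 * 4096 = 69632 bytes are reserved, comfortably covering 66944. */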
@@ -1691,6 +1692,12 @@
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these

+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
@@ -1702,13 +1709,6 @@
PORTC_HOTPLUG_INT_STATUS | \
PORTD_HOTPLUG_INT_STATUS)
-#define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \
- SDVOB_HOTPLUG_INT_STATUS_I965 | \
- SDVOC_HOTPLUG_INT_STATUS_I965 | \
- PORTB_HOTPLUG_INT_STATUS | \
- PORTC_HOTPLUG_INT_STATUS | \
- PORTD_HOTPLUG_INT_STATUS)
-
#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
SDVOB_HOTPLUG_INT_STATUS_I915 | \
SDVOC_HOTPLUG_INT_STATUS_I915 | \
@@ -4246,7 +4246,7 @@
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fb961bb81903..16e674af4d57 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -684,7 +684,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->port_reversal |
+ intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
@@ -1324,7 +1324,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port),
- intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+ intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1543,8 +1544,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_dig_port->port = port;
- intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
- DDI_BUF_PORT_REVERSAL;
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL |
+ DDI_A_4_LANES);
if (hdmi_connector)
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56746dcac40f..eea5982657a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4333,7 +4333,8 @@ static void vlv_update_pll(struct intel_crtc *crtc)
static void i9xx_update_pll(struct intel_crtc *crtc,
intel_clock_t *reduced_clock,
- int num_connectors)
+ int num_connectors,
+ bool needs_tv_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4391,7 +4392,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
+ if (is_sdvo && needs_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
/* XXX: just matching BIOS for now */
@@ -4716,7 +4717,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
else
i9xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ num_connectors,
+ is_sdvo && is_tv);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -8146,15 +8148,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
- int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
{
int i;
- for (i = 0; i < num_connectors; i++)
- if (connectors[i].encoder &&
- connectors[i].encoder->crtc == crtc &&
- connectors[i].dpms != DRM_MODE_DPMS_ON)
+ if (set->num_connectors == 0)
+ return false;
+
+ if (WARN_ON(set->connectors == NULL))
+ return false;
+
+ for (i = 0; i < set->num_connectors; i++)
+ if (set->connectors[i]->encoder &&
+ set->connectors[i]->encoder->crtc == set->crtc &&
+ set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;
return false;
@@ -8167,10 +8174,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->connectors != NULL &&
- is_crtc_connector_off(set->crtc, *set->connectors,
- set->num_connectors)) {
- config->mode_changed = true;
+ if (is_crtc_connector_off(set)) {
+ config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
@@ -8914,6 +8919,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -8983,6 +8999,11 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
static void intel_init_quirks(struct drm_device *dev)
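
For context, the quirk table above is keyed on PCI device and subsystem IDs; intel_init_quirks() (unchanged by this patch, so not shown in the diff) walks it and fires the hook for each matching entry. Roughly, and only as a sketch of the matching logic rather than the driver's exact code:

static void example_apply_quirks(struct drm_device *dev)
{
    struct pci_dev *d = dev->pdev;
    int i;

    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
        struct intel_quirk *q = &intel_quirks[i];

        /* A quirk fires when the PCI device ID and the subsystem
         * vendor/device IDs all match the entry in the table. */
        if (d->device == q->device &&
            d->subsystem_vendor == q->subsystem_vendor &&
            d->subsystem_device == q->subsystem_device)
            q->hook(dev);
    }
}

So the Dell XPS13 entries added above only take effect on machines whose subsystem IDs (0x1028:0x052e and 0x1028:0x058b) identify those exact models.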
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 624a9e6b8d71..7cd55843e73e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -426,7 +426,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
- u32 port_reversal;
+ u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index eb5e6e95f3c7..33cb87f7983e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -354,7 +354,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev) &&
+ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa01128ff192..94ad6bc08260 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4486,7 +4486,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-void intel_gt_reset(struct drm_device *dev)
+void intel_gt_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4497,26 +4497,61 @@ void intel_gt_reset(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ if (INTEL_INFO(dev)->gen >= 6)
+ intel_disable_gt_powersave(dev);
}
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->gt_lock);
-
- intel_gt_reset(dev);
-
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
} else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
}
+}
+
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d5d613eb6be..48fe23e8d180 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -490,9 +490,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
struct pipe_control *pc = ring->private;
struct drm_i915_gem_object *obj;
- if (!ring->private)
- return;
-
obj = pc->obj;
kunmap(sg_page(obj->pages->sgl));
@@ -500,7 +497,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
drm_gem_object_unreference(&obj->base);
kfree(pc);
- ring->private = NULL;
}
static int init_render_ring(struct intel_ring_buffer *ring)
@@ -571,7 +567,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
if (HAS_BROKEN_CS_TLB(dev))
drm_gem_object_unreference(to_gem_object(ring->private));
- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5)
+ cleanup_pipe_control(ring);
+
+ ring->private = NULL;
}
static void
@@ -908,6 +907,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
}
static int
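
A note on the INSTPM write in the last hunk: INSTPM is a masked register, so the upper 16 bits of each write select which of the lower 16 bits actually change, which is what _MASKED_BIT_ENABLE() encodes. A minimal sketch of that pattern and the completion poll, assuming the driver's I915_READ/I915_WRITE helpers and the wait_for() macro (timeout in milliseconds); example_sync_flush is an illustrative name, not a function in the driver:

/* Equivalent of _MASKED_BIT_ENABLE(): set the mask bits (<< 16) together
 * with the value bits so only the named bits are updated. */
static inline u32 masked_bit_enable(u32 bits)
{
    return (bits << 16) | bits;
}

static int example_sync_flush(struct drm_i915_private *dev_priv, u32 reg)
{
    I915_WRITE(reg, masked_bit_enable(INSTPM_TLB_INVALIDATE |
                                      INSTPM_SYNC_FLUSH));

    /* Hardware clears INSTPM_SYNC_FLUSH once the flush (and with it the
     * TLB invalidation) has completed; give it up to a second. */
    if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 1000))
        return -ETIMEDOUT;

    return 0;
}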