Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 96
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 53
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 70
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 12
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 5
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 4
-rw-r--r-- drivers/gpu/drm/drm_cache.c | 1
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 65
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 27
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 385
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 8
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 5
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 29
-rw-r--r-- drivers/gpu/drm/drm_ioc32.c | 4
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 73
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/gem.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 26
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 39
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 121
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 66
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 15
-rw-r--r-- drivers/gpu/drm/i915/intel_hotplug.c | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 32
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 6
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 8
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 10
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 27
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/ramht.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 39
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c | 6
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 13
-rw-r--r-- drivers/gpu/drm/qxl/qxl_draw.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ioctl.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 108
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 154
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_reg.h | 46
-rw-r--r-- drivers/gpu/drm/radeon/evergreend.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 62
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_audio.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_audio.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/radeon_dp_auxch.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_dp_mst.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_sa.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/vce_v1_0.c | 12
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 7
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r-- drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 19
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 53
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 16
143 files changed, 1830 insertions, 801 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 04c270757030..ca066018ea34 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
# add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
amdgpu_amdkfd_gfx_v7.o
@@ -31,6 +31,7 @@ amdgpu-y += \
# add GMC block
amdgpu-y += \
+ gmc_v7_0.o \
gmc_v8_0.o
# add IH block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 048cfe073dae..ff5566c69f7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
uint32_t align;
};
-struct amdgpu_sa_bo;
-
/* sub-allocation buffer */
struct amdgpu_sa_bo {
struct list_head olist;
@@ -712,9 +710,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
@@ -1675,6 +1673,7 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ unsigned fw_version;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -2314,6 +2313,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9416e0f5c1db..51a9942cdb40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -331,6 +331,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ /* Skip TV/CV support */
+ if ((le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_TV1_SUPPORT) ||
+ (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT))
+ continue;
+
+ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+ con_obj_id, le16_to_cpu(path->usDeviceTag));
+ continue;
+ }
+
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
@@ -566,28 +579,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le16_to_cpu(firmware_info->info.usReferenceClock);
ppll->reference_div = 0;
- if (crev < 2)
- ppll->pll_out_min =
- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
- else
- ppll->pll_out_min =
- le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+ ppll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
ppll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
- ppll->lcd_pll_out_min =
- le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_min == 0)
- ppll->lcd_pll_out_min = ppll->pll_out_min;
- ppll->lcd_pll_out_max =
- le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_max == 0)
- ppll->lcd_pll_out_max = ppll->pll_out_max;
- } else {
+ ppll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_min == 0)
ppll->lcd_pll_out_min = ppll->pll_out_min;
+ ppll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_max == 0)
ppll->lcd_pll_out_max = ppll->pll_out_max;
- }
if (ppll->pll_out_min == 0)
ppll->pll_out_min = 64800;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 5a8fbadbd27b..29adbbe225c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "amdgpu_acpi.h"
@@ -256,6 +257,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 89c3dd62ba21..930083336968 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we
* have the dpcd */
- if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
/* set it to OFF so that drm_helper_connector_dpms()
@@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
bpc = 8;
DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
- } else if (bpc > 8) {
- /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
- DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
- connector->name);
- bpc = 8;
}
+ } else if (bpc > 8) {
+ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+ connector->name);
+ bpc = 8;
}
}
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d5b421330145..16302f7d59f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
/* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (!amdgpu_card_posted(adev))
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
r = amdgpu_resume(adev);
+ if (r)
+ DRM_ERROR("amdgpu_resume failed (%d).\n", r);
amdgpu_fence_driver_resume(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ if (resume) {
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+ }
r = amdgpu_late_init(adev);
if (r)
@@ -1789,6 +1794,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
drm_kms_helper_poll_enable(dev);
+ /*
+ * Most of the connector probing functions try to acquire runtime pm
+ * refs to ensure that the GPU is powered on when connector polling is
+ * performed. Since we're calling this from a runtime PM callback,
+ * trying to acquire rpm refs will cause us to deadlock.
+ *
+ * Since we're guaranteed to be holding the rpm lock, it's safe to
+ * temporarily disable the rpm helpers so this doesn't deadlock us.
+ */
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth++;
+#endif
+ drm_helper_hpd_irq_event(dev);
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth--;
+#endif
+
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
console_unlock();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 5580d3420c3a..82903ca78529 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i;
- int vpos, hpos, stat, min_udelay;
+ unsigned i, repcnt = 4;
+ int vpos, hpos, stat, min_udelay = 0;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (amdgpuCrtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -112,12 +112,24 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
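For reference, the vblank wait bound computed in the hunk above can be read in isolation as the following sketch; it is an illustration only, the standalone helper name and parameter types are assumptions, and the arithmetic is copied from the hunk.

static unsigned int flip_min_udelay(int hpos, int linedur_ns, int framedur_ns)
{
	/* hpos is negative while the scanout is still above vblank start */
	unsigned int line_us = linedur_ns / 1000;
	unsigned int min_udelay;

	if (line_us < 5)
		line_us = 5;		/* never trust a sub-5us line duration */

	/* scanlines left until the real hw vblank start */
	min_udelay = (-hpos + 1) * line_us;

	/* a wait longer than half a frame means the timing data is bogus */
	if (min_udelay > (unsigned int)(framedur_ns / 2000))
		return 0;

	return min_udelay;
}

A non-zero result is what the patch feeds to usleep_range(min_udelay, 2 * min_udelay); a zero result corresponds to the repcnt = 0 bail-out above.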
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 7b7f4aba60c0..fe36caf1b7d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -150,7 +150,7 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = amdgpu_crtc->hw_mode.vrefresh;
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0508c5cd103a..8d6668cedf6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
/* topaz */
- {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
/* tonga */
{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 7312d729d300..22a613a95bf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
@@ -269,7 +269,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 9e25edafa721..c77a1ebfc632 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -288,7 +288,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
- int r;
+ int r, ret = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -309,10 +309,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
} else {
/* still not good, but we can live with it */
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+ ret = r;
}
}
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e23843f4d877..4488e82f87b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
fw_info.feature = adev->vce.fb_version;
break;
case AMDGPU_INFO_FW_UVD:
- fw_info.ver = 0;
+ fw_info.ver = adev->uvd.fw_version;
fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_GMC:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index b1969f2b2038..d4e2780c0796 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
list_for_each_entry(bo, &node->bos, mn_list) {
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
+ end))
continue;
r = amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 064ebb347074..89df7871653d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -52,7 +52,7 @@ struct amdgpu_hpd;
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
RMX_OFF,
@@ -308,8 +308,8 @@ struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
bool mode_config_initialized;
- struct amdgpu_crtc *crtcs[6];
- struct amdgpu_afmt *afmt[7];
+ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c3ce103b6a33..73628c7599e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_OA);
bo->flags = flags;
+
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
- if (lpfn && lpfn < bo->placements[i].lpfn)
+ if (!bo->placements[i].lpfn ||
+ (lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
@@ -531,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (!metadata_size) {
if (bo->metadata_size) {
kfree(bo->metadata);
+ bo->metadata = NULL;
bo->metadata_size = 0;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 22a8c7d3a3ab..7ae15fad16ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -595,11 +595,6 @@ force:
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
/* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -616,6 +611,12 @@ force:
amdgpu_dpm_post_set_power_state(adev);
+ /* update displays */
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8b88edb0434b..ca72a2e487b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
if (fences[i])
- fences[count++] = fences[i];
+ fences[count++] = fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
t = fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT);
+ for (i = 0; i < count; ++i)
+ fence_put(fences[i]);
+
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index dd005c336c97..181ce39ef5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
fence = to_amdgpu_fence(sync->sync_to[i]);
/* check if we really need to sync */
- if (!amdgpu_fence_need_sync(fence, ring))
+ if (!amdgpu_enable_scheduler &&
+ !amdgpu_fence_need_sync(fence, ring))
continue;
/* prevent GPU deadlocks */
@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
}
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
- r = fence_wait(&fence->base, true);
+ r = fence_wait(sync->sync_to[i], true);
if (r)
return r;
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8a1752ff3d8e..475c38fe9245 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -233,8 +233,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
+ while (i--) {
pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
return !!gtt->userptr;
}
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned long size;
+
+ if (gtt == NULL)
+ return false;
+
+ if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+ return false;
+
+ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ if (gtt->userptr > end || gtt->userptr + size <= start)
+ return false;
+
+ return true;
+}
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
flags |= AMDGPU_PTE_SNOOPED;
}
- if (adev->asic_type >= CHIP_TOPAZ)
+ if (adev->asic_type >= CHIP_TONGA)
flags |= AMDGPU_PTE_EXECUTABLE;
flags |= AMDGPU_PTE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 53f987aeeacf..3b35ad83867c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
(adev->uvd.fw->size) - offset);
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a745eeeb5d82..bb0da76051a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b53d273eb7a1..8c5ec151ddac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -EINVAL;
/* make sure object fit at this offset */
- eaddr = saddr + size;
+ eaddr = saddr + size - 1;
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL;
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
- if (last_pfn > adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ if (last_pfn >= adev->vm_manager.max_pfn) {
+ dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn);
return -EINVAL;
}
@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
eaddr /= AMDGPU_GPU_PAGE_SIZE;
spin_lock(&vm->it_lock);
- it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+ it = interval_tree_iter_first(&vm->va, saddr, eaddr);
spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
INIT_LIST_HEAD(&mapping->list);
mapping->it.start = saddr;
- mapping->it.last = eaddr - 1;
+ mapping->it.last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
@@ -1248,7 +1248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT * 8);
- unsigned pd_size, pd_entries, pts_size;
+ unsigned pd_size, pd_entries;
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1266,8 +1266,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
pd_entries = amdgpu_vm_num_pdes(adev);
/* allocate page table array */
- pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
- vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+ vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
return -ENOMEM;
@@ -1327,7 +1326,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
amdgpu_bo_unref(&vm->page_tables[i].bo);
- kfree(vm->page_tables);
+ drm_free_large(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
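The amdgpu_vm_bo_map() hunks above move the interval tree queries to inclusive last addresses (eaddr = saddr + size - 1). A minimal sketch of that convention, using an assumed standalone helper name purely for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Two inclusive [start, last] ranges overlap iff each one starts at or
 * before the other one ends. For a mapping of `size` bytes at `saddr`,
 * the last covered byte is saddr + size - 1, which is the value the
 * patch now passes to interval_tree_iter_first() directly.
 */
static bool va_ranges_overlap(uint64_t a_start, uint64_t a_last,
			      uint64_t b_start, uint64_t b_last)
{
	return a_start <= b_last && b_start <= a_last;
}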
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 92b6acadfc52..21aacc1f45c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
-static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
+static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
if (bpc == 0)
return 24;
@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
-/* get the max pix clock supported by the link rate and lane num */
-static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** amdgpu specific DP functions *****/
-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
+static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int max_link_rate = drm_dp_max_link_rate(dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
- int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (amdgpu_connector_is_dp12_capable(connector)) {
- max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ unsigned bpp =
+ amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}
- return drm_dp_max_link_rate(dpcd);
+ return -EINVAL;
}
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
+ int ret;
if (!amdgpu_connector->con_priv)
return;
@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}
@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_lanes, dp_clock;
+ int ret;
if (!amdgpu_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = amdgpu_connector->con_priv;
- dp_clock =
- amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock, &dp_lanes, &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!amdgpu_connector_is_dp12_capable(connector)))
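The amdgpu_atombios_dp_get_dp_link_config() helper introduced above folds the old lane-count and link-clock lookups into one search. Below is a compilable sketch of the same search; the 8b/10b bandwidth formula and the rate table come from the hunk, while the standalone function name and the local ARRAY_SIZE definition are assumptions for illustration.

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int dp_pick_link_config(unsigned int pix_clock, unsigned int bpp,
			       unsigned int max_lane_num,
			       unsigned int max_link_rate,
			       unsigned int *dp_lanes, unsigned int *dp_rate)
{
	static const unsigned int link_rates[3] = { 162000, 270000, 540000 };
	unsigned int lane_num, i, max_pix_clock;

	/* Try lane counts from low to high and, within each lane count,
	 * rates from low to high; take the first combination whose
	 * bandwidth covers the requested pixel clock.
	 */
	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
		for (i = 0; i < ARRAY_SIZE(link_rates) &&
			    link_rates[i] <= max_link_rate; i++) {
			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
			if (max_pix_clock >= pix_clock) {
				*dp_lanes = lane_num;
				*dp_rate = link_rates[i];
				return 0;
			}
		}
	}

	return -1;	/* the mode cannot be carried over this link */
}

For example, a 148500 kHz (1080p60) mode at bpp = 24 against a sink reporting 4 lanes and a 270000 maximum rate resolves to 2 lanes at 270000: the 1-lane combinations and 2 lanes at 162000 all top out below 148500.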
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e0bba29e167..542517d4e584 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
@@ -298,6 +299,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5f712ceddf08..c568293cb6c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1030,6 +1031,8 @@ static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(handle);
+
return cik_sdma_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 8035d4d6a4f5..653917a3bcc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1955,10 +1955,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
} else { /*pi->caps_vce_pg*/
cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, true);
+ cz_enable_vce_dpm(adev, !gate);
}
-
- return;
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 72793f93e2fc..b57fffc2d4af 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -5450,7 +5463,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- if ((ring->me == me_id) & (ring->pipe == pipe_id))
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
amdgpu_fence_process(ring);
}
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e1dcab98e249..d1054034d14b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- if (adev->asic_type != CHIP_STONEY) {
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
@@ -4681,7 +4681,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3))); /* equal */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ed8abb58a785..df17fababbd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+static const u32 golden_settings_iceland_a11[] =
+{
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ amdgpu_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+ break;
+ default:
+ break;
+ }
+}
/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
*
* @adev: amdgpu_device pointer
*
@@ -132,13 +162,21 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
case CHIP_HAWAII:
chip_name = "hawaii";
break;
+ case CHIP_TOPAZ:
+ chip_name = "topaz";
+ break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
return 0;
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ if (adev->asic_type == CHIP_TOPAZ)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -861,14 +899,6 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -889,6 +919,14 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
@@ -980,6 +1018,8 @@ static int gmc_v7_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v7_0_init_golden_registers(adev);
+
gmc_v7_0_mc_program(adev);
if (!(adev->flags & AMD_IS_APU)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d39028440814..08423089fb84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -42,9 +42,7 @@
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
-static const u32 golden_settings_iceland_a11[] =
-{
- mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
- mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
static const u32 cz_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
- case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
- break;
case CHIP_FIJI:
amdgpu_program_register_sequence(adev,
fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
switch (adev->asic_type) {
- case CHIP_TOPAZ:
- chip_name = "topaz";
- break;
case CHIP_TONGA:
chip_name = "tonga";
break;
case CHIP_FIJI:
- chip_name = "fiji";
- break;
case CHIP_CARRIZO:
case CHIP_STONEY:
return 0;
@@ -880,14 +852,6 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -898,6 +862,8 @@ static int gmc_v8_0_late_init(void *handle)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
static int gmc_v8_0_sw_init(void *handle)
{
int r;
@@ -908,6 +874,19 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_FIJI)
+ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+ else
+ tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
@@ -1003,7 +982,7 @@ static int gmc_v8_0_hw_init(void *handle)
gmc_v8_0_mc_program(adev);
- if (!(adev->flags & AMD_IS_APU)) {
+ if (adev->asic_type == CHIP_TONGA) {
r = gmc_v8_0_mc_load_microcode(adev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 966d4b2ed9da..090486c18249 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
case AMDGPU_UCODE_ID_CP_ME:
return UCODE_ID_CP_ME_MASK;
case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+ return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
case AMDGPU_UCODE_ID_CP_MEC2:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
return -EINVAL;
}
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
UCODE_ID_CP_ME_MASK |
UCODE_ID_CP_PFP_MASK |
UCODE_ID_CP_MEC_MASK |
- UCODE_ID_CP_MEC_JT1_MASK |
- UCODE_ID_CP_MEC_JT2_MASK;
+ UCODE_ID_CP_MEC_JT1_MASK;
+
if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
DRM_ERROR("Fail to request SMU load ucode\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2cf50180cc51..b1c7a9b3631b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -32,8 +32,8 @@
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index 204903897b4f..63d6cb3c1110 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
static int tonga_dpm_suspend(void *handle)
{
- return 0;
+ return tonga_dpm_hw_fini(handle);
}
static int tonga_dpm_resume(void *handle)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = tonga_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
-fail:
- mutex_unlock(&adev->pm.mutex);
- return ret;
+ return tonga_dpm_hw_init(handle);
}
static int tonga_dpm_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 2adc1c855e85..3e9cbe398151 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -60,6 +60,7 @@
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
@@ -1081,10 +1082,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
},
{
.type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
+ .major = 7,
+ .minor = 4,
.rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
+ .funcs = &gmc_v7_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_IH,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index c34c393e9aea..d5e19b5fbbfb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
union SQ_CMD_BITS *in_reg_sq_cmd,
union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
{
- int status;
+ int status = 0;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 9be007081b72..eb1da83c9902 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
pqm_uninit(&p->pqm);
/* Iterate over all process device data structure and check
- * if we should reset all wavefronts */
- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ * if we should delete debug managers and reset all wavefronts
+ */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ if ((pdd->dev->dbgmgr) &&
+ (pdd->dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
+
if (pdd->reset_wavefronts) {
pr_warn("amdkfd: Resetting all wave fronts\n");
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
}
+ }
mutex_unlock(&p->mutex);
@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
idx = srcu_read_lock(&kfd_processes_srcu);
+ /*
+ * Look for the process that matches the pasid. If there is no such
+ * process, we either released it in amdkfd's own notifier, or there
+ * is a bug. Unfortunately, there is no way to tell...
+ */
hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
- if (p->pasid == pasid)
- break;
+ if (p->pasid == pasid) {
- srcu_read_unlock(&kfd_processes_srcu, idx);
+ srcu_read_unlock(&kfd_processes_srcu, idx);
- BUG_ON(p->pasid != pasid);
+ pr_debug("Unbinding process %d from IOMMU\n", pasid);
- mutex_lock(&p->mutex);
+ mutex_lock(&p->mutex);
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
+ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(dev->dbgmgr);
- pqm_uninit(&p->pqm);
+ pqm_uninit(&p->pqm);
- pdd = kfd_get_process_device_data(dev, p);
+ pdd = kfd_get_process_device_data(dev, p);
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
- }
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ if (pdd->reset_wavefronts) {
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ pdd->reset_wavefronts = false;
+ }
- /*
- * Just mark pdd as unbound, because we still need it to call
- * amd_iommu_unbind_pasid() in when the process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ /*
+ * Just mark pdd as unbound, because we still need it
+ * to call amd_iommu_unbind_pasid() in when the
+ * process exits.
+ * We don't call amd_iommu_unbind_pasid() here
+ * because the IOMMU called us.
+ */
+ pdd->bound = false;
- mutex_unlock(&p->mutex);
+ mutex_unlock(&p->mutex);
+
+ return;
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
}
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 541a610667ad..e0b4586a26fd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
- if (data & 0x400)
+ if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index d0299aed517e..e231176cb66b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -316,25 +316,27 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize;
- if (state->crtc_w < state->src_w)
+ if (state->crtc_h < state->src_h)
coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i,
0xffffffff,
coeff_tab[i]);
- factor = ((8 * 256 * state->src_w) - (256 * 4)) /
- state->crtc_w;
+ factor = ((8 * 256 * state->src_h) - (256 * 4)) /
+ state->crtc_h;
factor++;
- max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
+ max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
2048;
- if (max_memsize > state->src_w)
+ if (max_memsize > state->src_h)
factor--;
factor_reg |= (factor << 16) | 0x80000000;
}
atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
factor_reg);
+ } else {
+ atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
}
}
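To make the height-based coefficients above concrete, here is the same factor arithmetic as a small standalone program; the sizes src_h = 1080 and crtc_h = 720 are purely illustrative, not taken from the patch:

/* Worked example of the vertical scaling-factor math used above
 * (illustrative sizes; not taken from the patch itself). */
#include <stdio.h>

int main(void)
{
	unsigned int src_h = 1080, crtc_h = 720;
	unsigned int factor, max_memsize;

	factor = ((8 * 256 * src_h) - (256 * 4)) / crtc_h;	/* 3070 */
	factor++;						/* 3071 */

	max_memsize = ((factor * crtc_h) + (256 * 4)) / 2048;	/* 1080 */
	if (max_memsize > src_h)				/* false here */
		factor--;

	printf("factor=%u max_memsize=%u\n", factor, max_memsize);
	return 0;
}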
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index aeee083c7f95..6253775b8d9c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -150,7 +150,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
- if (!connector)
+ if (!connector || !connector->funcs)
continue;
/*
@@ -367,6 +367,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;
+ memset(&state->mode, 0, sizeof(state->mode));
+
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(&state->mode,
@@ -379,7 +381,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
state->mode.name, state);
} else {
- memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index e5aec45bf985..1ac29d703c12 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -108,7 +108,6 @@ steal_encoder(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int ret;
/*
* We can only steal an encoder coming from a connector, which means we
@@ -139,9 +138,6 @@ steal_encoder(struct drm_atomic_state *state,
if (IS_ERR(connector_state))
return PTR_ERR(connector_state);
- ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
- if (ret)
- return ret;
connector_state->best_encoder = NULL;
}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 6743ff7dccfa..7f4a6c550319 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
mb();
for (; addr < end; addr += size)
clflushopt(addr);
+ clflushopt(end - 1); /* force serialisation */
mb();
return;
}
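The extra clflushopt(end - 1) above re-flushes the last cache line, per the in-patch comment, to force serialisation of the weakly ordered flushes before the fence. A user-space analogue of the same range-flush shape, written with compiler intrinsics under the assumption that the CPU supports CLFLUSHOPT (build with -mclflushopt):

/*
 * User-space sketch of a cache-line range flush mirroring the helper
 * above. Assumes CLFLUSHOPT support; build with -mclflushopt.
 */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

#define CACHELINE 64UL

static void clflush_range(void *addr, size_t length)
{
	char *p = (char *)((uintptr_t)addr & ~(CACHELINE - 1));
	char *end = (char *)addr + length;

	_mm_mfence();
	for (; p < end; p += CACHELINE)
		_mm_clflushopt(p);
	/* flush the last line once more, as in the change above */
	_mm_clflushopt(end - 1);
	_mm_mfence();
}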
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 24c5434abd1c..5e4bb4837bae 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2682,8 +2682,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-
/*
* Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a
@@ -3316,6 +3314,24 @@ int drm_mode_addfb2(struct drm_device *dev,
return 0;
}
+struct drm_mode_rmfb_work {
+ struct work_struct work;
+ struct list_head fbs;
+};
+
+static void drm_mode_rmfb_work_fn(struct work_struct *w)
+{
+ struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
+
+ while (!list_empty(&arg->fbs)) {
+ struct drm_framebuffer *fb =
+ list_first_entry(&arg->fbs, typeof(*fb), filp_head);
+
+ list_del_init(&fb->filp_head);
+ drm_framebuffer_remove(fb);
+ }
+}
+
/**
* drm_mode_rmfb - remove an FB from the configuration
* @dev: drm device for the ioctl
@@ -3356,7 +3372,25 @@ int drm_mode_rmfb(struct drm_device *dev,
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
- drm_framebuffer_unreference(fb);
+ /*
+ * we now own the reference that was stored in the fbs list
+ *
+ * drm_framebuffer_remove may fail with -EINTR on pending signals,
+ * so run this in a separate stack as there's no way to correctly
+ * handle this after the fb is already removed from the lookup table.
+ */
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ struct drm_mode_rmfb_work arg;
+
+ INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+ INIT_LIST_HEAD(&arg.fbs);
+ list_add_tail(&fb->filp_head, &arg.fbs);
+
+ schedule_work(&arg.work);
+ flush_work(&arg.work);
+ destroy_work_on_stack(&arg.work);
+ } else
+ drm_framebuffer_unreference(fb);
return 0;
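The on-stack work item introduced above is a general idiom: queue a short-lived work_struct that lives on the caller's stack, wait for it with flush_work(), and tear it down with destroy_work_on_stack(). A hedged sketch of that shape, where my_cleanup_arg and do_heavy_cleanup() are illustrative names rather than anything from this patch:

/* Sketch of the INIT_WORK_ONSTACK + flush_work idiom used above.
 * my_cleanup_arg and do_heavy_cleanup() are illustrative names. */
struct my_cleanup_arg {
	struct work_struct work;
	struct list_head items;
};

static void my_cleanup_fn(struct work_struct *w)
{
	struct my_cleanup_arg *arg = container_of(w, typeof(*arg), work);

	do_heavy_cleanup(&arg->items);	/* runs on the worker's stack */
}

static void run_cleanup_synchronously(struct list_head *items)
{
	struct my_cleanup_arg arg;

	INIT_WORK_ONSTACK(&arg.work, my_cleanup_fn);
	INIT_LIST_HEAD(&arg.items);
	list_splice_init(items, &arg.items);

	schedule_work(&arg.work);	/* queue it ... */
	flush_work(&arg.work);		/* ... and wait for completion */
	destroy_work_on_stack(&arg.work);
}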
@@ -3509,7 +3543,6 @@ out_err1:
return ret;
}
-
/**
* drm_fb_release - remove and free the FBs on this file
* @priv: drm file for the ioctl
@@ -3524,6 +3557,9 @@ out_err1:
void drm_fb_release(struct drm_file *priv)
{
struct drm_framebuffer *fb, *tfb;
+ struct drm_mode_rmfb_work arg;
+
+ INIT_LIST_HEAD(&arg.fbs);
/*
* When the file gets released that means no one else can access the fb
@@ -3536,10 +3572,22 @@ void drm_fb_release(struct drm_file *priv)
* at it any more.
*/
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del_init(&fb->filp_head);
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ list_move_tail(&fb->filp_head, &arg.fbs);
+ } else {
+ list_del_init(&fb->filp_head);
- /* This drops the fpriv->fbs reference. */
- drm_framebuffer_unreference(fb);
+ /* This drops the fpriv->fbs reference. */
+ drm_framebuffer_unreference(fb);
+ }
+ }
+
+ if (!list_empty(&arg.fbs)) {
+ INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+
+ schedule_work(&arg.work);
+ flush_work(&arg.work);
+ destroy_work_on_stack(&arg.work);
}
}
@@ -5183,6 +5231,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
unsigned long flags;
int ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 9535c5b60387..7e5a97204051 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
- int err;
+ int err = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;
+ mutex_lock(&aux->hw_mutex);
+
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {
- mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
- return err;
+ goto unlock;
}
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
- return -EPROTO;
- return err;
+ err = -EPROTO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_NACK:
- return -EIO;
+ err = -EIO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
DRM_DEBUG_KMS("too many retries, giving up\n");
- return -EIO;
+ err = -EIO;
+
+unlock:
+ mutex_unlock(&aux->hw_mutex);
+ return err;
}
/**
@@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
- mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
- mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
+ mutex_lock(&aux->hw_mutex);
+
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
+
return err;
}
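Both hunks above move aux->hw_mutex out of the innermost retry so a single lock/unlock brackets the whole transaction, with one unlock label on every exit path. A generic sketch of that lock-around-retry shape; my_dev, my_try_once() and MY_MAX_RETRIES are hypothetical:

/*
 * Generic sketch of the locking shape introduced above.
 * my_dev, my_try_once() and MY_MAX_RETRIES are illustrative.
 */
static int my_transfer(struct my_dev *dev)
{
	int retry;
	int err = 0;

	mutex_lock(&dev->hw_mutex);

	for (retry = 0; retry < MY_MAX_RETRIES; retry++) {
		err = my_try_once(dev);
		if (err != -EBUSY)
			goto unlock;	/* done, success or hard failure */
		/* -EBUSY: retry while still holding the lock */
	}

	err = -EIO;			/* too many retries */

unlock:
	mutex_unlock(&dev->hw_mutex);
	return err;
}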
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 809959d56d78..2485fb652716 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
return mstb;
}
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ if (mstb->port_parent) {
+ if (list_empty(&mstb->port_parent->next))
+ kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+ }
+ kfree(mstb);
+}
+
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
@@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
bool wake_tx = false;
/*
+ * init kref again to be used by ports to remove mst branch when it is
+ * not needed anymore
+ */
+ kref_init(kref);
+
+ if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+ kref_get(&mstb->port_parent->kref);
+
+ /*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
* device at this point.
@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
if (wake_tx)
wake_up(&mstb->mgr->tx_waitq);
- kfree(mstb);
+
+ kref_put(kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
* from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock);
+ kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
u8 *rad)
{
- int lct = port->parent->lct;
+ int parent_lct = port->parent->lct;
int shift = 4;
- int idx = lct / 2;
- if (lct > 1) {
- memcpy(rad, port->parent->rad, idx);
- shift = (lct % 2) ? 4 : 0;
+ int idx = (parent_lct - 1) / 2;
+ if (parent_lct > 1) {
+ memcpy(rad, port->parent->rad, idx + 1);
+ shift = (parent_lct % 2) ? 4 : 0;
} else
rad[0] = 0;
rad[idx] |= port->port_num << shift;
- return lct + 1;
+ return parent_lct + 1;
}
/*
@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
return send_link;
}
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
int ret;
- if (port->dpcd_rev >= 0x12) {
- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
- if (!port->guid_valid) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- port,
- DP_GUID,
- 16, port->guid);
- port->guid_valid = true;
+
+ memcpy(mstb->guid, guid, 16);
+
+ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+ if (mstb->port_parent) {
+ ret = drm_dp_send_dpcd_write(
+ mstb->mgr,
+ mstb->port_parent,
+ DP_GUID,
+ 16,
+ mstb->guid);
+ } else {
+
+ ret = drm_dp_dpcd_write(
+ mstb->mgr->aux,
+ DP_GUID,
+ mstb->guid,
+ 16);
}
}
}
@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
- int port_num = mstb->rad[i / 2] >> shift;
+ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
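The two RAD hunks above fix how Relative Address nibbles are indexed and read back: one 4-bit port number per hop, two hops per byte, upper nibble first. As a worked example of the patch's formulas, a branch at lct 3 reached through port 1 and then port 10 ends up with rad[0] = 0x1a; reading hop 1 without the added & 0xf mask would return 26 instead of 10. A small standalone sketch of the pack/extract arithmetic:

/* Worked sketch of the RAD nibble packing fixed above (illustrative). */
#include <stdio.h>

int main(void)
{
	unsigned char rad[8] = { 0 };
	int hops[] = { 1, 10 };		/* port 1, then port 10 */
	int lct = 1, i;

	/* pack: one 4-bit port number per hop, upper nibble first */
	for (i = 0; i < 2; i++) {
		int idx = (lct - 1) / 2;
		int shift = (lct % 2) ? 4 : 0;

		rad[idx] |= hops[i] << shift;
		lct++;
	}
	printf("lct=%d rad[0]=0x%02x\n", lct, rad[0]);	/* lct=3 rad[0]=0x1a */

	/* extract: the & 0xf mask keeps only the addressed nibble */
	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;

		printf("hop %d -> port %d\n", i, (rad[i / 2] >> shift) & 0xf);
	}
	return 0;
}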
@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
- memcpy(port->guid, port_msg->peer_guid, 16);
/* manage mstb port lists with mgr lock - take a reference
for this list */
@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
dowork = true;
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
- int port_num = rad[i / 2] >> shift;
+ int port_num = (rad[i / 2] >> shift) & 0xf;
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
@@ -1210,6 +1237,48 @@ out:
return mstb;
}
+static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+ struct drm_dp_mst_branch *mstb,
+ uint8_t *guid)
+{
+ struct drm_dp_mst_branch *found_mstb;
+ struct drm_dp_mst_port *port;
+
+ if (memcmp(mstb->guid, guid, 16) == 0)
+ return mstb;
+
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (!port->mstb)
+ continue;
+
+ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+
+ if (found_mstb)
+ return found_mstb;
+ }
+
+ return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
+ struct drm_dp_mst_topology_mgr *mgr,
+ uint8_t *guid)
+{
+ struct drm_dp_mst_branch *mstb;
+
+ /* find the port by iterating down */
+ mutex_lock(&mgr->lock);
+
+ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+
+ if (mstb)
+ kref_get(&mstb->kref);
+
+ mutex_unlock(&mgr->lock);
+ return mstb;
+}
+
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_branch *mstb = txmsg->dst;
+ u8 req_type;
/* both msg slots are full */
if (txmsg->seqno == -1) {
@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
txmsg->seqno = 1;
mstb->tx_slots[txmsg->seqno] = txmsg;
}
- hdr->broadcast = 0;
+
+ req_type = txmsg->msg[0] & 0x7f;
+ if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+ req_type == DP_RESOURCE_STATUS_NOTIFY)
+ hdr->broadcast = 1;
+ else
+ hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1;
@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
}
/* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
{
- struct drm_dp_sideband_msg_tx *txmsg;
int ret;
/* construct a chunk from the first msg in the tx_msg queue */
- if (list_empty(&mgr->tx_msg_upq)) {
- mgr->tx_up_in_progress = false;
- return;
- }
-
- txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, true);
- if (ret == 1) {
- /* up txmsgs aren't put in slots - so free after we send it */
- list_del(&txmsg->next);
- kfree(txmsg);
- } else if (ret)
+
+ if (ret != 1)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- mgr->tx_up_in_progress = true;
+
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
}
+
+ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+ if (!mstb->port_parent)
+ return NULL;
+
+ if (mstb->port_parent->mstb != mstb)
+ return mstb->port_parent;
+
+ return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int *port_num)
+{
+ struct drm_dp_mst_branch *rmstb = NULL;
+ struct drm_dp_mst_port *found_port;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary) {
+ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+ if (found_port) {
+ rmstb = found_port->parent;
+ kref_get(&rmstb->kref);
+ *port_num = found_port->port_num;
+ }
+ }
+ mutex_unlock(&mgr->lock);
+ return rmstb;
+}
+
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
@@ -1561,12 +1663,23 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- int len, ret;
+ int len, ret, port_num;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
- if (!mstb)
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
return -EINVAL;
+ port_num = port->port_num;
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb) {
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+ if (!mstb) {
+ drm_dp_put_port(port);
+ return -EINVAL;
+ }
+ }
+
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
@@ -1574,7 +1687,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
}
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port->port_num,
+ len = build_allocate_payload(txmsg, port_num,
id,
pbn);
@@ -1590,6 +1703,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
+ drm_dp_put_port(port);
return ret;
}
@@ -1672,6 +1786,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
} else {
port = NULL;
@@ -1697,6 +1816,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -1844,11 +1966,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_encode_up_ack_reply(txmsg, req_type);
mutex_lock(&mgr->qlock);
- list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
- if (!mgr->tx_up_in_progress) {
- process_single_up_tx_qlock(mgr);
- }
+
+ process_single_up_tx_qlock(mgr, txmsg);
+
mutex_unlock(&mgr->qlock);
+
+ kfree(txmsg);
return 0;
}
@@ -1927,31 +2050,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref);
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
-
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
-
- /* sort out guid */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
- if (ret != 16) {
- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
- goto out_unlock;
- }
-
- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
- if (!mgr->guid_valid) {
- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
- mgr->guid_valid = true;
+ {
+ struct drm_dp_payload reset_pay;
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
}
queue_work(system_long_wq, &mgr->work);
@@ -2015,6 +2124,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2029,6 +2140,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
@@ -2145,28 +2266,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
- struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_branch *mstb = NULL;
bool seqno;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->up_req_recv.initial_hdr.lct,
- mgr->up_req_recv.initial_hdr.rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+
+ if (!mgr->up_req_recv.initial_hdr.broadcast) {
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->up_req_recv.initial_hdr.lct,
+ mgr->up_req_recv.initial_hdr.rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
}
seqno = mgr->up_req_recv.initial_hdr.seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+
+ if (!mstb)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
drm_dp_update_port(mstb, &msg.u.conn_stat);
+
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
(*mgr->cbs->hotplug)(mgr);
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+ if (!mstb)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
@@ -2346,6 +2490,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
*slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
return true;
}
}
@@ -2505,32 +2650,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
- fixed20_12 pix_bw;
- fixed20_12 fbpp;
- fixed20_12 result;
- fixed20_12 margin, tmp;
- u32 res;
-
- pix_bw.full = dfixed_const(clock);
- fbpp.full = dfixed_const(bpp);
- tmp.full = dfixed_const(8);
- fbpp.full = dfixed_div(fbpp, tmp);
-
- result.full = dfixed_mul(pix_bw, fbpp);
- margin.full = dfixed_const(54);
- tmp.full = dfixed_const(64);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_div(result, margin);
-
- margin.full = dfixed_const(1006);
- tmp.full = dfixed_const(1000);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_mul(result, margin);
-
- result.full = dfixed_div(result, tmp);
- result.full = dfixed_ceil(result);
- res = dfixed_trunc(result);
- return res;
+ u64 kbps;
+ s64 peak_kbps;
+ u32 numerator;
+ u32 denominator;
+
+ kbps = clock * bpp;
+
+ /*
+	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+	 * The 54/64 Mbytes/sec unit is an arbitrary unit chosen as a common
+	 * multiplier so that the PBN comes out as an integer for all link
+	 * rate/lane count combinations. The calculation is:
+	 * peak_kbps *= (1006/1000)   add the 0.6% margin
+	 * peak_kbps *= (64/54)       convert to PBN units
+	 * peak_kbps /= 8             convert bits to bytes
+ */
+
+ numerator = 64 * 1006;
+ denominator = 54 * 8 * 1000 * 1000;
+
+ kbps *= numerator;
+ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+ return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
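The rewritten helper above amounts to PBN = ceil(clock_kHz * bpp * 64 * 1006 / (54 * 8 * 10^6)); for the first vector in the self-test that follows, 154000 kHz at 30 bpp gives 297454080000 / 432000000 ≈ 688.55, which rounds up to 689. A plain 64-bit integer sketch of the same arithmetic (an approximation of the in-kernel fixed-point path, but it reproduces the three test vectors added by this patch):

/* Integer sketch of the PBN formula above; the kernel uses
 * drm_fixp_from_fraction()/drm_fixp2int_ceil() instead. */
#include <stdio.h>
#include <stdint.h>

static int calc_pbn(int clock_khz, int bpp)
{
	uint64_t num = (uint64_t)clock_khz * bpp * 64 * 1006;
	uint64_t den = 54ULL * 8 * 1000 * 1000;

	return (int)((num + den - 1) / den);	/* ceiling division */
}

int main(void)
{
	/* matches the self-test vectors: 689, 1047, 1063 */
	printf("%d %d %d\n",
	       calc_pbn(154000, 30),
	       calc_pbn(234000, 30),
	       calc_pbn(297000, 24));
	return 0;
}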
@@ -2538,11 +2682,23 @@ static int test_calc_pbn_mode(void)
{
int ret;
ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689)
+ if (ret != 689) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 154000, 30, 689, ret);
return -EINVAL;
+ }
ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047)
+ if (ret != 1047) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 234000, 30, 1047, ret);
return -EINVAL;
+ }
+ ret = drm_dp_calc_pbn_mode(297000, 24);
+ if (ret != 1063) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 297000, 24, 1063, ret);
+ return -EINVAL;
+ }
return 0;
}
@@ -2683,6 +2839,13 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+ kfree(port);
+}
+
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2703,13 +2866,20 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
+ kref_init(&port->kref);
+ INIT_LIST_HEAD(&port->next);
+
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
- if (!port->input && port->vcpi.vcpi > 0)
+ if (!port->input && port->vcpi.vcpi > 0) {
+ drm_dp_mst_reset_vcpi_slots(mgr, port);
+ drm_dp_update_payload_part1(mgr);
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- kfree(port);
+ }
+
+ kref_put(&port->kref, drm_dp_free_mst_port);
send_hotplug = true;
}
if (send_hotplug)
@@ -2736,7 +2906,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
mutex_init(&mgr->destroy_connector_lock);
- INIT_LIST_HEAD(&mgr->tx_msg_upq);
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_connector_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d5d2c03fd136..8c9ac021608f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -73,6 +73,8 @@
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC (1 << 10)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -99,6 +101,9 @@ static struct edid_quirk {
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+ /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+ { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3820,6 +3825,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info, connector);
+ if (quirks & EDID_QUIRK_FORCE_6BPC)
+ connector->display_info.bpc = 6;
+
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 69cbab5e5c81..5ad036741b99 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1899,7 +1899,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int n, int width, int height)
{
int c, o;
- struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
const struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
@@ -1918,7 +1917,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;
- crtcs = kzalloc(dev->mode_config.num_connector *
+ crtcs = kzalloc(fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
@@ -1964,7 +1963,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (score > best_score) {
best_score = score;
memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
+ fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *));
}
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c7de454e8e88..b205224f1a44 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -338,27 +338,32 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
spin_unlock(&file_priv->table_lock);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
- if (ret < 0) {
- drm_gem_object_handle_unreference_unlocked(obj);
- return ret;
- }
+ if (ret < 0)
+ goto err_unref;
+
*handlep = ret;
ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_remove;
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
+ if (ret)
+ goto err_revoke;
}
return 0;
+
+err_revoke:
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+err_remove:
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, *handlep);
+ spin_unlock(&file_priv->table_lock);
+err_unref:
+ drm_gem_object_handle_unreference_unlocked(obj);
+ return ret;
}
/**
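The restructured tail above is the usual goto-ladder unwind: each successful acquisition gains a label that undoes it, and later failures jump to progressively earlier labels so resources are released in reverse order. A tiny generic sketch with hypothetical acquire/release helpers:

/* Generic sketch of the goto-ladder error unwind used above;
 * acquire_a/b/c() and release_a/b() are illustrative names. */
static int setup_object(struct my_obj *obj)
{
	int ret;

	ret = acquire_a(obj);
	if (ret)
		return ret;

	ret = acquire_b(obj);
	if (ret)
		goto err_release_a;

	ret = acquire_c(obj);
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b(obj);
err_release_a:
	release_a(obj);
	return ret;
}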
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 57676f8d7ecf..a6289752be16 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
typedef struct drm_mode_fb_cmd232 {
u32 fb_id;
u32 width;
@@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
return 0;
}
+#endif
static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
@@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+#endif
};
/**
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 607f493ae801..8090989185b2 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
}
+ /*
+ * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
+ * interval? If so then vblank irqs keep running and it will likely
+ * happen that the hardware vblank counter is not trustworthy as it
+ * might reset at some point in that interval and vblank timestamps
+ * are not trustworthy either in that interval. Iow. this can result
+ * in a bogus diff >> 1 which must be avoided as it would cause
+ * random large forward jumps of the software vblank counter.
+ */
+ if (diff > 1 && (vblank->inmodeset & 0x2)) {
+		DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u"
+ " due to pre-modeset.\n", pipe, diff);
+ diff = 1;
+ }
+
+ /*
+	 * FIXME: Need to replace this hack with proper seqlocks.
+ *
+ * Restrict the bump of the software vblank counter to a safe maximum
+ * value of +1 whenever there is the possibility that concurrent readers
+ * of vblank timestamps could be active at the moment, as the current
+ * implementation of the timestamp caching and updating is not safe
+ * against concurrent readers for calls to store_vblank() with a bump
+ * of anything but +1. A bump != 1 would very likely return corrupted
+ * timestamps to userspace, because the same slot in the cache could
+ * be concurrently written by store_vblank() and read by one of those
+ * readers without the read-retry logic detecting the collision.
+ *
+ * Concurrent readers can exist when we are called from the
+ * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
+ * irq callers. However, all those calls to us are happening with the
+ * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
+ * can't increase while we are executing. Therefore a zero refcount at
+ * this point is safe for arbitrary counter bumps if we are called
+ * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
+ * we must also accept a refcount of 1, as whenever we are called from
+ * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
+ * we must let that one pass through in order to not lose vblank counts
+ * during vblank irq off - which would completely defeat the whole
+ * point of this routine.
+ *
+ * Whenever we are called from vblank irq, we have to assume concurrent
+ * readers exist or can show up any time during our execution, even if
+ * the refcount is currently zero, as vblank irqs are usually only
+ * enabled due to the presence of readers, and because when we are called
+ * from vblank irq we can't hold the vbl_lock to protect us from sudden
+ * bumps in vblank refcount. Therefore also restrict bumps to +1 when
+ * called from vblank irq.
+ */
+ if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
+ (flags & DRM_CALLED_FROM_VBLIRQ))) {
+		DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u "
+ "refcount %u, vblirq %u\n", pipe, diff,
+ atomic_read(&vblank->refcount),
+ (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
+ diff = 1;
+ }
+
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
- vblank_disable_and_save(dev, pipe);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
+ /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
+ vblank_disable_and_save(dev, pipe);
+
wake_up(&vblank->queue);
/*
@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
- if (atomic_read(&vblank->refcount) != 0 ||
- (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
+ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
+ drm_reset_vblank_timestamp(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (vblank->inmodeset & 0x2)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index cd74a0953f42..39e30abddf08 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1487,6 +1487,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
if (out->status != MODE_OK)
goto out;
+ drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
+
ret = 0;
out:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 17cea400ae32..d3de377dc857 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
* FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
* be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
*/
-static int __deprecated
+static int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index c707fa6fca85..e3bdc8b1c32c 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_unreference(&r->gem);
+ drm_gem_object_unreference_unlocked(&r->gem);
*handlep = handle;
return 0;
}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 6b43ae3ffd73..1616af209bfc 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -72,7 +72,7 @@ static const char *const dsi_errors[] = {
"RX Prot Violation",
"HS Generic Write FIFO Full",
"LP Generic Write FIFO Full",
- "Generic Read Data Avail"
+ "Generic Read Data Avail",
"Special Packet Sent",
"Tearing Effect",
};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b4741d121a74..61fcb3b22297 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ intel_setup_gmbus(dev);
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -451,6 +453,7 @@ cleanup_gem:
cleanup_irq:
intel_guc_ucode_fini(dev);
drm_irq_uninstall(dev);
+ intel_teardown_gmbus(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
- intel_setup_gmbus(dev);
intel_opregion_setup(dev);
i915_gem_load(dev);
@@ -1099,7 +1101,6 @@ out_gem_unload:
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
- intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
intel_csr_ucode_fini(dev);
- intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->hotplug.dp_wq);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 760e0ce4aa26..a6ad938f44a6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -531,7 +531,10 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
- } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
+ } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+ ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+ pch->subsystem_vendor == 0x1af4 &&
+ pch->subsystem_device == 0x1100)) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f4af19a0d569..d400d6773bbb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2614,6 +2614,7 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
@@ -3312,6 +3313,9 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
}
extern void intel_i2c_reset(struct drm_device *dev);
+/* intel_bios.c */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 02ceb7a4b481..0433d25f9d23 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
i915_gem_context_unreference(lctx);
ring->last_context = NULL;
}
+
+ /* Force the GPU state to be reinitialised on enabling */
+ if (ring->default_context)
+ ring->default_context->legacy_hw_ctx.initialized = false;
}
}
@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
if (ret)
goto unpin_out;
- if (!to->legacy_hw_ctx.initialized) {
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
hw_flags |= MI_RESTORE_INHIBIT;
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 86c7500454b4..b37fe0df743e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2747,6 +2747,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
}
if (drm_mm_initialized(&vm->mm)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f7df54a8ee2b..c0a96f1ee18e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0d228f909dcb..0f42a2782afc 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
spt_irq_handler(dev, pch_iir);
else
cpt_irq_handler(dev, pch_iir);
- } else
- DRM_ERROR("The master control interrupt lied (SDE)!\n");
-
+ } else {
+ /*
+ * Like on previous PCH there seems to be something
+ * fishy going on with forwarding PCH interrupts.
+ */
+ DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+ }
}
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bc7b8faba84d..cace154bbdc0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2838,7 +2838,14 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP 0x138170
-#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
@@ -3233,19 +3240,20 @@ enum skl_disp_power_wells {
#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
/*
- * HDMI/DP bits are gen4+
+ * HDMI/DP bits are g4x+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
 * Please check the detailed lore in the commit message for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-/* VLV DP/HDMI bits again match Bspec */
-#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
@@ -7350,6 +7358,8 @@ enum skl_disp_power_wells {
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
+#define CDCLK_FREQ 0x46200
+
#define TRANSA_MSA_MISC 0x60410
#define TRANSB_MSA_MISC 0x61410
#define TRANSC_MSA_MISC 0x62410
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index ce82f9c7df24..d14bdc537587 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1351,3 +1351,42 @@ intel_parse_bios(struct drm_device *dev)
return 0;
}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ /* FIXME maybe deal with port A as well? */
+ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6a2c76e367a5..97d1ed20418b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -248,8 +248,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev)) {
+ if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+ DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+ return false;
+ }
+
pipe_config->pipe_bpp = 24;
+ }
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9e530a739354..fc28c512ece3 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -180,7 +180,8 @@ struct stepping_info {
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'}
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
+ {'J', '0'}, {'K', '0'}
};
static struct stepping_info bxt_stepping_info[] = {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index a6752a61d99f..3c6b07683bd9 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
- ddi_translations_edp = bdw_ddi_translations_edp;
+
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations_edp = bdw_ddi_translations_edp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ } else {
+ ddi_translations_edp = bdw_ddi_translations_dp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ }
+
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
@@ -1582,7 +1590,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_DP_MST) {
switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
@@ -3187,12 +3196,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
- /* HDMI has nothing special to destroy, so we can go with this. */
- intel_dp_encoder_destroy(encoder);
-}
-
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3211,7 +3214,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .destroy = intel_ddi_destroy,
+ .reset = intel_dp_encoder_reset,
+ .destroy = intel_dp_encoder_destroy,
};
static struct intel_connector *
@@ -3283,6 +3287,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 32cf97346978..909d1d71d130 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2950,13 +2950,13 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
}
}
-unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
- struct drm_i915_gem_object *obj,
- unsigned int plane)
+u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj,
+ unsigned int plane)
{
const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
struct i915_vma *vma;
- unsigned char *offset;
+ u64 offset;
if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
view = &i915_ggtt_view_rotated;
@@ -2966,14 +2966,16 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
view->type))
return -1;
- offset = (unsigned char *)vma->node.start;
+ offset = vma->node.start;
if (plane == 1) {
offset += vma->ggtt_view.rotation_info.uv_start_page *
PAGE_SIZE;
}
- return (unsigned long)offset;
+ WARN_ON(upper_32_bits(offset));
+
+ return lower_32_bits(offset);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
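The offset change above narrows the value to what the 32-bit surface register can hold while asserting nothing is lost: upper_32_bits()/lower_32_bits() split a 64-bit value at bit 32, and the WARN_ON fires if any high bits are set. A standalone illustration with plain C stand-ins for those kernel macros:

/* Standalone illustration of the upper/lower 32-bit split used above;
 * plain C stand-ins for the kernel's upper_32_bits()/lower_32_bits(). */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

static uint32_t plane_surface_offset(uint64_t ggtt_offset)
{
	/* the surface register is 32 bits wide; warn instead of silently truncating */
	if (upper_32(ggtt_offset))
		fprintf(stderr, "offset 0x%llx exceeds 32 bits\n",
			(unsigned long long)ggtt_offset);

	return lower_32(ggtt_offset);
}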
@@ -3099,7 +3101,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
- unsigned long surf_addr;
+ u32 surf_addr;
struct intel_crtc_state *crtc_state = intel_crtc->config;
struct intel_plane_state *plane_state;
int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
@@ -4447,7 +4449,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@@ -8228,12 +8230,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
+ int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
+ bool using_ssc_source = false;
/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
@@ -8260,8 +8264,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
can_ssc = true;
}
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
- has_panel, has_lvds, has_ck505);
+ /* Check if any DPLLs are using the SSC source */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ u32 temp = I915_READ(PCH_DPLL(i));
+
+ if (!(temp & DPLL_VCO_ENABLE))
+ continue;
+
+ if ((temp & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ using_ssc_source = true;
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
@@ -8298,9 +8316,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- } else {
- final |= DREF_SSC_SOURCE_DISABLE;
- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ } else if (using_ssc_source) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+ final |= DREF_SSC1_ENABLE;
}
if (final == val)
@@ -8346,7 +8364,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
- DRM_DEBUG_KMS("Disabling SSC entirely\n");
+ DRM_DEBUG_KMS("Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -8357,16 +8375,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
- /* Turn off the SSC source */
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_DISABLE;
+ if (!using_ssc_source) {
+ DRM_DEBUG_KMS("Disabling SSC source\n");
- /* Turn off SSC1 */
- val &= ~DREF_SSC1_ENABLE;
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
}
BUG_ON(val != final);
@@ -9669,6 +9691,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
mutex_unlock(&dev_priv->rps.hw_lock);
+ I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
intel_update_cdclk(dev);
WARN(cdclk != dev_priv->cdclk_freq,
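A small worked example of the CDCLK_FREQ value written above, assuming cdclk is expressed in kHz as elsewhere in the driver (the helper name is made up):

/* Hypothetical helper, for illustration only. */
static u32 cdclk_freq_field(int cdclk_khz)
{
	/* e.g. 450000 kHz -> 449, 675000 kHz -> 674 */
	return DIV_ROUND_CLOSEST(cdclk_khz, 1000) - 1;
}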
@@ -13537,11 +13561,12 @@ intel_check_primary_plane(struct drm_plane *plane,
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
bool can_position = false;
- /* use scaler when colorkey is not required */
- if (INTEL_INFO(plane->dev)->gen >= 9 &&
- state->ckey.flags == I915_SET_COLORKEY_NONE) {
- min_scale = 1;
- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+ if (INTEL_INFO(plane->dev)->gen >= 9) {
+ /* use scaler when colorkey is not required */
+ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+ min_scale = 1;
+ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+ }
can_position = true;
}
@@ -14137,6 +14162,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
+ bool has_edp, has_port;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@@ -14145,27 +14172,37 @@ static void intel_setup_outputs(struct drm_device *dev)
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI

+ * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the strap
+ * and the VBT for the presence of the port. Additionally we can't
+ * trust the port type the VBT declares, as we've seen at least
+ * HDMI ports that the VBT claims are DP or eDP.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);
- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
- /* eDP not supported on port D, so don't check VBT */
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED)
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
+ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
intel_dsi_init(dev);
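A minimal sketch of the per-port probe rule the VLV/CHV hunk above implements (helper names here are hypothetical; the real code uses the strap registers, intel_bios_is_port_present() and intel_dp_init()):

/* Sketch only: register DP/eDP if either the strap or the VBT reports
 * the port, then fall back to HDMI on the same port when no eDP panel
 * actually came up.  intel_dp_init() returning false is what clears
 * has_edp and lets the HDMI encoder be registered instead.
 */
static void probe_digital_port(bool strap, bool in_vbt, bool vbt_says_edp)
{
	bool has_edp = vbt_says_edp;

	if (strap || in_vbt)
		has_edp &= try_dp_init();	/* hypothetical helper */
	if ((strap || in_vbt) && !has_edp)
		try_hdmi_init();		/* hypothetical helper */
}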
@@ -15565,6 +15602,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
intel_cleanup_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
+
+ intel_teardown_gmbus(dev);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 78b8ec84d576..ebbd23407a80 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3628,8 +3628,7 @@ static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
uint8_t dp_train_pat)
{
- if (!intel_dp->train_set_valid)
- memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
@@ -3746,22 +3745,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
break;
}
- /*
- * if we used previously trained voltage and pre-emphasis values
- * and we don't get clock recovery, reset link training values
- */
- if (intel_dp->train_set_valid) {
- DRM_DEBUG_KMS("clock recovery not ok, reset");
- /* clear the flag as we are not reusing train set */
- intel_dp->train_set_valid = false;
- if (!intel_dp_reset_link_train(intel_dp, &DP,
- DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE)) {
- DRM_ERROR("failed to enable link training\n");
- return;
- }
- continue;
- }
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
@@ -3854,7 +3837,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
- intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3871,7 +3853,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
- intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3893,10 +3874,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
intel_dp->DP = DP;
- if (channel_eq) {
- intel_dp->train_set_valid = true;
+ if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
- }
}
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
@@ -4613,20 +4592,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
u32 bit;
switch (port->port) {
case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
MISSING_CASE(port->port);
@@ -4678,8 +4657,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_VALLEYVIEW(dev_priv))
- return vlv_digital_port_connected(dev_priv, port);
+ else if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
@@ -5035,7 +5014,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -5077,15 +5056,17 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
- struct intel_dp *intel_dp;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!HAS_DDI(dev_priv))
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
- intel_dp = enc_to_intel_dp(encoder);
-
pps_lock(intel_dp);
/*
@@ -5157,9 +5138,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
- /* indicate that we need to restart link training */
- intel_dp->train_set_valid = false;
-
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
@@ -6135,8 +6113,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return true;
}
-void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ int output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@@ -6146,7 +6125,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
@@ -6201,15 +6180,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
- return;
+ return true;
err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}
void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 0639275fc471..06bd9257acdc 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -477,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ intel_connector->unregister(intel_connector);
+
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -490,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
- drm_modeset_unlock_all(dev);
- intel_connector->unregister(intel_connector);
-
- drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0d00f07b7163..41442e619595 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -783,7 +783,6 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
- bool train_set_valid;
/* Displayport compliance testing */
unsigned long compliance_test_type;
@@ -1178,9 +1177,9 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
-unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
- struct drm_i915_gem_object *obj,
- unsigned int plane);
+u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj,
+ unsigned int plane);
u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
@@ -1196,7 +1195,7 @@ void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);
/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1204,6 +1203,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index a5e99ac305da..a8912aecc31f 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
gpio = *data++;
/* pull up/down */
- action = *data++;
+ action = *data++ & 1;
+
+ if (gpio >= ARRAY_SIZE(gtable)) {
+ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+ goto out;
+ }
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->sb_lock);
+out:
return data;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4fd5fdfef6bd..c0c094d5b822 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -362,12 +362,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
uint64_t conn_configured = 0, mask;
int pass = 0;
- save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
+ save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
GFP_KERNEL);
if (!save_enabled)
return false;
- memcpy(save_enabled, enabled, dev->mode_config.num_connector);
+ memcpy(save_enabled, enabled, fb_helper->connector_count);
mask = (1 << fb_helper->connector_count) - 1;
retry:
for (i = 0; i < fb_helper->connector_count; i++) {
@@ -501,7 +501,7 @@ retry:
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
- memcpy(enabled, save_enabled, dev->mode_config.num_connector);
+ memcpy(enabled, save_enabled, fb_helper->connector_count);
kfree(save_enabled);
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e6c035b0fc1c..dff69fef47e0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1388,8 +1388,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}
- if (!live_status)
- DRM_DEBUG_KMS("Live status not up!");
+ if (!live_status) {
+ DRM_DEBUG_KMS("HDMI live status down\n");
+ /*
+ * The live status register is not reliable on all Intel platforms,
+ * so only trust live_status where it is known to work; for the
+ * other platforms, read the EDID to determine the presence of a sink.
+ */
+ if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+ live_status = true;
+ }
intel_hdmi_unset_edid(connector);
@@ -2022,6 +2030,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b17785719598..d7a6437d9da2 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ /* MST has a dynamic intel_connector->encoder and its reprobing
+ * is all handled by the MST helpers. */
if (intel_connector->mst_port)
+ continue;
+
+ if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+ intel_connector->encoder->hpd_pin > HPD_NONE)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8324654037b6..f3bee54c414f 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
return 0;
err:
- while (--pin) {
+ while (pin--) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
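The one-character change above fixes the error-unwind loop; a generic sketch of the pattern and the off-by-one it avoids (setup()/teardown() are hypothetical):

/* If setup of index i fails, indices 0 .. i-1 must be torn down.
 *
 *   while (--i)  -> visits i-1 .. 1, silently skipping index 0
 *   while (i--)  -> visits i-1 .. 0, i.e. exactly what was set up
 */
int init_all(int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (setup(i))
			goto err;
	return 0;
err:
	while (i--)
		teardown(i);
	return -EIO;
}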
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 88e12bdf79e2..7058f75c7b42 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end, so no immediate wrap is needed;
+ * we only have to wait for the reserved size, measured
+ * from the start of the ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 6dc13c02c28e..e362a30776fa 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- DRM_ERROR("No ACPI video bus found\n");
+ DRM_DEBUG_KMS("No ACPI video bus found\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f091ad12d694..1e851e037c29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1789,16 +1789,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+ /*
+ * We treat the cursor plane as always-on for the purposes of watermark
+ * calculation. Until we have two-stage watermark programming merged,
+ * this is necessary to avoid flickering.
+ */
+ int cpp = 4;
+ int width = pstate->visible ? pstate->base.crtc_w : 64;
- if (!cstate->base.active || !pstate->visible)
+ if (!cstate->base.active)
return 0;
return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->dst),
- bpp,
- mem_value);
+ width, cpp, mem_value);
}
/* Only for WM_LP. */
@@ -3880,6 +3884,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ memset(active, 0, sizeof(*active));
+
active->pipe_enabled = intel_crtc->active;
if (active->pipe_enabled) {
@@ -4520,7 +4526,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6620,6 +6627,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+ /*
+ * Wait at least 100 clocks before re-enabling clock gating. See
+ * the definition of L3SQCREG1 in BSpec.
+ */
+ POSTING_READ(GEN8_L3SQCREG1);
+ udelay(1);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9461a238f5d5..9d48443bca2e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
@@ -1920,6 +1922,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
return 0;
}
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+ if (!dev_priv->status_page_dmah)
+ return;
+
+ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+ ring->status_page.page_addr = NULL;
+}
+
static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@@ -1936,9 +1949,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
static int init_status_page(struct intel_engine_cs *ring)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = ring->status_page.obj;
- if ((obj = ring->status_page.obj) == NULL) {
+ if (obj == NULL) {
unsigned flags;
int ret;
@@ -2132,7 +2145,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
} else {
- BUG_ON(ring->id != RCS);
+ WARN_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
@@ -2177,7 +2190,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(ring);
+ if (I915_NEED_GFX_HWS(ring->dev)) {
+ cleanup_status_page(ring);
+ } else {
+ WARN_ON(ring->id != RCS);
+ cleanup_phys_status_page(ring);
+ }
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
@@ -2339,11 +2357,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end, so no immediate wrap is needed;
+ * we only have to wait for the reserved size, measured
+ * from the start of the ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 56dc132e8e20..2cc6aa072f4c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -195,7 +195,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
- unsigned long surf_addr;
+ u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 43cba129a0c0..cc91ae832ffb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1132,7 +1132,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ if (IS_HASWELL(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 7b990b4e96d2..5378bdc3bbf9 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -26,6 +26,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_of.h>
+#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
@@ -504,6 +505,13 @@ static int compare_of(struct device *dev, void *data)
{
struct device_node *np = data;
+ /* Special case for DI, dev->of_node may not be set yet */
+ if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+
+ return pdata->of_node == np;
+ }
+
/* Special case for LDB, one device for two channels */
if (of_node_cmp(np->name, "lvds-channel") == 0) {
np = of_get_parent(np);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4ab841eebee1..9b0abd44b751 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -369,7 +369,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
- ipu_crtc->dev->of_node);
+ pdata->of_node);
if (ret) {
dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index c99d3fe12881..e5bb40e58020 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -194,7 +194,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
}
}
- fvv = pllreffreq * testn / testm;
+ fvv = pllreffreq * (n + 1) / (m + 1);
fvv = (fvv - 800000) / 50000;
if (fvv > 15)
@@ -214,6 +214,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
WREG_DAC(MGA1064_PIX_PLLC_M, m);
WREG_DAC(MGA1064_PIX_PLLC_N, n);
WREG_DAC(MGA1064_PIX_PLLC_P, p);
+
+ if (mdev->unique_rev_id >= 0x04) {
+ WREG_DAC(0x1a, 0x09);
+ msleep(20);
+ WREG_DAC(0x1a, 0x01);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6d7cd3fe21e7..1847f83b1e33 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return submit;
}
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_from_user_inatomic(to, from, n);
+ return -EFAULT;
+}
+
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
@@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
int ret = 0;
spin_lock(&file->table_lock);
+ pagefault_disable();
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
@@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret) {
- ret = -EFAULT;
- goto out_unlock;
+ ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+ if (unlikely(ret)) {
+ pagefault_enable();
+ spin_unlock(&file->table_lock);
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret)
+ goto out;
+ spin_lock(&file->table_lock);
+ pagefault_disable();
}
if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- submit->nr_bos = i;
+ pagefault_enable();
spin_unlock(&file->table_lock);
+out:
+ submit->nr_bos = i;
+
return ret;
}
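The hunks above switch to a try-atomic-then-retry user copy; a condensed sketch of that pattern (identifiers shortened, so treat the names as placeholders):

/* Hold the spinlock and try a non-faulting copy first; only if it
 * faults, drop the lock, do the sleeping copy_from_user(), and retake
 * the lock before continuing the loop.
 */
spin_lock(&file->table_lock);
pagefault_disable();
for (i = 0; i < nr; i++) {
	if (copy_from_user_inatomic(&bo, uptr(i), sizeof(bo))) {
		pagefault_enable();
		spin_unlock(&file->table_lock);
		if (copy_from_user(&bo, uptr(i), sizeof(bo)))
			goto out;	/* genuine fault, give up */
		spin_lock(&file->table_lock);
		pagefault_disable();
	}
	/* ... validate bo ... */
}
pagefault_enable();
spin_unlock(&file->table_lock);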
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2e7cbe933533..2a5ed7460354 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ mutex_lock(&drm->dev->mode_config.mutex);
if (plugged)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&drm->dev->mode_config.mutex);
+
drm_helper_hpd_irq_event(connector->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 64c8d932d5f1..58a3f7cf2fb3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
nv_crtc->lut.depth = 0;
}
- /* Make sure that drm and hw vblank irqs get resumed if needed. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_on(dev, head);
-
/* This should ensure we don't hit a locking problem when someone
* wakes us up via a connector. We should never go into suspend
* while the display is on anyways.
@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
drm_helper_resume_force_mode(dev);
+ /* Make sure that drm and hw vblank irqs get resumed if needed. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_on(dev, head);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1d3ee5179ab8..d236fc7c425b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -308,7 +308,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
- /* remove conflicting drivers (vesafb, efifb etc) */
+ /* We need to check that the chipset is supported before booting
+ * fbdev off the hardware, as there's no way to put it back.
+ */
+ ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ if (ret)
+ return ret;
+
+ nvkm_device_del(&device);
+
+ /* Remove conflicting drivers (vesafb, efifb etc). */
aper = alloc_apertures(3);
if (!aper)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 59f27e774acb..e40a1b07a014 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -557,6 +557,8 @@ nouveau_fbcon_init(struct drm_device *dev)
if (ret)
goto fini;
+ if (fbcon->helper.fbdev)
+ fbcon->helper.fbdev->pixmap.buf_align = 4;
return 0;
fini:
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 60e32c4e4e49..35ecc0d0458f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
- struct nvkm_device *device;
+ struct nvkm_device *device = NULL;
struct drm_device *drm;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 789dc2993b0d..f90568327468 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t fg;
uint32_t bg;
uint32_t dsize;
- uint32_t width;
uint32_t *data = (uint32_t *)image->data;
int ret;
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 8);
- dsize = ALIGN(width * image->height, 32) >> 5;
-
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e05499d6ed83..c8e096533f60 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
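A worked example of the dword count computed above, assuming the usual 1-bit-per-pixel fbcon glyphs whose rows are padded to 8 pixels (the 12x22 glyph size is invented for illustration):

/*
 *   old: ALIGN(12, 32) * 22 >> 5           = (32 * 22) >> 5        = 22 dwords
 *   new: ALIGN(ALIGN(12, 8) * 22, 32) >> 5 = ALIGN(352, 32) >> 5   = 11 dwords
 *
 * i.e. for narrow glyphs the old 32-pixel alignment pushed roughly
 * twice as many words as the source image actually contains.
 */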
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index c97395b4a312..22d32578dafd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 3216e157a8a0..89da47234016 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
struct nvkm_ramht *ramht = *pramht;
if (ramht) {
nvkm_gpuobj_del(&ramht->gpuobj);
- kfree(*pramht);
+ vfree(*pramht);
*pramht = NULL;
}
}
@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
struct nvkm_ramht *ramht;
int ret, i;
- if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
- sizeof(*ramht->data), GFP_KERNEL)))
+ if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
+ (size >> 3) * sizeof(*ramht->data))))
return -ENOMEM;
ramht->device = device;
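A short note on the kzalloc to vzalloc switch above (the entry size is an assumption used only for scale):

/* The tracking array scales with the RAMHT: a 512 KiB RAMHT gives
 * size >> 3 = 65536 entries, so the allocation can easily reach
 * hundreds of kibibytes.  vzalloc() avoids demanding that much
 * physically contiguous memory, and nvkm_ramht_del() must then free
 * the structure with vfree(), hence both hunks.
 */
ramht = vzalloc(sizeof(*ramht) + (size >> 3) * sizeof(*ramht->data));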
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
return -ENOMEM;
- *pdevice = &tdev->device;
+
tdev->func = func;
tdev->pdev = pdev;
tdev->irq = -1;
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(tdev->vdd))
- return PTR_ERR(tdev->vdd);
+ if (IS_ERR(tdev->vdd)) {
+ ret = PTR_ERR(tdev->vdd);
+ goto free;
+ }
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->rst))
- return PTR_ERR(tdev->rst);
+ if (IS_ERR(tdev->rst)) {
+ ret = PTR_ERR(tdev->rst);
+ goto free;
+ }
tdev->clk = devm_clk_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->clk))
- return PTR_ERR(tdev->clk);
+ if (IS_ERR(tdev->clk)) {
+ ret = PTR_ERR(tdev->clk);
+ goto free;
+ }
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
- if (IS_ERR(tdev->clk_pwr))
- return PTR_ERR(tdev->clk_pwr);
+ if (IS_ERR(tdev->clk_pwr)) {
+ ret = PTR_ERR(tdev->clk_pwr);
+ goto free;
+ }
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
- return ret;
+ goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
if (ret)
- return ret;
+ goto powerdown;
+
+ *pdevice = &tdev->device;
return 0;
+
+powerdown:
+ nvkm_device_tegra_power_down(tdev);
+remove:
+ nvkm_device_tegra_remove_iommu(tdev);
+free:
+ kfree(tdev);
+ return ret;
}
#else
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
.outp = outp,
}, *dp = &_dp;
u32 datarate = 0;
+ u8 pwr;
int ret;
if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
/* disable link interrupt handling during link training */
nvkm_notify_put(&outp->irq);
+ /* ensure sink is not in a low-power state */
+ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
+ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
+ pwr &= ~DPCD_SC00_SET_POWER;
+ pwr |= DPCD_SC00_SET_POWER_D0;
+ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
+ }
+ }
+
/* enable down-spreading and execute pre-train script from vbios */
dp_link_train_init(dp, outp->dpcd[3] & 0x01);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
+/* DPCD Sink Control */
+#define DPCD_SC00 0x00600
+#define DPCD_SC00_SET_POWER 0x03
+#define DPCD_SC00_SET_POWER_D0 0x01
+#define DPCD_SC00_SET_POWER_D3 0x03
+
void nvkm_dp_train(struct work_struct *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index b4b41b135643..2aaf0dd19a55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -40,8 +40,8 @@ static int
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 loff = gf119_sor_loff(outp);
- nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+ const u32 soff = gf119_sor_soff(outp);
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index bfcc6408a772..b7f4b826febe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -36,7 +36,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+ mutex_lock(&chan->fifo->base.engine.subdev.mutex);
nvkm_ramht_remove(imem->ramht, cookie);
+ mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 9f5dfc85147a..eeeea1c2ca23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -874,22 +874,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
}
static const struct nvkm_enum gf100_mp_warp_error[] = {
- { 0x00, "NO_ERROR" },
- { 0x01, "STACK_MISMATCH" },
+ { 0x01, "STACK_ERROR" },
+ { 0x02, "API_STACK_ERROR" },
+ { 0x03, "RET_EMPTY_STACK_ERROR" },
+ { 0x04, "PC_WRAP" },
{ 0x05, "MISALIGNED_PC" },
- { 0x08, "MISALIGNED_GPR" },
- { 0x09, "INVALID_OPCODE" },
- { 0x0d, "GPR_OUT_OF_BOUNDS" },
- { 0x0e, "MEM_OUT_OF_BOUNDS" },
- { 0x0f, "UNALIGNED_MEM_ACCESS" },
+ { 0x06, "PC_OVERFLOW" },
+ { 0x07, "MISALIGNED_IMMC_ADDR" },
+ { 0x08, "MISALIGNED_REG" },
+ { 0x09, "ILLEGAL_INSTR_ENCODING" },
+ { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+ { 0x0b, "ILLEGAL_INSTR_PARAM" },
+ { 0x0c, "INVALID_CONST_ADDR" },
+ { 0x0d, "OOR_REG" },
+ { 0x0e, "OOR_ADDR" },
+ { 0x0f, "MISALIGNED_ADDR" },
{ 0x10, "INVALID_ADDR_SPACE" },
- { 0x11, "INVALID_PARAM" },
+ { 0x11, "ILLEGAL_INSTR_PARAM2" },
+ { 0x12, "INVALID_CONST_ADDR_LDC" },
+ { 0x13, "GEOMETRY_SM_ERROR" },
+ { 0x14, "DIVERGENT" },
+ { 0x15, "WARP_EXIT" },
{}
};
static const struct nvkm_bitfield gf100_mp_global_error[] = {
+ { 0x00000001, "SM_TO_SM_FAULT" },
+ { 0x00000002, "L1_ERROR" },
{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
- { 0x00000008, "OUT_OF_STACK_SPACE" },
+ { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+ { 0x00000010, "BPT_INT" },
+ { 0x00000020, "BPT_PAUSE" },
+ { 0x00000040, "SINGLE_STEP_COMPLETE" },
+ { 0x20000000, "ECC_SEC_ERROR" },
+ { 0x40000000, "ECC_DED_ERROR" },
+ { 0x80000000, "TIMEOUT" },
{}
};
@@ -1717,6 +1736,8 @@ gf100_gr_init(struct gf100_gr *gr)
gf100_gr_mmio(gr, gr->func->mmio);
+ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 69de8c6259fe..f1e15a4d4f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 2207dac23981..300f5ed5de0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index 4bef72a9d106..3fda594700e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -59,9 +59,11 @@ static void
nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
struct nvkm_device *device = pm->engine.subdev.device;
- if (pm->sequence != pm->sequence) {
+ struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
+
+ if (nv40pm->sequence != pm->sequence) {
nvkm_wr32(device, 0x400084, 0x00000020);
- pm->sequence = pm->sequence;
+ nv40pm->sequence = pm->sequence;
}
}
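The fix above works because the chipset-specific counter lives in the wrapping structure; a minimal illustration of recovering it with container_of() (the struct layout shown is an assumption):

struct nv40_pm {
	struct nvkm_pm base;	/* embedded generic state */
	u32 sequence;		/* chipset-local sequence copy */
};

/* Given a struct nvkm_pm *pm known to be embedded in an nv40_pm: */
struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);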
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 183aea1abebc..5edebf495c07 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
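A worked example of the hotspot bookkeeping introduced above (the numbers are invented):

/* Previous state: cur = (100, 80), hot_spot = (0, 0).
 * A new cursor image arrives with hot_x/hot_y = (5, 7):
 *
 *   cur_x += 0 - 5  -> 95        cur_y += 0 - 7  -> 73
 *   hot_spot        -> (5, 7)
 *   position sent to the device = cur + hot_spot = (100, 80)
 *
 * The visible cursor tip therefore stays put across the image change,
 * and later moves also report cur + hot_spot to the device.
 */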
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 56e1d633875e..6e6c76080d6a 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
* correctly globaly, since that would require
* tracking all of our palettes. */
ret = qxl_bo_kmap(palette_bo, (void **)&pal);
+ if (ret)
+ return ret;
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 01a86948eb8c..3ab90179e9ab 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577497ca..7c2e78201ead 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
cmd->command_size))
return -EFAULT;
- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+ reloc_info = kmalloc_array(cmd->relocs_num,
+ sizeof(struct qxl_reloc_info), GFP_KERNEL);
if (!reloc_info)
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad24b31..79bab6fd76bb 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1739,6 +1739,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
@@ -1748,6 +1749,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
@@ -1769,6 +1774,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
@@ -1784,6 +1790,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index bd73b4069069..44ee72e04df9 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -302,77 +302,31 @@ static int convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
-/* get the max pix clock supported by the link rate and lane num */
-static int dp_get_max_dp_pix_clock(int link_rate,
- int lane_num,
- int bpp)
-{
- return (link_rate * lane_num * 8) / bpp;
-}
-
/***** radeon specific DP functions *****/
-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE])
-{
- int max_link_rate;
-
- if (radeon_connector_is_dp12_capable(connector))
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
- else
- max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
-
- return max_link_rate;
-}
-
-/* First get the min lane# when low rate is used according to pixel clock
- * (prefer low rate), second check max lane# supported by DP panel,
- * if the max lane# < low rate lane# then use max lane# instead.
- */
-static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
-{
- int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
- int max_lane_num = drm_dp_max_lane_count(dpcd);
- int lane_num;
- int max_dp_pix_clock;
-
- for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
- max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
- if (pix_clock <= max_dp_pix_clock)
- break;
- }
-
- return lane_num;
-}
-
-static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- int pix_clock)
+int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 dpcd[DP_DPCD_SIZE],
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int lane_num, max_pix_clock;
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- return 270000;
-
- lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
- max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 162000;
- max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 270000;
- if (radeon_connector_is_dp12_capable(connector)) {
- max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
- if (pix_clock <= max_pix_clock)
- return 540000;
+ static const unsigned link_rates[3] = { 162000, 270000, 540000 };
+ unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
+ unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ unsigned lane_num, i, max_pix_clock;
+
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
}
- return radeon_dp_get_max_link_rate(connector, dpcd);
+ return -EINVAL;
}
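A worked pass through the new selection loop above, for an invented mode and a sink that reports 4 lanes and a 540000 maximum link rate:

/* pix_clock = 148500 (a 1080p60 mode), bpp = 24:
 *
 *   1 lane @ 162000: 1 * 162000 * 8 / 24 =  54000  < 148500
 *   1 lane @ 270000: 1 * 270000 * 8 / 24 =  90000  < 148500
 *   1 lane @ 540000: 1 * 540000 * 8 / 24 = 180000 >= 148500  -> match
 *
 * The helper therefore returns *dp_lanes = 1, *dp_rate = 540000: it
 * prefers the smallest lane count that can carry the mode, trying the
 * link rates in ascending order for each lane count.
 */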
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -491,6 +445,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
+ int ret;
if (!radeon_connector->con_priv)
return;
@@ -498,10 +453,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- dig_connector->dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
- dig_connector->dp_lane_count =
- radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dig_connector->dp_lane_count,
+ &dig_connector->dp_clock);
+ if (ret) {
+ dig_connector->dp_clock = 0;
+ dig_connector->dp_lane_count = 0;
+ }
}
}
@@ -510,7 +469,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
- int dp_clock;
+ unsigned dp_clock, dp_lanes;
+ int ret;
if ((mode->clock > 340000) &&
(!radeon_connector_is_dp12_capable(connector)))
@@ -520,8 +480,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
- dp_clock =
- radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
+ mode->clock,
+ &dp_lanes,
+ &dp_clock);
+ if (ret)
+ return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index bb292143997e..d4ac8c837314 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {
@@ -310,6 +311,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
@@ -892,8 +897,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -910,6 +913,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
break;
case 2:
case 3:
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 752072771388..367a916f364e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
if (ASIC_IS_DCE8(rdev)) {
+ unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
+ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+ div = radeon_audio_decode_dfs_div(div);
+
+ if (div)
+ clock = clock * 100 / div;
+
WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
} else {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2ad462896896..32491355a1d4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
+static const unsigned ni_dig_offsets[] =
+{
+ NI_DIG0_REGISTER_OFFSET,
+ NI_DIG1_REGISTER_OFFSET,
+ NI_DIG2_REGISTER_OFFSET,
+ NI_DIG3_REGISTER_OFFSET,
+ NI_DIG4_REGISTER_OFFSET,
+ NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+ EVERGREEN_DP0_REGISTER_OFFSET,
+ EVERGREEN_DP1_REGISTER_OFFSET,
+ EVERGREEN_DP2_REGISTER_OFFSET,
+ EVERGREEN_DP3_REGISTER_OFFSET,
+ EVERGREEN_DP4_REGISTER_OFFSET,
+ EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumes EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * We go from crtc to connector, which is not reliable since we should
+ * really walk the opposite direction.  If the crtc is enabled, find the
+ * dig_fe which selects this crtc and ensure that it is enabled; if such
+ * a dig_fe is found, find the dig_be which selects that dig_fe and
+ * ensure that it is enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1 is enabled we must disconnect the timing
+ * from the dp symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+ unsigned crtc_id, unsigned *ret_dig_fe)
+{
+ unsigned i;
+ unsigned dig_fe;
+ unsigned dig_be;
+ unsigned dig_en_be;
+ unsigned uniphy_pll;
+ unsigned digs_fe_selected;
+ unsigned dig_be_mode;
+ unsigned dig_fe_mask;
+ bool is_enabled = false;
+ bool found_crtc = false;
+
+ /* loop through all running dig_fe to find selected crtc */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+ /* found running pipe */
+ found_crtc = true;
+ dig_fe_mask = 1 << i;
+ dig_fe = i;
+ break;
+ }
+ }
+
+ if (found_crtc) {
+ /* loop through all running dig_be to find selected dig_fe */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+ /* is this dig_fe selected by dig_be? */
+ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+ if (dig_fe_mask & digs_fe_selected &&
+ /* is dig_be in sst mode? */
+ dig_be_mode == NI_DIG_BE_DPSST) {
+ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+ ni_dig_offsets[i]);
+ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+ ni_tx_offsets[i]);
+ /* dig_be enabled and tx is running */
+ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+ is_enabled = true;
+ *ret_dig_fe = dig_fe;
+ break;
+ }
+ }
+ }
+ }
+
+ return is_enabled;
+}
+
+/*
+ * Blank dig when in dp sst mode
+ * Dig ignores crtc timing
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+ unsigned dig_fe)
+{
+ unsigned stream_ctrl;
+ unsigned fifo_ctrl;
+ unsigned counter = 0;
+
+ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+ DRM_ERROR("dig %d , should be enable\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+ msleep(1);
+ counter++;
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ }
+ if (counter >= 32)
+ DRM_ERROR("counter exceeds %d\n", counter);
+
+ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
+ unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
-
+ /*
+ * We should disable the dig if it drives dp sst, but we are in
+ * radeon_device_init and the topology is not known yet; it only
+ * becomes available after radeon_modeset_init.
+ * radeon_atom_encoder_dpms_dig would do the job if we could
+ * initialize it properly, so for now we do it manually here.
+ */
+ if (ASIC_IS_DCE5(rdev) &&
+ evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
+ evergreen_blank_dp_output(rdev, dig_fe);
+ /* we could remove the 6 lines below */
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 9953356fe263..3cf04a2f44bb 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
+ if (ASIC_IS_DCE41(rdev)) {
+ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
+ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+ div = radeon_audio_decode_dfs_div(div);
+
+ if (div)
+ clock = 100 * clock / div;
+ }
+
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
+
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL 0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
+# define NI_DIG_BE_DPSST 0
/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
+#define EVERGREEN_DP_STEER_FIFO 0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
+
#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4aa5f755572b..13b6029d65cc 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -511,6 +511,11 @@
#define DCCG_AUDIO_DTO1_CNTL 0x05cc
# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
+# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
+
/* DCE 4.0 AFMT */
#define HDMI_CONTROL 0x7030
# define HDMI_KEEPOUT_MODE (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 87db64983ea8..5580568088bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -268,6 +268,7 @@ struct radeon_clock {
uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
+ uint32_t vco_freq;
};
/*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 8f285244c839..0c5b3eeff82d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+ if (((dev->pdev->device == 0x9802) ||
+ (dev->pdev->device == 0x9805) ||
+ (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
- if ((dev->pdev->device == 0x9805) &&
- (dev->pdev->subsystem_vendor == 0x1734) &&
- (dev->pdev->subsystem_device == 0x11bd)) {
- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
- return false;
- }
-
return true;
}
@@ -1112,6 +1106,31 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V2_2 info_22;
};
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+ rdev->clock.vco_freq =
+ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
+ }
+}
+
bool radeon_atom_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -1136,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;
- if (crev < 2)
+ if ((frev < 2) && (crev < 2))
p1pll->pll_out_min =
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
else
@@ -1145,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
+ if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->mode_info.firmware_flags =
le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+ if (ASIC_IS_DCE8(rdev))
+ rdev->clock.vco_freq =
+ le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
+ else if (ASIC_IS_DCE5(rdev))
+ rdev->clock.vco_freq = rdev->clock.current_dispclk;
+ else if (ASIC_IS_DCE41(rdev))
+ radeon_atombios_get_dentist_vco_freq(rdev);
+ else
+ rdev->clock.vco_freq = rdev->clock.current_dispclk;
+
+ if (rdev->clock.vco_freq == 0)
+ rdev->clock.vco_freq = 360000; /* 3.6 GHz */
+
return true;
}
return false;
}
-union igp_info {
- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a283..69ce95571136 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "radeon_acpi.h"
@@ -255,6 +256,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 2c02e99b5f95..b214663b370d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
if (!dig || !dig->afmt)
return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
- else
- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
if (radeon_encoder->audio && radeon_encoder->audio->dpms)
radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
}
+
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+ if (div >= 8 && div < 64)
+ return (div - 8) * 25 + 200;
+ else if (div >= 64 && div < 96)
+ return (div - 64) * 50 + 1600;
+ else if (div >= 96 && div < 128)
+ return (div - 96) * 100 + 3200;
+ else
+ return 0;
+}
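
radeon_audio_decode_dfs_div() expands the 7-bit DENTIST_DPREFCLK_WDIVIDER field into the divider value scaled by 100 (steps of 0.25 up to 15.75, 0.5 up to 31.5, then 1.0), which is why the DTO callers earlier in this series divide with clock * 100 / div. A short worked example; the register value and clock below are chosen purely for illustration:

/*
 * A raw DENTIST_DPREFCLK_WDIVIDER field of 0x20 (32) decodes to
 * (32 - 8) * 25 + 200 = 800, i.e. a divider of 8.00.
 */
unsigned int div = radeon_audio_decode_dfs_div(0x20);	/* -> 800 */
unsigned int clock = 48000;				/* arbitrary example clock */

if (div)
	clock = clock * 100 / div;	/* 48000 * 100 / 800 = 6000 */
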
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 059cc3012062..5c70cceaa4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
void radeon_audio_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 340f3f549f29..30f00748ed37 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.output_csc_property,
@@ -2056,7 +2058,6 @@ radeon_add_atom_connector(struct drm_device *dev,
RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2124,6 +2125,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -2179,6 +2181,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
@@ -2231,6 +2234,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
@@ -2303,8 +2307,10 @@ radeon_add_atom_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2380,7 +2386,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2465,10 +2470,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c566993a2ec3..e2dd5d19c32c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
/*
* GPU helpers function.
*/
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
/**
* radeon_card_posted - check if the hw has already been initialized
*
@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
+ /* for pass through, always force asic_init */
+ if (radeon_device_is_virtual())
+ return false;
+
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1eca0acac016..3645b223aa37 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &radeon_crtc->base;
unsigned long flags;
int r;
- int vpos, hpos, stat, min_udelay;
+ int vpos, hpos, stat, min_udelay = 0;
+ unsigned repcnt = 4;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (radeon_crtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -470,12 +471,24 @@ static void radeon_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
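
The reworked loop above bounds both the retry count (repcnt) and the per-iteration sleep: min_udelay estimates the time, in microseconds, until the real vblank start ((-hpos + 1) scanlines at linedur_ns / 1000 us each, with a 5 us floor per line), and anything over half a frame (framedur_ns / 2000 us) is treated as a measurement problem. A worked example, timings assumed for illustration:

/*
 * Assumed 1080p@60 timings (1125 total lines):
 *   linedur_ns  ~= 16666667 / 1125 = 14815 ns
 *   framedur_ns ~= 16666667 ns
 * With hpos == -3 (three scanlines before the real vblank start):
 *   min_udelay = (-(-3) + 1) * max(14815 / 1000, 5)
 *              = 4 * 14 = 56 us
 * which is well below the framedur_ns / 2000 ~= 8333 us sanity limit,
 * so the worker sleeps 56..112 us and re-samples the scanout position.
 */
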
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 3b0c229d7dcd..db64e0062689 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN;
+ tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 744f5c49c664..6dd39bdedb97 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -525,11 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
{
struct radeon_connector_atom_dig *dig_connector;
-
dig_connector = mst_enc->connector->con_priv;
dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
- dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
- dig_connector->dpcd);
+ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
dig_connector->dp_lane_count, dig_connector->dp_clock);
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index bba112628b47..7a0666ac4e23 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -757,8 +757,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
- const u8 *dpcd);
+extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 *dpcd,
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 84d45633d28c..fb6ad143873f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ bo->flags &= ~RADEON_GEM_GTT_WC;
#endif
radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 59abebd6b5dc..60ab31517153 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1078,10 +1078,6 @@ force:
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- rdev->pm.dpm.single_display = single_display;
-
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
@@ -1097,6 +1093,10 @@ force:
radeon_dpm_post_set_power_state(rdev);
+ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ rdev->pm.dpm.single_display = single_display;
+
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896aca45..197b157b73d0 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_ref(fences[i]);
+
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_unref(&fences[i]);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
if (r == -ENOENT) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e34307459e50..35310336dd0a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ if (radeon_ttm_tt_has_userptr(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
@@ -261,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -758,7 +760,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
+ while (i--) {
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 48d97c040f49..3979632b9225 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
if (soffset) {
/* make sure object fit at this offset */
- eoffset = soffset + size;
+ eoffset = soffset + size - 1;
if (soffset >= eoffset) {
r = -EINVAL;
goto error_unreserve;
}
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
- if (last_pfn > rdev->vm_manager.max_pfn) {
- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ if (last_pfn >= rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
r = -EINVAL;
goto error_unreserve;
@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
eoffset /= RADEON_GPU_PAGE_SIZE;
if (soffset || eoffset) {
struct interval_tree_node *it;
- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset);
if (it && it != &bo_va->it) {
struct radeon_bo_va *tmp;
tmp = container_of(it, struct radeon_bo_va, it);
@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
if (soffset || eoffset) {
spin_lock(&vm->status_lock);
bo_va->it.start = soffset;
- bo_va->it.last = eoffset - 1;
+ bo_va->it.last = eoffset;
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
interval_tree_insert(&bo_va->it, &vm->va);
@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
unsigned i;
start >>= radeon_vm_block_size;
- end >>= radeon_vm_block_size;
+ end = (end - 1) >> radeon_vm_block_size;
for (i = start; i <= end; ++i)
radeon_bo_fence(vm->page_tables[i].bo, fence, true);
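
The radeon_vm.c hunks above switch the VA bookkeeping to an inclusive [start, last] page range: eoffset now addresses the last byte of the mapping, last_pfn is checked with >= max_pfn, and the interval-tree query no longer subtracts one. A worked example, with RADEON_GPU_PAGE_SIZE of 4096 assumed and illustrative offsets:

/*
 * Mapping a 0x2000-byte BO at soffset 0x1000:
 *   eoffset  = 0x1000 + 0x2000 - 1 = 0x2fff   (inclusive last byte)
 *   last_pfn = 0x2fff / 4096      = 2         (rejected if >= max_pfn)
 *   it.start = 0x1000 / 4096      = 1
 *   it.last  = 0x2fff / 4096      = 2
 * so the interval tree holds the inclusive page range [1, 2] and the
 * overlap lookup passes eoffset directly instead of eoffset - 1.
 */
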
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a82b891ae1fe..3aaa07dafc00 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,9 +2926,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
{ 0, 0, 0, 0 },
};
@@ -3008,6 +3011,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
+ /* limit mclk on all R7 370 parts for stability */
+ if (rdev->pdev->device == 0x6811 &&
+ rdev->pdev->revision == 0x81)
+ max_mclk = 120000;
+ /* limit sclk/mclk on Jet parts for stability */
+ if (rdev->pdev->device == 0x6665 &&
+ rdev->pdev->revision == 0xc3) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 4c4a7218a3bd..d1a7b58dd291 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -915,6 +915,11 @@
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+#define DENTIST_DISPCLK_CNTL 0x0490
+# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
+
#define AFMT_AUDIO_SRC_CONTROL 0x713c
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
/* AFMT_AUDIO_SRC_SELECT
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 07a0d378e122..a01efe39a820 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
return -EINVAL;
}
- for (i = 0; i < sign->num; ++i) {
- if (sign->val[i].chip_id == chip_id)
+ for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
break;
}
- if (i == sign->num)
+ if (i == le32_to_cpu(sign->num))
return -EINVAL;
data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
data[1] = sign->val[i].nonce[1];
data[2] = sign->val[i].nonce[2];
data[3] = sign->val[i].nonce[3];
- data[4] = sign->len + 64;
+ data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
memset(&data[5], 0, 44);
memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
- data += data[4] / 4;
+ data += le32_to_cpu(data[4]) / 4;
data[0] = sign->val[i].sigval[0];
data[1] = sign->val[i].sigval[1];
data[2] = sign->val[i].sigval[2];
data[3] = sign->val[i].sigval[3];
- rdev->vce.keyselect = sign->val[i].keyselect;
+ rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 745e996d2dbc..4ae8b56b1847 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1004,9 +1004,9 @@ out_unlock:
return ret;
}
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
@@ -1038,6 +1038,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
return false;
}
+EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 62c7b1dafaa4..73e41a8613da 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -539,7 +539,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 6377e8151000..67cebb23c940 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
- BUG_ON(!spin_is_locked(&man->lock));
+ lockdep_assert_held_once(&man->lock);
if (header->inline_space) {
vmw_cmdbuf_header_inline_free(header);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 299925a1f6c6..eadc981ee79a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
+ if (buf->pin_count > 0) {
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ goto out_unreserve;
+ }
+
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
+ uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
- bo->mem.start > 0)
+ bo->mem.start > 0 &&
+ buf->pin_count == 0)
(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c49812b80dd0..f3f31f995878 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
#include <linux/module.h>
+#include <linux/console.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
@@ -226,6 +227,7 @@ static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -242,6 +244,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
@@ -651,6 +655,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+ dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
dev_priv->enable_fb = enable_fbdev;
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -697,6 +703,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ /*
+ * Workaround for low memory 2D VMs to compensate for the
+ * allocation taken by fbdev
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_3D))
+ mem_size *= 2;
+
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
@@ -1538,6 +1551,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ return -EINVAL;
+#endif
+
ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 469cdd520615..2e94fe27b3f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -387,6 +387,7 @@ struct vmw_private {
spinlock_t hw_lock;
spinlock_t cap_lock;
bool has_dx;
+ bool assume_16bpp;
/*
* VGA registers.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5da5de0cb522..4948c1529836 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3273,19 +3273,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_cid_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
- &vmw_cmd_ok, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..d2d93959b119 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
par->set_fb = &vfb->base;
- if (!par->bo_ptr) {
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us.
- */
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev framebuffer.\n");
- return ret;
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- vfb->unpin(vfb);
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- return ret;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
return 0;
}
@@ -573,9 +551,9 @@ static int vmw_fb_set_par(struct fb_info *info)
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
- mode->hdisplay *
- (var->bits_per_pixel + 7) / 8,
- mode->vdisplay)) {
+ mode->hdisplay *
+ DIV_ROUND_UP(var->bits_per_pixel, 8),
+ mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
if (ret)
goto out_unlock;
+ if (!par->bo_ptr) {
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
+
+ /*
+ * Pin before mapping. Since we don't know in what placement
+ * to pin, call into KMS to do it for us.
+ */
+ ret = vfb->pin(vfb);
+ if (ret) {
+ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+ par->vmw_bo->base.num_pages, &par->map);
+ if (ret) {
+ vfb->unpin(vfb);
+ DRM_ERROR("Could not map the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ }
+
+
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9b4bb9e74d73..060e5c6f4446 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
uint32_t format;
struct drm_vmw_size content_base_size;
struct vmw_resource *res;
+ unsigned int bytes_pp;
int ret;
switch (mode_cmd->depth) {
case 32:
case 24:
format = SVGA3D_X8R8G8B8;
+ bytes_pp = 4;
break;
case 16:
case 15:
format = SVGA3D_R5G6B5;
+ bytes_pp = 2;
break;
case 8:
format = SVGA3D_P8;
+ bytes_pp = 1;
break;
default:
@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
return -EINVAL;
}
- content_base_size.width = mode_cmd->width;
+ content_base_size.width = mode_cmd->pitch / bytes_pp;
content_base_size.height = mode_cmd->height;
content_base_size.depth = 1;
@@ -1534,14 +1538,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
- u32 assumed_bpp = 2;
+ u32 assumed_bpp = 4;
- /*
- * If using screen objects, then assume 32-bpp because that's what the
- * SVGA device is assuming
- */
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- assumed_bpp = 4;
+ if (dev_priv->assume_16bpp)
+ assumed_bpp = 2;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);