Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 21
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 14
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 304
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 22
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 12
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 93
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 22
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 78
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 80
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 7
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 24
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
63 files changed, 966 insertions(+), 391 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5c60ae524c45..ff68eefae273 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -335,18 +335,27 @@ int ast_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
- if (ret) {
- kfree(afbdev);
- return ret;
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&afbdev->helper, 32);
+ ret = drm_fb_helper_initial_config(&afbdev->helper, 32);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&afbdev->helper);
+free:
+ kfree(afbdev);
+ return ret;
}
void ast_fbdev_fini(struct drm_device *dev)
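The same unwind pattern recurs in most of the fbdev-init conversions further down (bochs, cirrus, gma500, mgag200, msm, nouveau, omap, qxl), now that drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config() report errors. A minimal sketch of the pattern with a hypothetical driver structure, not code taken from any of these drivers:

static int example_fbdev_init(struct drm_device *dev)
{
	struct example_fbdev *fbdev;	/* hypothetical per-driver struct */
	int ret;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;

	ret = drm_fb_helper_init(dev, &fbdev->helper, 1, 1);
	if (ret)
		goto free;	/* helper never initialized, only free the struct */

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	if (ret)
		goto fini;	/* undo drm_fb_helper_init() */

	ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(&fbdev->helper);
free:
	kfree(fbdev);
	return ret;
}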
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 035dacc93382..fd5c5f3370f6 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
- if (data & 0x400)
+ if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index fe95d31cd110..9cdab0f2575c 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -197,12 +197,22 @@ int bochs_fbdev_init(struct bochs_device *bochs)
if (ret)
return ret;
- drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+ ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+ if (ret)
+ goto fini;
+
drm_helper_disable_unused_functions(bochs->dev);
- drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+
+ ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+ if (ret)
+ goto fini;
bochs->fb.initialized = true;
return 0;
+
+fini:
+ drm_fb_helper_fini(&bochs->fb.helper);
+ return ret;
}
void bochs_fbdev_fini(struct bochs_device *bochs)
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index d231b1c317af..b487c4e40d83 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -314,17 +314,17 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
- if (ret) {
- kfree(gfbdev);
+ if (ret)
+ return ret;
+
+ ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+ if (ret)
return ret;
- }
- drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(cdev->dev);
- drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
- return 0;
+ return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
}
void cirrus_fbdev_fini(struct cirrus_device *cdev)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b7f101b52d81..0cd6e0d7dd1a 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2555,8 +2555,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- /* For some reason crtc x/y offsets are signed internally. */
- if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ /*
+ * Universal plane src offsets are only 16.16, prevent havoc for
+ * drivers using universal plane code internally.
+ */
+ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
drm_modeset_lock_all(dev);
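The stricter check reflects that drivers built on the universal plane helpers carry the CRTC x/y internally as 16.16 fixed-point plane source coordinates, so only the low 16 bits can hold the integer offset; anything at or above 65536 would not survive the conversion. A tiny illustration of the encoding, with names of my own, not from the patch:

	/* plane source coordinates are 16.16 fixed point */
	if (crtc_x & 0xffff0000)		/* integer part would not fit */
		return -ERANGE;
	src_x = (uint32_t)crtc_x << 16;		/* high 16 bits integer, low 16 fraction */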
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f50d884b81cf..c4f8e8f172cd 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -956,17 +956,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
u8 *rad)
{
- int lct = port->parent->lct;
+ int parent_lct = port->parent->lct;
int shift = 4;
- int idx = lct / 2;
- if (lct > 1) {
- memcpy(rad, port->parent->rad, idx);
- shift = (lct % 2) ? 4 : 0;
+ int idx = (parent_lct - 1) / 2;
+ if (parent_lct > 1) {
+ memcpy(rad, port->parent->rad, idx + 1);
+ shift = (parent_lct % 2) ? 4 : 0;
} else
rad[0] = 0;
rad[idx] |= port->port_num << shift;
- return lct + 1;
+ return parent_lct + 1;
}
/*
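For orientation, the RAD (relative address) packs one 4-bit port number per hop, two hops per byte, with even hops in the high nibble. A worked example of the corrected math, using numbers of my own choosing:

/*
 * Parent branch: lct = 2, rad[0] = 0x20 (hop 0 went out port 2).
 * New port attached via port 3:
 *   idx   = (2 - 1) / 2 = 0
 *   shift = (2 % 2) ? 4 : 0 = 0
 *   rad[0] = 0x20 | (3 << 0) = 0x23, returned lct = 3
 * The previous idx = lct / 2 would have OR'ed port 3 into rad[1] rather
 * than into the low nibble of rad[0].
 */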
@@ -996,37 +996,47 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
return send_link;
}
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
int ret;
- if (port->dpcd_rev >= 0x12) {
- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
- if (!port->guid_valid) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- port,
- DP_GUID,
- 16, port->guid);
- port->guid_valid = true;
+
+ memcpy(mstb->guid, guid, 16);
+
+ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+ if (mstb->port_parent) {
+ ret = drm_dp_send_dpcd_write(
+ mstb->mgr,
+ mstb->port_parent,
+ DP_GUID,
+ 16,
+ mstb->guid);
+ } else {
+
+ ret = drm_dp_dpcd_write(
+ mstb->mgr->aux,
+ DP_GUID,
+ mstb->guid,
+ 16);
}
}
}
static void build_mst_prop_path(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *mstb,
- char *proppath)
+ char *proppath,
+ size_t proppath_size)
{
int i;
char temp[8];
- snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+ snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
- int port_num = mstb->rad[i / 2] >> shift;
- snprintf(temp, 8, "-%d", port_num);
- strncat(proppath, temp, 255);
+ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+ snprintf(temp, sizeof(temp), "-%d", port_num);
+ strlcat(proppath, temp, proppath_size);
}
- snprintf(temp, 8, "-%d", port->port_num);
- strncat(proppath, temp, 255);
+ snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ strlcat(proppath, temp, proppath_size);
}
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
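The switch from strncat() to strlcat() matters because strncat()'s size argument only limits the bytes appended, not the total destination size, so the old strncat(proppath, temp, 255) could still run past the end of the 255-byte buffer. A small standalone sketch, with values of my own rather than from the patch:

	char proppath[255] = "mst:1";
	char temp[8];

	snprintf(temp, sizeof(temp), "-%d", 3);
	/* bounded by the destination size, always NUL-terminated */
	strlcat(proppath, temp, sizeof(proppath));	/* proppath == "mst:1-3" */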
@@ -1063,7 +1073,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
- memcpy(port->guid, port_msg->peer_guid, 16);
/* manage mstb port lists with mgr lock - take a reference
for this list */
@@ -1076,11 +1085,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1097,7 +1104,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(port, mstb, proppath);
+ build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
}
@@ -1125,10 +1132,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
dowork = true;
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1152,11 +1157,13 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
struct drm_dp_mst_port *port;
int i;
/* find the port by iterating down */
+
+ mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
- int port_num = rad[i / 2] >> shift;
+ int port_num = (rad[i / 2] >> shift) & 0xf;
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
@@ -1171,6 +1178,49 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
}
}
kref_get(&mstb->kref);
+ mutex_unlock(&mgr->lock);
+ return mstb;
+}
+
+static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+ struct drm_dp_mst_branch *mstb,
+ uint8_t *guid)
+{
+ struct drm_dp_mst_branch *found_mstb;
+ struct drm_dp_mst_port *port;
+
+ if (memcmp(mstb->guid, guid, 16) == 0)
+ return mstb;
+
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (!port->mstb)
+ continue;
+
+ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+
+ if (found_mstb)
+ return found_mstb;
+ }
+
+ return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
+ struct drm_dp_mst_topology_mgr *mgr,
+ uint8_t *guid)
+{
+ struct drm_dp_mst_branch *mstb;
+
+ /* find the port by iterating down */
+ mutex_lock(&mgr->lock);
+
+ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+
+ if (mstb)
+ kref_get(&mstb->kref);
+
+ mutex_unlock(&mgr->lock);
return mstb;
}
@@ -1178,7 +1228,7 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
-
+ struct drm_dp_mst_branch *mstb_child;
if (!mstb->link_address_sent) {
drm_dp_send_link_address(mgr, mstb);
mstb->link_address_sent = true;
@@ -1193,17 +1243,31 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
if (!port->available_pbn)
drm_dp_send_enum_path_resources(mgr, mstb, port);
- if (port->mstb)
- drm_dp_check_and_send_link_address(mgr, port->mstb);
+ if (port->mstb) {
+ mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+ if (mstb_child) {
+ drm_dp_check_and_send_link_address(mgr, mstb_child);
+ drm_dp_put_mst_branch_device(mstb_child);
+ }
+ }
}
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_dp_mst_branch *mstb;
- drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
-
+ mutex_lock(&mgr->lock);
+ mstb = mgr->mst_primary;
+ if (mstb) {
+ kref_get(&mstb->kref);
+ }
+ mutex_unlock(&mgr->lock);
+ if (mstb) {
+ drm_dp_check_and_send_link_address(mgr, mstb);
+ drm_dp_put_mst_branch_device(mstb);
+ }
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1258,7 +1322,6 @@ retry:
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
- WARN(1, "fail\n");
return -EIO;
}
@@ -1272,6 +1335,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_branch *mstb = txmsg->dst;
+ u8 req_type;
/* both msg slots are full */
if (txmsg->seqno == -1) {
@@ -1288,7 +1352,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
txmsg->seqno = 1;
mstb->tx_slots[txmsg->seqno] = txmsg;
}
- hdr->broadcast = 0;
+
+ req_type = txmsg->msg[0] & 0x7f;
+ if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+ req_type == DP_RESOURCE_STATUS_NOTIFY)
+ hdr->broadcast = 1;
+ else
+ hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1;
@@ -1457,6 +1527,9 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
}
+
+ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
@@ -1621,6 +1694,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
} else {
port = NULL;
@@ -1646,6 +1724,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -1860,31 +1941,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref);
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
-
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
-
- /* sort out guid */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
- if (ret != 16) {
- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
- goto out_unlock;
- }
-
- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
- if (!mgr->guid_valid) {
- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
- mgr->guid_valid = true;
+ {
+ struct drm_dp_payload reset_pay;
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
}
queue_work(system_long_wq, &mgr->work);
@@ -1946,6 +2013,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -1960,6 +2029,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
@@ -2076,28 +2155,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
- struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_branch *mstb = NULL;
bool seqno;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->up_req_recv.initial_hdr.lct,
- mgr->up_req_recv.initial_hdr.rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+
+ if (!mgr->up_req_recv.initial_hdr.broadcast) {
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->up_req_recv.initial_hdr.lct,
+ mgr->up_req_recv.initial_hdr.rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
}
seqno = mgr->up_req_recv.initial_hdr.seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+
+ if (!mstb)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
drm_dp_update_port(mstb, &msg.u.conn_stat);
+
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
(*mgr->cbs->hotplug)(mgr);
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+ if (!mstb)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
@@ -2267,6 +2369,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
*slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
return true;
}
}
@@ -2413,32 +2516,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
- fixed20_12 pix_bw;
- fixed20_12 fbpp;
- fixed20_12 result;
- fixed20_12 margin, tmp;
- u32 res;
-
- pix_bw.full = dfixed_const(clock);
- fbpp.full = dfixed_const(bpp);
- tmp.full = dfixed_const(8);
- fbpp.full = dfixed_div(fbpp, tmp);
-
- result.full = dfixed_mul(pix_bw, fbpp);
- margin.full = dfixed_const(54);
- tmp.full = dfixed_const(64);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_div(result, margin);
-
- margin.full = dfixed_const(1006);
- tmp.full = dfixed_const(1000);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_mul(result, margin);
-
- result.full = dfixed_div(result, tmp);
- result.full = dfixed_ceil(result);
- res = dfixed_trunc(result);
- return res;
+ u64 kbps;
+ s64 peak_kbps;
+ u32 numerator;
+ u32 denominator;
+
+ kbps = clock * bpp;
+
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+ * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+ * common multiplier to render an integer PBN for all link rate/lane
+ * counts combinations
+ * calculate
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps *= 8 convert to bytes
+ */
+
+ numerator = 64 * 1006;
+ denominator = 54 * 8 * 1000 * 1000;
+
+ kbps *= numerator;
+ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+ return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
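Working the first self-test case through the new integer/fixed-point path shows where 689 comes from (hand arithmetic, not code from the patch):

/*
 * clock = 154000 kHz, bpp = 30:
 *   kbps                     = 154000 * 30       = 4620000
 *   kbps * 64 * 1006                             = 297454080000
 *   / (54 * 8 * 1000 * 1000)                     = 688.55...
 *   drm_fixp2int_ceil()                          -> 689 PBN
 * matching the expected value in test_calc_pbn_mode() below.
 */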
@@ -2446,11 +2548,23 @@ static int test_calc_pbn_mode(void)
{
int ret;
ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689)
+ if (ret != 689) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 154000, 30, 689, ret);
return -EINVAL;
+ }
ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047)
+ if (ret != 1047) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 234000, 30, 1047, ret);
return -EINVAL;
+ }
+ ret = drm_dp_calc_pbn_mode(297000, 24);
+ if (ret != 1063) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 297000, 24, 1063, ret);
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e9a2827ad1c4..1e41f466055c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1453,7 +1453,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int n, int width, int height)
{
int c, o;
- struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
@@ -1472,7 +1471,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;
- crtcs = kzalloc(dev->mode_config.num_connector *
+ crtcs = kzalloc(fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
@@ -1518,7 +1517,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (score > best_score) {
best_score = score;
memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
+ fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *));
}
}
@@ -1638,7 +1637,7 @@ out:
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
-bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f861361a635e..4924d381b664 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_master *master = file_priv->master;
int ret = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
++file_priv->lock_count;
if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
task_pid_nr(current), lock->context);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index ddd90ddbc200..2d42ce6d3757 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -593,6 +593,7 @@ int psb_fbdev_init(struct drm_device *dev)
{
struct psb_fbdev *fbdev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
if (!fbdev) {
@@ -604,16 +605,29 @@ int psb_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
- drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
- INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
+ dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&fbdev->psb_fb_helper);
+free:
+ kfree(fbdev);
+ return ret;
}
static void psb_fbdev_fini(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index c707fa6fca85..e3bdc8b1c32c 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_unreference(&r->gem);
+ drm_gem_object_unreference_unlocked(&r->gem);
*handlep = handle;
return 0;
}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 87885d8c06e8..4869117b69eb 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -85,7 +85,7 @@ static const char *const dsi_errors[] = {
"RX Prot Violation",
"HS Generic Write FIFO Full",
"LP Generic Write FIFO Full",
- "Generic Read Data Avail"
+ "Generic Read Data Avail",
"Special Packet Sent",
"Tearing Effect",
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a84971351eee..c68e9f2947a6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2899,15 +2899,14 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper = I915_READ(upper_reg); \
- u32 lower = I915_READ(lower_reg); \
- u32 tmp = I915_READ(upper_reg); \
- if (upper != tmp) { \
- upper = tmp; \
- lower = I915_READ(lower_reg); \
- WARN_ON(I915_READ(upper_reg) != upper); \
- } \
- (u64)upper << 32 | lower; })
+ u32 upper, lower, tmp; \
+ tmp = I915_READ(upper_reg); \
+ do { \
+ upper = tmp; \
+ lower = I915_READ(lower_reg); \
+ tmp = I915_READ(upper_reg); \
+ } while (upper != tmp); \
+ (u64)upper << 32 | lower; })
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
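The do/while form closes the race the old macro only retried once: if the low half of the 64-bit counter wraps between the upper and lower reads, the pair is inconsistent, so the read repeats until the upper half is stable around the lower read. A generic sketch of the same idea as a helper (illustrative only, not i915 code):

#include <linux/types.h>

static inline u64 read64_split(u32 (*rd)(u32 reg), u32 lower_reg, u32 upper_reg)
{
	u32 upper, lower, tmp;

	tmp = rd(upper_reg);
	do {
		upper = tmp;			/* candidate high word */
		lower = rd(lower_reg);		/* low word between two high reads */
		tmp = rd(upper_reg);		/* re-check the high word */
	} while (upper != tmp);			/* retry if it changed (wrapped) */

	return (u64)upper << 32 | lower;
}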
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e08a4729596b..9bf747f2a32b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -5768,6 +5768,7 @@ enum punit_power_well {
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN3 0xe184
+#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index a4bd90f36a03..d96b152a6e04 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -41,7 +41,7 @@ find_section(struct bdb_header *bdb, int section_id)
{
u8 *base = (u8 *)bdb;
int index = 0;
- u16 total, current_size;
+ u32 total, current_size;
u8 current_id;
/* skip to first section */
@@ -56,6 +56,10 @@ find_section(struct bdb_header *bdb, int section_id)
current_size = *((u16 *)(base + index));
index += 2;
+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+ current_size = *((const u32 *)(base + index + 1));
+
if (index + current_size > total)
return NULL;
@@ -794,6 +798,12 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
return;
}
+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 3) {
+ DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+ return;
+ }
+
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
block_size = get_blocksize(sequence);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9212e6504e0f..f0e8e2a2c547 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -311,8 +311,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev)) {
+ if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+ DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+ return false;
+ }
+
pipe_config->pipe_bpp = 24;
+ }
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b63d4fa204a3..4b476aa4ab05 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1515,12 +1515,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
hsw_ddi_clock_get(encoder, pipe_config);
}
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
- /* HDMI has nothing special to destroy, so we can go with this. */
- intel_dp_encoder_destroy(encoder);
-}
-
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
@@ -1539,7 +1533,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .destroy = intel_ddi_destroy,
+ .reset = intel_dp_encoder_reset,
+ .destroy = intel_dp_encoder_destroy,
};
static struct intel_connector *
@@ -1612,6 +1607,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 448327fe4d85..234e89c013dd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1650,6 +1650,8 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
}
+ I915_WRITE(reg, dpll);
+
/* Wait for the clocks to stabilize. */
POSTING_READ(reg);
udelay(150);
@@ -6524,12 +6526,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
+ int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
+ bool using_ssc_source = false;
/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
@@ -6554,8 +6558,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
can_ssc = true;
}
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
- has_panel, has_lvds, has_ck505);
+ /* Check if any DPLLs are using the SSC source */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ u32 temp = I915_READ(PCH_DPLL(i));
+
+ if (!(temp & DPLL_VCO_ENABLE))
+ continue;
+
+ if ((temp & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ using_ssc_source = true;
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
@@ -6592,9 +6610,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- } else {
- final |= DREF_SSC_SOURCE_DISABLE;
- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ } else if (using_ssc_source) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+ final |= DREF_SSC1_ENABLE;
}
if (final == val)
@@ -6640,7 +6658,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
- DRM_DEBUG_KMS("Disabling SSC entirely\n");
+ DRM_DEBUG_KMS("Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -6651,16 +6669,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
- /* Turn off the SSC source */
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_DISABLE;
+ if (!using_ssc_source) {
+ DRM_DEBUG_KMS("Disabling SSC source\n");
- /* Turn off SSC1 */
- val &= ~DREF_SSC1_ENABLE;
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
}
BUG_ON(val != final);
@@ -10184,11 +10206,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
- /* Clamp bpp to 8 on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0 && bpp > 24) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
- bpp);
- pipe_config->pipe_bpp = 24;
+ /* Clamp bpp to default limit on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0) {
+ int type = connector->base.connector_type;
+ int clamp_bpp = 24;
+
+ /* Fall back to 18 bpp when DP sink capability is unknown. */
+ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+ type == DRM_MODE_CONNECTOR_eDP)
+ clamp_bpp = 18;
+
+ if (bpp > clamp_bpp) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+ bpp, clamp_bpp);
+ pipe_config->pipe_bpp = clamp_bpp;
+ }
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a29db0a1da42..a915d729c33d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -778,10 +778,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_RECEIVE_ERROR))
continue;
if (status & DP_AUX_CH_CTL_DONE)
- break;
+ goto done;
}
- if (status & DP_AUX_CH_CTL_DONE)
- break;
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
@@ -790,6 +788,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
+done:
/* Check for timeout or receive error.
* Timeouts occur when the sink is not connected
*/
@@ -4424,7 +4423,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4441,9 +4440,52 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
pps_unlock(intel_dp);
}
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
- intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp;
+
+ if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
+ return;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ pps_lock(intel_dp);
+
+ /*
+ * Read out the current power sequencer assignment,
+ * in case the BIOS did something with it.
+ */
+ if (IS_VALLEYVIEW(encoder->dev))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ intel_edp_panel_vdd_sanitize(intel_dp);
+
+ pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -4925,37 +4967,6 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return downclock_mode;
}
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
-{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp;
- enum intel_display_power_domain power_domain;
-
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- return;
-
- intel_dp = enc_to_intel_dp(&intel_encoder->base);
-
- pps_lock(intel_dp);
-
- if (!edp_have_panel_vdd(intel_dp))
- goto out;
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
-
- edp_panel_vdd_schedule_off(intel_dp);
- out:
- pps_unlock(intel_dp);
-}
-
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector,
struct edp_power_seq *power_seq)
@@ -4976,7 +4987,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
- intel_edp_panel_vdd_sanitize(intel_encoder);
+ pps_lock(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ pps_unlock(intel_dp);
/* Cache DPCD and EDID for edp. */
intel_edp_panel_vdd_on(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 07ce04683c30..c91a4fb596e4 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -922,6 +922,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
@@ -933,7 +935,6 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index f6bdd44069ce..6ea603ae9055 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -171,7 +171,12 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
gpio = *data++;
/* pull up/down */
- action = *data++;
+ action = *data++ & 1;
+
+ if (gpio >= ARRAY_SIZE(gtable)) {
+ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+ goto out;
+ }
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
@@ -190,6 +195,7 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->dpio_lock);
+out:
return data;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 56e437e31580..ae628001fd97 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset;
+ int i = 0, inc, try = 0, reg_offset;
int ret = 0;
intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
reg_offset = dev_priv->gpio_mmio_base;
+retry:
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
- for (i = 0; i < num; i++) {
+ for (; i < num; i += inc) {
+ inc = 1;
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
- i += 1; /* set i to the index of the read xfer */
+ inc = 2; /* an index read is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
@@ -525,6 +527,18 @@ clear_err:
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ /*
+ * Passive adapters sometimes NAK the first probe. Retry the first
+ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+ * has retries internally. See also the retry loop in
+ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+ */
+ if (ret == -ENXIO && i == 0 && try++ == 0) {
+ DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
+ goto retry;
+ }
+
goto out;
timeout:
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 83c7ecf2608a..993df2dded42 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2952,6 +2952,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ memset(active, 0, sizeof(*active));
+
active->pipe_enabled = intel_crtc_active(crtc);
if (active->pipe_enabled) {
@@ -5691,6 +5693,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+ /* WaSampleCChickenBitEnable:hsw */
+ I915_WRITE(HALF_SLICE_CHICKEN3,
+ _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
+
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 4415af3666ab..c36b8304042b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -303,14 +303,22 @@ int mgag200_fbdev_init(struct mga_device *mdev)
if (ret)
return ret;
- drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);
- drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
+ ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
return 0;
+
+fini:
+ drm_fb_helper_fini(&mfbdev->helper);
+ return ret;
}
void mgag200_fbdev_fini(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 83485ab81ce8..d8e693d1f6d4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1525,6 +1525,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_BANDWIDTH;
}
+ if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
+ (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
+ return MODE_H_ILLEGAL;
+ }
+
if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ab5bfd2d0ebf..956710dd84a4 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -245,17 +245,23 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
goto fail;
}
- drm_fb_helper_single_add_all_connectors(helper);
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(helper, 32);
+ ret = drm_fb_helper_initial_config(helper, 32);
+ if (ret)
+ goto fini;
priv->fbdev = helper;
return helper;
+fini:
+ drm_fb_helper_fini(helper);
fail:
kfree(fbdev);
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c8ac9482cf2e..a088c96dab5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -952,10 +952,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ mutex_lock(&drm->dev->mode_config.mutex);
if (plugged)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&drm->dev->mode_config.mutex);
+
drm_helper_hpd_irq_event(connector->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 593ef8a2a069..993c9a0377da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
return 0;
}
+static int
+nouveau_fbcon_open(struct fb_info *info, int user)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ int ret = pm_runtime_get_sync(drm->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+ return 0;
+}
+
+static int
+nouveau_fbcon_release(struct fb_info *info, int user)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+ pm_runtime_put(drm->dev->dev);
+ return 0;
+}
+
static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
+ .fb_open = nouveau_fbcon_open,
+ .fb_release = nouveau_fbcon_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = nouveau_fbcon_fillrect,
@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
+ .fb_open = nouveau_fbcon_open,
+ .fb_release = nouveau_fbcon_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
@@ -522,12 +546,12 @@ nouveau_fbcon_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &fbcon->helper,
dev->mode_config.num_crtc, 4);
- if (ret) {
- kfree(fbcon);
- return ret;
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&fbcon->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper);
+ if (ret)
+ goto fini;
if (drm->device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
@@ -540,8 +564,19 @@ nouveau_fbcon_init(struct drm_device *dev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
+ ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
+ if (ret)
+ goto fini;
+
+ if (fbcon->helper.fbdev)
+ fbcon->helper.fbdev->pixmap.buf_align = 4;
return 0;
+
+fini:
+ drm_fb_helper_fini(&fbcon->helper);
+free:
+ kfree(fbcon);
+ return ret;
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 36951ee4b157..9fcc77d5923e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -199,11 +199,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ if (is_power_of_2(nvbo->valid_domains))
+ rep->domain = nvbo->valid_domains;
+ else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
rep->offset = nvbo->bo.offset;
if (cli->vm) {
vma = nouveau_bo_vma_find(nvbo, cli->vm);
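The is_power_of_2() test means "exactly one domain bit set": for such buffers the reported domain is now the fixed valid_domains value rather than whatever TTM placement the object currently occupies, so the answer no longer depends on where the buffer happens to sit. A hypothetical illustration (names from the nouveau uapi, values mine):

	u32 domains = NOUVEAU_GEM_DOMAIN_VRAM;	/* single bit set */

	if (is_power_of_2(domains))
		rep->domain = domains;		/* stable, placement-independent */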
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..7ab83f7f2763 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t fg;
uint32_t bg;
uint32_t dsize;
- uint32_t width;
uint32_t *data = (uint32_t *)image->data;
int ret;
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 8);
- dsize = ALIGN(width * image->height, 32) >> 5;
-
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ dsize = ALIGN(image->width * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..cb2a71ada99e 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..69f760e8c54f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 8436c6857cda..d292d24b3a6e 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -334,17 +334,23 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
goto fail;
}
- drm_fb_helper_single_add_all_connectors(helper);
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(helper, 32);
+ ret = drm_fb_helper_initial_config(helper, 32);
+ if (ret)
+ goto fini;
priv->fbdev = helper;
return helper;
+fini:
+ drm_fb_helper_fini(helper);
fail:
kfree(fbdev);
return NULL;
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 97823644d347..f33251d67914 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
+ cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
cmd->u.surface_create.format = surf->surf.format;
cmd->u.surface_create.width = surf->surf.width;
cmd->u.surface_create.height = surf->surf.height;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0d1396266857..8e6e73cb187d 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -136,9 +136,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
*pwidth = head->width;
*pheight = head->height;
drm_mode_probed_add(connector, mode);
+ /* remember the last custom size for mode validation */
+ qdev->monitors_config_width = mode->hdisplay;
+ qdev->monitors_config_height = mode->vdisplay;
return 1;
}
+static struct mode_size {
+ int w;
+ int h;
+} common_modes[] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+};
+
static int qxl_add_common_modes(struct drm_connector *connector,
unsigned pwidth,
unsigned pheight)
@@ -146,29 +172,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
int i;
- struct mode_size {
- int w;
- int h;
- } common_modes[] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
- };
-
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
60, false, false, false);
@@ -340,10 +343,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -406,8 +414,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
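
A note on the two cursor hunks above: qxl keeps cur_x/cur_y relative to the current hotspot and only adds the hotspot back when filling the command sent to the device; when a new cursor image arrives with a different hotspot, the stored position is shifted by the delta so the device-visible position stays continuous. A small self-contained sketch of that bookkeeping, with invented struct and function names rather than the qxl types:

#include <stdio.h>

struct cursor_state {
	int cur_x, cur_y;           /* position kept relative to the hotspot */
	int hot_spot_x, hot_spot_y;
};

/* Called when a new cursor image with hotspot (hot_x, hot_y) is set;
 * reports the position the device should be told about. */
static void cursor_set(struct cursor_state *c, int hot_x, int hot_y,
		       int *dev_x, int *dev_y)
{
	/* keep the device-visible position stable across a hotspot change */
	c->cur_x += c->hot_spot_x - hot_x;
	c->cur_y += c->hot_spot_y - hot_y;
	c->hot_spot_x = hot_x;
	c->hot_spot_y = hot_y;

	/* device position = stored position + hotspot (the convention the
	 * hunks above use for both the SET and MOVE commands) */
	*dev_x = c->cur_x + c->hot_spot_x;
	*dev_y = c->cur_y + c->hot_spot_y;
}

int main(void)
{
	struct cursor_state c = { .cur_x = 100, .cur_y = 100 };
	int x, y;

	cursor_set(&c, 8, 8, &x, &y);              /* hotspot moves from (0,0) to (8,8) */
	printf("device position: %d,%d\n", x, y);  /* still 100,100 */
	return 0;
}
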
@@ -598,7 +606,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->hdisplay,
adjusted_mode->vdisplay);
- if (qcrtc->index == 0)
+ if (!bo->is_primary)
recreate_primary = true;
if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
@@ -806,11 +814,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
static int qxl_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *ddev = connector->dev;
+ struct qxl_device *qdev = ddev->dev_private;
+ int i;
+
/* TODO: is this called for user defined modes? (xrandr --add-mode)
* TODO: check that the mode fits in the framebuffer */
- DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
- mode->vdisplay, mode->status);
- return MODE_OK;
+
+ if (qdev->monitors_config_width == mode->hdisplay &&
+ qdev->monitors_config_height == mode->vdisplay)
+ return MODE_OK;
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
+ return MODE_OK;
+ }
+ return MODE_BAD;
}
static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
@@ -855,13 +874,15 @@ static enum drm_connector_status qxl_conn_detect(
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = ddev->dev_private;
- int connected;
+ bool connected = false;
/* The first monitor is always connected */
- connected = (output->index == 0) ||
- (qdev->client_monitors_config &&
- qdev->client_monitors_config->count > output->index &&
- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+ if (!qdev->client_monitors_config) {
+ if (output->index == 0)
+ connected = true;
+ } else
+ connected = qdev->client_monitors_config->count > output->index &&
+ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
DRM_DEBUG("#%d connected: %d\n", output->index, connected);
if (!connected)
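
The qxl_conn_mode_valid() change above stops reporting every mode as MODE_OK and instead accepts only the last size received via monitors_config plus the fixed common_modes table. The same check, reduced to a stand-alone function (only a subset of the table is reproduced, and booleans stand in for MODE_OK/MODE_BAD):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static const struct { int w, h; } common_modes[] = {
	{ 640, 480}, { 800, 600}, {1024, 768}, {1280, 720},
	{1280, 1024}, {1600, 1200}, {1920, 1080}, {1920, 1200},
};

/* cfg_w/cfg_h: last size seen in monitors_config, or 0 if none yet */
static bool mode_valid(int cfg_w, int cfg_h, int hdisplay, int vdisplay)
{
	size_t i;

	if (cfg_w == hdisplay && cfg_h == vdisplay)
		return true;

	for (i = 0; i < sizeof(common_modes) / sizeof(common_modes[0]); i++)
		if (common_modes[i].w == hdisplay &&
		    common_modes[i].h == vdisplay)
			return true;

	return false;
}

int main(void)
{
	printf("%d\n", mode_valid(1366, 768, 1366, 768)); /* 1: client-requested size */
	printf("%d\n", mode_valid(1366, 768, 1024, 600)); /* 0: neither custom nor common */
	return 0;
}
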
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7c6cafe21f5f..eef66769245f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
@@ -325,6 +327,8 @@ struct qxl_device {
struct work_struct fb_work;
struct drm_property *hotplug_mode_update_property;
+ int monitors_config_width;
+ int monitors_config_height;
};
/* forward declaration for QXL_INFO_IO */
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 3d7c1d00a424..f778c0e8ae3c 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -686,14 +686,24 @@ int qxl_fbdev_init(struct qxl_device *qdev)
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
- if (ret) {
- kfree(qfbdev);
- return ret;
- }
+ if (ret)
+ goto free;
+
+ ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
+ if (ret)
+ goto fini;
+
+ ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
- drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
- drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
return 0;
+
+fini:
+ drm_fb_helper_fini(&qfbdev->helper);
+free:
+ kfree(qfbdev);
+ return ret;
}
void qxl_fbdev_fini(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index b110883f8253..3aefaa058f0c 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
qobj = gem_to_qxl_bo(gobj);
ret = qxl_release_list_add(release, qobj);
- if (ret)
+ if (ret) {
+ drm_gem_object_unreference_unlocked(gobj);
return NULL;
+ }
return qobj;
}
@@ -166,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
cmd->command_size))
return -EFAULT;
- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+ reloc_info = kmalloc_array(cmd->relocs_num,
+ sizeof(struct qxl_reloc_info), GFP_KERNEL);
if (!reloc_info)
return -ENOMEM;
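
The qxl_ioctl.c hunk above replaces an open-coded size multiplication with kmalloc_array(), which returns NULL instead of a short buffer when relocs_num * sizeof(struct qxl_reloc_info) would overflow. The user-space equivalent of that guard looks like this (calloc() gives the same guarantee; the explicit check is shown to make the arithmetic visible):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse the allocation instead of silently wrapping n * size. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	void *p = alloc_array(SIZE_MAX / 2, 4);  /* n * size would overflow */
	void *q = alloc_array(16, sizeof(int));  /* ordinary request */

	printf("overflowing request: %p, sane request: %p\n", p, q);
	free(p);
	free(q);
	return 0;
}
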
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 4f9ec36698f7..2f2e50a0feb4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
- /* if there is no audio, set MINM_OVER_MAXP */
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
- radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
@@ -1733,6 +1730,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
@@ -1742,6 +1740,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
@@ -1763,6 +1765,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
@@ -1778,6 +1781,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
@@ -1789,9 +1796,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
if ((crtc->mode.clock == test_crtc->mode.clock) &&
(adjusted_clock == test_adjusted_clock) &&
(radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
- (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
- drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
return test_radeon_crtc->pll_id;
}
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index b8cd7975f797..07dd3523425a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -308,6 +308,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
@@ -872,8 +876,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
else
args.v1.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -890,6 +892,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
break;
case 2:
case 3:
@@ -1586,8 +1592,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
}
break;
case DRM_MODE_DPMS_STANDBY:
@@ -1668,8 +1675,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 63e8bff5d47a..87073e69fc0d 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4523,6 +4523,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
WDOORBELL32(ring->doorbell_index, ring->wptr);
}
+static void cik_compute_stop(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 j, tmp;
+
+ cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+ /* Disable wptr polling. */
+ tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+ tmp &= ~WPTR_POLL_EN;
+ WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+ /* Disable HQD. */
+ if (RREG32(CP_HQD_ACTIVE) & 1) {
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (!(RREG32(CP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
+ WREG32(CP_HQD_PQ_RPTR, 0);
+ WREG32(CP_HQD_PQ_WPTR, 0);
+ }
+ cik_srbm_select(rdev, 0, 0, 0, 0);
+}
+
/**
* cik_cp_compute_enable - enable/disable the compute CP MEs
*
@@ -4536,6 +4561,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_MEC_CNTL, 0);
else {
+ /*
+ * To make hibernation reliable we need to clear the compute ring
+ * configuration before halting the compute rings.
+ */
+ mutex_lock(&rdev->srbm_mutex);
+ cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+ cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+ mutex_unlock(&rdev->srbm_mutex);
+
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
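
cik_compute_stop() above asks the hardware queue to dequeue and then busy-polls CP_HQD_ACTIVE for up to usec_timeout microseconds before carrying on. Stripped of the register interface, the control flow is the usual bounded-poll pattern; poll_active() and udelay() below are stand-ins, not kernel helpers:

#include <stdbool.h>
#include <stdio.h>

#define TIMEOUT_USEC 100000u

/* Pretend the unit reports idle on the third poll. */
static bool poll_active(void)
{
	static int countdown = 3;
	return --countdown > 0;
}

static void udelay(unsigned int usec) { (void)usec; /* busy-wait stand-in */ }

/* Returns true if the unit went idle within the timeout. */
static bool stop_unit(void)
{
	unsigned int j;

	/* a real driver would issue the dequeue/stop request here */
	for (j = 0; j < TIMEOUT_USEC; j++) {
		if (!poll_active())
			return true;
		udelay(1);
	}
	return false;   /* timed out; the caller decides how hard to reset */
}

int main(void)
{
	printf("stopped: %d\n", stop_unit());
	return 0;
}
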
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d748963af08b..02246acba36a 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
}
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+
+ /* FIXME: use something other than this big hammer; after a few days we
+ * could not find a better combination, so soft-reset the SDMA blocks as
+ * they do not appear to shut down properly. This fixes hibernation and
+ * does not affect suspend to RAM.
+ */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+ (void)RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ (void)RREG32(SRBM_SOFT_RESET);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d6f0e40db81d..ee38f9aba391 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1628,6 +1628,7 @@ struct radeon_pm {
struct device *int_hwmon_dev;
/* dpm */
bool dpm_enabled;
+ bool sysfs_initialized;
struct radeon_dpm dpm;
};
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d79e892093b5..882171b411f0 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -436,7 +436,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+ if (((dev->pdev->device == 0x9802) ||
+ (dev->pdev->device == 0x9805) ||
+ (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -447,14 +449,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
- if ((dev->pdev->device == 0x9805) &&
- (dev->pdev->subsystem_vendor == 0x1734) &&
- (dev->pdev->subsystem_device == 0x11bd)) {
- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
- return false;
- }
-
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..a9b01bcf7d0a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+ if (hss > lvds->native_mode.hdisplay)
+ hss = (10 - 1) * 8;
+
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ hss;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8);
@@ -3382,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_device == 0x30ae)
return;
+ /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x280a)
+ return;
+
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 26baa9c05f6c..15f09068ac00 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -72,6 +72,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't try to start link training before we
+ * have the dpcd */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bdf263a4a67c..6b99d3956baa 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -599,6 +599,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
/*
* GPU helpers function.
*/
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
/**
* radeon_card_posted - check if the hw has already been initialized
*
@@ -612,6 +629,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
+ /* for pass through, always force asic_init */
+ if (radeon_device_is_virtual())
+ return false;
+
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
@@ -1426,6 +1447,21 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ /*
+ * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
+ * after the CP ring has chewed on at least one packet. Hence we stop and
+ * restart DPM after the radeon_ib_ring_tests().
+ */
+ if (rdev->pm.dpm_enabled &&
+ (rdev->pm.pm_method == PM_METHOD_DPM) &&
+ (rdev->family == CHIP_TURKS) &&
+ (rdev->flags & RADEON_IS_MOBILITY)) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_dpm_disable(rdev);
+ radeon_dpm_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2e1e9aa79cea..21e6e9745d00 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1619,18 +1619,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
radeon_fbdev_init(rdev);
drm_kms_helper_poll_init(rdev->ddev);
- if (rdev->pm.dpm_enabled) {
- /* do dpm late init */
- ret = radeon_pm_late_init(rdev);
- if (ret) {
- rdev->pm.dpm_enabled = false;
- DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
- }
- /* set the dpm state for PX since there won't be
- * a modeset to call this.
- */
- radeon_pm_compute_clocks(rdev);
- }
+ /* do pm late init */
+ ret = radeon_pm_late_init(rdev);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0ea1db83d573..6514ff218995 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -303,7 +303,8 @@ out_unref:
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
@@ -343,6 +344,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
int bpp_sel = 32;
int ret;
+ /* don't enable fbdev if no connectors */
+ if (list_empty(&rdev->ddev->mode_config.connector_list))
+ return 0;
+
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
@@ -360,18 +365,27 @@ int radeon_fbdev_init(struct radeon_device *rdev)
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
rdev->num_crtc,
RADEONFB_CONN_LIMIT);
- if (ret) {
- kfree(rfbdev);
- return ret;
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
- drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+ ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&rfbdev->helper);
+free:
+ kfree(rfbdev);
+ return ret;
}
void radeon_fbdev_fini(struct radeon_device *rdev)
@@ -386,7 +400,8 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
- fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ if (rdev->mode_info.rfbdev)
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}
int radeon_fbdev_total_size(struct radeon_device *rdev)
@@ -401,7 +416,22 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
+ if (!rdev->mode_info.rfbdev)
+ return false;
+
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
}
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
+
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
+{
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 00fc59762e0d..ccedb17580f7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,10 +79,17 @@ static void radeon_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ /* we can race here at startup; some boards seem to trigger
+ * hotplug irqs when they shouldn't. */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
+ mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
radeon_connector_hotplug(connector);
}
+ mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
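
The hotplug worker above gains two guards: an early return while mode_config is not yet initialized (some boards raise hotplug interrupts before modeset init has finished), and the mode_config mutex held around the connector-list walk so the list cannot change underneath it. A pthreads sketch of where the flag check and the lock sit, with invented names; in the sketch the flag is only flipped before any concurrent work runs:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct connector { struct connector *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized;              /* set once bring-up has finished */
static struct connector *connectors;  /* list protected by list_lock */

static void handle_hotplug(struct connector *c)
{
	printf("hotplug on connector %d\n", c->id);
}

static void hotplug_work(void)
{
	struct connector *c;

	if (!initialized)    /* we can race with startup: just bail out */
		return;

	pthread_mutex_lock(&list_lock);
	for (c = connectors; c; c = c->next)
		handle_hotplug(c);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct connector a = { .id = 1 };

	hotplug_work();        /* ignored: not initialized yet */
	connectors = &a;
	initialized = true;
	hotplug_work();        /* walks the list under the lock */
	return 0;
}
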
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8ec652722e4f..46bcd5d38a1e 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -540,6 +540,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
else
*value = 1;
break;
+ case RADEON_INFO_VA_UNMAP_WORKING:
+ *value = true;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 04db2fdd8692..3a395f405fff 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -921,6 +921,10 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
+
+void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
+void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
+
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2a7ba30165c7..9ec2ddd97edf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"
@@ -204,7 +205,7 @@ int radeon_bo_create(struct radeon_device *rdev,
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
*/
- bo->flags &= ~RADEON_GEM_GTT_WC;
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
/* Don't try to enable write-combining when it can't work, or things
* may be slow
@@ -216,7 +217,13 @@ int radeon_bo_create(struct radeon_device *rdev,
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
- bo->flags &= ~RADEON_GEM_GTT_WC;
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ bo->flags &= ~RADEON_GEM_GTT_WC;
#endif
radeon_ttm_placement_from_domain(bo, domain);
@@ -798,3 +805,22 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
ttm_bo_unreserve(&bo->tbo);
return r;
}
+
+/**
+ * radeon_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if fence should be added shared
+ *
+ */
+void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+ bool shared)
+{
+ struct reservation_object *resv = bo->tbo.resv;
+
+ if (shared)
+ reservation_object_add_shared_fence(resv, &fence->base);
+ else
+ reservation_object_add_excl_fence(resv, &fence->base);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 1b8ec7917154..3b0b377f76cb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -155,6 +155,8 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+ bool shared);
/*
* sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 1d94b542cd82..ab782e3f5d95 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -939,10 +939,6 @@ force:
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- rdev->pm.dpm.single_display = single_display;
-
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
@@ -958,6 +954,10 @@ force:
radeon_dpm_post_set_power_state(rdev);
+ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ rdev->pm.dpm.single_display = single_display;
+
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
@@ -1192,14 +1192,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
if (rdev->pm.num_power_states > 1) {
- /* where's the best place to put these? */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
-
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
@@ -1257,20 +1249,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
goto dpm_failed;
rdev->pm.dpm_enabled = true;
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- /* XXX: these are noops for dpm but are here for backwards compat */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
-
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
}
@@ -1411,9 +1389,51 @@ int radeon_pm_late_init(struct radeon_device *rdev)
int ret = 0;
if (rdev->pm.pm_method == PM_METHOD_DPM) {
- mutex_lock(&rdev->pm.mutex);
- ret = radeon_dpm_late_enable(rdev);
- mutex_unlock(&rdev->pm.mutex);
+ if (rdev->pm.dpm_enabled) {
+ if (!rdev->pm.sysfs_initialized) {
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+ if (ret)
+ DRM_ERROR("failed to create device file for dpm state\n");
+ /* XXX: these are noops for dpm but are here for backwards compat */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+ if (!ret)
+ rdev->pm.sysfs_initialized = true;
+ }
+
+ mutex_lock(&rdev->pm.mutex);
+ ret = radeon_dpm_late_enable(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ if (ret) {
+ rdev->pm.dpm_enabled = false;
+ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ } else {
+ /* set the dpm state for PX since there won't be
+ * a modeset to call this.
+ */
+ radeon_pm_compute_clocks(rdev);
+ }
+ }
+ } else {
+ if ((rdev->pm.num_power_states > 1) &&
+ (!rdev->pm.sysfs_initialized)) {
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+ if (!ret)
+ rdev->pm.sysfs_initialized = true;
+ }
}
return ret;
}
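
radeon_pm_late_init() above now creates the sysfs attributes itself, for both the DPM and the legacy code paths, behind a sysfs_initialized flag so that a late init which runs again (for instance after a reset or resume) does not try to create the same files twice. Reduced to its essence it is a run-once guard; create_attr() below stands in for device_create_file():

#include <stdbool.h>
#include <stdio.h>

static bool sysfs_initialized;

/* stand-in for device_create_file(); returns 0 on success */
static int create_attr(const char *name)
{
	printf("creating %s\n", name);
	return 0;
}

static int late_init(void)
{
	int ret = 0;

	if (!sysfs_initialized) {
		ret = create_attr("power_dpm_state");
		if (ret)
			fprintf(stderr, "failed to create power_dpm_state\n");
		ret = create_attr("power_method");
		if (ret)
			fprintf(stderr, "failed to create power_method\n");
		if (!ret)
			sysfs_initialized = true;  /* never repeat the creation */
	}
	return ret;
}

int main(void)
{
	late_init();
	late_init();   /* second call skips the attribute creation */
	return 0;
}
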
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896aca45..197b157b73d0 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_ref(fences[i]);
+
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_unref(&fences[i]);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
if (r == -ENOENT) {
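
radeon_sa_bo_new() above now takes a reference on every fence it is about to wait for before dropping the manager's spinlock, and drops those references once the wait returns; without them a concurrent path could free a fence while the lock is not held. The general shape (pin a shared object under the lock, use it unlocked, then release it) in a small sketch with made-up types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;   /* only touched while 'lock' is held in this sketch */
	int seq;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void fence_get(struct fence *f)
{
	if (f)
		f->refcount++;
}

static void fence_put(struct fence **f)
{
	if (*f && --(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

static void wait_for(struct fence *f)
{
	if (f)
		printf("waiting on fence %d\n", f->seq);  /* may sleep in real life */
}

static void wait_outside_lock(struct fence *shared)
{
	struct fence *f;

	pthread_mutex_lock(&lock);
	f = shared;
	fence_get(f);             /* keep it alive across the unlock */
	pthread_mutex_unlock(&lock);

	wait_for(f);              /* must not hold the spinlock while waiting */

	pthread_mutex_lock(&lock);
	fence_put(&f);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct fence *f = malloc(sizeof(*f));

	f->refcount = 1;
	f->seq = 42;
	wait_outside_lock(f);

	pthread_mutex_lock(&lock);
	fence_put(&f);
	pthread_mutex_unlock(&lock);
	return 0;
}
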
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 13f69472e716..fdea9d07cca7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -212,6 +212,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ if (radeon_ttm_tt_has_userptr(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
@@ -735,7 +737,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
+ while (i--) {
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
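
The one-character fix in radeon_ttm_tt_populate() above is easy to misread: with an unsigned index, 'while (--i)' stops before unmapping index 0 and, if the very first mapping failed (i == 0), wraps around and runs far past the array, whereas 'while (i--)' visits exactly the indices 0..i-1 that were mapped. A short demonstration:

#include <stdio.h>

/* Undo mappings 0..failed-1 after the mapping of index 'failed' failed. */
static void cleanup(unsigned int failed)
{
	unsigned int i = failed;

	/* 'while (--i)' here would skip index 0, and with failed == 0 the
	 * unsigned decrement would wrap and loop almost forever. */
	while (i--)
		printf("unmap %u\n", i);
}

int main(void)
{
	cleanup(3);   /* unmap 2, 1, 0 */
	cleanup(0);   /* nothing was mapped, nothing to undo */
	return 0;
}
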
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2ab80a5331a4..0974fa17deb2 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -143,7 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = false;
+ list[0].tv.shared = true;
list[0].tiling_flags = 0;
list[0].handle = 0;
list_add(&list[0].tv.head, head);
@@ -157,7 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = false;
+ list[idx].tv.shared = true;
list[idx].tiling_flags = 0;
list[idx].handle = 0;
list_add(&list[idx++].tv.head, head);
@@ -387,35 +387,25 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
static int radeon_vm_clear_bo(struct radeon_device *rdev,
struct radeon_bo *bo)
{
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head head;
struct radeon_ib ib;
unsigned entries;
uint64_t addr;
int r;
- memset(&tv, 0, sizeof(tv));
- tv.bo = &bo->tbo;
- tv.shared = false;
-
- INIT_LIST_HEAD(&head);
- list_add(&tv.head, &head);
-
- r = ttm_eu_reserve_buffers(&ticket, &head, true);
- if (r)
+ r = radeon_bo_reserve(bo, false);
+ if (r)
return r;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto error;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r)
+ goto error_unreserve;
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
- goto error;
+ goto error_unreserve;
ib.length_dw = 0;
@@ -425,15 +415,15 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
- goto error;
+ goto error_free;
- ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
- radeon_ib_free(rdev, &ib);
+ radeon_bo_fence(bo, ib.fence, false);
- return 0;
+error_free:
+ radeon_ib_free(rdev, &ib);
-error:
- ttm_eu_backoff_reservation(&ticket, &head);
+error_unreserve:
+ radeon_bo_unreserve(bo);
return r;
}
@@ -464,14 +454,14 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
if (soffset) {
/* make sure object fit at this offset */
- eoffset = soffset + size;
+ eoffset = soffset + size - 1;
if (soffset >= eoffset) {
return -EINVAL;
}
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
- if (last_pfn > rdev->vm_manager.max_pfn) {
- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ if (last_pfn >= rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
return -EINVAL;
}
@@ -485,7 +475,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
eoffset /= RADEON_GPU_PAGE_SIZE;
if (soffset || eoffset) {
struct interval_tree_node *it;
- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset);
if (it && it != &bo_va->it) {
struct radeon_bo_va *tmp;
tmp = container_of(it, struct radeon_bo_va, it);
@@ -513,6 +503,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp->addr = bo_va->addr;
tmp->bo = radeon_bo_ref(bo_va->bo);
list_add(&tmp->vm_status, &vm->freed);
+
+ bo_va->addr = 0;
}
interval_tree_remove(&bo_va->it, &vm->va);
@@ -522,7 +514,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
if (soffset || eoffset) {
bo_va->it.start = soffset;
- bo_va->it.last = eoffset - 1;
+ bo_va->it.last = eoffset;
interval_tree_insert(&bo_va->it, &vm->va);
}
@@ -710,6 +702,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
radeon_ib_free(rdev, &ib);
return r;
}
+ radeon_bo_fence(pd, ib.fence, false);
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(ib.fence);
radeon_fence_unref(&vm->last_flush);
@@ -868,6 +861,31 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
/**
+ * radeon_vm_fence_pts - fence page tables after an update
+ *
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @fence: fence to use
+ *
+ * Fence the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_fence_pts(struct radeon_vm *vm,
+ uint64_t start, uint64_t end,
+ struct radeon_fence *fence)
+{
+ unsigned i;
+
+ start >>= radeon_vm_block_size;
+ end = (end - 1) >> radeon_vm_block_size;
+
+ for (i = start; i <= end; ++i)
+ radeon_bo_fence(vm->page_tables[i].bo, fence, false);
+}
+
+/**
* radeon_vm_bo_update - map a bo into the vm page table
*
* @rdev: radeon_device pointer
@@ -979,6 +997,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
radeon_ib_free(rdev, &ib);
return r;
}
+ radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
@@ -1058,7 +1077,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
list_del(&bo_va->bo_list);
mutex_lock(&vm->mutex);
- interval_tree_remove(&bo_va->it, &vm->va);
+ if (bo_va->it.start || bo_va->it.last)
+ interval_tree_remove(&bo_va->it, &vm->va);
list_del(&bo_va->vm_status);
if (bo_va->addr) {
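
Several of the radeon_vm.c hunks above switch the VA bookkeeping to an inclusive end: eoffset becomes soffset + size - 1, the interval-tree lookups and it.last use that inclusive value directly, and the max_pfn check becomes >=. Off-by-one bugs are easy here, so the check is shown in isolation below; the page size and pfn limit are made-up constants, not the radeon values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define MAX_PFN   1024u   /* arbitrary limit for the example */

/* Validate a VA range given as (start offset, size) using an inclusive end. */
static bool range_ok(uint64_t soffset, uint64_t size)
{
	uint64_t eoffset, last_pfn;

	if (size == 0 || soffset + size - 1 < soffset)
		return false;               /* empty or wrapping range */

	eoffset = soffset + size - 1;       /* last byte covered by the mapping */
	last_pfn = eoffset / PAGE_SIZE;     /* last page actually used */

	return last_pfn < MAX_PFN;          /* '>=' rejects, as in the hunk */
}

int main(void)
{
	/* a mapping that ends exactly at the limit is accepted ... */
	printf("%d\n", range_ok((uint64_t)(MAX_PFN - 1) * PAGE_SIZE, PAGE_SIZE));
	/* ... while the next page out is rejected */
	printf("%d\n", range_ok((uint64_t)MAX_PFN * PAGE_SIZE, PAGE_SIZE));
	return 0;
}
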
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 534edaa77940..c3530caa7ddd 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2922,6 +2922,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
{ 0, 0, 0, 0 },
};
@@ -2951,6 +2957,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
+ /* limit mclk on all R7 370 parts for stability */
+ if (rdev->pdev->device == 0x6811 &&
+ rdev->pdev->revision == 0x81)
+ max_mclk = 120000;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d395b0bef73b..7370202c2022 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1000,9 +1000,9 @@ out_unlock:
return ret;
}
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
@@ -1034,6 +1034,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
return false;
}
+EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 8cbcb4589bd3..cd8d183dcfe5 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -546,7 +546,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
return ret;
out_gfree:
- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
@@ -589,19 +589,27 @@ int udl_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &ufbdev->helper,
1, 1);
- if (ret) {
- kfree(ufbdev);
- return ret;
-
- }
+ if (ret)
+ goto free;
- drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+ ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+ if (ret)
+ goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
+ ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
+ if (ret)
+ goto fini;
+
return 0;
+
+fini:
+ drm_fb_helper_fini(&ufbdev->helper);
+free:
+ kfree(ufbdev);
+ return ret;
}
void udl_fbdev_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8044f5fb7c49..6de963b70eee 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -51,7 +51,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0426b5bed8fc..59e2ae85eed9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
#include <linux/module.h>
+#include <linux/console.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
@@ -1447,6 +1448,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ return -EINVAL;
+#endif
+
ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 50b52802f470..8ad66bbd4f28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2489,7 +2489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
@@ -2533,6 +2533,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2548,7 +2549,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);