Diffstat (limited to 'drivers/gpu')
128 files changed, 2145 insertions(+), 1073 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 5c60ae524c45..ff68eefae273 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -335,18 +335,27 @@ int ast_fbdev_init(struct drm_device *dev) ret = drm_fb_helper_init(dev, &afbdev->helper, 1, 1); - if (ret) { - kfree(afbdev); - return ret; - } + if (ret) + goto free; - drm_fb_helper_single_add_all_connectors(&afbdev->helper); + ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper); + if (ret) + goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); - drm_fb_helper_initial_config(&afbdev->helper, 32); + ret = drm_fb_helper_initial_config(&afbdev->helper, 32); + if (ret) + goto fini; + return 0; + +fini: + drm_fb_helper_fini(&afbdev->helper); +free: + kfree(afbdev); + return ret; } void ast_fbdev_fini(struct drm_device *dev) diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 035dacc93382..fd5c5f3370f6 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev) } while (ast_read32(ast, 0x10000) != 0x01); data = ast_read32(ast, 0x10004); - if (data & 0x400) + if (data & 0x40) ast->dram_bus_width = 16; else ast->dram_bus_width = 32; diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c index fe95d31cd110..9cdab0f2575c 100644 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c @@ -197,12 +197,22 @@ int bochs_fbdev_init(struct bochs_device *bochs) if (ret) return ret; - drm_fb_helper_single_add_all_connectors(&bochs->fb.helper); + ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper); + if (ret) + goto fini; + drm_helper_disable_unused_functions(bochs->dev); - drm_fb_helper_initial_config(&bochs->fb.helper, 32); + + ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32); + if (ret) + goto fini; bochs->fb.initialized = true; return 0; + +fini: + drm_fb_helper_fini(&bochs->fb.helper); + return ret; } void bochs_fbdev_fini(struct bochs_device *bochs) diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index d231b1c317af..b487c4e40d83 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -314,17 +314,17 @@ int cirrus_fbdev_init(struct cirrus_device *cdev) ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper, cdev->num_crtc, CIRRUSFB_CONN_LIMIT); - if (ret) { - kfree(gfbdev); + if (ret) + return ret; + + ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper); + if (ret) return ret; - } - drm_fb_helper_single_add_all_connectors(&gfbdev->helper); /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(cdev->dev); - drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel); - return 0; + return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel); } void cirrus_fbdev_fini(struct cirrus_device *cdev) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index e79c8d3700d8..18dd03bb7af0 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -42,9 +42,10 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv); +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct 
drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); /* Avoid boilerplate. I'm tired of typing. */ #define DRM_ENUM_NAME_FN(fnname, list) \ @@ -530,17 +531,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb) } EXPORT_SYMBOL(drm_framebuffer_reference); -static void drm_framebuffer_free_bug(struct kref *kref) -{ - BUG(); -} - -static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) -{ - DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount)); - kref_put(&fb->refcount, drm_framebuffer_free_bug); -} - /** * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr * @fb: fb to unregister @@ -1296,7 +1286,7 @@ void drm_plane_force_disable(struct drm_plane *plane) return; } /* disconnect the plane from the fb and crtc: */ - __drm_framebuffer_unreference(plane->old_fb); + drm_framebuffer_unreference(plane->old_fb); plane->old_fb = NULL; plane->fb = NULL; plane->crtc = NULL; @@ -2565,8 +2555,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - /* For some reason crtc x/y offsets are signed internally. */ - if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) + /* + * Universal plane src offsets are only 16.16, prevent havoc for + * drivers using universal plane code internally. + */ + if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) return -ERANGE; drm_modeset_lock_all(dev); @@ -2739,13 +2732,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, */ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { - fb = add_framebuffer_internal(dev, &fbreq, file_priv); + fb = internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } - - drm_framebuffer_reference(fb); } else { fb = NULL; } @@ -3114,9 +3105,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return 0; } -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv) +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; @@ -3148,12 +3140,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, return fb; } - mutex_lock(&file_priv->fbs_lock); - r->fb_id = fb->base.id; - list_add(&fb->filp_head, &file_priv->fbs); - DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); - mutex_unlock(&file_priv->fbs_lock); - return fb; } @@ -3175,15 +3161,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - fb = add_framebuffer_internal(dev, data, file_priv); + fb = internal_framebuffer_create(dev, r, file_priv); if (IS_ERR(fb)) return PTR_ERR(fb); + /* Transfer ownership to the filp for reaping on close */ + + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + mutex_lock(&file_priv->fbs_lock); + r->fb_id = fb->base.id; + list_add(&fb->filp_head, &file_priv->fbs); + mutex_unlock(&file_priv->fbs_lock); + return 0; } @@ -4588,6 +4583,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, unsigned long flags; int ret = -EINVAL; + if (!drm_core_check_feature(dev, 
DRIVER_MODESET)) + return -EINVAL; + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || page_flip->reserved != 0) return -EINVAL; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 08e33b8b13a4..9f59c9027ebe 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -378,10 +378,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, /* * The specification doesn't give any recommendation on how often to - * retry native transactions, so retry 7 times like for I2C-over-AUX - * transactions. + * retry native transactions. We used to retry 7 times like for + * aux i2c transactions but real world devices this wasn't + * sufficient, bump to 32 which makes Dell 4k monitors happier. */ - for (retry = 0; retry < 7; retry++) { + for (retry = 0; retry < 32; retry++) { mutex_lock(&aux->hw_mutex); err = aux->transfer(aux, &msg); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 070f913d2dba..c4f8e8f172cd 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) { + struct drm_dp_mst_branch *mstb; + switch (old_pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_SST_SINK: @@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) drm_dp_mst_unregister_i2c_bus(&port->aux); break; case DP_PEER_DEVICE_MST_BRANCHING: - drm_dp_put_mst_branch_device(port->mstb); + mstb = port->mstb; port->mstb = NULL; + drm_dp_put_mst_branch_device(mstb); break; } } @@ -953,17 +956,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, u8 *rad) { - int lct = port->parent->lct; + int parent_lct = port->parent->lct; int shift = 4; - int idx = lct / 2; - if (lct > 1) { - memcpy(rad, port->parent->rad, idx); - shift = (lct % 2) ? 4 : 0; + int idx = (parent_lct - 1) / 2; + if (parent_lct > 1) { + memcpy(rad, port->parent->rad, idx + 1); + shift = (parent_lct % 2) ? 
4 : 0; } else rad[0] = 0; rad[idx] |= port->port_num << shift; - return lct + 1; + return parent_lct + 1; } /* @@ -993,37 +996,47 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) return send_link; } -static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, - struct drm_dp_mst_port *port) +static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) { int ret; - if (port->dpcd_rev >= 0x12) { - port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); - if (!port->guid_valid) { - ret = drm_dp_send_dpcd_write(mstb->mgr, - port, - DP_GUID, - 16, port->guid); - port->guid_valid = true; + + memcpy(mstb->guid, guid, 16); + + if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { + if (mstb->port_parent) { + ret = drm_dp_send_dpcd_write( + mstb->mgr, + mstb->port_parent, + DP_GUID, + 16, + mstb->guid); + } else { + + ret = drm_dp_dpcd_write( + mstb->mgr->aux, + DP_GUID, + mstb->guid, + 16); } } } static void build_mst_prop_path(struct drm_dp_mst_port *port, struct drm_dp_mst_branch *mstb, - char *proppath) + char *proppath, + size_t proppath_size) { int i; char temp[8]; - snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id); + snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); for (i = 0; i < (mstb->lct - 1); i++) { int shift = (i % 2) ? 0 : 4; - int port_num = mstb->rad[i / 2] >> shift; - snprintf(temp, 8, "-%d", port_num); - strncat(proppath, temp, 255); + int port_num = (mstb->rad[i / 2] >> shift) & 0xf; + snprintf(temp, sizeof(temp), "-%d", port_num); + strlcat(proppath, temp, proppath_size); } - snprintf(temp, 8, "-%d", port->port_num); - strncat(proppath, temp, 255); + snprintf(temp, sizeof(temp), "-%d", port->port_num); + strlcat(proppath, temp, proppath_size); } static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, @@ -1060,7 +1073,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, port->dpcd_rev = port_msg->dpcd_revision; port->num_sdp_streams = port_msg->num_sdp_streams; port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; - memcpy(port->guid, port_msg->peer_guid, 16); /* manage mstb port lists with mgr lock - take a reference for this list */ @@ -1073,11 +1085,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, if (old_ddps != port->ddps) { if (port->ddps) { - drm_dp_check_port_guid(mstb, port); if (!port->input) drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); } else { - port->guid_valid = false; port->available_pbn = 0; } } @@ -1094,7 +1104,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath); + build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); } @@ -1122,10 +1132,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, if (old_ddps != port->ddps) { if (port->ddps) { - drm_dp_check_port_guid(mstb, port); dowork = true; } else { - port->guid_valid = false; port->available_pbn = 0; } } @@ -1149,11 +1157,13 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ struct drm_dp_mst_port *port; int i; /* find the port by iterating down */ + + mutex_lock(&mgr->lock); mstb = mgr->mst_primary; for (i = 0; i < lct - 1; i++) { int shift = (i % 2) ? 
0 : 4; - int port_num = rad[i / 2] >> shift; + int port_num = (rad[i / 2] >> shift) & 0xf; list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { @@ -1168,6 +1178,49 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ } } kref_get(&mstb->kref); + mutex_unlock(&mgr->lock); + return mstb; +} + +static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( + struct drm_dp_mst_branch *mstb, + uint8_t *guid) +{ + struct drm_dp_mst_branch *found_mstb; + struct drm_dp_mst_port *port; + + if (memcmp(mstb->guid, guid, 16) == 0) + return mstb; + + + list_for_each_entry(port, &mstb->ports, next) { + if (!port->mstb) + continue; + + found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); + + if (found_mstb) + return found_mstb; + } + + return NULL; +} + +static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( + struct drm_dp_mst_topology_mgr *mgr, + uint8_t *guid) +{ + struct drm_dp_mst_branch *mstb; + + /* find the port by iterating down */ + mutex_lock(&mgr->lock); + + mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); + + if (mstb) + kref_get(&mstb->kref); + + mutex_unlock(&mgr->lock); return mstb; } @@ -1175,7 +1228,7 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m struct drm_dp_mst_branch *mstb) { struct drm_dp_mst_port *port; - + struct drm_dp_mst_branch *mstb_child; if (!mstb->link_address_sent) { drm_dp_send_link_address(mgr, mstb); mstb->link_address_sent = true; @@ -1190,17 +1243,31 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m if (!port->available_pbn) drm_dp_send_enum_path_resources(mgr, mstb, port); - if (port->mstb) - drm_dp_check_and_send_link_address(mgr, port->mstb); + if (port->mstb) { + mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); + if (mstb_child) { + drm_dp_check_and_send_link_address(mgr, mstb_child); + drm_dp_put_mst_branch_device(mstb_child); + } + } } } static void drm_dp_mst_link_probe_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_dp_mst_branch *mstb; - drm_dp_check_and_send_link_address(mgr, mgr->mst_primary); - + mutex_lock(&mgr->lock); + mstb = mgr->mst_primary; + if (mstb) { + kref_get(&mstb->kref); + } + mutex_unlock(&mgr->lock); + if (mstb) { + drm_dp_check_and_send_link_address(mgr, mstb); + drm_dp_put_mst_branch_device(mstb); + } } static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -1255,7 +1322,6 @@ retry: goto retry; } DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); - WARN(1, "fail\n"); return -EIO; } @@ -1269,6 +1335,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, struct drm_dp_sideband_msg_tx *txmsg) { struct drm_dp_mst_branch *mstb = txmsg->dst; + u8 req_type; /* both msg slots are full */ if (txmsg->seqno == -1) { @@ -1285,7 +1352,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, txmsg->seqno = 1; mstb->tx_slots[txmsg->seqno] = txmsg; } - hdr->broadcast = 0; + + req_type = txmsg->msg[0] & 0x7f; + if (req_type == DP_CONNECTION_STATUS_NOTIFY || + req_type == DP_RESOURCE_STATUS_NOTIFY) + hdr->broadcast = 1; + else + hdr->broadcast = 0; hdr->path_msg = txmsg->path_msg; hdr->lct = mstb->lct; hdr->lcr = mstb->lct - 1; @@ -1454,6 +1527,9 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg->reply.u.link_addr.ports[i].num_sdp_streams, 
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); } + + drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); } @@ -1618,6 +1694,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) req_payload.start_slot = cur_slots; if (mgr->proposed_vcpis[i]) { port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) { + mutex_unlock(&mgr->payload_lock); + return -EINVAL; + } req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; } else { port = NULL; @@ -1643,6 +1724,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) mgr->payloads[i].payload_state = req_payload.payload_state; } cur_slots += req_payload.num_slots; + + if (port) + drm_dp_put_port(port); } for (i = 0; i < mgr->max_payloads; i++) { @@ -1857,31 +1941,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mgr->mst_primary = mstb; kref_get(&mgr->mst_primary->kref); - { - struct drm_dp_payload reset_pay; - reset_pay.start_slot = 0; - reset_pay.num_slots = 0x3f; - drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); - } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); if (ret < 0) { goto out_unlock; } - - /* sort out guid */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); - if (ret != 16) { - DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); - goto out_unlock; - } - - mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); - if (!mgr->guid_valid) { - ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); - mgr->guid_valid = true; + { + struct drm_dp_payload reset_pay; + reset_pay.start_slot = 0; + reset_pay.num_slots = 0x3f; + drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); } queue_work(system_long_wq, &mgr->work); @@ -1943,6 +2013,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) if (mgr->mst_primary) { int sret; + u8 guid[16]; + sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); if (sret != DP_RECEIVER_CAP_SIZE) { DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); @@ -1957,6 +2029,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) ret = -1; goto out_unlock; } + + /* Some hubs forget their guids after they resume */ + sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); + if (sret != 16) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + ret = -1; + goto out_unlock; + } + drm_dp_check_mstb_guid(mgr->mst_primary, guid); + ret = 0; } else ret = -1; @@ -2073,28 +2155,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (mgr->up_req_recv.have_eomt) { struct drm_dp_sideband_msg_req_body msg; - struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_branch *mstb = NULL; bool seqno; - mstb = drm_dp_get_mst_branch_device(mgr, - mgr->up_req_recv.initial_hdr.lct, - mgr->up_req_recv.initial_hdr.rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; + + if (!mgr->up_req_recv.initial_hdr.broadcast) { + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->up_req_recv.initial_hdr.lct, + mgr->up_req_recv.initial_hdr.rad); + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from 
unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } } seqno = mgr->up_req_recv.initial_hdr.seqno; drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { - drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); + + if (!mstb) + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + drm_dp_update_port(mstb, &msg.u.conn_stat); + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); (*mgr->cbs->hotplug)(mgr); } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { - drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); + if (!mstb) + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); } @@ -2264,6 +2369,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); if (pbn == port->vcpi.pbn) { *slots = port->vcpi.num_slots; + drm_dp_put_port(port); return true; } } @@ -2410,32 +2516,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status); */ int drm_dp_calc_pbn_mode(int clock, int bpp) { - fixed20_12 pix_bw; - fixed20_12 fbpp; - fixed20_12 result; - fixed20_12 margin, tmp; - u32 res; - - pix_bw.full = dfixed_const(clock); - fbpp.full = dfixed_const(bpp); - tmp.full = dfixed_const(8); - fbpp.full = dfixed_div(fbpp, tmp); - - result.full = dfixed_mul(pix_bw, fbpp); - margin.full = dfixed_const(54); - tmp.full = dfixed_const(64); - margin.full = dfixed_div(margin, tmp); - result.full = dfixed_div(result, margin); - - margin.full = dfixed_const(1006); - tmp.full = dfixed_const(1000); - margin.full = dfixed_div(margin, tmp); - result.full = dfixed_mul(result, margin); - - result.full = dfixed_div(result, tmp); - result.full = dfixed_ceil(result); - res = dfixed_trunc(result); - return res; + u64 kbps; + s64 peak_kbps; + u32 numerator; + u32 denominator; + + kbps = clock * bpp; + + /* + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 + * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on + * common multiplier to render an integer PBN for all link rate/lane + * counts combinations + * calculate + * peak_kbps *= (1006/1000) + * peak_kbps *= (64/54) + * peak_kbps *= 8 convert to bytes + */ + + numerator = 64 * 1006; + denominator = 54 * 8 * 1000 * 1000; + + kbps *= numerator; + peak_kbps = drm_fixp_from_fraction(kbps, denominator); + + return drm_fixp2int_ceil(peak_kbps); } EXPORT_SYMBOL(drm_dp_calc_pbn_mode); @@ -2443,11 +2548,23 @@ static 
int test_calc_pbn_mode(void) { int ret; ret = drm_dp_calc_pbn_mode(154000, 30); - if (ret != 689) + if (ret != 689) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 154000, 30, 689, ret); return -EINVAL; + } ret = drm_dp_calc_pbn_mode(234000, 30); - if (ret != 1047) + if (ret != 1047) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 234000, 30, 1047, ret); return -EINVAL; + } + ret = drm_dp_calc_pbn_mode(297000, 24); + if (ret != 1063) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 297000, 24, 1063, ret); + return -EINVAL; + } return 0; } diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 0a235fe61c9b..144a0368ccd0 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -288,6 +288,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); kfree(edid); return ret; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0c0c39bac23d..1e41f466055c 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_ } EXPORT_SYMBOL(drm_fb_helper_add_one_connector); +static void remove_from_modeset(struct drm_mode_set *set, + struct drm_connector *connector) +{ + int i, j; + + for (i = 0; i < set->num_connectors; i++) { + if (set->connectors[i] == connector) + break; + } + + if (i == set->num_connectors) + return; + + for (j = i + 1; j < set->num_connectors; j++) { + set->connectors[j - 1] = set->connectors[j]; + } + set->num_connectors--; + + /* because i915 is pissy about this.. + * TODO maybe need to makes sure we set it back to !=NULL somewhere? 
+ */ + if (set->num_connectors == 0) + set->fb = NULL; +} + int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector) { @@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, } fb_helper->connector_count--; kfree(fb_helper_connector); + + /* also cleanup dangling references to the connector: */ + for (i = 0; i < fb_helper->crtc_count; i++) + remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); + return 0; } EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); @@ -347,9 +377,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; bool ret; + bool do_delayed = false; + drm_modeset_lock_all(dev); ret = restore_fbdev_mode(fb_helper); + + do_delayed = fb_helper->delayed_hotplug; + if (do_delayed) + fb_helper->delayed_hotplug = false; drm_modeset_unlock_all(dev); + + if (do_delayed) + drm_fb_helper_hotplug_event(fb_helper); return ret; } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); @@ -888,10 +927,6 @@ int drm_fb_helper_set_par(struct fb_info *info) drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); - if (fb_helper->delayed_hotplug) { - fb_helper->delayed_hotplug = false; - drm_fb_helper_hotplug_event(fb_helper); - } return 0; } EXPORT_SYMBOL(drm_fb_helper_set_par); @@ -1418,7 +1453,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, int n, int width, int height) { int c, o; - struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; struct drm_connector_helper_funcs *connector_funcs; struct drm_encoder *encoder; @@ -1437,7 +1471,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, if (modes[n] == NULL) return best_score; - crtcs = kzalloc(dev->mode_config.num_connector * + crtcs = kzalloc(fb_helper->connector_count * sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); if (!crtcs) return best_score; @@ -1483,7 +1517,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, if (score > best_score) { best_score = score; memcpy(best_crtcs, crtcs, - dev->mode_config.num_connector * + fb_helper->connector_count * sizeof(struct drm_fb_helper_crtc *)); } } @@ -1603,7 +1637,7 @@ out: * RETURNS: * Zero if everything went ok, nonzero otherwise. */ -bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) +int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) { struct drm_device *dev = fb_helper->dev; int count = 0; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 5ef03c216a27..7d64d22486e9 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) /* Reinitialize corresponding vblank timestamp if high-precision query * available. Skip this step if query unsupported or failed. Will - * reinitialize delayed at next vblank interrupt in that case. + * reinitialize delayed at next vblank interrupt in that case and + * assign 0 for now, to mark the vblanktimestamp as invalid. */ - if (rc) { - tslot = atomic_read(&vblank->count) + diff; - vblanktimestamp(dev, crtc, tslot) = t_vblank; - } + tslot = atomic_read(&vblank->count) + diff; + vblanktimestamp(dev, crtc, tslot) = rc ? 
t_vblank : (struct timeval) {0, 0}; smp_mb__before_atomic(); atomic_add(diff, &vblank->count); @@ -1029,7 +1028,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc) { struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; - BUG_ON(atomic_read(&vblank->refcount) == 0); + if (WARN_ON(atomic_read(&vblank->refcount) == 0)) + return; if (WARN_ON(crtc >= dev->num_crtcs)) return; diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index f861361a635e..4924d381b664 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data, struct drm_master *master = file_priv->master; int ret = 0; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + ++file_priv->lock_count; if (lock->context == DRM_KERNEL_CONTEXT) { @@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_lock *lock = data; struct drm_master *master = file_priv->master; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", task_pid_nr(current), lock->context); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 6857e9ad6339..5edc61f2f212 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -151,6 +151,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; count = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); } else count = (*connector_funcs->get_modes)(connector); } diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index ddd90ddbc200..2d42ce6d3757 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -593,6 +593,7 @@ int psb_fbdev_init(struct drm_device *dev) { struct psb_fbdev *fbdev; struct drm_psb_private *dev_priv = dev->dev_private; + int ret; fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL); if (!fbdev) { @@ -604,16 +605,29 @@ int psb_fbdev_init(struct drm_device *dev) drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs); - drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs, - INTELFB_CONN_LIMIT); + ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper, + dev_priv->ops->crtcs, INTELFB_CONN_LIMIT); + if (ret) + goto free; - drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper); + ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper); + if (ret) + goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); - drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32); + ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32); + if (ret) + goto fini; + return 0; + +fini: + drm_fb_helper_fini(&fbdev->psb_fb_helper); +free: + kfree(fbdev); + return ret; } static void psb_fbdev_fini(struct drm_device *dev) diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index c707fa6fca85..e3bdc8b1c32c 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, return ret; } /* We have the initial and handle reference but need only one now */ - drm_gem_object_unreference(&r->gem); + drm_gem_object_unreference_unlocked(&r->gem); *handlep 
= handle; return 0; } diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c index 87885d8c06e8..4869117b69eb 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c @@ -85,7 +85,7 @@ static const char *const dsi_errors[] = { "RX Prot Violation", "HS Generic Write FIFO Full", "LP Generic Write FIFO Full", - "Generic Read Data Avail" + "Generic Read Data Avail", "Special Packet Sent", "Tearing Effect", }; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2318b4c7a8f8..b51f02758836 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -692,11 +692,12 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } - intel_dp_mst_resume(dev); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, true); drm_modeset_unlock_all(dev); + intel_dp_mst_resume(dev); + /* * ... but also need to make sure that hotplug processing * doesn't cause havoc. Like in the driver load code we don't @@ -1087,7 +1088,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4); s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); - s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); + s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); s->ecochk = I915_READ(GAM_ECOCHK); @@ -1140,6 +1141,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) /* Gunit-Display CZ domain, 0x182028-0x1821CF */ s->gu_ctl0 = I915_READ(VLV_GU_CTL0); s->gu_ctl1 = I915_READ(VLV_GU_CTL1); + s->pcbr = I915_READ(VLV_PCBR); s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); /* @@ -1168,7 +1170,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]); I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count); + I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); I915_WRITE(GAM_ECOCHK, s->ecochk); @@ -1234,6 +1236,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) /* Gunit-Display CZ domain, 0x182028-0x1821CF */ I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); + I915_WRITE(VLV_PCBR, s->pcbr); I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); } @@ -1242,19 +1245,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) u32 val; int err; - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); - WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on); - #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) - /* Wait for a previous force-off to settle */ - if (force_on) { - err = wait_for(!COND, 20); - if (err) { - DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n", - I915_READ(VLV_GTLC_SURVIVABILITY_REG)); - return err; - } - } val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); val &= ~VLV_GFX_CLK_FORCE_ON_BIT; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 16a6f6d187a1..c68e9f2947a6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -877,6 +877,7 @@ struct i915_suspend_saved_registers { u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; u32 
savePCH_PORT_HOTPLUG; + u16 saveGCDGMBUS; }; struct vlv_s0ix_state { @@ -937,6 +938,7 @@ struct vlv_s0ix_state { /* Display 2 CZ domain */ u32 gu_ctl0; u32 gu_ctl1; + u32 pcbr; u32 clock_gate_dis2; }; @@ -1665,8 +1667,6 @@ struct drm_i915_private { */ struct workqueue_struct *dp_wq; - uint32_t bios_vgacntr; - /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! */ struct i915_dri1_state dri1; @@ -2077,8 +2077,8 @@ struct drm_i915_cmd_table { #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ - ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ - (INTEL_DEVID(dev) & 0xf) == 0x6 || \ + ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ + (INTEL_DEVID(dev) & 0xf) == 0xb || \ (INTEL_DEVID(dev) & 0xf) == 0xe)) #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) @@ -2899,15 +2899,14 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val); #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ - u32 upper = I915_READ(upper_reg); \ - u32 lower = I915_READ(lower_reg); \ - u32 tmp = I915_READ(upper_reg); \ - if (upper != tmp) { \ - upper = tmp; \ - lower = I915_READ(lower_reg); \ - WARN_ON(I915_READ(upper_reg) != upper); \ - } \ - (u64)upper << 32 | lower; }) + u32 upper, lower, tmp; \ + tmp = I915_READ(upper_reg); \ + do { \ + upper = tmp; \ + lower = I915_READ(lower_reg); \ + tmp = I915_READ(upper_reg); \ + } while (upper != tmp); \ + (u64)upper << 32 | lower; }) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 28f91df2604d..d88dbedeaa77 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3050,6 +3050,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, u32 size = i915_gem_obj_ggtt_size(obj); uint64_t val; + /* Adjust fence size to match tiled area */ + if (obj->tiling_mode != I915_TILING_NONE) { + uint32_t row_size = obj->stride * + (obj->tiling_mode == I915_TILING_Y ? 32 : 8); + size = (size / row_size) * row_size; + } + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 0xfffff000) << 32; val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; @@ -4193,7 +4200,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; int ret; - if (INTEL_INFO(dev)->gen >= 6) + if (drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; ret = i915_mutex_lock_interruptible(dev); @@ -4249,6 +4256,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -4808,25 +4818,18 @@ i915_gem_init_hw(struct drm_device *dev) for (i = 0; i < NUM_L3_SLICES(dev); i++) i915_gem_l3_remap(&dev_priv->ring[RCS], i); - /* - * XXX: Contexts should only be initialized once. Doing a switch to the - * default context switch however is something we'd like to do after - * reset or thaw (the latter may not actually be necessary for HW, but - * goes with our code better). Context switching requires rings (for - * the do_switch), but before enabling PPGTT. So don't move this. 
- */ - ret = i915_gem_context_enable(dev_priv); + ret = i915_ppgtt_init_hw(dev); if (ret && ret != -EIO) { - DRM_ERROR("Context enable failed %d\n", ret); + DRM_ERROR("PPGTT enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); - - return ret; } - ret = i915_ppgtt_init_hw(dev); + ret = i915_gem_context_enable(dev_priv); if (ret && ret != -EIO) { - DRM_ERROR("PPGTT enable failed %d\n", ret); + DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); + + return ret; } return ret; @@ -5141,7 +5144,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a5221d8f1580..c12f087d7a14 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -468,7 +468,12 @@ mi_set_context(struct intel_engine_cs *ring, u32 hw_flags) { u32 flags = hw_flags | MI_MM_SPACE_GTT; - int ret; + const int num_rings = + /* Use an extended w/a on ivb+ if signalling from other rings */ + i915_semaphore_is_enabled(ring->dev) ? + hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 : + 0; + int len, i, ret; /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value @@ -485,15 +490,31 @@ mi_set_context(struct intel_engine_cs *ring, if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8) flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); - ret = intel_ring_begin(ring, 6); + + len = 4; + if (INTEL_INFO(ring->dev)->gen >= 7) + len += 2 + (num_rings ? 
4*num_rings + 2 : 0); + + ret = intel_ring_begin(ring, len); if (ret) return ret; /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (INTEL_INFO(ring->dev)->gen >= 7) + if (INTEL_INFO(ring->dev)->gen >= 7) { intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); - else - intel_ring_emit(ring, MI_NOOP); + if (num_rings) { + struct intel_engine_cs *signaller; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); + for_each_ring(signaller, to_i915(ring->dev), i) { + if (signaller == ring) + continue; + + intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); + } + } + } intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); @@ -505,10 +526,21 @@ mi_set_context(struct intel_engine_cs *ring, */ intel_ring_emit(ring, MI_NOOP); - if (INTEL_INFO(ring->dev)->gen >= 7) + if (INTEL_INFO(ring->dev)->gen >= 7) { + if (num_rings) { + struct intel_engine_cs *signaller; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); + for_each_ring(signaller, to_i915(ring->dev), i) { + if (signaller == ring) + continue; + + intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); + } + } intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); - else - intel_ring_emit(ring, MI_NOOP); + } intel_ring_advance(ring); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 728938f02341..b92844ae5b27 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1745,6 +1745,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev) struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; ppgtt->base.cleanup(&ppgtt->base); + kfree(ppgtt); } if (drm_mm_initialized(&vm->mm)) { diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 85fda6b803e4..360087eb83fd 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) r = devm_request_mem_region(dev->dev, base + 1, dev_priv->gtt.stolen_size - 1, "Graphics Stolen Memory"); - if (r == NULL) { + /* + * GEN3 firmware likes to smash pci bridges into the stolen + * range. Apparently this works. 
+ */ + if (r == NULL && !IS_GEN3(dev)) { DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", base, base + (uint32_t)dev_priv->gtt.stolen_size); base = 0; @@ -481,10 +485,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, stolen_offset, gtt_offset, size); /* KISS and expect everything to be page-aligned */ - BUG_ON(stolen_offset & 4095); - BUG_ON(size & 4095); - - if (WARN_ON(size == 0)) + if (WARN_ON(size == 0 || stolen_offset & 4095 || size & 4095)) return NULL; stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 2b1eaa29ada4..6765148ea5bc 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -315,9 +315,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, return -EINVAL; } + mutex_lock(&dev->struct_mutex); if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { - drm_gem_object_unreference_unlocked(&obj->base); - return -EBUSY; + ret = -EBUSY; + goto err; } if (args->tiling_mode == I915_TILING_NONE) { @@ -349,7 +350,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } } - mutex_lock(&dev->struct_mutex); if (args->tiling_mode != obj->tiling_mode || args->stride != obj->stride) { /* We need to rebind the object if its current allocation @@ -395,6 +395,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, obj->bit_17 = NULL; } +err: drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index d182058383a9..1719078c763a 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -113,7 +113,10 @@ restart: continue; obj = mo->obj; - drm_gem_object_reference(&obj->base); + + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + spin_unlock(&mn->lock); cancel_userptr(obj); @@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, it = interval_tree_iter_first(&mn->objects, start, end); if (it != NULL) { obj = container_of(it, struct i915_mmu_object, it)->obj; - drm_gem_object_reference(&obj->base); + + /* The mmu_object is released late when destroying the + * GEM object so it is entirely possible to gain a + * reference on an object in the process of being freed + * since our serialisation is via the spinlock and not + * the struct_mutex - and consequently use it after it + * is freed and then double free it. 
+ */ + if (!kref_get_unless_zero(&obj->base.refcount)) { + spin_unlock(&mn->lock); + serial = 0; + continue; + } + serial = mn->serial; } spin_unlock(&mn->lock); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f66392b6e287..23329b48766f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2123,6 +2123,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) u32 iir, gt_iir, pm_iir; irqreturn_t ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + while (true) { /* Find, clear, then process each source of interrupt */ @@ -2167,6 +2170,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) u32 master_ctl, iir; irqreturn_t ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + for (;;) { master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; iir = I915_READ(VLV_IIR); @@ -2455,6 +2461,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + /* We get interrupts on unclaimed registers, so check for this before we * do any I915_{READ,WRITE}. */ intel_uncore_check_errors(dev); @@ -2525,6 +2534,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) uint32_t tmp = 0; enum pipe pipe; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + master_ctl = I915_READ(GEN8_MASTER_IRQ); master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; if (!master_ctl) @@ -3986,14 +3998,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev) ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); I915_WRITE16(IMR, dev_priv->irq_mask); I915_WRITE16(IER, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT); POSTING_READ16(IER); @@ -4022,8 +4032,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. 
IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -4033,6 +4041,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if (I915_READ16(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -4053,6 +4062,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + iir = I915_READ16(IIR); if (iir == 0) return IRQ_NONE; @@ -4159,14 +4171,12 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (I915_HAS_HOTPLUG(dev)) { @@ -4210,8 +4220,6 @@ static bool i915_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -4221,6 +4229,7 @@ static bool i915_handle_vblank(struct drm_device *dev, if (I915_READ(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -4240,6 +4249,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; int pipe, ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + iir = I915_READ(IIR); do { bool irq_received = (iir & ~flip_mask) != 0; @@ -4468,6 +4480,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + iir = I915_READ(IIR); for (;;) { @@ -4779,4 +4794,5 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev) dev_priv->pm._irqs_disabled = false; dev->driver->irq_preinstall(dev); dev->driver->irq_postinstall(dev); + synchronize_irq(dev_priv->dev->irq); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c01e5f31430e..9bf747f2a32b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -74,6 +74,7 @@ #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) +#define GCDGMBUS 0xcc #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ @@ -370,6 +371,7 @@ #define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) #define PIPE_CONTROL_CS_STALL (1<<20) #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) +#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) @@ -1071,6 +1073,7 @@ enum punit_power_well { #define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) #define GEN6_NOSYNC 0 +#define RING_PSMI_CTL(base) ((base)+0x50) #define RING_MAX_IDLE(base) 
((base)+0x54) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) @@ -1401,6 +1404,7 @@ enum punit_power_well { #define GEN6_BLITTER_FBC_NOTIFY (1<<3) #define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 +#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) @@ -1653,6 +1657,7 @@ enum punit_power_well { #define GMBUS_CYCLE_INDEX (2<<25) #define GMBUS_CYCLE_STOP (4<<25) #define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_BYTE_COUNT_MAX 256U #define GMBUS_SLAVE_INDEX_SHIFT 8 #define GMBUS_SLAVE_ADDR_SHIFT 1 #define GMBUS_SLAVE_READ (1<<0) @@ -5763,6 +5768,7 @@ enum punit_power_well { #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) #define HALF_SLICE_CHICKEN3 0xe184 +#define HSW_SAMPLE_C_PERFORMANCE (1<<9) #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 043123c77a1f..e22b0e825de2 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -328,6 +328,10 @@ int i915_save_state(struct drm_device *dev) } } + if (IS_GEN4(dev)) + pci_read_config_word(dev->pdev, GCDGMBUS, + &dev_priv->regfile.saveGCDGMBUS); + /* Cache mode state */ if (INTEL_INFO(dev)->gen < 7) dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); @@ -356,6 +360,10 @@ int i915_restore_state(struct drm_device *dev) mutex_lock(&dev->struct_mutex); i915_gem_restore_fences(dev); + + if (IS_GEN4(dev)) + pci_write_config_word(dev->pdev, GCDGMBUS, + dev_priv->regfile.saveGCDGMBUS); i915_restore_display(dev); if (!drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index a4bd90f36a03..d96b152a6e04 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -41,7 +41,7 @@ find_section(struct bdb_header *bdb, int section_id) { u8 *base = (u8 *)bdb; int index = 0; - u16 total, current_size; + u32 total, current_size; u8 current_id; /* skip to first section */ @@ -56,6 +56,10 @@ find_section(struct bdb_header *bdb, int section_id) current_size = *((u16 *)(base + index)); index += 2; + /* The MIPI Sequence Block v3+ has a separate size field. */ + if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3) + current_size = *((const u32 *)(base + index + 1)); + if (index + current_size > total) return NULL; @@ -794,6 +798,12 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb) return; } + /* Fail gracefully for forward incompatible sequence block. */ + if (sequence->version >= 3) { + DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n"); + return; + } + DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); block_size = get_blocksize(sequence); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9212e6504e0f..f0e8e2a2c547 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -311,8 +311,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = true; /* LPT FDI RX only supports 8bpc. 
*/ - if (HAS_PCH_LPT(dev)) + if (HAS_PCH_LPT(dev)) { + if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { + DRM_DEBUG_KMS("LPT only supports 24bpp\n"); + return false; + } + pipe_config->pipe_bpp = 24; + } /* FDI must always be 2.7 GHz */ if (HAS_DDI(dev)) { diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b63d4fa204a3..4b476aa4ab05 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1515,12 +1515,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, hsw_ddi_clock_get(encoder, pipe_config); } -static void intel_ddi_destroy(struct drm_encoder *encoder) -{ - /* HDMI has nothing special to destroy, so we can go with this. */ - intel_dp_encoder_destroy(encoder); -} - static bool intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { @@ -1539,7 +1533,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, } static const struct drm_encoder_funcs intel_ddi_funcs = { - .destroy = intel_ddi_destroy, + .reset = intel_dp_encoder_reset, + .destroy = intel_dp_encoder_destroy, }; static struct intel_connector * @@ -1612,6 +1607,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->post_disable = intel_ddi_post_disable; intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_encoder->get_config = intel_ddi_get_config; + intel_encoder->suspend = intel_dp_encoder_suspend; intel_dig_port->port = port; intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9cb5c95d5898..c2d76fed3abf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1650,6 +1650,8 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); } + I915_WRITE(reg, dpll); + /* Wait for the clocks to stabilize. 
*/ POSTING_READ(reg); udelay(150); @@ -2358,13 +2360,19 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_gem_object *obj = NULL; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - u32 base = plane_config->base; + u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); + u32 size_aligned = round_up(plane_config->base + plane_config->size, + PAGE_SIZE); + + size_aligned -= base_aligned; if (plane_config->size == 0) return false; - obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, - plane_config->size); + obj = i915_gem_object_create_stolen_for_preallocated(dev, + base_aligned, + base_aligned, + size_aligned); if (!obj) return false; @@ -6383,8 +6391,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height); + plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, @@ -7424,8 +7431,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height); + plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, @@ -10019,7 +10025,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (obj->tiling_mode != work->old_fb_obj->tiling_mode) /* vlv: DISPLAY_FLIP fails to change tiling */ ring = NULL; - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { ring = &dev_priv->ring[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { ring = obj->ring; @@ -10180,11 +10186,21 @@ connected_sink_compute_bpp(struct intel_connector *connector, pipe_config->pipe_bpp = connector->base.display_info.bpc*3; } - /* Clamp bpp to 8 on screens without EDID 1.4 */ - if (connector->base.display_info.bpc == 0 && bpp > 24) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", - bpp); - pipe_config->pipe_bpp = 24; + /* Clamp bpp to default limit on screens without EDID 1.4 */ + if (connector->base.display_info.bpc == 0) { + int type = connector->base.connector_type; + int clamp_bpp = 24; + + /* Fall back to 18 bpp when DP sink capability is unknown. */ + if (type == DRM_MODE_CONNECTOR_DisplayPort || + type == DRM_MODE_CONNECTOR_eDP) + clamp_bpp = 18; + + if (bpp > clamp_bpp) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", + bpp, clamp_bpp); + pipe_config->pipe_bpp = clamp_bpp; + } } } @@ -12895,6 +12911,9 @@ static struct intel_quirk intel_quirks[] = { /* HP Chromebook 14 (Celeron 2955U) */ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, + + /* Dell Chromebook 11 */ + { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, }; static void intel_init_quirks(struct drm_device *dev) @@ -12933,11 +12952,7 @@ static void i915_disable_vga(struct drm_device *dev) vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); udelay(300); - /* - * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming - * from S3 without preserving (some of?) the other bits. 
- */ - I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE); + I915_WRITE(vga_reg, VGA_DISP_DISABLE); POSTING_READ(vga_reg); } @@ -13026,8 +13041,6 @@ void intel_modeset_init(struct drm_device *dev) intel_shared_dpll_init(dev); - /* save the BIOS value before clobbering it */ - dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev)); /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4bcd91757321..a915d729c33d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -778,10 +778,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, DP_AUX_CH_CTL_RECEIVE_ERROR)) continue; if (status & DP_AUX_CH_CTL_DONE) - break; + goto done; } - if (status & DP_AUX_CH_CTL_DONE) - break; } if ((status & DP_AUX_CH_CTL_DONE) == 0) { @@ -790,6 +788,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } +done: /* Check for timeout or receive error. * Timeouts occur when the sink is not connected */ @@ -1037,7 +1036,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_dp_encoder = true; pipe_config->has_drrs = false; - pipe_config->has_audio = intel_dp->has_audio; + pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { intel_fixed_panel_mode(intel_connector->panel.fixed_mode, @@ -1879,8 +1878,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, int dotclock; tmp = I915_READ(intel_dp->output_reg); - if (tmp & DP_AUDIO_OUTPUT_ENABLE) - pipe_config->has_audio = true; + + pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { if (tmp & DP_SYNC_HS_HIGH) @@ -3645,8 +3644,6 @@ intel_dp_link_down(struct intel_dp *intel_dp) enum port port = intel_dig_port->port; struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(intel_dig_port->base.base.crtc); uint32_t DP = intel_dp->DP; if (WARN_ON(HAS_DDI(dev))) @@ -3671,8 +3668,6 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (HAS_PCH_IBX(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. @@ -3683,18 +3678,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) */ DP &= ~DP_PIPEB_SELECT; I915_WRITE(intel_dp->output_reg, DP); - - /* Changes to enable or select take place the vblank - * after being written. - */ - if (WARN_ON(crtc == NULL)) { - /* We should never try to disable a port without a crtc - * attached. For paranoia keep the code around for a - * bit. 
*/ - POSTING_READ(intel_dp->output_reg); - msleep(50); - } else - intel_wait_for_vblank(dev, intel_crtc->pipe); + POSTING_READ(intel_dp->output_reg); } DP &= ~DP_AUDIO_OUTPUT_ENABLE; @@ -4439,7 +4423,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } -static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) +void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); @@ -4456,9 +4440,52 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) pps_unlock(intel_dp); } -static void intel_dp_encoder_reset(struct drm_encoder *encoder) +static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; + + lockdep_assert_held(&dev_priv->pps_mutex); + + if (!edp_have_panel_vdd(intel_dp)) + return; + + /* + * The VDD bit needs a power domain reference, so if the bit is + * already enabled when we boot or resume, grab this reference and + * schedule a vdd off, so we don't hold on to the reference + * indefinitely. + */ + DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); + power_domain = intel_display_port_power_domain(&intel_dig_port->base); + intel_display_power_get(dev_priv, power_domain); + + edp_panel_vdd_schedule_off(intel_dp); +} + +void intel_dp_encoder_reset(struct drm_encoder *encoder) { - intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); + struct intel_dp *intel_dp; + + if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) + return; + + intel_dp = enc_to_intel_dp(encoder); + + pps_lock(intel_dp); + + /* + * Read out the current power sequencer assignment, + * in case the BIOS did something with it. + */ + if (IS_VALLEYVIEW(encoder->dev)) + vlv_initial_power_sequencer_setup(intel_dp); + + intel_edp_panel_vdd_sanitize(intel_dp); + + pps_unlock(intel_dp); } static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -4940,37 +4967,6 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port, return downclock_mode; } -void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder) -{ - struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp *intel_dp; - enum intel_display_power_domain power_domain; - - if (intel_encoder->type != INTEL_OUTPUT_EDP) - return; - - intel_dp = enc_to_intel_dp(&intel_encoder->base); - - pps_lock(intel_dp); - - if (!edp_have_panel_vdd(intel_dp)) - goto out; - /* - * The VDD bit needs a power domain reference, so if the bit is - * already enabled when we boot or resume, grab this reference and - * schedule a vdd off, so we don't hold on to the reference - * indefinitely. 
- */ - DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); - power_domain = intel_display_port_power_domain(intel_encoder); - intel_display_power_get(dev_priv, power_domain); - - edp_panel_vdd_schedule_off(intel_dp); - out: - pps_unlock(intel_dp); -} - static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector, struct edp_power_seq *power_seq) @@ -4991,7 +4987,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; - intel_edp_panel_vdd_sanitize(intel_encoder); + pps_lock(intel_dp); + intel_edp_panel_vdd_sanitize(intel_dp); + pps_unlock(intel_dp); /* Cache DPCD and EDID for edp. */ intel_edp_panel_vdd_on(intel_dp); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ba715229a540..c91a4fb596e4 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -35,9 +35,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_dp_mst_helper.h> -#define DIV_ROUND_CLOSEST_ULL(ll, d) \ -({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) - /** * _wait_for - magic (register) wait macro * @@ -925,6 +922,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp); void intel_dp_complete_link_train(struct intel_dp *intel_dp); void intel_dp_stop_link_train(struct intel_dp *intel_dp); void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); +void intel_dp_encoder_reset(struct drm_encoder *encoder); +void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_destroy(struct drm_encoder *encoder); void intel_dp_check_link_status(struct intel_dp *intel_dp); int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); @@ -936,7 +935,6 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, void intel_edp_backlight_on(struct intel_dp *intel_dp); void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); -void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder); void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp); void intel_edp_psr_enable(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index f6bdd44069ce..6ea603ae9055 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -171,7 +171,12 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data) gpio = *data++; /* pull up/down */ - action = *data++; + action = *data++ & 1; + + if (gpio >= ARRAY_SIZE(gtable)) { + DRM_DEBUG_KMS("unknown gpio %u\n", gpio); + goto out; + } function = gtable[gpio].function_reg; pad = gtable[gpio].pad_reg; @@ -190,6 +195,7 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data) vlv_gpio_nc_write(dev_priv, pad, val); mutex_unlock(&dev_priv->dpio_lock); +out: return data; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index b31088a551f2..ae628001fd97 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) } static int -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, - u32 gmbus1_index) +gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, + unsigned short addr, u8 *buf, unsigned int len, + u32 gmbus1_index) { int reg_offset = dev_priv->gpio_mmio_base; - u16 len = msg->len; - u8 
*buf = msg->buf; I915_WRITE(GMBUS1 + reg_offset, gmbus1_index | GMBUS_CYCLE_WAIT | (len << GMBUS_BYTE_COUNT_SHIFT) | - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; @@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, } static int -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + u32 gmbus1_index) { - int reg_offset = dev_priv->gpio_mmio_base; - u16 len = msg->len; u8 *buf = msg->buf; + unsigned int rx_size = msg->len; + unsigned int len; + int ret; + + do { + len = min(rx_size, GMBUS_BYTE_COUNT_MAX); + + ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, + buf, len, gmbus1_index); + if (ret) + return ret; + + rx_size -= len; + buf += len; + } while (rx_size != 0); + + return 0; +} + +static int +gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, + unsigned short addr, u8 *buf, unsigned int len) +{ + int reg_offset = dev_priv->gpio_mmio_base; + unsigned int chunk_size = len; u32 val, loop; val = loop = 0; @@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) I915_WRITE(GMBUS3 + reg_offset, val); I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT | - (msg->len << GMBUS_BYTE_COUNT_SHIFT) | - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | + (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; @@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) if (ret) return ret; } + + return 0; +} + +static int +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +{ + u8 *buf = msg->buf; + unsigned int tx_size = msg->len; + unsigned int len; + int ret; + + do { + len = min(tx_size, GMBUS_BYTE_COUNT_MAX); + + ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len); + if (ret) + return ret; + + buf += len; + tx_size -= len; + } while (tx_size != 0); + return 0; } @@ -389,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - int i, reg_offset; + int i = 0, inc, try = 0, reg_offset; int ret = 0; intel_aux_display_runtime_get(dev_priv); @@ -402,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter, reg_offset = dev_priv->gpio_mmio_base; +retry: I915_WRITE(GMBUS0 + reg_offset, bus->reg0); - for (i = 0; i < num; i++) { + for (; i < num; i += inc) { + inc = 1; if (gmbus_is_index_read(msgs, i, num)) { ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); - i += 1; /* set i to the index of the read xfer */ + inc = 2; /* an index read is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); } else { @@ -479,6 +527,18 @@ clear_err: adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + /* + * Passive adapters sometimes NAK the first probe. Retry the first + * message once on -ENXIO for GMBUS transfers; the bit banging algorithm + * has retries internally. See also the retry loop in + * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. 
+ */ + if (ret == -ENXIO && i == 0 && try++ == 0) { + DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n", + adapter->name); + goto retry; + } + goto out; timeout: diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index bafd38b5703e..a97b83b78ae7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1106,15 +1106,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, cmd = MI_FLUSH_DW + 1; - if (ring == &dev_priv->ring[VCS]) { - if (invalidate_domains & I915_GEM_GPU_DOMAINS) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | - MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; - } else { - if (invalidate_domains & I915_GEM_DOMAIN_RENDER) - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; + /* We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + + if (invalidate_domains & I915_GEM_GPU_DOMAINS) { + cmd |= MI_INVALIDATE_TLB; + if (ring == &dev_priv->ring[VCS]) + cmd |= MI_INVALIDATE_BSD; } intel_logical_ring_emit(ringbuf, cmd); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index c0bbf2172446..b7e80bf190f8 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -809,12 +809,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) static const struct dmi_system_id intel_dual_link_lvds[] = { { .callback = intel_dual_link_lvds_callback, - .ident = "Apple MacBook Pro (Core i5/i7 Series)", + .ident = "Apple MacBook Pro 15\" (2010)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"), + }, + }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2011)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), }, }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2012)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 41b3be217493..ce5a385150c8 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -30,6 +30,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/kernel.h> #include <linux/moduleparam.h> #include "intel_drv.h" @@ -947,7 +948,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) WARN_ON(panel->backlight.max == 0); - if (panel->backlight.level == 0) { + if (panel->backlight.level <= panel->backlight.min) { panel->backlight.level = panel->backlight.max; if (panel->backlight.device) panel->backlight.device->props.brightness = diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ad2fd605f76b..993df2dded42 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2952,6 +2952,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); + memset(active, 0, sizeof(*active)); + active->pipe_enabled = intel_crtc_active(crtc); if (active->pipe_enabled) { @@ -5691,6 +5693,10 @@ 
static void haswell_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_GT_MODE, GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + /* WaSampleCChickenBitEnable:hsw */ + I915_WRITE(HALF_SLICE_CHICKEN3, + _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); + /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -6520,29 +6526,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, chv_set_pipe_power_well(dev_priv, power_well, false); } -static void check_power_well_state(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bool enabled = power_well->ops->is_enabled(dev_priv, power_well); - - if (power_well->always_on || !i915.disable_power_well) { - if (!enabled) - goto mismatch; - - return; - } - - if (enabled != (power_well->count > 0)) - goto mismatch; - - return; - -mismatch: - WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", - power_well->name, power_well->always_on, enabled, - power_well->count, i915.disable_power_well); -} - void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { @@ -6562,8 +6545,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, power_well->ops->enable(dev_priv, power_well); power_well->hw_enabled = true; } - - check_power_well_state(dev_priv, power_well); } power_domains->domain_use_count[domain]++; @@ -6593,8 +6574,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, power_well->hw_enabled = false; power_well->ops->disable(dev_priv, power_well); } - - check_power_well_state(dev_priv, power_well); } mutex_unlock(&power_domains->lock); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 0a80e419b589..9f10b771319f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; /* * TLB invalidate requires a post-sync write. */ flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; + /* Workaround: we must issue a pipe_control with CS-stall bit * set before a pipe_control command that has the state cache * invalidate bit set. */ @@ -2136,6 +2139,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, cmd = MI_FLUSH_DW; if (INTEL_INFO(ring->dev)->gen >= 8) cmd += 1; + + /* We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + /* * Bspec vol 1c.5 - video engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush @@ -2143,8 +2154,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, * Post-Sync Operation field is a value of 1h or 3h." 
*/ if (invalidate & I915_GEM_GPU_DOMAINS) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | - MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; + intel_ring_emit(ring, cmd); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); if (INTEL_INFO(ring->dev)->gen >= 8) { @@ -2239,6 +2250,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, cmd = MI_FLUSH_DW; if (INTEL_INFO(ring->dev)->gen >= 8) cmd += 1; + + /* We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + /* * Bspec vol 1c.3 - blitter engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush @@ -2246,8 +2265,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, * Post-Sync Operation field is a value of 1h or 3h." */ if (invalidate & I915_GEM_DOMAIN_RENDER) - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB; intel_ring_emit(ring, cmd); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); if (INTEL_INFO(ring->dev)->gen >= 8) { diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 01d841ea3140..731b10a09aa0 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -82,7 +82,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, SB_CRRDDA_NP, addr, &val); mutex_unlock(&dev_priv->dpio_lock); @@ -94,7 +94,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, SB_CRWRDA_NP, addr, &val); mutex_unlock(&dev_priv->dpio_lock); } @@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, SB_CRRDDA_NP, reg, &val); return val; @@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg) void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, SB_CRWRDA_NP, reg, &val); } @@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC, SB_CRRDDA_NP, addr, &val); mutex_unlock(&dev_priv->dpio_lock); @@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, SB_CRRDDA_NP, reg, &val); return val; } void vlv_gpio_nc_write(struct drm_i915_private 
*dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, SB_CRWRDA_NP, reg, &val); } u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK, SB_CRRDDA_NP, reg, &val); return val; } void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK, SB_CRWRDA_NP, reg, &val); } u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU, SB_CRRDDA_NP, reg, &val); return val; } void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU, SB_CRWRDA_NP, reg, &val); } u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, SB_CRRDDA_NP, reg, &val); return val; } void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, SB_CRWRDA_NP, reg, &val); } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 07a74ef589bd..4edebce7f213 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1178,7 +1178,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); plane = drm_plane_find(dev, set->plane_id); - if (!plane) { + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { ret = -ENOENT; goto out_unlock; } @@ -1205,7 +1205,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); plane = drm_plane_find(dev, get->plane_id); - if (!plane) { + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { ret = -ENOENT; goto out_unlock; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 918b76163965..b29091b21a76 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -43,8 +43,8 @@ static void assert_device_not_suspended(struct drm_i915_private *dev_priv) { - WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, - "Device suspended\n"); + WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, + "Device suspended\n"); } static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 4415af3666ab..c36b8304042b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -303,14 +303,22 @@ int mgag200_fbdev_init(struct mga_device *mdev) if (ret) return ret; - drm_fb_helper_single_add_all_connectors(&mfbdev->helper); + ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); + if (ret) + goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(mdev->dev); - drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); + ret = 
drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); + if (ret) + goto fini; return 0; + +fini: + drm_fb_helper_fini(&mfbdev->helper); + return ret; } void mgag200_fbdev_fini(struct mga_device *mdev) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 83485ab81ce8..d8e693d1f6d4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1525,6 +1525,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, return MODE_BANDWIDTH; } + if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 || + (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) { + return MODE_H_ILLEGAL; + } + if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 || diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index ab5bfd2d0ebf..956710dd84a4 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -245,17 +245,23 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) goto fail; } - drm_fb_helper_single_add_all_connectors(helper); + ret = drm_fb_helper_single_add_all_connectors(helper); + if (ret) + goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); - drm_fb_helper_initial_config(helper, 32); + ret = drm_fb_helper_initial_config(helper, 32); + if (ret) + goto fini; priv->fbdev = helper; return helper; +fini: + drm_fb_helper_fini(helper); fail: kfree(fbdev); return NULL; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index cd0554f68316..4ff8c334e7c8 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return submit; } +static inline unsigned long __must_check +copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + return __copy_from_user_inatomic(to, from, n); + return -EFAULT; +} + static int submit_lookup_objects(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { @@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, int ret = 0; spin_lock(&file->table_lock); + pagefault_disable(); for (i = 0; i < args->nr_bos; i++) { struct drm_msm_gem_submit_bo submit_bo; @@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, void __user *userptr = to_user_ptr(args->bos + (i * sizeof(submit_bo))); - ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); - if (ret) { - ret = -EFAULT; - goto out_unlock; + ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); + if (unlikely(ret)) { + pagefault_enable(); + spin_unlock(&file->table_lock); + ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); + if (ret) + goto out; + spin_lock(&file->table_lock); + pagefault_disable(); } if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { @@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, } out_unlock: - submit->nr_bos = i; + pagefault_enable(); spin_unlock(&file->table_lock); +out: + submit->nr_bos = i; + return ret; } diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c index ff2b434b3db4..760947e380c9 100644 --- 
a/drivers/gpu/drm/nouveau/core/core/event.c +++ b/drivers/gpu/drm/nouveau/core/core/event.c @@ -26,7 +26,7 @@ void nvkm_event_put(struct nvkm_event *event, u32 types, int index) { - BUG_ON(!spin_is_locked(&event->refs_lock)); + assert_spin_locked(&event->refs_lock); while (types) { int type = __ffs(types); types &= ~(1 << type); if (--event->refs[index * event->types_nr + type] == 0) { @@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index) void nvkm_event_get(struct nvkm_event *event, u32 types, int index) { - BUG_ON(!spin_is_locked(&event->refs_lock)); + assert_spin_locked(&event->refs_lock); while (types) { int type = __ffs(types); types &= ~(1 << type); if (++event->refs[index * event->types_nr + type] == 1) { diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c index d1bcde55e9d7..839a32577680 100644 --- a/drivers/gpu/drm/nouveau/core/core/notify.c +++ b/drivers/gpu/drm/nouveau/core/core/notify.c @@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size) struct nvkm_event *event = notify->event; unsigned long flags; - BUG_ON(!spin_is_locked(&event->list_lock)); + assert_spin_locked(&event->list_lock); BUG_ON(size != notify->size); spin_lock_irqsave(&event->refs_lock, flags); diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c index a75c35ccf25c..165401c4045c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c @@ -24,13 +24,6 @@ #include "nv04.h" -static void -nv4c_mc_msi_rearm(struct nouveau_mc *pmc) -{ - struct nv04_mc_priv *priv = (void *)pmc; - nv_wr08(priv, 0x088050, 0xff); -} - struct nouveau_oclass * nv4c_mc_oclass = &(struct nouveau_mc_oclass) { .base.handle = NV_SUBDEV(MC, 0x4c), @@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) { .fini = _nouveau_mc_fini, }, .intr = nv04_mc_intr, - .msi_rearm = nv4c_mc_msi_rearm, }.base; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index c8ac9482cf2e..a088c96dab5b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -952,10 +952,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify) NV_DEBUG(drm, "%splugged %s\n", plugged ? 
"" : "un", name); + mutex_lock(&drm->dev->mode_config.mutex); if (plugged) drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); else drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + mutex_unlock(&drm->dev->mode_config.mutex); + drm_helper_hpd_irq_event(connector->dev); } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 593ef8a2a069..993c9a0377da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info) return 0; } +static int +nouveau_fbcon_open(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + int ret = pm_runtime_get_sync(drm->dev->dev); + if (ret < 0 && ret != -EACCES) + return ret; + return 0; +} + +static int +nouveau_fbcon_release(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + pm_runtime_put(drm->dev->dev); + return 0; +} + static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = nouveau_fbcon_fillrect, @@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = { static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = cfb_fillrect, @@ -522,12 +546,12 @@ nouveau_fbcon_init(struct drm_device *dev) ret = drm_fb_helper_init(dev, &fbcon->helper, dev->mode_config.num_crtc, 4); - if (ret) { - kfree(fbcon); - return ret; - } + if (ret) + goto free; - drm_fb_helper_single_add_all_connectors(&fbcon->helper); + ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper); + if (ret) + goto fini; if (drm->device.info.ram_size <= 32 * 1024 * 1024) preferred_bpp = 8; @@ -540,8 +564,19 @@ nouveau_fbcon_init(struct drm_device *dev) /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); - drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); + ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); + if (ret) + goto fini; + + if (fbcon->helper.fbdev) + fbcon->helper.fbdev->pixmap.buf_align = 4; return 0; + +fini: + drm_fb_helper_fini(&fbcon->helper); +free: + kfree(fbcon); + return ret; } void diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 36951ee4b157..9fcc77d5923e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -199,11 +199,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nouveau_vma *vma; - if (nvbo->bo.mem.mem_type == TTM_PL_TT) + if (is_power_of_2(nvbo->valid_domains)) + rep->domain = nvbo->valid_domains; + else if (nvbo->bo.mem.mem_type == TTM_PL_TT) rep->domain = NOUVEAU_GEM_DOMAIN_GART; else rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; - rep->offset = nvbo->bo.offset; if (cli->vm) { vma = nouveau_bo_vma_find(nvbo, cli->vm); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 753a6def61e7..3d1cfcb96b6b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -28,6 +28,7 @@ 
#include "nouveau_ttm.h" #include "nouveau_gem.h" +#include "drm_legacy.h" static int nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) { @@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) - return -EINVAL; + return drm_legacy_mmap(filp, vma); return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); } diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 4ef602c5469d..ec974273d1e4 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t fg; uint32_t bg; uint32_t dsize; - uint32_t width; uint32_t *data = (uint32_t *)image->data; int ret; @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 8); - dsize = ALIGN(width * image->height, 32) >> 5; - if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | width); + OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8)); OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dsize) { int iter_len = dsize > 128 ? 128 : dsize; diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 394c89abcc97..8462f72e8819 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NV04(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING(chan, 0); OUT_RING(chan, image->dy); + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 
2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 61246677e8dc..90552420c217 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING (chan, 0); OUT_RING (chan, image->dy); + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 8436c6857cda..d292d24b3a6e 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -334,17 +334,23 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) goto fail; } - drm_fb_helper_single_add_all_connectors(helper); + ret = drm_fb_helper_single_add_all_connectors(helper); + if (ret) + goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); - drm_fb_helper_initial_config(helper, 32); + ret = drm_fb_helper_initial_config(helper, 32); + if (ret) + goto fini; priv->fbdev = helper; return helper; +fini: + drm_fb_helper_fini(helper); fail: kfree(fbdev); return NULL; diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 97823644d347..f33251d67914 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_SURFACE_CMD_CREATE; + cmd->flags = QXL_SURF_FLAG_KEEP_DATA; cmd->u.surface_create.format = surf->surf.format; cmd->u.surface_create.width = surf->surf.width; cmd->u.surface_create.height = surf->surf.height; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 0d1396266857..8e6e73cb187d 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -136,9 +136,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector, *pwidth = head->width; *pheight = head->height; drm_mode_probed_add(connector, mode); + /* remember the last custom size for mode validation */ + qdev->monitors_config_width = mode->hdisplay; + qdev->monitors_config_height = mode->vdisplay; return 1; } +static struct mode_size { + int w; + int h; +} common_modes[] = { + { 640, 480}, + { 720, 480}, + { 800, 600}, + { 848, 480}, + {1024, 768}, + {1152, 768}, + {1280, 720}, + {1280, 800}, + {1280, 854}, + {1280, 960}, + {1280, 1024}, + {1440, 900}, + {1400, 1050}, + {1680, 1050}, + {1600, 1200}, + {1920, 1080}, + {1920, 1200} +}; + static int qxl_add_common_modes(struct drm_connector *connector, unsigned 
pwidth, unsigned pheight) @@ -146,29 +172,6 @@ static int qxl_add_common_modes(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct drm_display_mode *mode = NULL; int i; - struct mode_size { - int w; - int h; - } common_modes[] = { - { 640, 480}, - { 720, 480}, - { 800, 600}, - { 848, 480}, - {1024, 768}, - {1152, 768}, - {1280, 720}, - {1280, 800}, - {1280, 854}, - {1280, 960}, - {1280, 1024}, - {1440, 900}, - {1400, 1050}, - {1680, 1050}, - {1600, 1200}, - {1920, 1080}, - {1920, 1200} - }; - for (i = 0; i < ARRAY_SIZE(common_modes); i++) { mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); @@ -340,10 +343,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, qxl_bo_kunmap(user_bo); + qcrtc->cur_x += qcrtc->hot_spot_x - hot_x; + qcrtc->cur_y += qcrtc->hot_spot_y - hot_y; + qcrtc->hot_spot_x = hot_x; + qcrtc->hot_spot_y = hot_y; + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; - cmd->u.set.position.x = qcrtc->cur_x; - cmd->u.set.position.y = qcrtc->cur_y; + cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x; + cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); @@ -406,8 +414,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_MOVE; - cmd->u.position.x = qcrtc->cur_x; - cmd->u.position.y = qcrtc->cur_y; + cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x; + cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); @@ -598,7 +606,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->hdisplay, adjusted_mode->vdisplay); - if (qcrtc->index == 0) + if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { @@ -806,11 +814,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector) static int qxl_conn_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_device *ddev = connector->dev; + struct qxl_device *qdev = ddev->dev_private; + int i; + /* TODO: is this called for user defined modes? 
(xrandr --add-mode) * TODO: check that the mode fits in the framebuffer */ - DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay, - mode->vdisplay, mode->status); - return MODE_OK; + + if(qdev->monitors_config_width == mode->hdisplay && + qdev->monitors_config_height == mode->vdisplay) + return MODE_OK; + + for (i = 0; i < ARRAY_SIZE(common_modes); i++) { + if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay) + return MODE_OK; + } + return MODE_BAD; } static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) @@ -855,13 +874,15 @@ static enum drm_connector_status qxl_conn_detect( drm_connector_to_qxl_output(connector); struct drm_device *ddev = connector->dev; struct qxl_device *qdev = ddev->dev_private; - int connected; + bool connected = false; /* The first monitor is always connected */ - connected = (output->index == 0) || - (qdev->client_monitors_config && - qdev->client_monitors_config->count > output->index && - qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); + if (!qdev->client_monitors_config) { + if (output->index == 0) + connected = true; + } else + connected = qdev->client_monitors_config->count > output->index && + qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); DRM_DEBUG("#%d connected: %d\n", output->index, connected); if (!connected) diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 7c6cafe21f5f..eef66769245f 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -135,6 +135,8 @@ struct qxl_crtc { int index; int cur_x; int cur_y; + int hot_spot_x; + int hot_spot_y; }; struct qxl_output { @@ -325,6 +327,8 @@ struct qxl_device { struct work_struct fb_work; struct drm_property *hotplug_mode_update_property; + int monitors_config_width; + int monitors_config_height; }; /* forward declaration for QXL_INFO_IO */ diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 3d7c1d00a424..f778c0e8ae3c 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -686,14 +686,24 @@ int qxl_fbdev_init(struct qxl_device *qdev) ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, qxl_num_crtc /* num_crtc - QXL supports just 1 */, QXLFB_CONN_LIMIT); - if (ret) { - kfree(qfbdev); - return ret; - } + if (ret) + goto free; + + ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper); + if (ret) + goto fini; + + ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel); + if (ret) + goto fini; - drm_fb_helper_single_add_all_connectors(&qfbdev->helper); - drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel); return 0; + +fini: + drm_fb_helper_fini(&qfbdev->helper); +free: + kfree(qfbdev); + return ret; } void qxl_fbdev_fini(struct qxl_device *qdev) diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index b110883f8253..3aefaa058f0c 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, qobj = gem_to_qxl_bo(gobj); ret = qxl_release_list_add(release, qobj); - if (ret) + if (ret) { + drm_gem_object_unreference_unlocked(gobj); return NULL; + } return qobj; } @@ -166,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, cmd->command_size)) return -EFAULT; - reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); + reloc_info = kmalloc_array(cmd->relocs_num, + sizeof(struct qxl_reloc_info), GFP_KERNEL); if 
(!reloc_info) return -ENOMEM; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 30d242b25078..e0780bf1138f 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) misc |= ATOM_DOUBLE_CLOCK_MODE; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; @@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) misc |= ATOM_DOUBLE_CLOCK_MODE; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; @@ -582,7 +586,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; /* use frac fb div on RS780/RS880 */ - if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + && !radeon_crtc->ss_enabled) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; @@ -612,7 +617,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (radeon_crtc->ss.refdiv) { radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev) && + rdev->family != CHIP_RS780 && + rdev->family != CHIP_RS880) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; } } @@ -1405,6 +1412,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, (x << 16) | y); viewport_w = crtc->mode.hdisplay; viewport_h = (crtc->mode.vdisplay + 1) & ~1; + if ((rdev->family >= CHIP_BONAIRE) && + (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) + viewport_h *= 2; WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); @@ -1723,6 +1733,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc) static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; struct drm_crtc *test_crtc; struct radeon_crtc *test_radeon_crtc; @@ -1732,6 +1743,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc) test_radeon_crtc = to_radeon_crtc(test_crtc); if (test_radeon_crtc->encoder && ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { + /* PPLL2 is exclusive to UNIPHYA on DCE61 */ + if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) && + test_radeon_crtc->pll_id == ATOM_PPLL2) + continue; /* for DP use the same PLL for all */ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) return test_radeon_crtc->pll_id; @@ -1753,6 +1768,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = 
dev->dev_private; struct drm_crtc *test_crtc; struct radeon_crtc *test_radeon_crtc; u32 adjusted_clock, test_adjusted_clock; @@ -1768,6 +1784,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) test_radeon_crtc = to_radeon_crtc(test_crtc); if (test_radeon_crtc->encoder && !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { + /* PPLL2 is exclusive to UNIPHYA on DCE61 */ + if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) && + test_radeon_crtc->pll_id == ATOM_PPLL2) + continue; /* check if we are already driving this connector with another crtc */ if (test_radeon_crtc->connector == radeon_crtc->connector) { /* if we are, return that pll */ @@ -1851,10 +1871,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) return pll; } /* otherwise, pick one of the plls */ - if ((rdev->family == CHIP_KAVERI) || - (rdev->family == CHIP_KABINI) || + if ((rdev->family == CHIP_KABINI) || (rdev->family == CHIP_MULLINS)) { - /* KB/KV/ML has PPLL1 and PPLL2 */ + /* KB/ML has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1863,7 +1882,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { - /* CI has PPLL0, PPLL1, and PPLL2 */ + /* CI/KV has PPLL0, PPLL1, and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -2154,6 +2173,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL0: /* disable the ppll */ if ((rdev->family == CHIP_ARUBA) || + (rdev->family == CHIP_KAVERI) || (rdev->family == CHIP_BONAIRE) || (rdev->family == CHIP_HAWAII)) atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 11ba9d21b89b..ceab25d54233 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -405,19 +405,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret; + int ret, i; - ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret > 0) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + for (i = 0; i < 7; i++) { + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, + DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - radeon_dp_probe_oui(radeon_connector); + radeon_dp_probe_oui(radeon_connector); - return true; + return true; + } } dig_connector->dpcd[0] = 0; return false; @@ -492,6 +494,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector, struct radeon_connector_atom_dig *dig_connector; int dp_clock; + if ((mode->clock > 340000) && + (!radeon_connector_is_dp12_capable(connector))) + return MODE_CLOCK_HIGH; + if (!radeon_connector->con_priv) return MODE_CLOCK_HIGH; dig_connector = radeon_connector->con_priv; @@ -619,10 +625,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) drm_dp_dpcd_writeb(dp_info->aux, DP_DOWNSPREAD_CTRL, 0); - if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && - (dig->panel_mode == 
DP_PANEL_MODE_INTERNAL_DP2_MODE)) { + if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE) drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1); - } /* set the lane count on the sink */ tmp = dp_info->dp_lane_count; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index b8cd7975f797..e809d2574356 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -119,6 +119,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (dig->backlight_level == 0) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); else { @@ -308,6 +309,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + /* vertical FP must be at least 1 */ + if (mode->crtc_vsync_start == mode->crtc_vdisplay) + adjusted_mode->crtc_vsync_start++; + /* get the native mode for scaling */ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_panel_mode_fixup(encoder, adjusted_mode); @@ -872,8 +877,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo else args.v1.ucLaneNum = 4; - if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; @@ -890,6 +893,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; else args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; + + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; + break; case 2: case 3: @@ -1586,8 +1593,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); } break; case DRM_MODE_DPMS_STANDBY: @@ -1668,8 +1676,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); if (ext_encoder) atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 11a55e9dad7f..c5699b593665 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4729,7 +4729,7 @@ void ci_dpm_disable(struct radeon_device *rdev) ci_enable_spread_spectrum(rdev, false); ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); ci_stop_dpm(rdev); - ci_enable_ds_master_switch(rdev, true); + 
ci_enable_ds_master_switch(rdev, false); ci_enable_ulv(rdev, false); ci_clear_vc(rdev); ci_reset_to_default(rdev); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 89c01fa6dd8e..87073e69fc0d 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3880,7 +3880,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring = &rdev->ring[fence->ring]; u64 addr = rdev->fence_drv[fence->ring].gpu_addr; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* Workaround for cache flush problems. First send a dummy EOP + * event down the pipe with seq one below. + */ + radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + radeon_ring_write(ring, fence->seq - 1); + radeon_ring_write(ring, 0); + + /* Then send the real EOP event down the pipe. */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -4509,6 +4523,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev, WDOORBELL32(ring->doorbell_index, ring->wptr); } +static void cik_compute_stop(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + u32 j, tmp; + + cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); + /* Disable wptr polling. */ + tmp = RREG32(CP_PQ_WPTR_POLL_CNTL); + tmp &= ~WPTR_POLL_EN; + WREG32(CP_PQ_WPTR_POLL_CNTL, tmp); + /* Disable HQD. */ + if (RREG32(CP_HQD_ACTIVE) & 1) { + WREG32(CP_HQD_DEQUEUE_REQUEST, 1); + for (j = 0; j < rdev->usec_timeout; j++) { + if (!(RREG32(CP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + WREG32(CP_HQD_DEQUEUE_REQUEST, 0); + WREG32(CP_HQD_PQ_RPTR, 0); + WREG32(CP_HQD_PQ_WPTR, 0); + } + cik_srbm_select(rdev, 0, 0, 0, 0); +} + /** * cik_cp_compute_enable - enable/disable the compute CP MEs * @@ -4522,6 +4561,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) if (enable) WREG32(CP_MEC_CNTL, 0); else { + /* + * To make hibernation reliable we need to clear compute ring + * configuration before halting the compute ring. 
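The cik_compute_stop() helper added above follows a common MMIO shutdown pattern: request a dequeue, then poll a status bit with a bounded timeout before halting the engine. Below is a minimal user-space sketch of that control flow only; the register names are fakes and USEC_TIMEOUT is an assumed stand-in for rdev->usec_timeout, so this is an illustration of the shape of the code, not the driver itself.

#include <stdbool.h>
#include <stdio.h>

#define USEC_TIMEOUT 100000            /* assumed bound, like rdev->usec_timeout */

static unsigned int hqd_active = 1;    /* stands in for the CP_HQD_ACTIVE register */

static void request_dequeue(void)
{
    hqd_active = 0;                    /* on real hardware this happens asynchronously */
}

static bool stop_compute_queue(void)
{
    unsigned int i;

    request_dequeue();                 /* like writing CP_HQD_DEQUEUE_REQUEST */
    for (i = 0; i < USEC_TIMEOUT; i++) {
        if (!(hqd_active & 1))
            return true;               /* queue drained, safe to halt the MEC */
        /* udelay(1) in the kernel version */
    }
    return false;                      /* timed out; caller falls back to a harder reset */
}

int main(void)
{
    printf("%s\n", stop_compute_queue() ? "stopped" : "timeout");
    return 0;
}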
+ */ + mutex_lock(&rdev->srbm_mutex); + cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); + cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); + mutex_unlock(&rdev->srbm_mutex); + WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; @@ -5750,7 +5798,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* restore context1-15 */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), @@ -6314,6 +6362,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable) } orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data |= 0x00000001; data &= 0xfffffffd; if (orig != data) WREG32(RLC_CGTT_MGCG_OVERRIDE, data); @@ -6345,7 +6394,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable) } } else { orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); - data |= 0x00000002; + data |= 0x00000003; if (orig != data) WREG32(RLC_CGTT_MGCG_OVERRIDE, data); @@ -7294,7 +7343,6 @@ int cik_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; u32 dma_cntl, dma_cntl1; - u32 thermal_int; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -7331,13 +7379,6 @@ int cik_irq_set(struct radeon_device *rdev) cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; - if (rdev->flags & RADEON_IS_IGP) - thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & - ~(THERM_INTH_MASK | THERM_INTL_MASK); - else - thermal_int = RREG32_SMC(CG_THERMAL_INT) & - ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); - /* enable CP interrupts on all rings */ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("cik_irq_set: sw int gfx\n"); @@ -7495,14 +7536,6 @@ int cik_irq_set(struct radeon_device *rdev) hpd6 |= DC_HPDx_INT_EN; } - if (rdev->irq.dpm_thermal) { - DRM_DEBUG("dpm thermal\n"); - if (rdev->flags & RADEON_IS_IGP) - thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; - else - thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; - } - WREG32(CP_INT_CNTL_RING0, cp_int_cntl); WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); @@ -7556,10 +7589,8 @@ int cik_irq_set(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); - if (rdev->flags & RADEON_IS_IGP) - WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); - else - WREG32_SMC(CG_THERMAL_INT, thermal_int); + /* posting read */ + RREG32(SRBM_STATUS); return 0; } diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index d748963af08b..02246acba36a 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) } rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; + + /* FIXME use something else than big hammer but after few days can not + * seem to find good combination so reset SDMA blocks as it seems we + * do not shut them down properly. This fix hibernation and does not + * affect suspend to ram. 
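The VM_CONTEXT1_PAGE_TABLE_END_ADDR change above programs max_pfn - 1 rather than max_pfn, which is consistent with the register holding the last valid page frame inclusively. A toy calculation under that assumption, with a made-up VM size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t max_pfn = 1u << 20;     /* invented: 2^20 pages of 4 KiB = 4 GiB */
    uint32_t start   = 0;
    uint32_t end     = max_pfn - 1;  /* inclusive last valid page frame */

    printf("pages covered: %u\n", end - start + 1);   /* exactly max_pfn */
    return 0;
}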
+ */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); + (void)RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + (void)RREG32(SRBM_SOFT_RESET); } /** diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c index 2fe8cfc966d9..bafdf92a5732 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 85995b4e3338..c674f63d7f14 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4589,6 +4589,9 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); + /* posting read */ + RREG32(SRBM_STATUS); + return 0; } diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 9b42001295ba..85a109e1e56b 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1169,6 +1169,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable) } } +static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable) +{ + u32 thermal_int; + + thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL); + if (enable) + thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; + else + thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK); + WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); + +} + int kv_dpm_enable(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); @@ -1280,8 +1293,7 @@ int kv_dpm_late_enable(struct radeon_device *rdev) DRM_ERROR("kv_set_thermal_temperature_range failed\n"); return ret; } - rdev->irq.dpm_thermal = true; - radeon_irq_set(rdev); + kv_enable_thermal_int(rdev, true); } /* powerdown unused blocks for now */ @@ -1312,6 +1324,7 @@ void kv_dpm_disable(struct radeon_device *rdev) kv_stop_dpm(rdev); kv_enable_ulv(rdev, false); kv_reset_am(rdev); + kv_enable_thermal_int(rdev, false); kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); } @@ -2745,13 +2758,11 @@ int kv_dpm_init(struct radeon_device *rdev) pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; if (radeon_bapm == -1) { - /* There are stability issues reported on with - * bapm enabled on an asrock system. 
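Several hunks in this series (cik_irq_set, evergreen_irq_set, and later r100_irq_set and r600_irq_set) end with a "posting read": after programming the interrupt control registers, some register is read back so that posted MMIO writes are flushed to the device before the function returns. The fragment below only mimics the shape of that idiom with ordinary memory; real MMIO needs ioremap'd pointers and the readl()/writel() accessors, so treat it as a sketch of the pattern.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[2];                 /* stand-in for a mapped register BAR */

static void wreg32(unsigned int idx, uint32_t v)
{
    *(volatile uint32_t *)&fake_regs[idx] = v;   /* may be posted on a real bus */
}

static uint32_t rreg32(unsigned int idx)
{
    return *(volatile uint32_t *)&fake_regs[idx];
}

static void irq_set(uint32_t cntl)
{
    wreg32(0, cntl);
    (void)rreg32(1);   /* posting read: force the write out before returning */
}

int main(void)
{
    irq_set(0x1);
    printf("cntl = 0x%x\n", fake_regs[0]);
    return 0;
}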
- */ - if (rdev->pdev->subsystem_vendor == 0x1849) - pi->bapm_enable = false; - else + /* only enable bapm on KB, ML by default */ + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) pi->bapm_enable = true; + else + pi->bapm_enable = false; } else if (radeon_bapm == 0) { pi->bapm_enable = false; } else { diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 3faee58946dd..3e8a0d634420 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1085,12 +1085,12 @@ static void cayman_gpu_init(struct radeon_device *rdev) if ((rdev->config.cayman.max_backends_per_se == 1) && (rdev->flags & RADEON_IS_IGP)) { - if ((disabled_rb_mask & 3) == 1) { - /* RB0 disabled, RB1 enabled */ - tmp = 0x11111111; - } else { + if ((disabled_rb_mask & 3) == 2) { /* RB1 disabled, RB0 enabled */ tmp = 0x00000000; + } else { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; } } else { tmp = gb_addr_config & NUM_PIPES_MASK; @@ -1269,7 +1269,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) */ for (i = 1; i < 8; i++) { WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), + rdev->vm_manager.max_pfn - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), rdev->vm_manager.saved_table_addr[i]); } @@ -1338,9 +1339,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev) void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl) { - u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; - - WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); + WREG32(SRBM_GFX_CNTL, RINGID(ring)); WREG32(CP_INT_CNTL, cp_int_cntl); } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index b53b31a7b76f..a959cc1e7c8e 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) return r; rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; return radeon_gart_table_ram_alloc(rdev); } @@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev) WREG32(RADEON_AIC_HI_ADDR, 0); } +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) +{ + return addr; +} + void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) + uint64_t entry) { u32 *gtt = rdev->gart.ptr; - gtt[i] = cpu_to_le32(lower_32_bits(addr)); + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } void r100_pci_gart_fini(struct radeon_device *rdev) @@ -722,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev) tmp |= RADEON_FP2_DETECT_MASK; } WREG32(RADEON_GEN_INT_CNTL, tmp); + + /* read back to post the write */ + RREG32(RADEON_GEN_INT_CNTL); + return 0; } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 1bc4704034ce..f3ef6257d669 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) #define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_READABLE (1 << 3) -void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = 
rdev->gart.ptr; - addr = (lower_32_bits(addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24); if (flags & RADEON_GART_PAGE_READ) @@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R300_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) addr |= R300_PTE_UNSNOOPED; + return addr; +} + +void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = rdev->gart.ptr; + /* on x86 we want this to be CPU endian, on powerpc * on powerpc without HW swappers, it'll get swapped on way * into VRAM - so no need for cpu_to_le32 on VRAM tables */ - writel(addr, ((void __iomem *)ptr) + (i * 4)); + writel(entry, ((void __iomem *)ptr) + (i * 4)); } int rv370_pcie_gart_init(struct radeon_device *rdev) @@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; return radeon_gart_table_vram_alloc(rdev); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 56b02927cd3d..ee0868dec2f4 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3787,6 +3787,9 @@ int r600_irq_set(struct radeon_device *rdev) WREG32(RV770_CG_THERMAL_INT, thermal_int); } + /* posting read */ + RREG32(R_000E50_SRBM_STATUS); + return 0; } diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index b5c73df8e202..fdac8d3847dc 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) struct drm_device *dev = rdev->ddev; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; - u32 line_time_us, vblank_lines; + u32 vblank_in_pixels; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { radeon_crtc = to_radeon_crtc(crtc); if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / - radeon_crtc->hw_mode.clock; - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - - radeon_crtc->hw_mode.crtc_vdisplay + - (radeon_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; + vblank_in_pixels = + radeon_crtc->hw_mode.crtc_htotal * + (radeon_crtc->hw_mode.crtc_vblank_end - + radeon_crtc->hw_mode.crtc_vdisplay + + (radeon_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock; break; } } @@ -188,7 +189,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { radeon_crtc = to_radeon_crtc(crtc); if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - vrefresh = radeon_crtc->hw_mode.vrefresh; + vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); break; } } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a9717b3fbf1b..ee38f9aba391 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -245,6 +245,7 @@ bool radeon_get_bios(struct radeon_device *rdev); * Dummy page */ struct radeon_dummy_page { + uint64_t entry; struct page *page; 
dma_addr_t addr; }; @@ -626,6 +627,7 @@ struct radeon_gart { unsigned table_size; struct page **pages; dma_addr_t *pages_addr; + uint64_t *pages_entry; bool ready; }; @@ -1542,6 +1544,7 @@ struct radeon_dpm { int new_active_crtc_count; u32 current_active_crtcs; int current_active_crtc_count; + bool single_display; struct radeon_dpm_dynamic_state dyn_state; struct radeon_dpm_fan fan; u32 tdp_limit; @@ -1625,6 +1628,7 @@ struct radeon_pm { struct device *int_hwmon_dev; /* dpm */ bool dpm_enabled; + bool sysfs_initialized; struct radeon_dpm dpm; }; @@ -1819,8 +1823,9 @@ struct radeon_asic { /* gart */ struct { void (*tlb_flush)(struct radeon_device *rdev); + uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); void (*set_page)(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); } gart; struct { int (*init)(struct radeon_device *rdev); @@ -2818,7 +2823,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) -#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) +#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) +#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 850de57069be..c34ceb8d3d27 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev) DRM_INFO("Forcing AGP to PCIE mode\n"); rdev->flags |= RADEON_IS_PCIE; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; } else { DRM_INFO("Forcing AGP to PCI mode\n"); rdev->flags |= RADEON_IS_PCI; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; } rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; @@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -333,6 +337,20 @@ static struct radeon_asic_ring r300_gfx_ring = { .set_wptr = &r100_gfx_set_wptr, }; +static struct radeon_asic_ring rv515_gfx_ring = { + .ib_execute = &r100_ring_ib_execute, + .emit_fence = &r300_fence_ring_emit, + .emit_semaphore = &r100_semaphore_ring_emit, + .cs_parse = &r300_cs_parse, + .ring_start = &rv515_ring_start, + .ring_test = &r100_ring_test, + .ib_test = &r100_ib_test, + .is_lockup = &r100_gpu_is_lockup, + .get_rptr = 
&r100_gfx_get_rptr, + .get_wptr = &r100_gfx_get_wptr, + .set_wptr = &r100_gfx_set_wptr, +}; + static struct radeon_asic r300_asic = { .init = &r300_init, .fini = &r300_fini, @@ -345,6 +363,7 @@ static struct radeon_asic r300_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -411,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -477,6 +497,7 @@ static struct radeon_asic r420_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -543,6 +564,7 @@ static struct radeon_asic rs400_asic = { .mc_wait_for_idle = &rs400_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -609,6 +631,7 @@ static struct radeon_asic rs600_asic = { .mc_wait_for_idle = &rs600_mc_wait_for_idle, .gart = { .tlb_flush = &rs600_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -677,6 +700,7 @@ static struct radeon_asic rs690_asic = { .mc_wait_for_idle = &rs690_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -745,10 +769,11 @@ static struct radeon_asic rv515_asic = { .mc_wait_for_idle = &rv515_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring + [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -811,10 +836,11 @@ static struct radeon_asic r520_asic = { .mc_wait_for_idle = &r520_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring + [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -905,6 +931,7 @@ static struct radeon_asic r600_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -990,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1081,6 +1109,7 @@ static struct radeon_asic rs780_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1161,7 +1190,7 @@ static struct radeon_asic rs780_asic = { static struct radeon_asic_ring rv770_uvd_ring = { .ib_execute = &uvd_v1_0_ib_execute, .emit_fence = &uvd_v2_2_fence_emit, - .emit_semaphore = &uvd_v1_0_semaphore_emit, + .emit_semaphore = &uvd_v2_2_semaphore_emit, .cs_parse = 
&radeon_uvd_cs_parse, .ring_test = &uvd_v1_0_ring_test, .ib_test = &uvd_v1_0_ib_test, @@ -1185,6 +1214,7 @@ static struct radeon_asic rv770_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1303,6 +1333,7 @@ static struct radeon_asic evergreen_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1395,6 +1426,7 @@ static struct radeon_asic sumo_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1486,6 +1518,7 @@ static struct radeon_asic btc_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1621,6 +1654,7 @@ static struct radeon_asic cayman_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1724,6 +1758,7 @@ static struct radeon_asic trinity_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1857,6 +1892,7 @@ static struct radeon_asic si_asic = { .get_gpu_clock_counter = &si_get_gpu_clock_counter, .gart = { .tlb_flush = &si_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2018,6 +2054,7 @@ static struct radeon_asic ci_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2125,6 +2162,7 @@ static struct radeon_asic kv_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index d8ace5b28a5b..6dd68f663181 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); int r100_asic_reset(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); @@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); extern int r300_cs_parse(struct radeon_cs_parser *p); extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); +extern uint64_t 
rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev); @@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev); extern int rs400_suspend(struct radeon_device *rdev); extern int rs400_resume(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int rs400_gart_init(struct radeon_device *rdev); @@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev); void rs600_irq_disable(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); void rs600_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); @@ -904,6 +908,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int uvd_v2_2_resume(struct radeon_device *rdev); void uvd_v2_2_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); /* uvd v3.1 */ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index df69b92ba164..ad0f9c7882c4 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -436,7 +436,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ - if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && + if (((dev->pdev->device == 0x9802) || + (dev->pdev->device == 0x9805) || + (dev->pdev->device == 0x9806)) && (dev->pdev->subsystem_vendor == 0x1734) && (dev->pdev->subsystem_device == 0x11bd)) { if (*connector_type == DRM_MODE_CONNECTOR_VGA) { @@ -447,14 +449,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } - /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ - if ((dev->pdev->device == 0x9805) && - (dev->pdev->subsystem_vendor == 0x1734) && - (dev->pdev->subsystem_device == 0x11bd)) { - if (*connector_type == DRM_MODE_CONNECTOR_VGA) - return false; - } - return true; } @@ -1134,7 +1128,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) le16_to_cpu(firmware_info->info.usReferenceClock); p1pll->reference_div = 0; - if (crev < 2) + if ((frev < 2) && (crev < 2)) p1pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); else @@ -1143,7 +1137,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) p1pll->pll_out_max = 
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { + if (((frev < 2) && (crev >= 4)) || (frev >= 2)) { p1pll->lcd_pll_out_min = le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_min == 0) @@ -3280,6 +3274,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev, args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; + args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id); args.in.ulSCLKFreq = cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 8bc7d0bbd3c8..868247c22de4 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/delay.h> #include "radeon_acpi.h" @@ -255,6 +256,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) if (!info) return -EIO; kfree(info); + + /* 200ms delay is required after off */ + if (state == 0) + msleep(200); } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 9e7f23dd14bd..87d5fb21cb61 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -34,7 +34,8 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, uint64_t saddr, uint64_t daddr, - int flag, int n) + int flag, int n, + struct reservation_object *resv) { unsigned long start_jiffies; unsigned long end_jiffies; @@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, case RADEON_BENCHMARK_COPY_DMA: fence = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; case RADEON_BENCHMARK_COPY_BLIT: fence = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; default: DRM_ERROR("Unknown copy method\n"); @@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_DMA, n); + RADEON_BENCHMARK_COPY_DMA, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) @@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.blit) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); + RADEON_BENCHMARK_COPY_BLIT, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 63ccb8fa799c..d27e4ccb848c 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) static bool radeon_read_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios, val1, val2; size_t size; rdev->bios = NULL; @@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev) return false; } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + val1 = readb(&bios[0]); + val2 = readb(&bios[1]); + + if (size == 0 || val1 != 0x55 || val2 != 0xaa) { pci_unmap_rom(rdev->pdev, bios); return false; } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); + 
rdev->bios = kzalloc(size, GFP_KERNEL); if (rdev->bios == NULL) { pci_unmap_rom(rdev->pdev, bios); return false; } + memcpy_fromio(rdev->bios, bios, size); pci_unmap_rom(rdev->pdev, bios); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 3e5f6b71f3ad..a9b01bcf7d0a 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { + u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; + + if (hss > lvds->native_mode.hdisplay) + hss = (10 - 1) * 8; + lvds->native_mode.htotal = lvds->native_mode.hdisplay + (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + - (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; + hss; lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + (RBIOS8(tmp + 23) * 8); @@ -3382,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_device == 0x30ae) return; + /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume + * - it hangs on resume inside the dynclk 1 table. + */ + if (rdev->family == CHIP_RS480 && + rdev->pdev->subsystem_vendor == 0x103c && + rdev->pdev->subsystem_device == 0x280a) + return; + /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); if (table) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 26baa9c05f6c..4f686bb6bad7 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -72,6 +72,11 @@ void radeon_connector_hotplug(struct drm_connector *connector) if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); } else if (radeon_dp_needs_link_train(radeon_connector)) { + /* Don't try to start link training before we + * have the dpcd */ + if (!radeon_dp_getdpcd(radeon_connector)) + return; + /* set it to OFF so that drm_helper_connector_dpms() * won't return immediately since the current state * is ON at this point. 
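The radeon_combios LVDS hunk above decodes the horizontal sync offset from two BIOS words and falls back to a small default when the decoded value is wider than the panel itself, which can only be bogus data. A sketch of that sanity check with invented numbers (the real values come from RBIOS16() reads of the BIOS tables):

#include <stdio.h>

int main(void)
{
    unsigned int hdisplay = 1280;
    unsigned int bios_w21 = 1000, bios_w19 = 2;        /* pretend RBIOS16() values */
    unsigned int hss = (bios_w21 - bios_w19 - 1) * 8;  /* decoded sync offset */

    if (hss > hdisplay)            /* wider than the panel: clearly invalid */
        hss = (10 - 1) * 8;        /* fall back to 72 pixels, as in the patch */

    printf("hsync_start = %u\n", hdisplay + hss);
    return 0;
}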
@@ -1944,7 +1949,6 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2175,8 +2179,10 @@ radeon_add_atom_connector(struct drm_device *dev, } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; @@ -2252,7 +2258,6 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2337,10 +2342,13 @@ radeon_add_legacy_connector(struct drm_device *dev, } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->display_info.subpixel_order = subpixel_order; drm_connector_register(connector); } diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 6f377de099f9..a5b7f6f98f5f 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -275,11 +275,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) u32 ring = RADEON_CS_RING_GFX; s32 priority = 0; + INIT_LIST_HEAD(&p->validated); + if (!cs->num_chunks) { return 0; } + /* get chunks */ - INIT_LIST_HEAD(&p->validated); p->idx = 0; p->ib.sa_bo = NULL; p->ib.semaphore = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 995a8b1770dd..6b99d3956baa 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -599,6 +599,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) /* * GPU helpers function. */ + +/** + * radeon_device_is_virtual - check if we are running is a virtual environment + * + * Check if the asic has been passed through to a VM (all asics). + * Used at driver startup. + * Returns true if virtual or false if not. 
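The radeon_cs_parser_init() change above is a small but classic ordering fix: the validated list is initialized before the first early return, so any error or cleanup path that walks the list sees a valid (empty) list instead of uninitialized memory. A stand-alone analogue of that ordering rule, with a trivial list type replacing the kernel's struct list_head:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_list(struct list_head *l) { l->next = l->prev = l; }
static int  is_empty(const struct list_head *l) { return l->next == l; }

static int parser_init(struct list_head *validated, int num_chunks)
{
    init_list(validated);          /* before any return path */
    if (!num_chunks)
        return 0;                  /* cleanup can still iterate safely */
    /* ... build the list of buffers to validate ... */
    return 0;
}

int main(void)
{
    struct list_head validated;

    parser_init(&validated, 0);
    printf("empty: %d\n", is_empty(&validated));
    return 0;
}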
+ */ +static bool radeon_device_is_virtual(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + /** * radeon_card_posted - check if the hw has already been initialized * @@ -612,6 +629,10 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; + /* for pass through, always force asic_init */ + if (radeon_device_is_virtual()) + return false; + /* required for EFI mode on macbook2,1 which uses an r5xx asic */ if (efi_enabled(EFI_BOOT) && (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && @@ -743,6 +764,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev) rdev->dummy_page.page = NULL; return -ENOMEM; } + rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, + RADEON_GART_PAGE_DUMMY); return 0; } @@ -1424,6 +1447,21 @@ int radeon_device_init(struct radeon_device *rdev, if (r) DRM_ERROR("ib ring test failed (%d).\n", r); + /* + * Turks/Thames GPU will freeze whole laptop if DPM is not restarted + * after the CP ring have chew one packet at least. Hence here we stop + * and restart DPM after the radeon_ib_ring_tests(). + */ + if (rdev->pm.dpm_enabled && + (rdev->pm.pm_method == PM_METHOD_DPM) && + (rdev->family == CHIP_TURKS) && + (rdev->flags & RADEON_IS_MOBILITY)) { + mutex_lock(&rdev->pm.mutex); + radeon_dpm_disable(rdev); + radeon_dpm_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + } + if ((radeon_testing & 1)) { if (rdev->accel_working) radeon_test_moves(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 00ead8c2758a..21e6e9745d00 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -959,6 +959,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && pll->flags & RADEON_PLL_USE_REF_DIV) ref_div_max = pll->reference_div; + else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) + /* fix for problems on RS880 */ + ref_div_max = min(pll->max_ref_div, 7u); else ref_div_max = pll->max_ref_div; @@ -1616,18 +1619,8 @@ int radeon_modeset_init(struct radeon_device *rdev) radeon_fbdev_init(rdev); drm_kms_helper_poll_init(rdev->ddev); - if (rdev->pm.dpm_enabled) { - /* do dpm late init */ - ret = radeon_pm_late_init(rdev); - if (ret) { - rdev->pm.dpm_enabled = false; - DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); - } - /* set the dpm state for PX since there won't be - * a modeset to call this. 
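radeon_device_is_virtual() above keys off boot_cpu_has(X86_FEATURE_HYPERVISOR), i.e. the CPUID "hypervisor present" bit (leaf 1, ECX bit 31), so that a GPU passed through to a guest is always re-posted. The same bit can be probed from user space; the sketch below is only meaningful on x86 and returns false elsewhere, mirroring the #ifdef in the patch.

#include <stdbool.h>
#include <stdio.h>
#if defined(__x86_64__) || defined(__i386__)
#include <cpuid.h>
#endif

static bool running_virtual(void)
{
#if defined(__x86_64__) || defined(__i386__)
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return false;
    return ecx & (1u << 31);       /* hypervisor present bit */
#else
    return false;
#endif
}

int main(void)
{
    printf("hypervisor: %s\n", running_virtual() ? "yes" : "no");
    return 0;
}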
- */ - radeon_pm_compute_clocks(rdev); - } + /* do pm late init */ + ret = radeon_pm_late_init(rdev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 6b670b0bc47b..3a297037cc17 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, (rdev->pdev->subsystem_vendor == 0x1734) && (rdev->pdev->subsystem_device == 0x1107)) use_bl = false; +/* Older PPC macs use on-GPU backlight controller */ +#ifndef CONFIG_PPC_PMAC /* disable native backlight control on older asics */ else if (rdev->family < CHIP_R600) use_bl = false; +#endif else use_bl = true; } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 0ea1db83d573..6514ff218995 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -303,7 +303,8 @@ out_unref: void radeon_fb_output_poll_changed(struct radeon_device *rdev) { - drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); + if (rdev->mode_info.rfbdev) + drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); } static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) @@ -343,6 +344,10 @@ int radeon_fbdev_init(struct radeon_device *rdev) int bpp_sel = 32; int ret; + /* don't enable fbdev if no connectors */ + if (list_empty(&rdev->ddev->mode_config.connector_list)) + return 0; + /* select 8 bpp console on RN50 or 16MB cards */ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) bpp_sel = 8; @@ -360,18 +365,27 @@ int radeon_fbdev_init(struct radeon_device *rdev) ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, rdev->num_crtc, RADEONFB_CONN_LIMIT); - if (ret) { - kfree(rfbdev); - return ret; - } + if (ret) + goto free; - drm_fb_helper_single_add_all_connectors(&rfbdev->helper); + ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper); + if (ret) + goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(rdev->ddev); - drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); + ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); + if (ret) + goto fini; + return 0; + +fini: + drm_fb_helper_fini(&rfbdev->helper); +free: + kfree(rfbdev); + return ret; } void radeon_fbdev_fini(struct radeon_device *rdev) @@ -386,7 +400,8 @@ void radeon_fbdev_fini(struct radeon_device *rdev) void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) { - fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); + if (rdev->mode_info.rfbdev) + fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); } int radeon_fbdev_total_size(struct radeon_device *rdev) @@ -401,7 +416,22 @@ int radeon_fbdev_total_size(struct radeon_device *rdev) bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { + if (!rdev->mode_info.rfbdev) + return false; + if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) return true; return false; } + +void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector) +{ + if (rdev->mode_info.rfbdev) + drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector); +} + +void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector) +{ + if (rdev->mode_info.rfbdev) + drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); +} diff --git 
a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 995167025282..8569afaba688 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -1029,37 +1029,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } +struct radeon_wait_cb { + struct fence_cb base; + struct task_struct *task; +}; + +static void +radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) +{ + struct radeon_wait_cb *wait = + container_of(cb, struct radeon_wait_cb, base); + + wake_up_process(wait->task); +} + static signed long radeon_fence_default_wait(struct fence *f, bool intr, signed long t) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; - bool signaled; + struct radeon_wait_cb cb; - fence_enable_sw_signaling(&fence->base); + cb.task = current; - /* - * This function has to return -EDEADLK, but cannot hold - * exclusive_lock during the wait because some callers - * may already hold it. This means checking needs_reset without - * lock, and not fiddling with any gpu internals. - * - * The callback installed with fence_enable_sw_signaling will - * run before our wait_event_*timeout call, so we will see - * both the signaled fence and the changes to needs_reset. - */ + if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) + return t; + + while (t > 0) { + if (intr) + set_current_state(TASK_INTERRUPTIBLE); + else + set_current_state(TASK_UNINTERRUPTIBLE); + + /* + * radeon_test_signaled must be called after + * set_current_state to prevent a race with wake_up_process + */ + if (radeon_test_signaled(fence)) + break; + + if (rdev->needs_reset) { + t = -EDEADLK; + break; + } + + t = schedule_timeout(t); + + if (t > 0 && intr && signal_pending(current)) + t = -ERESTARTSYS; + } + + __set_current_state(TASK_RUNNING); + fence_remove_callback(f, &cb.base); - if (intr) - t = wait_event_interruptible_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - else - t = wait_event_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - - if (t > 0 && !signaled) - return -EDEADLK; return t; } diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 84146d5901aa..c7be612b60c9 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) radeon_bo_unpin(rdev->gart.robj); radeon_bo_unreserve(rdev->gart.robj); rdev->gart.table_addr = gpu_addr; + + if (!r) { + int i; + + /* We might have dropped some GART table updates while it wasn't + * mapped, restore all entries + */ + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); + mb(); + radeon_gart_tlb_flush(rdev); + } + return r; } @@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, unsigned t; unsigned p; int i, j; - u64 page_base; if (!rdev->gart.ready) { WARN(1, "trying to unbind memory from uninitialized GART !\n"); @@ -240,13 +252,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, if (rdev->gart.pages[p]) { rdev->gart.pages[p] = NULL; rdev->gart.pages_addr[p] = rdev->dummy_page.addr; - page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + rdev->gart.pages_entry[t] = 
rdev->dummy_page.entry; if (rdev->gart.ptr) { - radeon_gart_set_page(rdev, t, page_base, - RADEON_GART_PAGE_DUMMY); + radeon_gart_set_page(rdev, t, + rdev->dummy_page.entry); } - page_base += RADEON_GPU_PAGE_SIZE; } } } @@ -274,7 +285,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, { unsigned t; unsigned p; - uint64_t page_base; + uint64_t page_base, page_entry; int i, j; if (!rdev->gart.ready) { @@ -287,12 +298,14 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, for (i = 0; i < pages; i++, p++) { rdev->gart.pages_addr[p] = dma_addr[i]; rdev->gart.pages[p] = pagelist[i]; - if (rdev->gart.ptr) { - page_base = rdev->gart.pages_addr[p]; - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base, flags); - page_base += RADEON_GPU_PAGE_SIZE; + page_base = dma_addr[i]; + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + page_entry = radeon_gart_get_page_entry(page_base, flags); + rdev->gart.pages_entry[t] = page_entry; + if (rdev->gart.ptr) { + radeon_gart_set_page(rdev, t, page_entry); } + page_base += RADEON_GPU_PAGE_SIZE; } } mb(); @@ -340,10 +353,17 @@ int radeon_gart_init(struct radeon_device *rdev) radeon_gart_fini(rdev); return -ENOMEM; } + rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * + rdev->gart.num_gpu_pages); + if (rdev->gart.pages_entry == NULL) { + radeon_gart_fini(rdev); + return -ENOMEM; + } /* set GART entry to point to the dummy page by default */ - for (i = 0; i < rdev->gart.num_cpu_pages; i++) { + for (i = 0; i < rdev->gart.num_cpu_pages; i++) rdev->gart.pages_addr[i] = rdev->dummy_page.addr; - } + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + rdev->gart.pages_entry[i] = rdev->dummy_page.entry; return 0; } @@ -356,15 +376,17 @@ int radeon_gart_init(struct radeon_device *rdev) */ void radeon_gart_fini(struct radeon_device *rdev) { - if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { + if (rdev->gart.ready) { /* unbind pages */ radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); } rdev->gart.ready = false; vfree(rdev->gart.pages); vfree(rdev->gart.pages_addr); + vfree(rdev->gart.pages_entry); rdev->gart.pages = NULL; rdev->gart.pages_addr = NULL; + rdev->gart.pages_entry = NULL; radeon_dummy_page_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index c194497aa586..b4abff67c63f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return 0; } @@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return; } diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 00fc59762e0d..ccedb17580f7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -79,10 +79,17 @@ static void radeon_hotplug_work_func(struct work_struct *work) struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + /* we can race here at startup, some boards seem to trigger + * hotplug irqs when they shouldn't. 
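The radeon_gart changes above introduce a CPU-side shadow of the GART: every mapping is encoded once by ->get_page_entry() and stored in pages_entry[], the hardware table is written only while it is actually mapped, and radeon_gart_table_vram_pin() replays the shadow after re-pinning so no updates are lost. The sketch below models that flow end to end; the entry encoding follows the rv370 layout shown earlier in the diff (address bits 8..31 in the low 24 bits, bits 32..39 in the top byte, plus permission bits), but the flag values, the unsnooped bit, and the table sizes are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define NUM_PAGES      8
#define PTE_UNSNOOPED  (1u << 0)   /* assumed value; not shown in the diff */
#define PTE_WRITEABLE  (1u << 2)   /* R300_PTE_WRITEABLE from the diff */
#define PTE_READABLE   (1u << 3)   /* R300_PTE_READABLE from the diff */

#define PAGE_READ   (1u << 0)      /* stand-ins for RADEON_GART_PAGE_* flags */
#define PAGE_WRITE  (1u << 1)
#define PAGE_SNOOP  (1u << 2)

static uint64_t pages_entry[NUM_PAGES];   /* CPU-side shadow (vmalloc'd in the patch) */
static uint64_t hw_table[NUM_PAGES];      /* stands in for the table in VRAM */
static int table_mapped;

/* rv370-style ->get_page_entry(): pure function, no hardware access */
static uint64_t get_page_entry(uint64_t addr, uint32_t flags)
{
    uint32_t lo = (uint32_t)addr, hi = (uint32_t)(addr >> 32);
    uint32_t entry = (lo >> 8) | ((hi & 0xff) << 24);

    if (flags & PAGE_READ)
        entry |= PTE_READABLE;
    if (flags & PAGE_WRITE)
        entry |= PTE_WRITEABLE;
    if (!(flags & PAGE_SNOOP))
        entry |= PTE_UNSNOOPED;
    return entry;
}

/* ->set_page(): only writes an already-encoded entry */
static void set_page(unsigned int i, uint64_t entry)
{
    hw_table[i] = entry;
}

static void gart_bind(unsigned int i, uint64_t addr, uint32_t flags)
{
    pages_entry[i] = get_page_entry(addr, flags);   /* always update the shadow */
    if (table_mapped)
        set_page(i, pages_entry[i]);
}

static void gart_table_pin(void)   /* like radeon_gart_table_vram_pin() */
{
    unsigned int i;

    table_mapped = 1;
    for (i = 0; i < NUM_PAGES; i++)   /* replay updates made while unmapped */
        set_page(i, pages_entry[i]);
}

int main(void)
{
    gart_bind(0, 0x123456000ULL, PAGE_READ | PAGE_WRITE | PAGE_SNOOP);
    gart_table_pin();
    printf("hw_table[0] = 0x%llx\n", (unsigned long long)hw_table[0]);
    return 0;
}

Splitting the encoding out of the write path is what makes the dummy_page.entry precomputation and the replay loop cheap: the flags never need to be re-interpreted once the shadow entry exists.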
*/ + if (!rdev->mode_info.mode_config_initialized) + return; + + mutex_lock(&mode_config->mutex); if (mode_config->num_connector) { list_for_each_entry(connector, &mode_config->connector_list, head) radeon_connector_hotplug(connector); } + mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 03586763ee86..46bcd5d38a1e 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -540,6 +540,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file else *value = 1; break; + case RADEON_INFO_VA_UNMAP_WORKING: + *value = true; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; @@ -598,14 +601,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; } - vm = &fpriv->vm; - r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - return r; - } - if (rdev->accel_working) { + vm = &fpriv->vm; + r = radeon_vm_init(rdev, vm); + if (r) { + kfree(fpriv); + return r; + } + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) { radeon_vm_fini(rdev, vm); @@ -663,9 +666,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev, radeon_vm_bo_rmv(rdev, vm->ib_bo_va); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); } + radeon_vm_fini(rdev, vm); } - radeon_vm_fini(rdev, vm); kfree(fpriv); file_priv->driver_priv = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index a69bd441dd2d..572b4dbec186 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct radeon_bo *bo; - struct fence *fence; int r; bo = container_of(it, struct radeon_bo, mn_it); @@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, continue; } - fence = reservation_object_get_excl(bo->tbo.resv); - if (fence) { - r = radeon_fence_wait((struct radeon_fence *)fence, false); - if (r) - DRM_ERROR("(%d) failed to wait for user bo\n", r); - } + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, + false, MAX_SCHEDULE_TIMEOUT); + if (r) + DRM_ERROR("(%d) failed to wait for user bo\n", r); radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 04db2fdd8692..3a395f405fff 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -921,6 +921,10 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) void radeon_fb_output_poll_changed(struct radeon_device *rdev); void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id); + +void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector); +void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector); + void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 4c0d786d5c7a..9ec2ddd97edf 100644 --- 
a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> +#include <drm/drm_cache.h> #include "radeon.h" #include "radeon_trace.h" @@ -151,19 +152,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) else rbo->placements[i].lpfn = 0; } - - /* - * Use two-ended allocation depending on the buffer size to - * improve fragmentation quality. - * 512kb was measured as the most optimal number. - */ - if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) && - (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) && - rbo->tbo.mem.size > 512 * 1024) { - for (i = 0; i < c; i++) { - rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; - } - } } int radeon_bo_create(struct radeon_device *rdev, @@ -217,7 +205,25 @@ int radeon_bo_create(struct radeon_device *rdev, /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 */ - bo->flags &= ~RADEON_GEM_GTT_WC; + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); +#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) + /* Don't try to enable write-combining when it can't work, or things + * may be slow + * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 + */ + +#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ + thanks to write-combining + + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " + "better performance thanks to write-combining\n"); + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); +#else + /* For architectures that don't support WC memory, + * mask out the WC flag from the BO + */ + if (!drm_arch_can_wc_memory()) + bo->flags &= ~RADEON_GEM_GTT_WC; #endif radeon_ttm_placement_from_domain(bo, domain); @@ -799,3 +805,22 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) ttm_bo_unreserve(&bo->tbo); return r; } + +/** + * radeon_bo_fence - add fence to buffer object + * + * @bo: buffer object in question + * @fence: fence to add + * @shared: true if fence should be added shared + * + */ +void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, + bool shared) +{ + struct reservation_object *resv = bo->tbo.resv; + + if (shared) + reservation_object_add_shared_fence(resv, &fence->base); + else + reservation_object_add_excl_fence(resv, &fence->base); +} diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 1b8ec7917154..3b0b377f76cb 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -155,6 +155,8 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem); extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); +extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, + bool shared); /* * sub allocation diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 32522cc940a1..ab782e3f5d95 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -704,12 +704,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work) radeon_pm_compute_clocks(rdev); } -static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, - enum radeon_pm_state_type dpm_state) +static bool radeon_dpm_single_display(struct radeon_device *rdev) { - int i; - struct radeon_ps *ps; - 
u32 ui_class; bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? true : false; @@ -719,6 +715,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, single_display = false; } + return single_display; +} + +static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, + enum radeon_pm_state_type dpm_state) +{ + int i; + struct radeon_ps *ps; + u32 ui_class; + bool single_display = radeon_dpm_single_display(rdev); + /* certain older asics have a separare 3D performance state, * so try that first if the user selected performance */ @@ -844,6 +851,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) struct radeon_ps *ps; enum radeon_pm_state_type dpm_state; int ret; + bool single_display = radeon_dpm_single_display(rdev); /* if dpm init failed */ if (!rdev->pm.dpm_enabled) @@ -868,6 +876,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) /* vce just modifies an existing state so force a change */ if (ps->vce_active != rdev->pm.dpm.vce_active) goto force; + /* user has made a display change (such as timing) */ + if (rdev->pm.dpm.single_display != single_display) + goto force; if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { /* for pre-BTC and APUs if the num crtcs changed but state is the same, * all we need to do is update the display configuration. @@ -928,9 +939,6 @@ force: /* update displays */ radeon_dpm_display_configuration_changed(rdev); - rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; - rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; - /* wait for the rings to drain */ for (i = 0; i < RADEON_NUM_RINGS; i++) { struct radeon_ring *ring = &rdev->ring[i]; @@ -946,6 +954,10 @@ force: radeon_dpm_post_set_power_state(rdev); + rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; + rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; + rdev->pm.dpm.single_display = single_display; + if (rdev->asic->dpm.force_performance_level) { if (rdev->pm.dpm.thermal_active) { enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; @@ -1180,14 +1192,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev) INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); if (rdev->pm.num_power_states > 1) { - /* where's the best place to put these? 
*/ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } @@ -1245,20 +1249,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) goto dpm_failed; rdev->pm.dpm_enabled = true; - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - /* XXX: these are noops for dpm but are here for backwards compat */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); } @@ -1287,8 +1277,39 @@ dpm_failed: return ret; } +struct radeon_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; +}; + +/* cards with dpm stability problems */ +static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { + /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ + { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, + /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ + { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, + { 0, 0, 0, 0 }, +}; + int radeon_pm_init(struct radeon_device *rdev) { + struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; + bool disable_dpm = false; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + disable_dpm = true; + break; + } + ++p; + } + /* enable dpm on rv6xx+ */ switch (rdev->family) { case CHIP_RV610: @@ -1344,6 +1365,8 @@ int radeon_pm_init(struct radeon_device *rdev) (!(rdev->flags & RADEON_IS_IGP)) && (!rdev->smc_fw)) rdev->pm.pm_method = PM_METHOD_PROFILE; + else if (disable_dpm && (radeon_dpm == -1)) + rdev->pm.pm_method = PM_METHOD_PROFILE; else if (radeon_dpm == 0) rdev->pm.pm_method = PM_METHOD_PROFILE; else @@ -1366,9 +1389,51 @@ int radeon_pm_late_init(struct radeon_device *rdev) int ret = 0; if (rdev->pm.pm_method == PM_METHOD_DPM) { - mutex_lock(&rdev->pm.mutex); - ret = radeon_dpm_late_enable(rdev); - mutex_unlock(&rdev->pm.mutex); + if (rdev->pm.dpm_enabled) { + if (!rdev->pm.sysfs_initialized) { + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + /* XXX: these are noops for dpm but are here for backwards compat */ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + if (!ret) 
+ rdev->pm.sysfs_initialized = true; + } + + mutex_lock(&rdev->pm.mutex); + ret = radeon_dpm_late_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + if (ret) { + rdev->pm.dpm_enabled = false; + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); + } else { + /* set the dpm state for PX since there won't be + * a modeset to call this. + */ + radeon_pm_compute_clocks(rdev); + } + } + } else { + if ((rdev->pm.num_power_states > 1) && + (!rdev->pm.sysfs_initialized)) { + /* where's the best place to put these? */ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + if (!ret) + rdev->pm.sysfs_initialized = true; + } } return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index c507896aca45..197b157b73d0 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, /* see if we can skip over some allocations */ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + for (i = 0; i < RADEON_NUM_RINGS; ++i) + radeon_fence_ref(fences[i]); + spin_unlock(&sa_manager->wq.lock); r = radeon_fence_wait_any(rdev, fences, false); + for (i = 0; i < RADEON_NUM_RINGS; ++i) + radeon_fence_unref(&fences[i]); spin_lock(&sa_manager->wq.lock); /* if we have nothing to wait for block */ if (r == -ENOENT) { diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 07b506b41008..791818165c76 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); r = PTR_ERR(fence); @@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); r = PTR_ERR(fence); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 8624979afb65..f361af72c548 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, rbo = container_of(bo, struct radeon_bo, tbo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: - if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) + if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); else radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); @@ -212,6 +212,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); + if (radeon_ttm_tt_has_userptr(bo->ttm)) + 
return -EPERM; return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); } @@ -238,8 +240,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, rdev = radeon_get_rdev(bo->bdev); ridx = radeon_copy_ring_index(rdev); - old_start = old_mem->start << PAGE_SHIFT; - new_start = new_mem->start << PAGE_SHIFT; + old_start = (u64)old_mem->start << PAGE_SHIFT; + new_start = (u64)new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { case TTM_PL_VRAM: @@ -568,19 +570,21 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) { struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); struct radeon_ttm_tt *gtt = (void *)ttm; - struct scatterlist *sg; - int i; + struct sg_page_iter sg_iter; int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + /* double check that we don't free the table twice */ + if (!ttm->sg->sgl) + return; + /* free the sg table and pages again */ dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { - struct page *page = sg_page(sg); - + for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) { + struct page *page = sg_page_iter_page(&sg_iter); if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) set_page_dirty(page); @@ -733,7 +737,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { - while (--i) { + while (i--) { pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); gtt->ttm.dma_address[i] = 0; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 11b662469253..db6536f722f2 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -396,6 +396,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) return 0; } +static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, + unsigned stream_type) +{ + switch (stream_type) { + case 0: /* H264 */ + case 1: /* VC1 */ + /* always supported */ + return 0; + + case 3: /* MPEG2 */ + case 4: /* MPEG4 */ + /* only since UVD 3 */ + if (p->rdev->family >= CHIP_PALM) + return 0; + + /* fall through */ + default: + DRM_ERROR("UVD codec not supported by hardware %d!\n", + stream_type); + return -EINVAL; + } +} + static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, unsigned offset, unsigned buf_sizes[]) { @@ -436,50 +459,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - if (msg_type == 1) { - /* it's a decode msg, calc buffer sizes */ - r = radeon_uvd_cs_msg_decode(msg, buf_sizes); - /* calc image size (width * height) */ - img_size = msg[6] * msg[7]; + switch (msg_type) { + case 0: + /* it's a create msg, calc image size (width * height) */ + img_size = msg[7] * msg[8]; + + r = radeon_uvd_validate_codec(p, msg[4]); + radeon_bo_kunmap(bo); + if (r) + return r; + + /* try to alloc a new handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { + DRM_ERROR("Handle 0x%x already in use!\n", handle); + return -EINVAL; + } + + if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { + p->rdev->uvd.filp[i] = p->filp; + p->rdev->uvd.img_size[i] = img_size; + return 0; + } + } + + DRM_ERROR("No more free UVD handles!\n"); + return -EINVAL; + + case 1: + /* it's a decode msg, validate codec and 
calc buffer sizes */ + r = radeon_uvd_validate_codec(p, msg[4]); + if (!r) + r = radeon_uvd_cs_msg_decode(msg, buf_sizes); radeon_bo_kunmap(bo); if (r) return r; - } else if (msg_type == 2) { + /* validate the handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { + if (p->rdev->uvd.filp[i] != p->filp) { + DRM_ERROR("UVD handle collision detected!\n"); + return -EINVAL; + } + return 0; + } + } + + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + return -ENOENT; + + case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); radeon_bo_kunmap(bo); return 0; - } else { - /* it's a create msg, calc image size (width * height) */ - img_size = msg[7] * msg[8]; - radeon_bo_kunmap(bo); - if (msg_type != 0) { - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; - } - - /* it's a create msg, no special handling needed */ - } - - /* create or decode, validate the handle */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (atomic_read(&p->rdev->uvd.handles[i]) == handle) - return 0; - } + default: - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { - p->rdev->uvd.filp[i] = p->filp; - p->rdev->uvd.img_size[i] = img_size; - return 0; - } + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; } - DRM_ERROR("No more free UVD handles!\n"); + BUG(); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 9e85757d5599..1c0022a49486 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, * * @p: parser context * @handle: handle to validate + * @allocated: allocated a new handle? * * Validates the handle and return the found session index or -EINVAL * we we don't have another free session index. 
*/ -int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) +static int radeon_vce_validate_handle(struct radeon_cs_parser *p, + uint32_t handle, bool *allocated) { unsigned i; + *allocated = false; + /* validate the handle */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->rdev->vce.handles[i]) == handle) + if (atomic_read(&p->rdev->vce.handles[i]) == handle) { + if (p->rdev->vce.filp[i] != p->filp) { + DRM_ERROR("VCE handle collision detected!\n"); + return -EINVAL; + } return i; + } } /* handle not found try to alloc a new one */ @@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { p->rdev->vce.filp[i] = p->filp; p->rdev->vce.img_size[i] = 0; + *allocated = true; return i; } } @@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) int radeon_vce_cs_parse(struct radeon_cs_parser *p) { int session_idx = -1; - bool destroyed = false; + bool destroyed = false, created = false, allocated = false; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - int i, r; + int i, r = 0; while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { uint32_t len = radeon_get_ib_value(p, p->idx); @@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) if ((len < 8) || (len & 3)) { DRM_ERROR("invalid VCE command length (%d)!\n", len); - return -EINVAL; + r = -EINVAL; + goto out; } if (destroyed) { DRM_ERROR("No other command allowed after destroy!\n"); - return -EINVAL; + r = -EINVAL; + goto out; } switch (cmd) { case 0x00000001: // session handle = radeon_get_ib_value(p, p->idx + 2); - session_idx = radeon_vce_validate_handle(p, handle); + session_idx = radeon_vce_validate_handle(p, handle, + &allocated); if (session_idx < 0) return session_idx; size = &p->rdev->vce.img_size[session_idx]; @@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) break; case 0x01000001: // create + created = true; + if (!allocated) { + DRM_ERROR("Handle already in use!\n"); + r = -EINVAL; + goto out; + } + *size = radeon_get_ib_value(p, p->idx + 8) * radeon_get_ib_value(p, p->idx + 10) * 8 * 3 / 2; @@ -577,12 +597,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, *size); if (r) - return r; + goto out; r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, *size / 3); if (r) - return r; + goto out; break; case 0x02000001: // destroy @@ -593,7 +613,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, *size * 2); if (r) - return r; + goto out; break; case 0x05000004: // video bitstream buffer @@ -601,36 +621,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, tmp); if (r) - return r; + goto out; break; case 0x05000005: // feedback buffer r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, 4096); if (r) - return r; + goto out; break; default: DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); - return -EINVAL; + r = -EINVAL; + goto out; } if (session_idx == -1) { DRM_ERROR("no session command at start of IB\n"); - return -EINVAL; + r = -EINVAL; + goto out; } p->idx += len / 4; } - if (destroyed) { - /* IB contains a destroy msg, free the handle */ + if (allocated && !created) { + DRM_ERROR("New session without create command!\n"); + r = -ENOENT; + } + +out: + if ((!r && destroyed) || (r && allocated)) { + /* + * IB contains a destroy msg or we have 
allocated an + * handle and got an error, anyway free the handle + */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); } - return 0; + return r; } /** diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index dfde266529e2..0974fa17deb2 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -143,7 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[0].tv.bo = &vm->page_directory->tbo; - list[0].tv.shared = false; + list[0].tv.shared = true; list[0].tiling_flags = 0; list[0].handle = 0; list_add(&list[0].tv.head, head); @@ -157,7 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].tv.bo = &list[idx].robj->tbo; - list[idx].tv.shared = false; + list[idx].tv.shared = true; list[idx].tiling_flags = 0; list[idx].handle = 0; list_add(&list[idx++].tv.head, head); @@ -387,35 +387,25 @@ static void radeon_vm_set_pages(struct radeon_device *rdev, static int radeon_vm_clear_bo(struct radeon_device *rdev, struct radeon_bo *bo) { - struct ttm_validate_buffer tv; - struct ww_acquire_ctx ticket; - struct list_head head; struct radeon_ib ib; unsigned entries; uint64_t addr; int r; - memset(&tv, 0, sizeof(tv)); - tv.bo = &bo->tbo; - tv.shared = false; - - INIT_LIST_HEAD(&head); - list_add(&tv.head, &head); - - r = ttm_eu_reserve_buffers(&ticket, &head, true); - if (r) + r = radeon_bo_reserve(bo, false); + if (r) return r; - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - if (r) - goto error; + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + if (r) + goto error_unreserve; addr = radeon_bo_gpu_offset(bo); entries = radeon_bo_size(bo) / 8; r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256); if (r) - goto error; + goto error_unreserve; ib.length_dw = 0; @@ -425,15 +415,15 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) - goto error; + goto error_free; - ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base); - radeon_ib_free(rdev, &ib); + radeon_bo_fence(bo, ib.fence, false); - return 0; +error_free: + radeon_ib_free(rdev, &ib); -error: - ttm_eu_backoff_reservation(&ticket, &head); +error_unreserve: + radeon_bo_unreserve(bo); return r; } @@ -464,14 +454,14 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, if (soffset) { /* make sure object fit at this offset */ - eoffset = soffset + size; + eoffset = soffset + size - 1; if (soffset >= eoffset) { return -EINVAL; } last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; - if (last_pfn > rdev->vm_manager.max_pfn) { - dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", + if (last_pfn >= rdev->vm_manager.max_pfn) { + dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n", last_pfn, rdev->vm_manager.max_pfn); return -EINVAL; } @@ -481,6 +471,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, } mutex_lock(&vm->mutex); + soffset /= RADEON_GPU_PAGE_SIZE; + eoffset /= RADEON_GPU_PAGE_SIZE; + if (soffset || eoffset) { + struct interval_tree_node *it; + it = interval_tree_iter_first(&vm->va, soffset, eoffset); + if (it && it != &bo_va->it) { + struct radeon_bo_va *tmp; + tmp = container_of(it, struct radeon_bo_va, it); + /* bo and tmp overlap, invalid offset */ 
+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " + "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, + soffset, tmp->bo, tmp->it.start, tmp->it.last); + mutex_unlock(&vm->mutex); + return -EINVAL; + } + } + if (bo_va->it.start || bo_va->it.last) { if (bo_va->addr) { /* add a clone of the bo_va to clear the old address */ @@ -496,6 +503,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, tmp->addr = bo_va->addr; tmp->bo = radeon_bo_ref(bo_va->bo); list_add(&tmp->vm_status, &vm->freed); + + bo_va->addr = 0; } interval_tree_remove(&bo_va->it, &vm->va); @@ -503,23 +512,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, bo_va->it.last = 0; } - soffset /= RADEON_GPU_PAGE_SIZE; - eoffset /= RADEON_GPU_PAGE_SIZE; if (soffset || eoffset) { - struct interval_tree_node *it; - it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); - if (it) { - struct radeon_bo_va *tmp; - tmp = container_of(it, struct radeon_bo_va, it); - /* bo and tmp overlap, invalid offset */ - dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " - "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, - soffset, tmp->bo, tmp->it.start, tmp->it.last); - mutex_unlock(&vm->mutex); - return -EINVAL; - } bo_va->it.start = soffset; - bo_va->it.last = eoffset - 1; + bo_va->it.last = eoffset; interval_tree_insert(&bo_va->it, &vm->va); } @@ -707,6 +702,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, radeon_ib_free(rdev, &ib); return r; } + radeon_bo_fence(pd, ib.fence, false); radeon_fence_unref(&vm->fence); vm->fence = radeon_fence_ref(ib.fence); radeon_fence_unref(&vm->last_flush); @@ -753,9 +749,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev, */ /* NI is optimized for 256KB fragments, SI and newer for 64KB */ - uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? + uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; - uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; + uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80; uint64_t frag_start = ALIGN(pe_start, frag_align); uint64_t frag_end = pe_end & ~(frag_align - 1); @@ -863,6 +861,31 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, } /** + * radeon_vm_fence_pts - fence page tables after an update + * + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * @fence: fence to use + * + * Fence the page tables in the range @start - @end (cayman+). + * + * Global and local mutex must be locked! 
+ */ +static void radeon_vm_fence_pts(struct radeon_vm *vm, + uint64_t start, uint64_t end, + struct radeon_fence *fence) +{ + unsigned i; + + start >>= radeon_vm_block_size; + end = (end - 1) >> radeon_vm_block_size; + + for (i = start; i <= end; ++i) + radeon_bo_fence(vm->page_tables[i].bo, fence, false); +} + +/** * radeon_vm_bo_update - map a bo into the vm page table * * @rdev: radeon_device pointer @@ -974,6 +997,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, radeon_ib_free(rdev, &ib); return r; } + radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence); radeon_fence_unref(&vm->fence); vm->fence = radeon_fence_ref(ib.fence); radeon_ib_free(rdev, &ib); @@ -1053,7 +1077,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev, list_del(&bo_va->bo_list); mutex_lock(&vm->mutex); - interval_tree_remove(&bo_va->it, &vm->va); + if (bo_va->it.start || bo_va->it.last) + interval_tree_remove(&bo_va->it, &vm->va); list_del(&bo_va->vm_status); if (bo_va->addr) { diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev) #define RS400_PTE_WRITEABLE (1 << 2) #define RS400_PTE_READABLE (1 << 3) -void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) { uint32_t entry; - u32 *gtt = rdev->gart.ptr; entry = (lower_32_bits(addr) & PAGE_MASK) | ((upper_32_bits(addr) & 0xff) << 4); @@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, entry |= RS400_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) entry |= RS400_PTE_UNSNOOPED; - entry = cpu_to_le32(entry); - gtt[i] = entry; + return entry; +} + +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + u32 *gtt = rdev->gart.ptr; + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } int rs400_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9acb1c3c005b..039660662ee8 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev) radeon_gart_table_vram_free(rdev); } -void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = (void *)rdev->gart.ptr; - addr = addr & 0xFFFFFFFFFFFFF000ULL; addr |= R600_PTE_SYSTEM; if (flags & RADEON_GART_PAGE_VALID) @@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R600_PTE_WRITEABLE; if (flags & RADEON_GART_PAGE_SNOOP) addr |= R600_PTE_SNOOPED; - writeq(addr, ptr + (i * 8)); + return addr; +} + +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = (void *)rdev->gart.ptr; + writeq(entry, ptr + (i * 8)); } int rs600_irq_set(struct radeon_device *rdev) @@ -689,6 +693,10 @@ int rs600_irq_set(struct radeon_device *rdev) WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); if (ASIC_IS_DCE2(rdev)) WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); + + /* posting read */ + RREG32(R_000040_GEN_INT_CNTL); + return 0; } diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 3cf1e2921545..9ef2064b1c9c 100644 --- 
a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -989,6 +989,9 @@ ((n) & 0x3FFF) << 16) /* UVD */ +#define UVD_SEMA_ADDR_LOW 0xef00 +#define UVD_SEMA_ADDR_HIGH 0xef04 +#define UVD_SEMA_CMD 0xef08 #define UVD_GPCOM_VCPU_CMD 0xef0c #define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA1 0xef14 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 7d5083dc4acb..3ad2e07fb48e 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4285,7 +4285,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) /* empty context1-15 */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); /* Assign the pt base to something valid for now; the pts used for * the VMs are determined by the application and setup and assigned * on the fly in the vm part of radeon_gart.c @@ -6192,6 +6192,9 @@ int si_irq_set(struct radeon_device *rdev) WREG32(CG_THERMAL_INT, thermal_int); + /* posting read */ + RREG32(SRBM_STATUS); + return 0; } @@ -7112,8 +7115,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); if (!vclk || !dclk) { - /* keep the Bypass mode, put PLL to sleep */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* keep the Bypass mode */ return 0; } @@ -7129,8 +7131,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) /* set VCO_MODE to 1 */ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); - /* toggle UPLL_SLEEP to 1 then back to 0 */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* disable sleep mode */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); /* deassert UPLL_RESET */ diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 676e6c2ba90a..1a75d78a2708 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2908,6 +2908,29 @@ static int si_init_smc_spll_table(struct radeon_device *rdev) return ret; } +struct si_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; + u32 max_sclk; + u32 max_mclk; +}; + +/* cards with dpm stability problems */ +static struct si_dpm_quirk si_dpm_quirk_list[] = { + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 }, + { 0, 0, 0, 0 }, +}; + static void si_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_ps *rps) { @@ -2918,7 +2941,65 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, u32 mclk, sclk; u16 vddc, vddci; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; + u32 max_sclk = 0, max_mclk = 0; int i; + struct si_dpm_quirk *p = si_dpm_quirk_list; + + /* limit all SI kickers */ + if (rdev->family == CHIP_PITCAIRN) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->device == 0x6810) || + (rdev->pdev->device == 
0x6811) || + (rdev->pdev->device == 0x6816) || + (rdev->pdev->device == 0x6817) || + (rdev->pdev->device == 0x6806)) + max_mclk = 120000; + } else if (rdev->family == CHIP_VERDE) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6820) || + (rdev->pdev->device == 0x6821) || + (rdev->pdev->device == 0x6822) || + (rdev->pdev->device == 0x6823) || + (rdev->pdev->device == 0x682A) || + (rdev->pdev->device == 0x682B)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (rdev->family == CHIP_OLAND) { + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (rdev->family == CHIP_HAINAN) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6664) || + (rdev->pdev->device == 0x6665) || + (rdev->pdev->device == 0x6667)) { + max_sclk = 75000; + max_mclk = 80000; + } + } + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + max_sclk = p->max_sclk; + max_mclk = p->max_mclk; + break; + } + ++p; + } if ((rdev->pm.dpm.new_active_crtc_count > 1) || ni_dpm_vblank_too_short(rdev)) @@ -2972,6 +3053,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if (ps->performance_levels[i].mclk > max_mclk_vddc) ps->performance_levels[i].mclk = max_mclk_vddc; } + if (max_mclk) { + if (ps->performance_levels[i].mclk > max_mclk) + ps->performance_levels[i].mclk = max_mclk; + } + if (max_sclk) { + if (ps->performance_levels[i].sclk > max_sclk) + ps->performance_levels[i].sclk = max_sclk; + } } /* XXX validate the min clocks required for display */ diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index e72b3cb59358..c6b1cbca47fc 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_semaphore *semaphore, bool emit_wait) { - uint64_t addr = semaphore->gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); - radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); - radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); - radeon_ring_write(ring, emit_wait ? 1 : 0); - - return true; + /* disable semaphores for UVD V1 hardware */ + return false; } /** diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 89193519f8a1..7ed778cec7c6 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, } /** + * uvd_v2_2_semaphore_emit - emit semaphore command + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * @semaphore: semaphore to emit commands for + * @emit_wait: true if we should emit a wait command + * + * Emit a semaphore command (either wait or signal) to the UVD ring. 
+ */ +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); + radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); + radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); + radeon_ring_write(ring, emit_wait ? 1 : 0); + + return true; +} + +/** * uvd_v2_2_resume - memory controller programming * * @rdev: radeon_device pointer diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 59736bb810cd..1218419c12f6 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -152,7 +152,7 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, if (err < 0) return err; - err = get_user(dest->target.offset, &src->cmdbuf.offset); + err = get_user(dest->target.offset, &src->target.offset); if (err < 0) return err; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index ce023fa3e8ae..ab9a4539a446 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -259,16 +259,12 @@ void tegra_bo_free_object(struct drm_gem_object *gem) int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args) { - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); struct tegra_drm *tegra = drm->dev_private; struct tegra_bo *bo; - min_pitch = round_up(min_pitch, tegra->pitch_align); - if (args->pitch < min_pitch) - args->pitch = min_pitch; - - if (args->size < args->pitch * args->height) - args->size = args->pitch * args->height; + args->pitch = round_up(min_pitch, tegra->pitch_align); + args->size = args->pitch * args->height; bo = tegra_bo_create_with_handle(file, drm, args->size, 0, &args->handle); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d395b0bef73b..7370202c2022 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1000,9 +1000,9 @@ out_unlock: return ret; } -static bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags) +bool ttm_bo_mem_compat(struct ttm_placement *placement, + struct ttm_mem_reg *mem, + uint32_t *new_flags) { int i; @@ -1034,6 +1034,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement, return false; } +EXPORT_SYMBOL(ttm_bo_mem_compat); int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 09874d695188..025c429050c0 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, * * @pool: to free the pages from * @free_all: If set to true will free all pages in pool - * @gfp: GFP flags. 
+ * @use_static: Safe to use static buffer **/ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, - gfp_t gfp) + bool use_static) { + static struct page *static_buf[NUM_PAGES_TO_ALLOC]; unsigned long irq_flags; struct page *p; struct page **pages_to_free; @@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); + if (use_static) + pages_to_free = static_buf; + else + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), + GFP_KERNEL); if (!pages_to_free) { pr_err("Failed to allocate memory for pool free operation\n"); return 0; @@ -374,7 +379,8 @@ restart: if (freed_pages) ttm_pages_put(pages_to_free, freed_pages); out: - kfree(pages_to_free); + if (pages_to_free != static_buf) + kfree(pages_to_free); return nr_free; } @@ -383,8 +389,6 @@ out: * * XXX: (dchinner) Deadlock warning! * - * We need to pass sc->gfp_mask to ttm_page_pool_free(). - * * This code is crying out for a shrinker per pool.... */ static unsigned long @@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (shrink_pages == 0) break; pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; - shrink_pages = ttm_page_pool_free(pool, nr_free, - sc->gfp_mask); + /* OK to use static buffer since global mutex is held. */ + shrink_pages = ttm_page_pool_free(pool, nr_free, true); freed += nr_free - shrink_pages; } mutex_unlock(&lock); @@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, } spin_unlock_irqrestore(&pool->lock, irq_flags); if (npages) - ttm_page_pool_free(pool, npages, GFP_KERNEL); + ttm_page_pool_free(pool, npages, false); } /* @@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void) pr_info("Finalizing pool allocator\n"); ttm_pool_mm_shrink_fini(_manager); + /* OK to use static buffer since global mutex is no longer used. */ for (i = 0; i < NUM_POOLS; ++i) - ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, - GFP_KERNEL); + ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); kobject_put(&_manager->kobj); _manager = NULL; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index c96db433f8af..01e1d27eb078 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) * * @pool: to free the pages from * @nr_free: If set to true will free all pages in pool - * @gfp: GFP flags. 
+ * @use_static: Safe to use static buffer **/ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, - gfp_t gfp) + bool use_static) { + static struct page *static_buf[NUM_PAGES_TO_ALLOC]; unsigned long irq_flags; struct dma_page *dma_p, *tmp; struct page **pages_to_free; @@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, npages_to_free, nr_free); } #endif - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); + if (use_static) + pages_to_free = static_buf; + else + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), + GFP_KERNEL); if (!pages_to_free) { pr_err("%s: Failed to allocate memory for pool free operation\n", @@ -502,7 +507,8 @@ restart: if (freed_pages) ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); out: - kfree(pages_to_free); + if (pages_to_free != static_buf) + kfree(pages_to_free); return nr_free; } @@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type) if (pool->type != type) continue; /* Takes a spinlock.. */ - ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL); + /* OK to use static buffer since global mutex is held. */ + ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true); WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); /* This code path is called after _all_ references to the * struct device has been dropped - so nobody should be @@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) /* shrink pool if necessary (only on !is_cached pools)*/ if (npages) - ttm_dma_page_pool_free(pool, npages, GFP_KERNEL); + ttm_dma_page_pool_free(pool, npages, false); ttm->state = tt_unpopulated; } EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); @@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); * * XXX: (dchinner) Deadlock warning! * - * We need to pass sc->gfp_mask to ttm_dma_page_pool_free(). - * * I'm getting sadder as I hear more pathetical whimpers about needing per-pool * shrinkers */ @@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (++idx < pool_offset) continue; nr_free = shrink_pages; - shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, - sc->gfp_mask); + /* OK to use static buffer since global mutex is held. 
*/ + shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); freed += nr_free - shrink_pages; pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 8cbcb4589bd3..cd8d183dcfe5 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -546,7 +546,7 @@ static int udlfb_create(struct drm_fb_helper *helper, return ret; out_gfree: - drm_gem_object_unreference(&ufbdev->ufb.obj->base); + drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); out: return ret; } @@ -589,19 +589,27 @@ int udl_fbdev_init(struct drm_device *dev) ret = drm_fb_helper_init(dev, &ufbdev->helper, 1, 1); - if (ret) { - kfree(ufbdev); - return ret; - - } + if (ret) + goto free; - drm_fb_helper_single_add_all_connectors(&ufbdev->helper); + ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper); + if (ret) + goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); - drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel); + ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel); + if (ret) + goto fini; + return 0; + +fini: + drm_fb_helper_fini(&ufbdev->helper); +free: + kfree(ufbdev); + return ret; } void udl_fbdev_cleanup(struct drm_device *dev) diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 8044f5fb7c49..6de963b70eee 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -51,7 +51,7 @@ udl_gem_create(struct drm_file *file, return ret; } - drm_gem_object_unreference(&obj->base); + drm_gem_object_unreference_unlocked(&obj->base); *handle_p = handle; return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 25f3c250fd98..59e2ae85eed9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -25,6 +25,7 @@ * **************************************************************************/ #include <linux/module.h> +#include <linux/console.h> #include <drm/drmP.h> #include "vmwgfx_drv.h" @@ -406,11 +407,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, if (unlikely(ret != 0)) --dev_priv->num_3d_resources; } else if (unhide_svga) { - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) & ~SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); } mutex_unlock(&dev_priv->release_mutex); @@ -433,13 +432,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, mutex_lock(&dev_priv->release_mutex); if (unlikely(--dev_priv->num_3d_resources == 0)) vmw_release_device(dev_priv); - else if (hide_svga) { - mutex_lock(&dev_priv->hw_mutex); + else if (hide_svga) vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) | SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); - } n3d = (int32_t) dev_priv->num_3d_resources; mutex_unlock(&dev_priv->release_mutex); @@ -600,12 +596,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->dev = dev; dev_priv->vmw_chipset = chipset; dev_priv->last_read_seqno = (uint32_t) -100; - mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); rwlock_init(&dev_priv->resource_lock); ttm_lock_init(&dev_priv->reservation_sem); + spin_lock_init(&dev_priv->hw_lock); + spin_lock_init(&dev_priv->waiter_lock); + spin_lock_init(&dev_priv->cap_lock); for (i = vmw_res_context; 
i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); @@ -626,14 +624,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->enable_fb = enable_fbdev; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); svga_id = vmw_read(dev_priv, SVGA_REG_ID); if (svga_id != SVGA_ID_2) { ret = -ENOSYS; DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); - mutex_unlock(&dev_priv->hw_mutex); goto out_err0; } @@ -683,10 +678,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->prim_bb_mem = dev_priv->vram_size; ret = vmw_dma_masks(dev_priv); - if (unlikely(ret != 0)) { - mutex_unlock(&dev_priv->hw_mutex); + if (unlikely(ret != 0)) goto out_err0; - } /* * Limit back buffer size to VRAM size. Remove this once @@ -695,8 +688,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (dev_priv->prim_bb_mem > dev_priv->vram_size) dev_priv->prim_bb_mem = dev_priv->vram_size; - mutex_unlock(&dev_priv->hw_mutex); - vmw_print_capabilities(dev_priv->capabilities); if (dev_priv->capabilities & SVGA_CAP_GMR2) { @@ -735,32 +726,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_err1; } - ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, - (dev_priv->vram_size >> PAGE_SHIFT)); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed initializing memory manager for VRAM.\n"); - goto out_err2; - } - - dev_priv->has_gmr = true; - if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || - refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, - VMW_PL_GMR) != 0) { - DRM_INFO("No GMR memory available. " - "Graphics memory resources are very limited.\n"); - dev_priv->has_gmr = false; - } - - if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { - dev_priv->has_mob = true; - if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, - VMW_PL_MOB) != 0) { - DRM_INFO("No MOB memory available. " - "3D will be disabled.\n"); - dev_priv->has_mob = false; - } - } - dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, dev_priv->mmio_size); @@ -823,6 +788,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_fman; } + + ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, + (dev_priv->vram_size >> PAGE_SHIFT)); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed initializing memory manager for VRAM.\n"); + goto out_no_vram; + } + + dev_priv->has_gmr = true; + if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || + refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, + VMW_PL_GMR) != 0) { + DRM_INFO("No GMR memory available. " + "Graphics memory resources are very limited.\n"); + dev_priv->has_gmr = false; + } + + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { + dev_priv->has_mob = true; + if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, + VMW_PL_MOB) != 0) { + DRM_INFO("No MOB memory available. " + "3D will be disabled.\n"); + dev_priv->has_mob = false; + } + } + vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. 
*/ @@ -848,6 +840,12 @@ out_no_fifo: vmw_kms_close(dev_priv); out_no_kms: vmw_kms_restore_vga(dev_priv); + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); +out_no_vram: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) @@ -863,12 +861,6 @@ out_err4: iounmap(dev_priv->mmio_virt); out_err3: arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); -out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); @@ -898,6 +890,13 @@ static int vmw_driver_unload(struct drm_device *dev) } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); + + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); + vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); @@ -909,11 +908,6 @@ static int vmw_driver_unload(struct drm_device *dev) ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); @@ -1063,8 +1057,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, vmaster = vmw_master_check(dev, file_priv, flags); if (unlikely(IS_ERR(vmaster))) { - DRM_INFO("IOCTL ERROR %d\n", nr); - return PTR_ERR(vmaster); + ret = PTR_ERR(vmaster); + + if (ret != -ERESTARTSYS) + DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", + nr, ret); + return ret; } ret = ioctl_func(filp, cmd, arg); @@ -1157,9 +1155,7 @@ static int vmw_master_set(struct drm_device *dev, if (unlikely(ret != 0)) return ret; vmw_kms_save_vga(dev_priv); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 0); - mutex_unlock(&dev_priv->hw_mutex); } if (active) { @@ -1193,9 +1189,7 @@ out_no_active_lock: if (!dev_priv->enable_fb) { vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } return ret; } @@ -1230,9 +1224,7 @@ static void vmw_master_drop(struct drm_device *dev, DRM_ERROR("Unable to clean VRAM on master drop.\n"); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } dev_priv->active_master = &dev_priv->fbdev_master; @@ -1364,10 +1356,8 @@ static void vmw_pm_complete(struct device *kdev) struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); (void) vmw_read(dev_priv, SVGA_REG_ID); - mutex_unlock(&dev_priv->hw_mutex); /** * Reclaim 3d reference held by fbdev and potentially @@ -1458,6 +1448,12 @@ static int vmw_probe(struct pci_dev 
*pdev, const struct pci_device_id *ent) static int __init vmwgfx_init(void) { int ret; + +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force()) + return -EINVAL; +#endif + ret = drm_pci_init(&driver, &vmw_pci_driver); if (ret) DRM_ERROR("Failed initializing DRM.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -399,7 +399,8 @@ struct vmw_private { uint32_t memory_size; bool has_gmr; bool has_mob; - struct mutex hw_mutex; + spinlock_t hw_lock; + spinlock_t cap_lock; /* * VGA registers. @@ -449,8 +450,9 @@ struct vmw_private { atomic_t marker_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; - int fence_queue_waiters; /* Protected by hw_mutex */ - int goal_queue_waiters; /* Protected by hw_mutex */ + spinlock_t waiter_lock; + int fence_queue_waiters; /* Protected by waiter_lock */ + int goal_queue_waiters; /* Protected by waiter_lock */ atomic_t fifo_queue_waiters; uint32_t last_read_seqno; spinlock_t irq_lock; @@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) return (struct vmw_master *) master->driver_priv; } +/* + * The locking here is fine-grained, so that it is performed once + * for every read- and write operation. This is of course costly, but we + * don't perform much register access in the timing critical paths anyway. + * Instead we have the extra benefit of being sure that we don't forget + * the hw lock around register accesses. + */ static inline void vmw_write(struct vmw_private *dev_priv, unsigned int offset, uint32_t value) { + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); } static inline uint32_t vmw_read(struct vmw_private *dev_priv, unsigned int offset) { - uint32_t val; + unsigned long irq_flags; + u32 val; + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); + return val; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 596cd6dafd33..8ad66bbd4f28 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2489,7 +2489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = vmw_validate_buffers(dev_priv, sw_context); if (unlikely(ret != 0)) @@ -2533,6 +2533,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, vmw_resource_relocations_free(&sw_context->res_relocations); vmw_fifo_commit(dev_priv, command_size); + mutex_unlock(&dev_priv->binding_mutex); vmw_query_bo_switch_commit(dev_priv, sw_context); ret = vmw_execbuf_fence_commands(file_priv, dev_priv, @@ -2548,7 +2549,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, DRM_ERROR("Fence submission error. 
Syncing.\n"); vmw_resource_list_unreserve(&sw_context->resource_list, false); - mutex_unlock(&dev_priv->binding_mutex); ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, (void *) fence); @@ -2778,13 +2778,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, NULL, arg->command_size, arg->throttle_us, (void __user *)(unsigned long)arg->fence_rep, NULL); - + ttm_read_unlock(&dev_priv->reservation_sem); if (unlikely(ret != 0)) - goto out_unlock; + return ret; vmw_kms_cursor_post_execbuf(dev_priv); -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); - return ret; + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 197164fd7803..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -35,7 +35,7 @@ struct vmw_fence_manager { struct vmw_private *dev_priv; spinlock_t lock; struct list_head fence_list; - struct work_struct work, ping_work; + struct work_struct work; u32 user_fence_size; u32 fence_size; u32 event_fence_action_size; @@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) return "svga"; } -static void vmw_fence_ping_func(struct work_struct *work) -{ - struct vmw_fence_manager *fman = - container_of(work, struct vmw_fence_manager, ping_work); - - vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); -} - static bool vmw_fence_enable_signaling(struct fence *f) { struct vmw_fence_obj *fence = @@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) if (seqno - fence->base.seqno < VMW_FENCE_WRAP) return false; - if (mutex_trylock(&dev_priv->hw_mutex)) { - vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); - mutex_unlock(&dev_priv->hw_mutex); - } else - schedule_work(&fman->ping_work); + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); return true; } @@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) INIT_LIST_HEAD(&fman->fence_list); INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); - INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); fman->fifo_down = true; fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); @@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) bool lists_empty; (void) cancel_work_sync(&fman->work); - (void) cancel_work_sync(&fman->ping_work); spin_lock_irqsave(&fman->lock, irq_flags); lists_empty = list_empty(&fman->fence_list) && @@ -545,35 +531,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence) static void vmw_fence_destroy(struct vmw_fence_obj *fence) { - struct vmw_fence_manager *fman = fman_from_fence(fence); - fence_free(&fence->base); - - /* - * Free kernel space accounting. 
- */ - ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), - fman->fence_size); } int vmw_fence_create(struct vmw_fence_manager *fman, uint32_t seqno, struct vmw_fence_obj **p_fence) { - struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); struct vmw_fence_obj *fence; int ret; - ret = ttm_mem_global_alloc(mem_glob, fman->fence_size, - false, false); - if (unlikely(ret != 0)) - return ret; - fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (unlikely(fence == NULL)) { - ret = -ENOMEM; - goto out_no_object; - } + if (unlikely(fence == NULL)) + return -ENOMEM; ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy); @@ -585,8 +555,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman, out_err_init: kfree(fence); -out_no_object: - ttm_mem_global_free(mem_glob, fman->fence_size); return ret; } @@ -1105,6 +1073,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, if (ret != 0) goto out_no_queue; + return 0; + out_no_queue: event->base.destroy(&event->base); out_no_event: @@ -1180,17 +1150,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, BUG_ON(fence == NULL); - if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME) - ret = vmw_event_fence_action_create(file_priv, fence, - arg->flags, - arg->user_data, - true); - else - ret = vmw_event_fence_action_create(file_priv, fence, - arg->flags, - arg->user_data, - true); - + ret = vmw_event_fence_action_create(file_priv, fence, + arg->flags, + arg->user_data, + true); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to attach event to fence.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) if (!dev_priv->has_mob) return false; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return (result != 0); } @@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); - mutex_lock(&dev_priv->hw_mutex); dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); @@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mb(); vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); - mutex_unlock(&dev_priv->hw_mutex); max = ioread32(fifo_mem + SVGA_FIFO_MAX); min = ioread32(fifo_mem + SVGA_FIFO_MIN); @@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) return vmw_fifo_send_fence(dev_priv, &dummy); } -void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) +void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + static DEFINE_SPINLOCK(ping_lock); + unsigned long irq_flags; + /* + * The ping_lock is needed because we don't have an atomic + * test-and-set of the SVGA_FIFO_BUSY register. 
+ */ + spin_lock_irqsave(&ping_lock, irq_flags); if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); vmw_write(dev_priv, SVGA_REG_SYNC, reason); } -} - -void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) -{ - mutex_lock(&dev_priv->hw_mutex); - - vmw_fifo_ping_host_locked(dev_priv, reason); - - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock_irqrestore(&ping_lock, irq_flags); } void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) ; @@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) vmw_write(dev_priv, SVGA_REG_TRACES, dev_priv->traces_state); - mutex_unlock(&dev_priv->hw_mutex); vmw_marker_queue_takedown(&fifo->marker_queue); if (likely(fifo->static_buffer != NULL)) { @@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, return vmw_fifo_wait_noirq(dev_priv, bytes, interruptible, timeout); - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); outl(SVGA_IRQFLAG_FIFO_PROGRESS, @@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); if (interruptible) ret = wait_event_interruptible_timeout @@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, else if (likely(ret > 0)) ret = 0; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < max_size; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); compat_cap->pairs[i][0] = i; compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return 0; } @@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, if (num > SVGA3D_DEVCAP_MAX) num = SVGA3D_DEVCAP_MAX; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < num; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); } else if (gb_objects) { ret = vmw_fill_compat_cap(dev_priv, bounce, size); if (unlikely(ret != 0)) diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) { - uint32_t busy; - mutex_lock(&dev_priv->hw_mutex); - busy = vmw_read(dev_priv, SVGA_REG_BUSY); - mutex_unlock(&dev_priv->hw_mutex); - - return (busy == 0); + return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); } void vmw_update_seqno(struct vmw_private *dev_priv, @@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, void vmw_seqno_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->fence_queue_waiters++ == 0) { unsigned long irq_flags; @@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->fence_queue_waiters == 0) { unsigned long irq_flags; @@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->goal_queue_waiters++ == 0) { unsigned long irq_flags; @@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->goal_queue_waiters == 0) { unsigned long irq_flags; @@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } int vmw_wait_seqno(struct vmw_private *dev_priv, @@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) return; - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); - mutex_unlock(&dev_priv->hw_mutex); status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 941a7bc0b791..173ec3377e4f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_display_unit *du = vmw_connector_to_du(connector); - mutex_lock(&dev_priv->hw_mutex); num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); - mutex_unlock(&dev_priv->hw_mutex); return 
((vmw_connector_to_du(connector)->unit < num_displays && du->pref_active) ? @@ -2035,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, int i; struct drm_mode_config *mode_config = &dev->mode_config; - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) - return ret; - if (!arg->num_outputs) { struct drm_vmw_rect def_rect = {0, 0, 800, 600}; vmw_du_update_layout(dev_priv, 1, &def_rect); - goto out_unlock; + return 0; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), GFP_KERNEL); - if (unlikely(!rects)) { - ret = -ENOMEM; - goto out_unlock; - } + if (unlikely(!rects)) + return -ENOMEM; user_rects = (void __user *)(unsigned long)arg->rects; ret = copy_from_user(rects, user_rects, rects_size); @@ -2076,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, out_free: kfree(rects); -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); return ret; }
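
A minimal sketch (not part of the patch) of the locking pattern the vmwgfx hunks above converge on: the sleeping hw_mutex is replaced by a spinlock taken inside the register accessors themselves, so register access stays safe from atomic context (for example the fence-signalling path) and callers cannot forget the lock. The names demo_device, demo_write/demo_read and the DEMO_*_PORT offsets below are invented for illustration and are not vmwgfx symbols; only the structure mirrors the change.

#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_INDEX_PORT 0x0     /* hypothetical offsets into the I/O window */
#define DEMO_VALUE_PORT 0x1

struct demo_device {
	unsigned long io_start;  /* base of the I/O port range */
	spinlock_t hw_lock;      /* initialise with spin_lock_init() at probe time */
};

/*
 * The index write and the value access form one indivisible transaction on
 * the hardware's index/value port pair, so both happen under hw_lock.  Using
 * a spinlock with irqsave (rather than a mutex) keeps the helpers usable
 * from any context.
 */
static void demo_write(struct demo_device *dev, u32 offset, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->hw_lock, flags);
	outl(offset, dev->io_start + DEMO_INDEX_PORT);
	outl(value, dev->io_start + DEMO_VALUE_PORT);
	spin_unlock_irqrestore(&dev->hw_lock, flags);
}

static u32 demo_read(struct demo_device *dev, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&dev->hw_lock, flags);
	outl(offset, dev->io_start + DEMO_INDEX_PORT);
	val = inl(dev->io_start + DEMO_VALUE_PORT);
	spin_unlock_irqrestore(&dev->hw_lock, flags);

	return val;
}

The ping path in the fifo hunk applies the same idea to a flag that lives in device memory: there is no atomic test-and-set on the busy word, so a file-local spinlock makes the read-check-write sequence atomic with respect to other pingers. Again a hedged sketch; demo_ping_host and the 0x2 "sync" register offset are made up for illustration.

/* fifo_busy points at the device's busy flag, mapped with ioremap(). */
static void demo_ping_host(struct demo_device *dev, void __iomem *fifo_busy,
			   u32 reason)
{
	static DEFINE_SPINLOCK(ping_lock);
	unsigned long flags;

	spin_lock_irqsave(&ping_lock, flags);
	if (ioread32(fifo_busy) == 0) {
		iowrite32(1, fifo_busy);        /* mark the FIFO busy... */
		demo_write(dev, 0x2, reason);   /* ...then poke the host */
	}
	spin_unlock_irqrestore(&ping_lock, flags);
}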