diff options
author | Linux Build Service Account <lnxbuild@localhost> | 2019-09-15 00:51:11 -0700 |
---|---|---|
committer | Linux Build Service Account <lnxbuild@localhost> | 2019-09-15 00:51:11 -0700 |
commit | b6bcdb74c182560e136ea585e0ba28e12d0b1b6c (patch) | |
tree | 29978d8cafb3294470f397db241d74a26c7fa1ef | |
parent | 845c507fbf9cc1951916f568cb184ded98481b03 (diff) | |
parent | 5143e68f638f24680b5cdb9c405cfdcedb48a256 (diff) |
Merge 5143e68f638f24680b5cdb9c405cfdcedb48a256 on remote branch LA.UM.7.11.r1-03900-NICOBAR.0
Change-Id: I7019178b079947202a9427888dcf9e5def7110cc
-rw-r--r-- | drivers/gpu/drm/msm/dp/dp_debug.c | 18 | ||||
-rw-r--r-- | drivers/gpu/drm/msm/sde/sde_crtc.c | 47 | ||||
-rw-r--r-- | drivers/misc/qseecom.c | 7 | ||||
-rw-r--r-- | drivers/soc/qcom/mem-offline.c | 227 |
4 files changed, 258 insertions, 41 deletions
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index c53172fa035a..82a104c649ca 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -199,8 +199,7 @@ static ssize_t dp_debug_write_dpcd(struct file *file, goto bail; size = min_t(size_t, count, SZ_2K); - - if (size <= char_to_nib) + if (size < 4) goto bail; buf = kzalloc(size, GFP_KERNEL); @@ -230,6 +229,8 @@ static ssize_t dp_debug_write_dpcd(struct file *file, } size -= 4; + if (size == 0) + goto bail; dpcd_size = size / char_to_nib; data_len = dpcd_size; @@ -315,6 +316,7 @@ static ssize_t dp_debug_read_dpcd(struct file *file, debug->aux->dpcd_updated(debug->aux); } + len = min_t(size_t, count, len); if (!copy_to_user(user_buff, buf, len)) *ppos += len; @@ -646,6 +648,7 @@ static ssize_t dp_debug_max_pclk_khz_read(struct file *file, debug->dp_debug.max_pclk_khz, debug->parser->max_pclk_khz); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) { kfree(buf); return -EFAULT; @@ -807,6 +810,7 @@ static ssize_t dp_debug_read_connected(struct file *file, len += snprintf(buf, SZ_8, "%d\n", debug->hpd->hpd_high); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) return -EFAULT; @@ -857,6 +861,7 @@ static ssize_t dp_debug_read_hdcp(struct file *file, len = sizeof(debug->dp_debug.hdcp_status); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, debug->dp_debug.hdcp_status, len)) return -EFAULT; @@ -920,6 +925,7 @@ static ssize_t dp_debug_read_edid_modes(struct file *file, } mutex_unlock(&connector->dev->mode_config.mutex); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) { kfree(buf); rc = -EFAULT; @@ -995,6 +1001,7 @@ static ssize_t dp_debug_read_edid_modes_mst(struct file *file, } mutex_unlock(&connector->dev->mode_config.mutex); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) { kfree(buf); rc = -EFAULT; @@ -1035,6 +1042,7 @@ static ssize_t 
dp_debug_read_mst_con_id(struct file *file, ret = snprintf(buf, max_size, "%u\n", debug->mst_con_id); len += ret; + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) { kfree(buf); rc = -EFAULT; @@ -1098,6 +1106,7 @@ static ssize_t dp_debug_read_mst_conn_info(struct file *file, } mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) { kfree(buf); rc = -EFAULT; @@ -1187,6 +1196,7 @@ static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) goto error; + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) goto error; @@ -1219,6 +1229,7 @@ static ssize_t dp_debug_bw_code_read(struct file *file, len += snprintf(buf + len, (SZ_4K - len), "max_bw_code = %d\n", debug->panel->max_bw_code); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) { kfree(buf); return -EFAULT; @@ -1244,6 +1255,7 @@ static ssize_t dp_debug_tpg_read(struct file *file, len += snprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_state); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) return -EFAULT; @@ -1434,6 +1446,7 @@ static ssize_t dp_debug_read_hdr(struct file *file, goto error; } + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) { kfree(buf); rc = -EFAULT; @@ -1609,6 +1622,7 @@ static ssize_t dp_debug_read_dump(struct file *file, print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE, 16, 4, buf, len, false); + len = min_t(size_t, count, len); if (copy_to_user(user_buff, buf, len)) return -EFAULT; diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index b109a1dac5b4..e693474c2f8c 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -3257,24 +3257,27 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc, SDE_DEBUG("crtc%d\n", crtc->base.id); + 
mutex_lock(&sde_crtc->crtc_lock); + if (!cstate->ds_dirty) { SDE_DEBUG("dest scaler property not set, skip validation\n"); - return 0; + goto end; } if (!kms || !kms->catalog) { SDE_ERROR("crtc%d: invalid parameters\n", crtc->base.id); - return -EINVAL; + ret = -EINVAL; + goto end; } if (!kms->catalog->mdp[0].has_dest_scaler) { SDE_DEBUG("dest scaler feature not supported\n"); - return 0; + goto end; } if (!sde_crtc->num_mixers) { SDE_DEBUG("mixers not allocated\n"); - return 0; + goto end; } ret = _sde_validate_hw_resources(sde_crtc); @@ -3453,10 +3456,12 @@ disable: cstate->ds_dirty = false; } - return 0; + goto end; err: cstate->ds_dirty = false; +end: + mutex_unlock(&sde_crtc->crtc_lock); return ret; } @@ -3576,12 +3581,12 @@ static void _sde_crtc_setup_mixers(struct drm_crtc *crtc) struct sde_crtc *sde_crtc = to_sde_crtc(crtc); struct drm_encoder *enc; + mutex_lock(&sde_crtc->crtc_lock); sde_crtc->num_ctls = 0; sde_crtc->num_mixers = 0; sde_crtc->mixers_swapped = false; memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers)); - mutex_lock(&sde_crtc->crtc_lock); /* Check for mixers on all encoders attached to this crtc */ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { if (enc->crtc != crtc) @@ -5759,7 +5764,11 @@ static int _sde_crtc_get_output_fence(struct drm_crtc *crtc, { struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; - uint32_t offset; + uint32_t offset, i; + struct drm_connector_state *old_conn_state, *new_conn_state; + struct drm_connector *conn; + struct sde_connector *sde_conn = NULL; + struct msm_display_info disp_info; bool is_vid = false; struct drm_encoder *encoder; @@ -5773,6 +5782,29 @@ static int _sde_crtc_get_output_fence(struct drm_crtc *crtc, break; } + /* + * encoder_mask of drm_crtc_state will be zero until atomic_check + * phase completes for first commit of dp. Hence, check for video + * mode capability for current commit from new_connector_state. 
+ */ + if (!state->encoder_mask) { + for_each_oldnew_connector_in_state(state->state, conn, + old_conn_state, new_conn_state, i) { + if (!new_conn_state || new_conn_state->crtc != crtc) + continue; + + sde_conn = to_sde_connector(new_conn_state->connector); + if (sde_conn->display && sde_conn->ops.get_info) { + sde_conn->ops.get_info(conn, &disp_info, + sde_conn->display); + is_vid |= disp_info.capabilities & + MSM_DISPLAY_CAP_VID_MODE; + if (is_vid) + break; + } + } + } + offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET); /* @@ -5790,6 +5822,7 @@ static int _sde_crtc_get_output_fence(struct drm_crtc *crtc, * which will be incremented during the prepare commit phase */ offset++; + SDE_EVT32(DRMID(crtc), is_vid, offset); return sde_fence_create(sde_crtc->output_fence, val, offset); } diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 7210a0613f2e..51116ef07b7b 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -7656,6 +7656,13 @@ static long qseecom_ioctl(struct file *file, break; } case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: { + if ((data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("app loaded query req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } data->type = QSEECOM_CLIENT_APP; mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c index 6e720ee93f6a..f6e8cfee3f9c 100644 --- a/drivers/soc/qcom/mem-offline.c +++ b/drivers/soc/qcom/mem-offline.c @@ -56,6 +56,8 @@ enum memory_states { MAX_STATE, }; +static enum memory_states *mem_sec_state; + static struct mem_offline_mailbox { struct mbox_client cl; struct mbox_chan *mbox; @@ -173,15 +175,155 @@ static int aop_send_msg(unsigned long addr, bool online) return (mbox_send_message(mailbox.mbox, &pkt) < 0); } -static int send_msg(struct memory_notify *mn, bool online) +/* + * When offline_granule >= memory block size, this returns 
the number of + * sections in a offlineable segment. + * When offline_granule < memory block size, returns the sections_per_block. + */ +static unsigned long get_rounded_sections_per_segment(void) +{ + + return max(((offline_granule * SZ_1M) / memory_block_size_bytes()) * + sections_per_block, + (unsigned long)sections_per_block); +} + +static int send_msg(struct memory_notify *mn, bool online, int count) +{ + unsigned long segment_size = offline_granule * SZ_1M; + unsigned long start, base_sec_nr, sec_nr, sections_per_segment; + int ret, idx, i; + + sections_per_segment = get_rounded_sections_per_segment(); + sec_nr = pfn_to_section_nr(SECTION_ALIGN_DOWN(mn->start_pfn)); + idx = (sec_nr - start_section_nr) / sections_per_segment; + base_sec_nr = start_section_nr + (idx * sections_per_segment); + start = section_nr_to_pfn(base_sec_nr); + + for (i = 0; i < count; ++i) { + if (is_rpm_controller) + ret = mem_region_refresh_control(start, + segment_size >> PAGE_SHIFT, + online); + else + ret = aop_send_msg(__pfn_to_phys(start), online); + + if (ret) { + pr_err("PASR: %s %s request addr:0x%llx failed\n", + is_rpm_controller ? "RPM" : "AOP", + online ? "online" : "offline", + __pfn_to_phys(start)); + goto undo; + } + + start = __phys_to_pfn(__pfn_to_phys(start) + segment_size); + } + + return 0; +undo: + start = section_nr_to_pfn(base_sec_nr); + while (i-- > 0) { + int ret; + + if (is_rpm_controller) + ret = mem_region_refresh_control(start, + segment_size >> PAGE_SHIFT, + !online); + else + ret = aop_send_msg(__pfn_to_phys(start), !online); + + if (ret) + panic("Failed to completely online/offline a hotpluggable segment. 
A quasi state of memblock can cause randomn system failures."); + start = __phys_to_pfn(__pfn_to_phys(start) + segment_size); + } + + return ret; +} + +static bool need_to_send_remote_request(struct memory_notify *mn, + enum memory_states request) +{ + int i, idx, cur_idx; + int base_sec_nr, sec_nr; + unsigned long sections_per_segment; + + sections_per_segment = get_rounded_sections_per_segment(); + sec_nr = pfn_to_section_nr(SECTION_ALIGN_DOWN(mn->start_pfn)); + idx = (sec_nr - start_section_nr) / sections_per_segment; + cur_idx = (sec_nr - start_section_nr) / sections_per_block; + base_sec_nr = start_section_nr + (idx * sections_per_segment); + + /* + * For MEM_OFFLINE, don't send the request if there are other online + * blocks in the segment. + * For MEM_ONLINE, don't send the request if there is already one + * online block in the segment. + */ + if (request == MEMORY_OFFLINE || request == MEMORY_ONLINE) { + for (i = base_sec_nr; + i < (base_sec_nr + sections_per_segment); + i += sections_per_block) { + idx = (i - start_section_nr) / sections_per_block; + /* current operating block */ + if (idx == cur_idx) + continue; + if (mem_sec_state[idx] == MEMORY_ONLINE) + goto out; + } + return 1; + } +out: + return 0; +} + +/* + * This returns the number of hotpluggable segments in a memory block. + */ +static int get_num_memblock_hotplug_segments(void) +{ + unsigned long segment_size = offline_granule * SZ_1M; + unsigned long block_size = memory_block_size_bytes(); + + if (segment_size < block_size) { + if (block_size % segment_size) { + pr_warn("PASR is unusable. Offline granule size should be in multiples for memory_block_size_bytes.\n"); + return 0; + } + return block_size / segment_size; + } + + return 1; +} + +static int mem_change_refresh_state(struct memory_notify *mn, + enum memory_states state) { int start = SECTION_ALIGN_DOWN(mn->start_pfn); + unsigned long sec_nr = pfn_to_section_nr(start); + bool online = (state == MEMORY_ONLINE) ? 
true : false; + unsigned long idx = (sec_nr - start_section_nr) / sections_per_block; + int ret, count; + + if (mem_sec_state[idx] == state) { + /* we shouldn't be getting this request */ + pr_warn("mem-offline: state of mem%d block already in %s state. Ignoring refresh state change request\n", + sec_nr, online ? "online" : "offline"); + return 0; + } + + count = get_num_memblock_hotplug_segments(); + if (!count) + return -EINVAL; + + if (!need_to_send_remote_request(mn, state)) + goto out; - if (is_rpm_controller) - return mem_region_refresh_control(start, mn->nr_pages, - online); - else - return aop_send_msg(__pfn_to_phys(start), online); + ret = send_msg(mn, online, count); + if (ret) + return -EINVAL; +out: + mem_sec_state[idx] = state; + return 0; } static int mem_event_callback(struct notifier_block *self, @@ -223,10 +365,9 @@ static int mem_event_callback(struct notifier_block *self, idx) / sections_per_block].fail_count; cur = ktime_get(); - if (send_msg(mn, true)) - pr_err("PASR: %s online request addr:0x%llx failed\n", - is_rpm_controller ? "RPM" : "AOP", - __pfn_to_phys(start)); + if (mem_change_refresh_state(mn, MEMORY_ONLINE)) + return NOTIFY_BAD; + if (!debug_pagealloc_enabled()) { /* Create kernel page-tables */ create_pgtable_mapping(start_addr, end_addr); @@ -252,10 +393,11 @@ static int mem_event_callback(struct notifier_block *self, /* Clear kernel page-tables */ clear_pgtable_mapping(start_addr, end_addr); } - if (send_msg(mn, false)) - pr_err("PASR: %s offline request addr:0x%llx failed\n", - is_rpm_controller ? "RPM" : "AOP", - __pfn_to_phys(start)); + mem_change_refresh_state(mn, MEMORY_OFFLINE); + /* + * Notifying that something went bad at this stage won't + * help since this is the last stage of memory hotplug. 
+ */ delay = ktime_ms_delta(ktime_get(), cur); record_stat(sec_nr, delay, MEMORY_OFFLINE); @@ -266,10 +408,7 @@ static int mem_event_callback(struct notifier_block *self, case MEM_CANCEL_ONLINE: pr_info("mem-offline: MEM_CANCEL_ONLINE: start = 0x%lx end = 0x%lx", start_addr, end_addr); - if (send_msg(mn, false)) - pr_err("PASR: %s online request addr:0x%llx failed\n", - is_rpm_controller ? "RPM" : "AOP", - __pfn_to_phys(start)); + mem_change_refresh_state(mn, MEMORY_OFFLINE); break; default: break; @@ -401,9 +540,6 @@ static struct attribute_group mem_attr_group = { static int mem_sysfs_init(void) { - unsigned int total_blks = (end_section_nr - start_section_nr + 1) / - sections_per_block; - if (start_section_nr == end_section_nr) return -EINVAL; @@ -414,11 +550,6 @@ static int mem_sysfs_init(void) if (sysfs_create_group(kobj, &mem_attr_group)) kobject_put(kobj); - mem_info = kzalloc(sizeof(*mem_info) * total_blks * MAX_STATE, - GFP_KERNEL); - if (!mem_info) - return -ENOMEM; - return 0; } @@ -437,8 +568,9 @@ static int mem_parse_dt(struct platform_device *pdev) return -EINVAL; } offline_granule = be32_to_cpup(val); - if (!offline_granule && !(offline_granule & (offline_granule - 1)) && - offline_granule * SZ_1M < MIN_MEMORY_BLOCK_SIZE) { + if (!offline_granule || (offline_granule & (offline_granule - 1)) || + ((offline_granule * SZ_1M < MIN_MEMORY_BLOCK_SIZE) && + (MIN_MEMORY_BLOCK_SIZE % (offline_granule * SZ_1M)))) { pr_err("mem-offine: invalid granule property\n"); return -EINVAL; } @@ -470,7 +602,8 @@ static struct notifier_block hotplug_memory_callback_nb = { static int mem_offline_driver_probe(struct platform_device *pdev) { - int ret; + unsigned int total_blks; + int ret, i; if (mem_parse_dt(pdev)) return -ENODEV; @@ -482,16 +615,46 @@ static int mem_offline_driver_probe(struct platform_device *pdev) if (ret > 0) pr_err("mem-offline: !!ERROR!! Auto onlining some memory blocks failed. 
System could run with less RAM\n"); - if (mem_sysfs_init()) - return -ENODEV; + total_blks = (end_section_nr - start_section_nr + 1) / + sections_per_block; + mem_info = kcalloc(total_blks * MAX_STATE, sizeof(*mem_info), + GFP_KERNEL); + if (!mem_info) + return -ENOMEM; + + mem_sec_state = kcalloc(total_blks, sizeof(*mem_sec_state), GFP_KERNEL); + if (!mem_sec_state) { + ret = -ENOMEM; + goto err_free_mem_info; + } + + /* we assume that hardware state of mem blocks are online after boot */ + for (i = 0; i < total_blks; i++) + mem_sec_state[i] = MEMORY_ONLINE; + + if (mem_sysfs_init()) { + ret = -ENODEV; + goto err_free_mem_sec_state; + } if (register_hotmemory_notifier(&hotplug_memory_callback_nb)) { pr_err("mem-offline: Registering memory hotplug notifier failed\n"); - return -ENODEV; + ret = -ENODEV; + goto err_sysfs_remove_group; } pr_info("mem-offline: Added memory blocks ranging from mem%lu - mem%lu\n", start_section_nr, end_section_nr); + return 0; + +err_sysfs_remove_group: + sysfs_remove_group(kobj, &mem_attr_group); + kobject_put(kobj); +err_free_mem_sec_state: + kfree(mem_sec_state); +err_free_mem_info: + kfree(mem_info); + return ret; } static const struct of_device_id mem_offline_match_table[] = { |