author     Linux Build Service Account <lnxbuild@localhost>                2020-04-12 01:22:30 -0700
committer  Gerrit - the friendly Code Review server <code-review@localhost>  2020-04-12 01:22:30 -0700
commit     ca1447b8081547e278fcb15945bb9e2a9b251350 (patch)
tree       6a767bbba277640c88f5c7bee52019fcbe7a6c3a
parent     7bc0296c4cda56b535850d186b6e58b266a99afb (diff)
parent     6a6a1f1e12303a961bd97f8d4bfc2b6e25995c54 (diff)

Merge "Diff patch between the mainline and release branch."  (LA.UM.8.13.r1-07300-SAIPAN.0)
Diffstat:
 -rw-r--r--  drivers/gpu/msm/kgsl.c            78
 -rw-r--r--  drivers/gpu/msm/kgsl.h            18
 -rw-r--r--  drivers/gpu/msm/kgsl_iommu.c      46
 -rw-r--r--  drivers/gpu/msm/kgsl_pool.c        3
 -rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c   8
 -rw-r--r--  drivers/platform/msm/gsi/gsi.c     7
 -rw-r--r--  drivers/platform/msm/gsi/gsi.h     2
 -rw-r--r--  include/linux/msm_gsi.h            2
 8 files changed, 121 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 46dc0206f245..5f2824cc2b3d 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -223,15 +223,6 @@ int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
}
EXPORT_SYMBOL(kgsl_readtimestamp);
-/* Scheduled by kgsl_mem_entry_put_deferred() */
-static void _deferred_put(struct work_struct *work)
-{
- struct kgsl_mem_entry *entry =
- container_of(work, struct kgsl_mem_entry, work);
-
- kgsl_mem_entry_put(entry);
-}
-
static struct kgsl_mem_entry *kgsl_mem_entry_create(void)
{
struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
@@ -317,17 +308,10 @@ static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
}
#endif
-void
-kgsl_mem_entry_destroy(struct kref *kref)
+static void mem_entry_destroy(struct kgsl_mem_entry *entry)
{
- struct kgsl_mem_entry *entry = container_of(kref,
- struct kgsl_mem_entry,
- refcount);
unsigned int memtype;
- if (entry == NULL)
- return;
-
/* pull out the memtype before the flags get cleared */
memtype = kgsl_memdesc_usermem_type(&entry->memdesc);
@@ -379,8 +363,34 @@ kgsl_mem_entry_destroy(struct kref *kref)
kfree(entry);
}
+
+static void _deferred_destroy(struct work_struct *work)
+{
+ struct kgsl_mem_entry *entry =
+ container_of(work, struct kgsl_mem_entry, work);
+
+ mem_entry_destroy(entry);
+}
+
+void kgsl_mem_entry_destroy(struct kref *kref)
+{
+ struct kgsl_mem_entry *entry =
+ container_of(kref, struct kgsl_mem_entry, refcount);
+
+ mem_entry_destroy(entry);
+}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
+void kgsl_mem_entry_destroy_deferred(struct kref *kref)
+{
+ struct kgsl_mem_entry *entry =
+ container_of(kref, struct kgsl_mem_entry, refcount);
+
+ INIT_WORK(&entry->work, _deferred_destroy);
+ queue_work(kgsl_driver.mem_workqueue, &entry->work);
+}
+EXPORT_SYMBOL(kgsl_mem_entry_destroy_deferred);
+
/* Allocate a IOVA for memory objects that don't use SVM */
static int kgsl_mem_entry_track_gpuaddr(struct kgsl_device *device,
struct kgsl_process_private *process,
@@ -936,6 +946,13 @@ static struct kgsl_process_private *kgsl_process_private_new(
struct kgsl_process_private *private;
pid_t tgid = task_tgid_nr(current);
+ /*
+ * Flush mem_workqueue to make sure that any lingering
+ * structs (process pagetable etc) are released before
+ * starting over again.
+ */
+ flush_workqueue(kgsl_driver.mem_workqueue);
+
/* Search in the process list */
list_for_each_entry(private, &kgsl_driver.process_list, list) {
if (private->pid == tgid) {
@@ -998,7 +1015,7 @@ static void process_release_memory(struct kgsl_process_private *private)
if (!entry->pending_free) {
entry->pending_free = 1;
spin_unlock(&private->mem_lock);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
} else {
spin_unlock(&private->mem_lock);
}
@@ -2197,7 +2214,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -2215,7 +2232,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -2259,8 +2276,7 @@ static bool gpuobj_free_fence_func(void *priv)
entry->memdesc.gpuaddr, entry->memdesc.size,
entry->memdesc.flags);
- INIT_WORK(&entry->work, _deferred_put);
- queue_work(kgsl_driver.mem_workqueue, &entry->work);
+ kgsl_mem_entry_put_deferred(entry);
return true;
}
@@ -2325,7 +2341,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
else
ret = -EINVAL;
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -2355,7 +2371,7 @@ long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry,
context, param->timestamp);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
kgsl_context_put(context);
return ret;
@@ -3740,7 +3756,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return 0;
}
@@ -3824,7 +3840,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return 0;
}
@@ -4486,7 +4502,7 @@ kgsl_gpumem_vm_close(struct vm_area_struct *vma)
return;
entry->memdesc.useraddr = 0;
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
}
static const struct vm_operations_struct kgsl_gpumem_vm_ops = {
@@ -5163,6 +5179,12 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
}
EXPORT_SYMBOL(kgsl_device_platform_remove);
+static void
+_flush_mem_workqueue(struct work_struct *work)
+{
+ flush_workqueue(kgsl_driver.mem_workqueue);
+}
+
static void kgsl_core_exit(void)
{
kgsl_events_exit();
@@ -5264,6 +5286,8 @@ static int __init kgsl_core_init(void)
kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ INIT_WORK(&kgsl_driver.mem_work, _flush_mem_workqueue);
+
kthread_init_worker(&kgsl_driver.worker);
kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
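
The kgsl.c side of this merge splits the kref release path: kgsl_mem_entry_destroy() still tears the entry down synchronously, while the new kgsl_mem_entry_destroy_deferred() pushes the teardown onto the kgsl-mementry workqueue so callers of kgsl_mem_entry_put_deferred() never block on the heavy free. A minimal sketch of the same kref-plus-workqueue pattern, using generic names rather than the driver's own, could look like this:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Sketch only: illustrates the deferred-release pattern, not KGSL itself. */
struct demo_obj {
	struct kref ref;
	struct work_struct free_work;
	/* ... payload ... */
};

static void demo_obj_free(struct demo_obj *obj)
{
	/* heavy teardown (unmapping, page frees, ...) would happen here */
	kfree(obj);
}

static void demo_obj_free_work(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, free_work);

	demo_obj_free(obj);
}

/* kref release callback that defers the real work to a workqueue */
static void demo_obj_release_deferred(struct kref *ref)
{
	struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

	INIT_WORK(&obj->free_work, demo_obj_free_work);
	queue_work(system_unbound_wq, &obj->free_work);
}

/* Callers that must not block drop their reference through this helper. */
static inline void demo_obj_put_deferred(struct demo_obj *obj)
{
	if (obj)
		kref_put(&obj->ref, demo_obj_release_deferred);
}

The flush_workqueue() call added to kgsl_process_private_new() then has a single place to wait on: draining the workqueue guarantees every deferred release has finished before a process context is reused.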
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index dadd0e209453..e263c310e62b 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -120,6 +120,7 @@ struct kgsl_context;
* @full_cache_threshold: the threshold that triggers a full cache flush
* @workqueue: Pointer to a single threaded workqueue
* @mem_workqueue: Pointer to a workqueue for deferring memory entries
+ * @mem_work: Work struct to schedule mem_workqueue flush
*/
struct kgsl_driver {
struct cdev cdev;
@@ -150,6 +151,7 @@ struct kgsl_driver {
unsigned int full_cache_threshold;
struct workqueue_struct *workqueue;
struct workqueue_struct *mem_workqueue;
+ struct work_struct mem_work;
struct kthread_worker worker;
struct task_struct *worker_thread;
};
@@ -425,6 +427,7 @@ long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
void kgsl_mem_entry_destroy(struct kref *kref);
+void kgsl_mem_entry_destroy_deferred(struct kref *kref);
void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
int *egl_surface_count, int *egl_image_count);
@@ -542,6 +545,21 @@ kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
+/**
+ * kgsl_mem_entry_put_deferred - Puts refcount and triggers deferred
+ * mem_entry destroy when refcount goes to zero.
+ * @entry: memory entry to be put.
+ *
+ * Use this to put a memory entry when we don't want to block
+ * the caller while destroying memory entry.
+ */
+static inline void
+kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
+{
+ if (entry)
+ kref_put(&entry->refcount, kgsl_mem_entry_destroy_deferred);
+}
+
/*
* kgsl_addr_range_overlap() - Checks if 2 ranges overlap
* @gpuaddr1: Start of first address range
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 8ccdc4949752..61c6eb0e9be4 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -2446,13 +2446,37 @@ out:
return ret;
}
+static int get_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc, u64 start, u64 end,
+ u64 size, unsigned int align)
+{
+ u64 addr;
+ int ret;
+
+ spin_lock(&pagetable->lock);
+ addr = _get_unmapped_area(pagetable, start, end, size, align);
+ if (addr == (u64) -ENOMEM) {
+ spin_unlock(&pagetable->lock);
+ return -ENOMEM;
+ }
+
+ ret = _insert_gpuaddr(pagetable, addr, size);
+ spin_unlock(&pagetable->lock);
+
+ if (ret == 0) {
+ memdesc->gpuaddr = addr;
+ memdesc->pagetable = pagetable;
+ }
+
+ return ret;
+}
static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
struct kgsl_iommu_pt *pt = pagetable->priv;
int ret = 0;
- uint64_t addr, start, end, size;
+ u64 start, end, size;
unsigned int align;
if (WARN_ON(kgsl_memdesc_use_cpu_map(memdesc)))
@@ -2482,23 +2506,13 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
if (kgsl_memdesc_is_secured(memdesc))
start += secure_global_size;
- spin_lock(&pagetable->lock);
-
- addr = _get_unmapped_area(pagetable, start, end, size, align);
-
- if (addr == (uint64_t) -ENOMEM) {
- ret = -ENOMEM;
- goto out;
+ ret = get_gpuaddr(pagetable, memdesc, start, end, size, align);
+ /* if OoM, retry once after flushing mem_wq */
+ if (ret == -ENOMEM) {
+ flush_workqueue(kgsl_driver.mem_workqueue);
+ ret = get_gpuaddr(pagetable, memdesc, start, end, size, align);
}
- ret = _insert_gpuaddr(pagetable, addr, size);
- if (ret == 0) {
- memdesc->gpuaddr = addr;
- memdesc->pagetable = pagetable;
- }
-
-out:
- spin_unlock(&pagetable->lock);
return ret;
}
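
kgsl_iommu_get_gpuaddr() now delegates the search-and-insert to get_gpuaddr() and, when that returns -ENOMEM, flushes the deferred-free workqueue once before retrying, on the theory that entries still queued for destruction may hand their IOVA ranges back. A hedged sketch of that retry-after-flush shape, with a hypothetical try_reserve() standing in for the allocator:

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical allocator stub used only to make the sketch compile. */
static int try_reserve(void)
{
	return -ENOMEM;
}

static int reserve_with_retry(struct workqueue_struct *reclaim_wq)
{
	int ret = try_reserve();

	/* on OOM, drain pending deferred frees and retry exactly once */
	if (ret == -ENOMEM) {
		flush_workqueue(reclaim_wq);
		ret = try_reserve();
	}

	return ret;
}

kgsl_sharedmem_page_alloc_user() applies the same idea further down, guarded by memwq_flush_done so the flush happens at most once per allocation.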
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 840c92e389cb..d5a7f5ea47fa 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -508,6 +508,9 @@ static unsigned long
kgsl_pool_shrink_count_objects(struct shrinker *shrinker,
struct shrink_control *sc)
{
+ /* Trigger mem_workqueue flush to free memory */
+ kgsl_schedule_work(&kgsl_driver.mem_work);
+
/* Return total pool size as everything in pool can be freed */
return kgsl_pool_size_total();
}
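
The pool shrinker's count_objects callback now also schedules kgsl_driver.mem_work, so a reclaim pass indirectly drains the deferred-free workqueue. A rough sketch of a shrinker wired up this way, with illustrative pool helpers that are not part of the driver:

#include <linux/shrinker.h>
#include <linux/workqueue.h>

/* Illustrative stubs, not driver code. */
static unsigned long demo_pool_size(void) { return 0; }
static unsigned long demo_pool_trim(unsigned long nr) { return 0; }

static void demo_reclaim_fn(struct work_struct *work)
{
	/* stands in for flushing the deferred-free workqueue */
}
static DECLARE_WORK(demo_reclaim_work, demo_reclaim_fn);

static unsigned long demo_shrink_count(struct shrinker *s,
				       struct shrink_control *sc)
{
	/* kick deferred frees so the scan pass has more to reap */
	schedule_work(&demo_reclaim_work);

	/* everything in the pool is considered freeable */
	return demo_pool_size();
}

static unsigned long demo_shrink_scan(struct shrinker *s,
				      struct shrink_control *sc)
{
	return demo_pool_trim(sc->nr_to_scan);
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_shrink_count,
	.scan_objects  = demo_shrink_scan,
	.seeks         = DEFAULT_SEEKS,
};
/* register_shrinker(&demo_shrinker) would be called at init time. */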
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index dfe194aa46ac..e93d56567494 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -889,6 +889,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
unsigned int pcount = 0;
size_t len;
unsigned int align;
+ bool memwq_flush_done = false;
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
@@ -964,6 +965,13 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
if (page_count == -EAGAIN)
continue;
+ /* if OoM, retry once after flushing mem_wq */
+ if (page_count == -ENOMEM && !memwq_flush_done) {
+ flush_workqueue(kgsl_driver.mem_workqueue);
+ memwq_flush_done = true;
+ continue;
+ }
+
/*
* Update sglen and memdesc size,as requested allocation
* not served fully. So that they can be correctly freed
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index fe1e946b2586..2cec0e164e85 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -20,6 +20,7 @@
#define GSI_CMD_POLL_CNT 5
#define GSI_STOP_CMD_TIMEOUT_MS 200
#define GSI_MAX_CH_LOW_WEIGHT 15
+#define GSI_IRQ_STORM_THR 5
#define GSI_STOP_CMD_POLL_CNT 4
#define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
@@ -808,8 +809,14 @@ static irqreturn_t gsi_isr(int irq, void *ctxt)
gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
}
} else if (!gsi_ctx->per.clk_status_cb()) {
+ /* we only want to capture the gsi isr storm here */
+ if (atomic_read(&gsi_ctx->num_unclock_irq) ==
+ GSI_IRQ_STORM_THR)
+ gsi_ctx->per.enable_clk_bug_on();
+ atomic_inc(&gsi_ctx->num_unclock_irq);
return IRQ_HANDLED;
} else {
+ atomic_set(&gsi_ctx->num_unclock_irq, 0);
gsi_handle_irq();
}
return IRQ_HANDLED;
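
On the GSI side, gsi_isr() now counts consecutive interrupts that arrive while the clock is reported off and calls enable_clk_bug_on() exactly once, when the count reaches GSI_IRQ_STORM_THR; any normally handled interrupt resets the counter. The same storm-detection idea in isolation, with generic helper names, might look like:

#include <linux/atomic.h>
#include <linux/interrupt.h>

#define STORM_THRESHOLD 5

/* Illustrative stubs, not GSI code. */
static bool demo_clock_is_on(void) { return false; }
static void demo_capture_storm(void) { /* dump state, then assert */ }
static void demo_handle_irq(void) { }

static atomic_t unexpected_irqs = ATOMIC_INIT(0);

static irqreturn_t demo_isr(int irq, void *ctxt)
{
	if (!demo_clock_is_on()) {
		/* capture the storm exactly once, at the threshold */
		if (atomic_read(&unexpected_irqs) == STORM_THRESHOLD)
			demo_capture_storm();
		atomic_inc(&unexpected_irqs);
		return IRQ_HANDLED;
	}

	/* a serviceable interrupt clears the storm counter */
	atomic_set(&unexpected_irqs, 0);
	demo_handle_irq();
	return IRQ_HANDLED;
}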
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index c15cdda142be..da435048a9f5 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -237,6 +237,8 @@ struct gsi_ctx {
u32 intcntrlr_mem_size;
irq_handler_t intcntrlr_gsi_isr;
irq_handler_t intcntrlr_client_isr;
+
+ atomic_t num_unclock_irq;
};
enum gsi_re_type {
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 19ebdf3d0c15..177e1abdd543 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -97,6 +97,7 @@ enum gsi_intr_type {
* @rel_clk_cb: callback to release peripheral clock
* @user_data: cookie used for notifications
* @clk_status_cb: callback to update the current msm bus clock vote
+ * @enable_clk_bug_on: enable IPA clock for dump saving before assert
* @skip_ieob_mask_wa: flag for skipping ieob_mask_wa
* All the callbacks are in interrupt context
*
@@ -120,6 +121,7 @@ struct gsi_per_props {
int (*rel_clk_cb)(void *user_data);
void *user_data;
int (*clk_status_cb)(void);
+ void (*enable_clk_bug_on)(void);
bool skip_ieob_mask_wa;
};
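
The msm_gsi.h change adds an enable_clk_bug_on callback to gsi_per_props so the GSI layer can ask its client to vote the clock on before asserting, keeping register state dumpable. How a client might fill it in is sketched below; the demo_* helpers are assumptions for illustration and do not come from this patch:

#include <linux/bug.h>
#include <linux/msm_gsi.h>

/* Sketch only: hypothetical client wiring for the new callback. */
static void demo_force_clk_on(void) { }
static int demo_clk_status(void) { return 1; }

static void demo_enable_clk_bug_on(void)
{
	/* vote the peripheral clock on so registers stay readable... */
	demo_force_clk_on();
	/* ...then assert so the dump captures the storm context */
	BUG();
}

static struct gsi_per_props demo_props = {
	.clk_status_cb     = demo_clk_status,
	.enable_clk_bug_on = demo_enable_clk_bug_on,
	/* remaining required fields omitted for brevity */
};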