Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.c	52
1 file changed, 50 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 3697eeeecf82..89619a5a4289 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -222,6 +222,12 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
while ((entity->dependency = sched->ops->dependency(sched_job))) {
+ if (entity->dependency->context == entity->fence_context) {
+ /* We can ignore fences from ourselves */
+ fence_put(entity->dependency);
+ continue;
+ }
+
if (fence_add_callback(entity->dependency, &entity->cb,
amd_sched_entity_wakeup))
fence_put(entity->dependency);
@@ -327,19 +333,49 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;
+ unsigned long flags;
atomic_dec(&sched->hw_rq_count);
amd_sched_fence_signal(s_fence);
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+ cancel_delayed_work(&s_fence->dwork);
+ spin_lock_irqsave(&sched->fence_list_lock, flags);
+ list_del_init(&s_fence->list);
+ spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+ }
fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}
+static void amd_sched_fence_work_func(struct work_struct *work)
+{
+ struct amd_sched_fence *s_fence =
+ container_of(work, struct amd_sched_fence, dwork.work);
+ struct amd_gpu_scheduler *sched = s_fence->sched;
+ struct amd_sched_fence *entity, *tmp;
+ unsigned long flags;
+
+ DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
+
+ /* Clean all pending fences */
+ spin_lock_irqsave(&sched->fence_list_lock, flags);
+ list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
+ DRM_ERROR(" fence no %d\n", entity->base.seqno);
+ cancel_delayed_work(&entity->dwork);
+ list_del_init(&entity->list);
+ fence_put(&entity->base);
+ }
+ spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+}
+
static int amd_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
int r, count;
+ spin_lock_init(&sched->fence_list_lock);
+ INIT_LIST_HEAD(&sched->fence_list);
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
@@ -347,6 +383,7 @@ static int amd_sched_main(void *param)
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;
+ unsigned long flags;
wait_event_interruptible(sched->wake_up_worker,
kthread_should_stop() ||
@@ -357,6 +394,15 @@ static int amd_sched_main(void *param)
entity = sched_job->s_entity;
s_fence = sched_job->s_fence;
+
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+ INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
+ schedule_delayed_work(&s_fence->dwork, sched->timeout);
+ spin_lock_irqsave(&sched->fence_list_lock, flags);
+ list_add_tail(&s_fence->list, &sched->fence_list);
+ spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+ }
+
atomic_inc(&sched->hw_rq_count);
fence = sched->ops->run_job(sched_job);
if (fence) {
@@ -392,11 +438,12 @@ static int amd_sched_main(void *param)
*/
int amd_sched_init(struct amd_gpu_scheduler *sched,
struct amd_sched_backend_ops *ops,
- unsigned hw_submission, const char *name)
+ unsigned hw_submission, long timeout, const char *name)
{
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
+ sched->timeout = timeout;
amd_sched_rq_init(&sched->sched_rq);
amd_sched_rq_init(&sched->kernel_rq);
@@ -421,5 +468,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
*/
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
- kthread_stop(sched->thread);
+ if (sched->thread)
+ kthread_stop(sched->thread);
}
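
The new code relies on a few fields that are not part of this file's diff and were presumably added to gpu_scheduler.h in the same series. The following is only a sketch of the assumed layout, inferred from how gpu_scheduler.c uses the fields; the exact names and placement are not confirmed by this diff:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct amd_gpu_scheduler {
	/* ... existing members ... */
	long			timeout;		/* job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable */
	spinlock_t		fence_list_lock;	/* protects fence_list */
	struct list_head	fence_list;		/* fences with a pending timeout work item */
};

struct amd_sched_fence {
	/* ... existing members ... */
	struct delayed_work	dwork;			/* fires if the job exceeds sched->timeout */
	struct list_head	list;			/* entry on sched->fence_list */
};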
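
Because amd_sched_init() gains a timeout parameter, every caller has to pass one. A minimal, hypothetical caller update (not shown in this diff; example_ring_init and timeout_ms are illustrative names) could convert a millisecond lockup timeout to jiffies, or pass MAX_SCHEDULE_TIMEOUT to keep the watchdog disabled:

#include <linux/jiffies.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

static int example_ring_init(struct amd_gpu_scheduler *sched,
			     struct amd_sched_backend_ops *ops,
			     unsigned hw_submission, int timeout_ms,
			     const char *name)
{
	/* A timeout of zero or less means "never time out". */
	long timeout = timeout_ms > 0 ? msecs_to_jiffies(timeout_ms)
				      : MAX_SCHEDULE_TIMEOUT;

	return amd_sched_init(sched, ops, hw_submission, timeout, name);
}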