Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_request.c   1902
1 file changed, 1312 insertions(+), 590 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a195a92d0105..73d5195146b0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -23,42 +23,50 @@
*/
#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_reset.h"
+#include "gt/intel_ring.h"
+#include "gt/intel_rps.h"
#include "i915_active.h"
+#include "i915_deps.h"
+#include "i915_driver.h"
#include "i915_drv.h"
-#include "i915_globals.h"
+#include "i915_trace.h"
#include "intel_pm.h"
struct execute_cb {
- struct list_head link;
struct irq_work work;
struct i915_sw_fence *fence;
- void (*hook)(struct i915_request *rq, struct dma_fence *signal);
struct i915_request *signal;
};
-static struct i915_global_request {
- struct i915_global base;
- struct kmem_cache *slab_requests;
- struct kmem_cache *slab_dependencies;
- struct kmem_cache *slab_execute_cbs;
-} global;
+static struct kmem_cache *slab_requests;
+static struct kmem_cache *slab_execute_cbs;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return "i915";
+ return dev_name(to_request(fence)->engine->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ const struct i915_gem_context *ctx;
+
/*
* The timeline struct (as part of the ppgtt underneath a context)
* may be freed when the request is no longer in use by the GPU.
@@ -71,7 +79,11 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->gem_context->name ?: "[i915]";
+ ctx = i915_request_gem_context(to_request(fence));
+ if (!ctx)
+ return "[" DRIVER_NAME "]";
+
+ return ctx->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -88,15 +100,29 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_request_wait(to_request(fence),
- interruptible | I915_WAIT_PRIORITY,
- timeout);
+ return i915_request_wait_timeout(to_request(fence),
+ interruptible | I915_WAIT_PRIORITY,
+ timeout);
+}
+
+struct kmem_cache *i915_request_slab_cache(void)
+{
+ return slab_requests;
}
static void i915_fence_release(struct dma_fence *fence)
{
struct i915_request *rq = to_request(fence);
+ GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
+ rq->guc_prio != GUC_PRIO_FINI);
+
+ i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
+ if (rq->batch_res) {
+ i915_vma_resource_put(rq->batch_res);
+ rq->batch_res = NULL;
+ }
+
/*
* The request is put onto a RCU freelist (i.e. the address
* is immediately reused), mark the fences as being freed now.
@@ -107,7 +133,20 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->submit);
i915_sw_fence_fini(&rq->semaphore);
- kmem_cache_free(global.slab_requests, rq);
+ /*
+ * Keep one request on each engine for reserved use under mempressure,
+ * do not use with virtual engines as this really is only needed for
+ * kernel contexts.
+ */
+ if (!intel_engine_is_virtual(rq->engine) &&
+ !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
+ intel_context_put(rq->context);
+ return;
+ }
+
+ intel_context_put(rq->context);
+
+ kmem_cache_free(slab_requests, rq);
}
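The reserve parked here by cmpxchg() is a single-slot cache that request_alloc_slow() later drains with xchg(). A minimal userspace sketch of that single-slot hand-off, using illustrative names (struct slot, park_or_free, claim) rather than the driver's API:

#include <stdatomic.h>
#include <stdlib.h>

struct slot { _Atomic(void *) reserve; };

static int park_or_free(struct slot *s, void *obj)
{
        void *expected = NULL;

        /* Keep obj as the reserve only if the slot is currently empty. */
        if (atomic_compare_exchange_strong(&s->reserve, &expected, obj))
                return 1;       /* parked for reuse under memory pressure */

        free(obj);              /* slot already occupied, free normally */
        return 0;
}

static void *claim(struct slot *s)
{
        /* Take whatever is parked (possibly NULL) and leave the slot empty. */
        return atomic_exchange(&s->reserve, NULL);
}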
const struct dma_fence_ops i915_fence_ops = {
@@ -119,144 +158,236 @@ const struct dma_fence_ops i915_fence_ops = {
.release = i915_fence_release,
};
-static inline void
-i915_request_remove_from_client(struct i915_request *request)
+static void irq_execute_cb(struct irq_work *wrk)
{
- struct drm_i915_file_private *file_priv;
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
- file_priv = request->file_priv;
- if (!file_priv)
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(slab_execute_cbs, cb);
+}
+
+static __always_inline void
+__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
+{
+ struct execute_cb *cb, *cn;
+
+ if (llist_empty(&rq->execute_cb))
return;
- spin_lock(&file_priv->mm.lock);
- if (request->file_priv) {
- list_del(&request->client_link);
- request->file_priv = NULL;
+ llist_for_each_entry_safe(cb, cn,
+ llist_del_all(&rq->execute_cb),
+ work.node.llist)
+ fn(&cb->work);
+}
+
+static void __notify_execute_cb_irq(struct i915_request *rq)
+{
+ __notify_execute_cb(rq, irq_work_queue);
+}
+
+static bool irq_work_imm(struct irq_work *wrk)
+{
+ wrk->func(wrk);
+ return false;
+}
+
+void i915_request_notify_execute_cb_imm(struct i915_request *rq)
+{
+ __notify_execute_cb(rq, irq_work_imm);
+}
+
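execute_cb entries are queued with llist_add() and drained with llist_del_all(), so no lock protects the callback list. A userspace sketch of that push / detach-all pattern (struct node, push and detach_all are illustrative names, not the kernel llist API):

#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };
struct lockless_list { _Atomic(struct node *) first; };

static int push(struct lockless_list *l, struct node *n)
{
        struct node *head = atomic_load(&l->first);

        do {
                n->next = head;
        } while (!atomic_compare_exchange_weak(&l->first, &head, n));

        return head == NULL;    /* true if the list was previously empty */
}

static struct node *detach_all(struct lockless_list *l)
{
        /* Claim every queued node in one step; later pushes start a new list. */
        return atomic_exchange(&l->first, NULL);
}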
+static void __i915_request_fill(struct i915_request *rq, u8 val)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset(vaddr + head, val, rq->ring->size - head);
+ head = 0;
}
- spin_unlock(&file_priv->mm.lock);
+ memset(vaddr + head, val, rq->postfix - head);
}
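__i915_request_fill() poisons the payload of a request whose space may wrap past the end of the ring, so the fill takes at most two memsets. A userspace sketch of the same split, with illustrative names and byte offsets:

#include <string.h>

static void ring_fill(void *ring, unsigned int size,
                      unsigned int head, unsigned int tail, unsigned char val)
{
        if (tail < head) {              /* the range wraps past the end */
                memset((char *)ring + head, val, size - head);
                head = 0;
        }
        memset((char *)ring + head, val, tail - head);
}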
-static void advance_ring(struct i915_request *request)
+/**
+ * i915_request_active_engine
+ * @rq: request to inspect
+ * @active: pointer in which to return the active engine
+ *
+ * Returns the currently active engine via the @active pointer if the
+ * request is active and not yet completed.
+ *
+ * Returns true if request was active or false otherwise.
+ */
+bool
+i915_request_active_engine(struct i915_request *rq,
+ struct intel_engine_cs **active)
{
- struct intel_ring *ring = request->ring;
- unsigned int tail;
+ struct intel_engine_cs *engine, *locked;
+ bool ret = false;
/*
- * We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
+ * Serialise with __i915_request_submit() so that it sees
+ * the is-banned state, or we know the request is already inflight.
*
- * Note this requires that we are always called in request
- * completion order.
+ * Note that rq->engine is unstable, and so we double
+ * check that we have acquired the lock on the final engine.
*/
- GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
- if (list_is_last(&request->ring_link, &ring->request_list)) {
- /*
- * We may race here with execlists resubmitting this request
- * as we retire it. The resubmission will move the ring->tail
- * forwards (to request->wa_tail). We either read the
- * current value that was written to hw, or the value that
- * is just about to be. Either works, if we miss the last two
- * noops - they are safe to be replayed on a reset.
- */
- tail = READ_ONCE(request->tail);
- list_del(&ring->active_link);
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->sched_engine->lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->sched_engine->lock);
+ locked = engine;
+ spin_lock(&locked->sched_engine->lock);
+ }
+
+ if (i915_request_is_active(rq)) {
+ if (!__i915_request_is_complete(rq))
+ *active = locked;
+ ret = true;
+ }
+
+ spin_unlock_irq(&locked->sched_engine->lock);
+
+ return ret;
+}
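Because rq->engine may be rewritten while a request migrates between engines, the lock is taken on a snapshot of the pointer and the pointer re-read until it is stable under that lock. A userspace sketch of this lock-the-moving-target loop, with struct owner/struct obj standing in for engine and request:

#include <pthread.h>
#include <stdatomic.h>

struct owner { pthread_mutex_t lock; };
struct obj { _Atomic(struct owner *) owner; };

static struct owner *lock_owner(struct obj *o)
{
        struct owner *locked = atomic_load(&o->owner);

        pthread_mutex_lock(&locked->lock);
        for (;;) {
                struct owner *now = atomic_load(&o->owner);

                if (now == locked)
                        return locked;  /* stable while we hold its lock */

                pthread_mutex_unlock(&locked->lock);
                locked = now;
                pthread_mutex_lock(&locked->lock);
        }
}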
+
+static void __rq_init_watchdog(struct i915_request *rq)
+{
+ rq->watchdog.timer.function = NULL;
+}
+
+static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
+{
+ struct i915_request *rq =
+ container_of(hrtimer, struct i915_request, watchdog.timer);
+ struct intel_gt *gt = rq->engine->gt;
+
+ if (!i915_request_completed(rq)) {
+ if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
+ schedule_work(&gt->watchdog.work);
} else {
- tail = request->postfix;
+ i915_request_put(rq);
}
- list_del_init(&request->ring_link);
- ring->head = tail;
+ return HRTIMER_NORESTART;
}
-static void free_capture_list(struct i915_request *request)
+static void __rq_arm_watchdog(struct i915_request *rq)
{
- struct i915_capture_list *capture;
+ struct i915_request_watchdog *wdg = &rq->watchdog;
+ struct intel_context *ce = rq->context;
+
+ if (!ce->watchdog.timeout_us)
+ return;
+
+ i915_request_get(rq);
+
+ hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ wdg->timer.function = __rq_watchdog_expired;
+ hrtimer_start_range_ns(&wdg->timer,
+ ns_to_ktime(ce->watchdog.timeout_us *
+ NSEC_PER_USEC),
+ NSEC_PER_MSEC,
+ HRTIMER_MODE_REL);
+}
+
+static void __rq_cancel_watchdog(struct i915_request *rq)
+{
+ struct i915_request_watchdog *wdg = &rq->watchdog;
+
+ if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
+ i915_request_put(rq);
+}
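Arming the watchdog takes an extra request reference that is dropped exactly once: by the expiry handler, or by the side that successfully cancels a still-pending timer. A userspace sketch of that ownership hand-off, where an atomic flag stands in for hrtimer_try_to_cancel() returning a positive value:

#include <stdatomic.h>
#include <stdbool.h>

struct watched { _Atomic int refcount; _Atomic bool timer_pending; };

static void put_ref(struct watched *w) { atomic_fetch_sub(&w->refcount, 1); }

static void arm(struct watched *w)
{
        atomic_fetch_add(&w->refcount, 1);      /* reference owned by the timer */
        atomic_store(&w->timer_pending, true);
}

static void on_expire(struct watched *w)
{
        atomic_store(&w->timer_pending, false);
        put_ref(w);                             /* timer consumed its reference */
}

static void cancel(struct watched *w)
{
        bool expected = true;

        /* Only the side that stops a still-pending timer drops the reference. */
        if (atomic_compare_exchange_strong(&w->timer_pending, &expected, false))
                put_ref(w);
}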
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
- capture = request->capture_list;
+/**
+ * i915_request_free_capture_list - Free a capture list
+ * @capture: Pointer to the first list item or NULL
+ *
+ */
+void i915_request_free_capture_list(struct i915_capture_list *capture)
+{
while (capture) {
struct i915_capture_list *next = capture->next;
+ i915_vma_resource_put(capture->vma_res);
kfree(capture);
capture = next;
}
}
-static bool i915_request_retire(struct i915_request *rq)
-{
- struct i915_active_request *active, *next;
+#define assert_capture_list_is_null(_rq) GEM_BUG_ON((_rq)->capture_list)
+
+#define clear_capture_list(_rq) ((_rq)->capture_list = NULL)
+
+#else
+
+#define i915_request_free_capture_list(_a) do {} while (0)
+
+#define assert_capture_list_is_null(_a) do {} while (0)
+
+#define clear_capture_list(_rq) do {} while (0)
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
- if (!i915_request_completed(rq))
+#endif
+
+bool i915_request_retire(struct i915_request *rq)
+{
+ if (!__i915_request_is_complete(rq))
return false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
+ i915_request_mark_complete(rq);
- advance_ring(rq);
+ __rq_cancel_watchdog(rq);
/*
- * Walk through the active list, calling retire on each. This allows
- * objects to track their GPU activity and mark themselves as idle
- * when their *last* active request is completed (updating state
- * tracking lists for eviction, active references for GEM, etc).
+ * We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
*
- * As the ->retire() may free the node, we decouple it first and
- * pass along the auxiliary information (to avoid dereferencing
- * the node after the callback).
+ * Note this requires that we are always called in request
+ * completion order.
*/
- list_for_each_entry_safe(active, next, &rq->active_list, link) {
- /*
- * In microbenchmarks or focusing upon time inside the kernel,
- * we may spend an inordinate amount of time simply handling
- * the retirement of requests and processing their callbacks.
- * Of which, this loop itself is particularly hot due to the
- * cache misses when jumping around the list of
- * i915_active_request. So we try to keep this loop as
- * streamlined as possible and also prefetch the next
- * i915_active_request to try and hide the likely cache miss.
- */
- prefetchw(next);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, rq);
- }
-
- local_irq_disable();
-
- spin_lock(&rq->engine->active.lock);
- list_del(&rq->sched.link);
- spin_unlock(&rq->engine->active.lock);
-
- spin_lock(&rq->lock);
- i915_request_mark_complete(rq);
- if (!i915_request_signaled(rq))
+ GEM_BUG_ON(!list_is_first(&rq->link,
+ &i915_request_timeline(rq)->requests));
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ /* Poison before we release our space in the ring */
+ __i915_request_fill(rq, POISON_FREE);
+ rq->ring->head = rq->postfix;
+
+ if (!i915_request_signaled(rq)) {
+ spin_lock_irq(&rq->lock);
dma_fence_signal_locked(&rq->fence);
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
- i915_request_cancel_breadcrumb(rq);
- if (rq->waitboost) {
- GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
- atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ spin_unlock_irq(&rq->lock);
}
- spin_unlock(&rq->lock);
- local_irq_enable();
+ if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
+ intel_rps_dec_waiters(&rq->engine->gt->rps);
- intel_context_exit(rq->hw_context);
- intel_context_unpin(rq->hw_context);
+ /*
+ * We only loosely track inflight requests across preemption,
+ * and so we may find ourselves attempting to retire a _completed_
+ * request that we have removed from the HW and put back on a run
+ * queue.
+ *
+ * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
+ * after removing the breadcrumb and signaling it, so that we do not
+ * inadvertently attach the breadcrumb to a completed request.
+ */
+ rq->engine->remove_active_request(rq);
+ GEM_BUG_ON(!llist_empty(&rq->execute_cb));
+
+ __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
- i915_request_remove_from_client(rq);
- list_del(&rq->link);
+ intel_context_exit(rq->context);
+ intel_context_unpin(rq->context);
- free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
i915_request_put(rq);
@@ -265,86 +396,97 @@ static bool i915_request_retire(struct i915_request *rq)
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_ring *ring = rq->ring;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
-
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_request_completed(rq));
-
- if (list_empty(&rq->ring_link))
- return;
+ RQ_TRACE(rq, "\n");
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
do {
- tmp = list_first_entry(&ring->request_list,
- typeof(*tmp), ring_link);
+ tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
+ GEM_BUG_ON(!i915_request_completed(tmp));
} while (i915_request_retire(tmp) && tmp != rq);
}
-static void irq_execute_cb(struct irq_work *wrk)
+static struct i915_request * const *
+__engine_active(struct intel_engine_cs *engine)
{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
- i915_sw_fence_complete(cb->fence);
- kmem_cache_free(global.slab_execute_cbs, cb);
+ return READ_ONCE(engine->execlists.active);
}
-static void irq_execute_cb_hook(struct irq_work *wrk)
+static bool __request_in_flight(const struct i915_request *signal)
{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+ struct i915_request * const *port, *rq;
+ bool inflight = false;
- cb->hook(container_of(cb->fence, struct i915_request, submit),
- &cb->signal->fence);
- i915_request_put(cb->signal);
-
- irq_execute_cb(wrk);
-}
-
-static void __notify_execute_cb(struct i915_request *rq)
-{
- struct execute_cb *cb;
-
- lockdep_assert_held(&rq->lock);
-
- if (list_empty(&rq->execute_cb))
- return;
-
- list_for_each_entry(cb, &rq->execute_cb, link)
- irq_work_queue(&cb->work);
+ if (!i915_request_is_ready(signal))
+ return false;
/*
- * XXX Rollback on __i915_request_unsubmit()
+ * Even if we have unwound the request, it may still be on
+ * the GPU (preempt-to-busy). If that request is inside an
+ * unpreemptible critical section, it will not be removed. Some
+ * GPU functions may even be stuck waiting for the paired request
+ * (__await_execution) to be submitted and cannot be preempted
+ * until the bond is executing.
*
- * In the future, perhaps when we have an active time-slicing scheduler,
- * it will be interesting to unsubmit parallel execution and remove
- * busywaits from the GPU until their master is restarted. This is
- * quite hairy, we have to carefully rollback the fence and do a
- * preempt-to-idle cycle on the target engine, all the while the
- * master execute_cb may refire.
+ * As we know that there are always preemption points between
+ * requests, we know that only the currently executing request
+ * may be still active even though we have cleared the flag.
+ * However, we can't rely on our tracking of ELSP[0] to know
+ * which request is currently active and so maybe stuck, as
+ * the tracking may be an event behind. Instead assume that
+ * if the context is still inflight, then it is still active
+ * even if the active flag has been cleared.
+ *
+ * To further complicate matters, if there is a pending promotion, the HW
+ * may either perform a context switch to the second inflight execlists,
+ * or it may switch to the pending set of execlists. In the case of the
+ * latter, it may send the ACK and we process the event copying the
+ * pending[] over top of inflight[], _overwriting_ our *active. Since
+ * this implies the HW is arbitrating and not stuck in *active, we do
+ * not worry about complete accuracy, but we do require no read/write
+ * tearing of the pointer [the read of the pointer must be valid, even
+ * as the array is being overwritten, for which we require the writes
+ * to avoid tearing.]
+ *
+ * Note that the read of *execlists->active may race with the promotion
+ * of execlists->pending[] to execlists->inflight[], overwriting
+ * the value at *execlists->active. This is fine. The promotion implies
+ * that we received an ACK from the HW, and so the context is not
+ * stuck -- if we do not see ourselves in *active, the inflight status
+ * is valid. If instead we see ourselves being copied into *active,
+ * we are inflight and may signal the callback.
*/
- INIT_LIST_HEAD(&rq->execute_cb);
+ if (!intel_context_inflight(signal->context))
+ return false;
+
+ rcu_read_lock();
+ for (port = __engine_active(signal->engine);
+ (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
+ port++) {
+ if (rq->context == signal->context) {
+ inflight = i915_seqno_passed(rq->fence.seqno,
+ signal->fence.seqno);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return inflight;
}
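The loop above compares wrapping 32-bit seqnos via i915_seqno_passed(). A sketch of the assumed wrap-safe comparison, which uses signed distance rather than a plain less-than:

#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) >= 0;   /* also true across a 32-bit wrap */
}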
static int
-__i915_request_await_execution(struct i915_request *rq,
- struct i915_request *signal,
- void (*hook)(struct i915_request *rq,
- struct dma_fence *signal),
- gfp_t gfp)
+__await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ gfp_t gfp)
{
struct execute_cb *cb;
- if (i915_request_is_active(signal)) {
- if (hook)
- hook(rq, &signal->fence);
+ if (i915_request_is_active(signal))
return 0;
- }
- cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+ cb = kmem_cache_alloc(slab_execute_cbs, gfp);
if (!cb)
return -ENOMEM;
@@ -352,42 +494,128 @@ __i915_request_await_execution(struct i915_request *rq,
i915_sw_fence_await(cb->fence);
init_irq_work(&cb->work, irq_execute_cb);
- if (hook) {
- cb->hook = hook;
- cb->signal = i915_request_get(signal);
- cb->work.func = irq_execute_cb_hook;
+ /*
+ * Register the callback first, then see if the signaler is already
+ * active. This ensures that if we race with the
+ * __notify_execute_cb from i915_request_submit() and we are not
+ * included in that list, we get a second bite of the cherry and
+ * execute it ourselves. After this point, a future
+ * i915_request_submit() will notify us.
+ *
+ * In i915_request_retire() we set the ACTIVE bit on a completed
+ * request (then flush the execute_cb). So by registering the
+ * callback first, then checking the ACTIVE bit, we serialise with
+ * the completed/retired request.
+ */
+ if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
+ if (i915_request_is_active(signal) ||
+ __request_in_flight(signal))
+ i915_request_notify_execute_cb_imm(signal);
}
- spin_lock_irq(&signal->lock);
- if (i915_request_is_active(signal)) {
- if (hook) {
- hook(rq, &signal->fence);
- i915_request_put(signal);
- }
- i915_sw_fence_complete(cb->fence);
- kmem_cache_free(global.slab_execute_cbs, cb);
- } else {
- list_add_tail(&cb->link, &signal->execute_cb);
+ return 0;
+}
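The comment above relies on publish-then-check ordering: the waiter enqueues its callback before testing whether the signaler is active, while the signaler marks itself active before draining the queue, so the callback runs exactly once whatever the interleaving. A simplified userspace sketch with a single pending callback (illustrative names; sequentially consistent atomics stand in for llist and irq_work):

#include <stdatomic.h>
#include <stdbool.h>

struct signal_state { _Atomic bool active; _Atomic int queued; };

static void waiter(struct signal_state *s, void (*cb)(void))
{
        atomic_fetch_add(&s->queued, 1);        /* publish first ... */
        if (atomic_load(&s->active) &&          /* ... then check */
            atomic_exchange(&s->queued, 0))
                cb();                           /* we raced, run it ourselves */
}

static void signaler(struct signal_state *s, void (*cb)(void))
{
        atomic_store(&s->active, true);         /* change state first ... */
        if (atomic_exchange(&s->queued, 0))     /* ... then drain the queue */
                cb();
}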
+
+static bool fatal_error(int error)
+{
+ switch (error) {
+ case 0: /* not an error! */
+ case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
+ case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+ return false;
+ default:
+ return true;
}
- spin_unlock_irq(&signal->lock);
+}
- return 0;
+void __i915_request_skip(struct i915_request *rq)
+{
+ GEM_BUG_ON(!fatal_error(rq->fence.error));
+
+ if (rq->infix == rq->postfix)
+ return;
+
+ RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ __i915_request_fill(rq, 0);
+ rq->infix = rq->postfix;
}
-void __i915_request_submit(struct i915_request *request)
+bool i915_request_set_error_once(struct i915_request *rq, int error)
+{
+ int old;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+
+ if (i915_request_signaled(rq))
+ return false;
+
+ old = READ_ONCE(rq->fence.error);
+ do {
+ if (fatal_error(old))
+ return false;
+ } while (!try_cmpxchg(&rq->fence.error, &old, error));
+
+ return true;
+}
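i915_request_set_error_once() installs an error with a try_cmpxchg() loop so that only the first fatal error sticks. A userspace sketch of the same loop, with fatal() as a caller-supplied predicate:

#include <stdatomic.h>
#include <stdbool.h>

static bool set_error_once(_Atomic int *slot, int error, bool (*fatal)(int))
{
        int old = atomic_load(slot);

        do {
                if (fatal(old))
                        return false;   /* a fatal error is already recorded */
        } while (!atomic_compare_exchange_weak(slot, &old, error));

        return true;
}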
+
+struct i915_request *i915_request_mark_eio(struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return NULL;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ /* As soon as the request is completed, it may be retired */
+ rq = i915_request_get(rq);
+
+ i915_request_set_error_once(rq, -EIO);
+ i915_request_mark_complete(rq);
+
+ return rq;
+}
+
+bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ bool result = false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&engine->sched_engine->lock);
+
+ /*
+ * With the advent of preempt-to-busy, we frequently encounter
+ * requests that we have unsubmitted from HW, but left running
+ * until the next ack and so have completed in the meantime. On
+ * resubmission of that completed request, we can skip
+ * updating the payload, and execlists can even skip submitting
+ * the request.
+ *
+ * We must remove the request from the caller's priority queue,
+ * and the caller must only call us when the request is in their
+ * priority queue, under the sched_engine->lock. This ensures that the
+ * request has *not* yet been retired and we can safely move
+ * the request into the engine->active.list where it will be
+ * dropped upon retiring. (Otherwise, resubmitting a *retired*
+ * request would be a horrible use-after-free.)
+ */
+ if (__i915_request_is_complete(request)) {
+ list_del_init(&request->sched.link);
+ goto active;
+ }
- if (i915_gem_context_is_banned(request->gem_context))
- i915_request_skip(request, -EIO);
+ if (unlikely(intel_context_is_banned(request->context)))
+ i915_request_set_error_once(request, -EIO);
+
+ if (unlikely(fatal_error(request->fence.error)))
+ __i915_request_skip(request);
/*
* Are we using semaphores when the gpu is already saturated?
@@ -409,29 +637,40 @@ void __i915_request_submit(struct i915_request *request)
i915_sw_fence_signaled(&request->semaphore))
engine->saturated |= request->sched.semaphores;
- /* We may be recursing from the signal callback of another i915 fence */
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ engine->emit_fini_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
+
+ trace_i915_request_execute(request);
+ if (engine->bump_serial)
+ engine->bump_serial(engine);
+ else
+ engine->serial++;
- list_move_tail(&request->sched.link, &engine->active.requests);
+ result = true;
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ engine->add_active_request(request);
+active:
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
- !i915_request_enable_breadcrumb(request))
- intel_engine_queue_breadcrumbs(engine);
-
- __notify_execute_cb(request);
-
- spin_unlock(&request->lock);
-
- engine->emit_fini_breadcrumb(request,
- request->ring->vaddr + request->postfix);
+ /*
+ * XXX Rollback bonded-execution on __i915_request_unsubmit()?
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy, we have to carefully rollback the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ __notify_execute_cb_irq(request);
- engine->serial++;
+ /* We may be recursing from the signal callback of another i915 fence */
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ i915_request_enable_breadcrumb(request);
- trace_i915_request_execute(request);
+ return result;
}
void i915_request_submit(struct i915_request *request)
@@ -440,46 +679,41 @@ void i915_request_submit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
__i915_request_submit(request);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
-
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->active.lock);
-
/*
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
+ RQ_TRACE(request, "\n");
- /* We may be recursing from the signal callback of another i915 fence */
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ GEM_BUG_ON(!irqs_disabled());
+ lockdep_assert_held(&engine->sched_engine->lock);
+ /*
+ * Before we remove this breadcrumb from the signal list, we have
+ * to ensure that a concurrent dma_fence_enable_signaling() does not
+ * attach itself. We first mark the request as no longer active and
+ * make sure that is visible to other cores, and then remove the
+ * breadcrumb if attached.
+ */
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_cancel_breadcrumb(request);
- GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
- clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
-
- spin_unlock(&request->lock);
-
/* We've already spun, don't charge on resubmitting. */
- if (request->sched.semaphores && i915_request_started(request)) {
- request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+ if (request->sched.semaphores && __i915_request_has_started(request))
request->sched.semaphores = 0;
- }
/*
* We don't need to wake_up any waiters on request->execute, they
@@ -496,14 +730,24 @@ void i915_request_unsubmit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
__i915_request_unsubmit(request);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
+void i915_request_cancel(struct i915_request *rq, int error)
+{
+ if (!i915_request_set_error_once(rq, error))
+ return;
+
+ set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+
+ intel_context_cancel_request(rq->context, rq);
}
-static int __i915_sw_fence_call
+static int
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_request *request =
@@ -512,6 +756,12 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
trace_i915_request_submit(request);
+
+ if (unlikely(fence->error))
+ i915_request_set_error_once(request, fence->error);
+ else
+ __rq_arm_watchdog(request);
+
/*
* We need to serialize use of the submit_request() callback
* with its hotplugging performed during an emergency
@@ -533,75 +783,101 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static int __i915_sw_fence_call
+static int
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct i915_request *request =
- container_of(fence, typeof(*request), semaphore);
+ struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
switch (state) {
case FENCE_COMPLETE:
- i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
break;
case FENCE_FREE:
- i915_request_put(request);
+ i915_request_put(rq);
break;
}
return NOTIFY_DONE;
}
-static void ring_retire_requests(struct intel_ring *ring)
+static void retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_context *ce, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl,
+ struct i915_request **rsvd,
+ gfp_t gfp)
{
- struct intel_ring *ring = ce->ring;
struct i915_request *rq;
- if (list_empty(&ring->request_list))
- goto out;
+ /* If we cannot wait, dip into our reserves */
+ if (!gfpflags_allow_blocking(gfp)) {
+ rq = xchg(rsvd, NULL);
+ if (!rq) /* Use the normal failure path for one final WARN */
+ goto out;
- if (!gfpflags_allow_blocking(gfp))
+ return rq;
+ }
+
+ if (list_empty(&tl->requests))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
i915_request_retire(rq);
- rq = kmem_cache_alloc(global.slab_requests,
+ rq = kmem_cache_alloc(slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (rq)
return rq;
/* Ratelimit ourselves to prevent oom from malicious clients */
- rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
cond_synchronize_rcu(rq->rcustate);
/* Retire our old requests in the hope that we free some */
- ring_retire_requests(ring);
+ retire_requests(tl);
out:
- return kmem_cache_alloc(global.slab_requests, gfp);
+ return kmem_cache_alloc(slab_requests, gfp);
}
+static void __i915_request_ctor(void *arg)
+{
+ struct i915_request *rq = arg;
+
+ spin_lock_init(&rq->lock);
+ i915_sched_node_init(&rq->sched);
+ i915_sw_fence_init(&rq->submit, submit_notify);
+ i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+ clear_capture_list(rq);
+ rq->batch_res = NULL;
+
+ init_llist_head(&rq->execute_cb);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#define clear_batch_ptr(_rq) ((_rq)->batch = NULL)
+#else
+#define clear_batch_ptr(_a) do {} while (0)
+#endif
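The constructor above runs once per slab object, so the spinlock, scheduler node and sw-fences survive free/alloc cycles and reused requests only need the cheaper *_reinit() calls seen in __i915_request_create(). A hedged sketch of how such a cache is typically registered; the exact flags the driver uses are not visible in this hunk and are assumptions here:

static int sketch_request_slab_init(void)
{
        /* SLAB_TYPESAFE_BY_RCU is assumed, matching the RCU-freelist comment above. */
        slab_requests = kmem_cache_create("i915_request",
                                          sizeof(struct i915_request),
                                          __alignof__(struct i915_request),
                                          SLAB_HWCACHE_ALIGN |
                                          SLAB_TYPESAFE_BY_RCU,
                                          __i915_request_ctor);
        return slab_requests ? 0 : -ENOMEM;
}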
+
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct i915_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
- might_sleep_if(gfpflags_allow_blocking(gfp));
+ might_alloc(gfp);
/* Check that the caller provided an already pinned context */
__intel_context_pin(ce);
@@ -635,49 +911,60 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
*
* Do not use kmem_cache_zalloc() here!
*/
- rq = kmem_cache_alloc(global.slab_requests,
+ rq = kmem_cache_alloc(slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(ce, gfp);
+ rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
- ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ /*
+ * Hold a reference to the intel_context over life of an i915_request.
+ * Without this an i915_request can exist after the context has been
+ * destroyed (e.g. request retired, context closed, but user space holds
+ * a reference to the request from an out fence). In the case of GuC
+ * submission + virtual engine, the engine that the request references
+ * is also destroyed, which can trigger a bad pointer deref in fence ops
+ * (e.g. i915_fence_get_driver_name). We could likely change these
+ * functions to avoid touching the engine but let's just be safe and
+ * hold the intel_context reference. In execlist mode the request always
+ * eventually points to a physical engine so this isn't an issue.
+ */
+ rq->context = intel_context_get(ce);
+ rq->engine = ce->engine;
+ rq->ring = ce->ring;
+ rq->execution_mask = ce->engine->mask;
+
+ ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
- rq->i915 = ce->engine->i915;
- rq->hw_context = ce;
- rq->gem_context = ce->gem_context;
- rq->engine = ce->engine;
- rq->ring = ce->ring;
- rq->timeline = tl;
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+ tl->fence_context, seqno);
+
+ RCU_INIT_POINTER(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno;
- rq->hwsp_cacheline = tl->hwsp_cacheline;
+ GEM_BUG_ON(__i915_request_is_complete(rq));
+
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
- spin_lock_init(&rq->lock);
- dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
- tl->fence_context, seqno);
+ rq->guc_prio = GUC_PRIO_INIT;
/* We bump the ref for the fence chain */
- i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
- i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
-
- i915_sched_node_init(&rq->sched);
+ i915_sw_fence_reinit(&i915_request_get(rq)->submit);
+ i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
- /* No zalloc, must clear what we need by hand */
- rq->file_priv = NULL;
- rq->batch = NULL;
- rq->capture_list = NULL;
- rq->waitboost = false;
- rq->execution_mask = ALL_ENGINES;
+ i915_sched_node_reinit(&rq->sched);
- INIT_LIST_HEAD(&rq->active_list);
- INIT_LIST_HEAD(&rq->execute_cb);
+ /* No zalloc, everything must be cleared after use */
+ clear_batch_ptr(rq);
+ __rq_init_watchdog(rq);
+ assert_capture_list_is_null(rq);
+ GEM_BUG_ON(!llist_empty(&rq->execute_cb));
+ GEM_BUG_ON(rq->batch_res);
/*
* Reserve space in the ring buffer for all the commands required to
@@ -709,18 +996,20 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
intel_context_mark_active(ce);
+ list_add_tail_rcu(&rq->link, &tl->requests);
+
return rq;
err_unwind:
ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&rq->active_list));
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
err_free:
- kmem_cache_free(global.slab_requests, rq);
+ intel_context_put(ce);
+ kmem_cache_free(slab_requests, rq);
err_unreserve:
intel_context_unpin(ce);
return ERR_PTR(ret);
@@ -730,15 +1019,15 @@ struct i915_request *
i915_request_create(struct intel_context *ce)
{
struct i915_request *rq;
- int err;
+ struct intel_timeline *tl;
- err = intel_context_timeline_lock(ce);
- if (err)
- return ERR_PTR(err);
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return ERR_CAST(tl);
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
+ if (!list_is_last(&rq->link, &tl->requests))
i915_request_retire(rq);
intel_context_enter(ce);
@@ -748,28 +1037,79 @@ i915_request_create(struct intel_context *ce)
goto err_unlock;
/* Check that we do not interrupt ourselves with a new request */
- rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+ rq->cookie = lockdep_pin_lock(&tl->mutex);
return rq;
err_unlock:
- intel_context_timeline_unlock(ce);
+ intel_context_timeline_unlock(tl);
return rq;
}
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+ struct dma_fence *fence;
+ int err;
+
+ if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
return 0;
- signal = list_prev_entry(signal, ring_link);
- if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ if (i915_request_started(signal))
return 0;
- return i915_sw_fence_await_dma_fence(&rq->submit,
- &signal->fence, 0,
- I915_FENCE_GFP);
+ /*
+ * The caller holds a reference on @signal, but we do not serialise
+ * against it being retired and removed from the lists.
+ *
+ * We do not hold a reference to the request before @signal, and
+ * so must be very careful to ensure that it is not _recycled_ as
+ * we follow the link backwards.
+ */
+ fence = NULL;
+ rcu_read_lock();
+ do {
+ struct list_head *pos = READ_ONCE(signal->link.prev);
+ struct i915_request *prev;
+
+ /* Confirm signal has not been retired, the link is valid */
+ if (unlikely(__i915_request_has_started(signal)))
+ break;
+
+ /* Is signal the earliest request on its timeline? */
+ if (pos == &rcu_dereference(signal->timeline)->requests)
+ break;
+
+ /*
+ * Peek at the request before us in the timeline. That
+ * request will only be valid before it is retired, so
+ * after acquiring a reference to it, confirm that it is
+ * still part of the signaler's timeline.
+ */
+ prev = list_entry(pos, typeof(*prev), link);
+ if (!i915_request_get_rcu(prev))
+ break;
+
+ /* After the strong barrier, confirm prev is still attached */
+ if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
+ i915_request_put(prev);
+ break;
+ }
+
+ fence = &prev->fence;
+ } while (0);
+ rcu_read_unlock();
+ if (!fence)
+ return 0;
+
+ err = 0;
+ if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ err = i915_sw_fence_await_dma_fence(&rq->submit,
+ fence, 0,
+ I915_FENCE_GFP);
+ dma_fence_put(fence);
+
+ return err;
}
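The backwards peek above is only safe because i915_request_get_rcu() refuses to take a reference on a request whose refcount already reached zero, after which the link is re-validated. A userspace sketch of that get-unless-zero primitive (the re-validation step is the READ_ONCE(prev->link.next) check in the function above):

#include <stdatomic.h>
#include <stdbool.h>

static bool get_unless_zero(_Atomic int *refcount)
{
        int old = atomic_load(refcount);

        do {
                if (old == 0)
                        return false;   /* object already being freed */
        } while (!atomic_compare_exchange_weak(refcount, &old, old + 1));

        return true;
}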
static intel_engine_mask_t
@@ -787,42 +1127,32 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->engine->saturated;
+ return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
static int
-emit_semaphore_wait(struct i915_request *to,
- struct i915_request *from,
- gfp_t gfp)
+__emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ u32 seqno)
{
+ const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
u32 hwsp_offset;
+ int len, err;
u32 *cs;
- int err;
-
- GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
- GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
- /* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
- return i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
-
- err = i915_request_await_start(to, from);
- if (err < 0)
- return err;
-
- /* Only submit our spinner after the signaler is running! */
- err = __i915_request_await_execution(to, from, NULL, gfp);
- if (err)
- return err;
+ GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
/* We need to pin the signaler's HWSP until we are finished reading. */
- err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
return err;
- cs = intel_ring_begin(to, 4);
+ len = 4;
+ if (has_token)
+ len += 2;
+
+ cs = intel_ring_begin(to, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -834,20 +1164,297 @@ emit_semaphore_wait(struct i915_request *to,
* (post-wrap) values than they were expecting (and so wait
* forever).
*/
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = from->fence.seqno;
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD) +
+ has_token;
+ *cs++ = seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
+ if (has_token) {
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
intel_ring_advance(to, cs);
- to->sched.semaphores |= from->engine->mask;
- to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
}
+static bool
+can_use_semaphore_wait(struct i915_request *to, struct i915_request *from)
+{
+ return to->engine->gt->ggtt == from->engine->gt->ggtt;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ gfp_t gfp)
+{
+ const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+ struct i915_sw_fence *wait = &to->submit;
+
+ if (!can_use_semaphore_wait(to, from))
+ goto await_fence;
+
+ if (!intel_context_use_semaphores(to->context))
+ goto await_fence;
+
+ if (i915_request_has_initial_breadcrumb(to))
+ goto await_fence;
+
+ /*
+ * If this or its dependents are waiting on an external fence
+ * that may fail catastrophically, then we want to avoid using
+ * semaphores as they bypass the fence signaling metadata, and we
+ * lose the fence->error propagation.
+ */
+ if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
+ goto await_fence;
+
+ /* Just emit the first semaphore we see as request space is limited. */
+ if (already_busywaiting(to) & mask)
+ goto await_fence;
+
+ if (i915_request_await_start(to, from) < 0)
+ goto await_fence;
+
+ /* Only submit our spinner after the signaler is running! */
+ if (__await_execution(to, from, gfp))
+ goto await_fence;
+
+ if (__emit_semaphore_wait(to, from, from->fence.seqno))
+ goto await_fence;
+
+ to->sched.semaphores |= mask;
+ wait = &to->semaphore;
+
+await_fence:
+ return i915_sw_fence_await_dma_fence(wait,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+}
+
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+ struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl,
+ fence->context,
+ fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
+
+static int
+__i915_request_await_execution(struct i915_request *to,
+ struct i915_request *from)
+{
+ int err;
+
+ GEM_BUG_ON(intel_context_is_barrier(from->context));
+
+ /* Submit both requests at the same time */
+ err = __await_execution(to, from, I915_FENCE_GFP);
+ if (err)
+ return err;
+
+ /* Squash repeated dependencies to the same timelines */
+ if (intel_timeline_sync_has_start(i915_request_timeline(to),
+ &from->fence))
+ return 0;
+
+ /*
+ * Wait until the start of this request.
+ *
+ * The execution cb fires when we submit the request to HW. But in
+ * many cases this may be long before the request itself is ready to
+ * run (consider that we submit 2 requests for the same context, where
+ * the request of interest is behind an indefinite spinner). So we hook
+ * up to both to reduce our queues and keep the execution lag minimised
+ * in the worst case, though we hope that the await_start is elided.
+ */
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
+ /*
+ * Ensure both start together [after all semaphores in signal]
+ *
+ * Now that we are queued to the HW at roughly the same time (thanks
+ * to the execute cb) and are ready to run at roughly the same time
+ * (thanks to the await start), our signaler may still be indefinitely
+ * delayed by waiting on a semaphore from a remote engine. If our
+ * signaler depends on a semaphore, so indirectly do we, and we do not
+ * want to start our payload until our signaler also starts theirs.
+ * So we wait.
+ *
+ * However, there is also a second condition for which we need to wait
+ * for the precise start of the signaler. Consider that the signaler
+ * was submitted in a chain of requests following another context
+ * (with just an ordinary intra-engine fence dependency between the
+ * two). In this case the signaler is queued to HW, but not for
+ * immediate execution, and so we must wait until it reaches the
+ * active slot.
+ */
+ if (can_use_semaphore_wait(to, from) &&
+ intel_engine_has_semaphores(to->engine) &&
+ !i915_request_has_initial_breadcrumb(to)) {
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ if (err < 0)
+ return err;
+ }
+
+ /* Couple the dependency tree for PI on this exposed to->fence */
+ if (to->engine->sched_engine->schedule) {
+ err = i915_sched_node_add_dependency(&to->sched,
+ &from->sched,
+ I915_DEPENDENCY_WEAK);
+ if (err < 0)
+ return err;
+ }
+
+ return intel_timeline_sync_set_start(i915_request_timeline(to),
+ &from->fence);
+}
+
+static void mark_external(struct i915_request *rq)
+{
+ /*
+ * The downside of using semaphores is that we lose metadata passing
+ * along the signaling chain. This is particularly nasty when we
+ * need to pass along a fatal error such as EFAULT or EDEADLK. For
+ * fatal errors we want to scrub the request before it is executed,
+ * which means that we cannot preload the request onto HW and have
+ * it wait upon a semaphore.
+ */
+ rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
+}
+
+static int
+__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+ mark_external(rq);
+ return i915_sw_fence_await_dma_fence(&rq->submit, fence,
+ i915_fence_context_timeout(rq->engine->i915,
+ fence->context),
+ I915_FENCE_GFP);
+}
+
+static int
+i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
+{
+ struct dma_fence *iter;
+ int err = 0;
+
+ if (!to_dma_fence_chain(fence))
+ return __i915_request_await_external(rq, fence);
+
+ dma_fence_chain_for_each(iter, fence) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(iter);
+
+ if (!dma_fence_is_i915(chain->fence)) {
+ err = __i915_request_await_external(rq, iter);
+ break;
+ }
+
+ err = i915_request_await_dma_fence(rq, chain->fence);
+ if (err < 0)
+ break;
+ }
+
+ dma_fence_put(iter);
+ return err;
+}
+
+static inline bool is_parallel_rq(struct i915_request *rq)
+{
+ return intel_context_is_parallel(rq->context);
+}
+
+static inline struct intel_context *request_to_parent(struct i915_request *rq)
+{
+ return intel_context_to_parent(rq->context);
+}
+
+static bool is_same_parallel_context(struct i915_request *to,
+ struct i915_request *from)
+{
+ if (is_parallel_rq(to))
+ return request_to_parent(to) == request_to_parent(from);
+
+ return false;
+}
+
+int
+i915_request_await_execution(struct i915_request *rq,
+ struct dma_fence *fence)
+{
+ struct dma_fence **child = &fence;
+ unsigned int nchild = 1;
+ int ret;
+
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+
+ /* XXX Error for signal-on-any fence arrays */
+
+ child = array->fences;
+ nchild = array->num_fences;
+ GEM_BUG_ON(!nchild);
+ }
+
+ do {
+ fence = *child++;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ continue;
+
+ if (fence->context == rq->fence.context)
+ continue;
+
+ /*
+ * We don't squash repeated fence dependencies here as we
+ * want to run our callback in all cases.
+ */
+
+ if (dma_fence_is_i915(fence)) {
+ if (is_same_parallel_context(rq, to_request(fence)))
+ continue;
+ ret = __i915_request_await_execution(rq,
+ to_request(fence));
+ } else {
+ ret = i915_request_await_external(rq, fence);
+ }
+ if (ret < 0)
+ return ret;
+ } while (--nchild);
+
+ return 0;
+}
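i915_request_await_execution() flattens a fence array into its children and skips any child that is already signalled or belongs to our own context before awaiting each one. A userspace sketch of that unwrapping loop, with illustrative fence types in place of the dma-fence API:

#include <stdbool.h>

struct fence { unsigned long long context; bool signaled; };
struct fence_array { struct fence **children; unsigned int count; };

static int await_one(struct fence *f) { (void)f; return 0; }

static int await_composite(unsigned long long own_context,
                           struct fence *single, struct fence_array *array)
{
        struct fence **child = array ? array->children : &single;
        unsigned int nchild = array ? array->count : 1;
        int err;

        do {
                struct fence *f = *child++;

                if (f->signaled || f->context == own_context)
                        continue;       /* nothing to wait for */

                err = await_one(f);
                if (err)
                        return err;
        } while (--nchild);

        return 0;
}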
+
+static int
+await_request_submit(struct i915_request *to, struct i915_request *from)
+{
+ /*
+ * If we are waiting on a virtual engine, then it may be
+ * constrained to execute on a single engine *prior* to submission.
+ * When it is submitted, it will be first submitted to the virtual
+ * engine and then passed to the physical engine. We cannot allow
+ * the waiter to be submitted immediately to the physical engine
+ * as it may then bypass the virtual request.
+ */
+ if (to->engine == READ_ONCE(from->engine))
+ return i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ I915_FENCE_GFP);
+ else
+ return __i915_request_await_execution(to, from);
+}
+
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
@@ -856,38 +1463,27 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
GEM_BUG_ON(to == from);
GEM_BUG_ON(to->timeline == from->timeline);
- if (i915_request_completed(from))
+ if (i915_request_completed(from)) {
+ i915_sw_fence_set_error_once(&to->submit, from->fence.error);
return 0;
+ }
- if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
+ if (to->engine->sched_engine->schedule) {
+ ret = i915_sched_node_add_dependency(&to->sched,
+ &from->sched,
+ I915_DEPENDENCY_EXTERNAL);
if (ret < 0)
return ret;
}
- if (to->engine == from->engine) {
- ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
- &from->submit,
- I915_FENCE_GFP);
- } else if (intel_engine_has_semaphores(to->engine) &&
- to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ if (!intel_engine_uses_guc(to->engine) &&
+ is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+ ret = await_request_submit(to, from);
+ else
ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
- } else {
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
- }
if (ret < 0)
return ret;
- if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
- ret = i915_sw_fence_await_dma_fence(&to->semaphore,
- &from->fence, 0,
- I915_FENCE_GFP);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -928,70 +1524,48 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != rq->i915->mm.unordered_timeline &&
- i915_timeline_sync_is_later(rq->timeline, fence))
+ if (fence->context &&
+ intel_timeline_sync_is_later(i915_request_timeline(rq),
+ fence))
continue;
- if (dma_fence_is_i915(fence))
+ if (dma_fence_is_i915(fence)) {
+ if (is_same_parallel_context(rq, to_request(fence)))
+ continue;
ret = i915_request_await_request(rq, to_request(fence));
- else
- ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
- I915_FENCE_GFP);
+ } else {
+ ret = i915_request_await_external(rq, fence);
+ }
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != rq->i915->mm.unordered_timeline)
- i915_timeline_sync_set(rq->timeline, fence);
+ if (fence->context)
+ intel_timeline_sync_set(i915_request_timeline(rq),
+ fence);
} while (--nchild);
return 0;
}
-int
-i915_request_await_execution(struct i915_request *rq,
- struct dma_fence *fence,
- void (*hook)(struct i915_request *rq,
- struct dma_fence *signal))
+/**
+ * i915_request_await_deps - set this request to (async) wait upon a struct
+ * i915_deps dma_fence collection
+ * @rq: request we are wishing to use
+ * @deps: The struct i915_deps containing the dependencies.
+ *
+ * Returns 0 if successful, negative error code on error.
+ */
+int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
{
- struct dma_fence **child = &fence;
- unsigned int nchild = 1;
- int ret;
+ int i, err;
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
-
- /* XXX Error for signal-on-any fence arrays */
-
- child = array->fences;
- nchild = array->num_fences;
- GEM_BUG_ON(!nchild);
+ for (i = 0; i < deps->num_deps; ++i) {
+ err = i915_request_await_dma_fence(rq, deps->fences[i]);
+ if (err)
+ return err;
}
- do {
- fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- continue;
-
- /*
- * We don't squash repeated fence dependencies here as we
- * want to run our callback in all cases.
- */
-
- if (dma_fence_is_i915(fence))
- ret = __i915_request_await_execution(rq,
- to_request(fence),
- hook,
- I915_FENCE_GFP);
- else
- ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- } while (--nchild);
-
return 0;
}
@@ -1020,68 +1594,99 @@ i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
- struct dma_fence *excl;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
int ret = 0;
- if (write) {
- struct dma_fence **shared;
- unsigned int count, i;
-
- ret = reservation_object_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ dma_resv_for_each_fence(&cursor, obj->base.resv,
+ dma_resv_usage_rw(write), fence) {
+ ret = i915_request_await_dma_fence(to, fence);
if (ret)
- return ret;
+ break;
+ }
- for (i = 0; i < count; i++) {
- ret = i915_request_await_dma_fence(to, shared[i]);
- if (ret)
- break;
+ return ret;
+}
- dma_fence_put(shared[i]);
- }
+static struct i915_request *
+__i915_request_ensure_parallel_ordering(struct i915_request *rq,
+ struct intel_timeline *timeline)
+{
+ struct i915_request *prev;
- for (; i < count; i++)
- dma_fence_put(shared[i]);
- kfree(shared);
- } else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
- }
+ GEM_BUG_ON(!is_parallel_rq(rq));
- if (excl) {
- if (ret == 0)
- ret = i915_request_await_dma_fence(to, excl);
+ prev = request_to_parent(rq)->parallel.last_rq;
+ if (prev) {
+ if (!__i915_request_is_complete(prev)) {
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
- dma_fence_put(excl);
+ if (rq->engine->sched_engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
+ }
+ i915_request_put(prev);
}
- return ret;
+ request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
+
+ return to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
}
-void i915_request_skip(struct i915_request *rq, int error)
+static struct i915_request *
+__i915_request_ensure_ordering(struct i915_request *rq,
+ struct intel_timeline *timeline)
{
- void *vaddr = rq->ring->vaddr;
- u32 head;
+ struct i915_request *prev;
- GEM_BUG_ON(!IS_ERR_VALUE((long)error));
- dma_fence_set_error(&rq->fence, error);
+ GEM_BUG_ON(is_parallel_rq(rq));
- /*
- * As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = rq->infix;
- if (rq->postfix < head) {
- memset(vaddr + head, 0, rq->ring->size - head);
- head = 0;
+ prev = to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
+
+ if (prev && !__i915_request_is_complete(prev)) {
+ bool uses_guc = intel_engine_uses_guc(rq->engine);
+ bool pow2 = is_power_of_2(READ_ONCE(prev->engine)->mask |
+ rq->engine->mask);
+ bool same_context = prev->context == rq->context;
+
+ /*
+ * The requests are supposed to be kept in order. However,
+ * we need to be wary in case the timeline->last_request
+ * is used as a barrier for external modification to this
+ * context.
+ */
+ GEM_BUG_ON(same_context &&
+ i915_seqno_passed(prev->fence.seqno,
+ rq->fence.seqno));
+
+ if ((same_context && uses_guc) || (!uses_guc && pow2))
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
+ else
+ __i915_sw_fence_await_dma_fence(&rq->submit,
+ &prev->fence,
+ &rq->dmaq);
+ if (rq->engine->sched_engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
}
- memset(vaddr + head, 0, rq->postfix - head);
+
+ return prev;
}
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
- struct i915_timeline *timeline = rq->timeline;
+ struct intel_timeline *timeline = i915_request_timeline(rq);
struct i915_request *prev;
/*
@@ -1103,25 +1708,21 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* complete (to maximise our greedy late load balancing) and this
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
+ *
+ * We do not order parallel submission requests on the timeline as each
+ * parallel submission context has its own timeline and the ordering
+ * rules for parallel requests are that they must be submitted in the
+ * order received from the execbuf IOCTL. So rather than using the
+ * timeline we store a pointer to the last request submitted in the
+ * relationship in the gem context and insert a submission fence
+ * between that request and the request passed into this function, or
+ * alternatively we use a completion fence if the gem context has a single
+ * timeline and this is the first submission of an execbuf IOCTL.
*/
- prev = rcu_dereference_protected(timeline->last_request.request, 1);
- if (prev && !i915_request_completed(prev)) {
- if (is_power_of_2(prev->engine->mask | rq->engine->mask))
- i915_sw_fence_await_sw_fence(&rq->submit,
- &prev->submit,
- &rq->submitq);
- else
- __i915_sw_fence_await_dma_fence(&rq->submit,
- &prev->fence,
- &rq->dmaq);
- if (rq->engine->schedule)
- __i915_sched_node_add_dependency(&rq->sched,
- &prev->sched,
- &rq->dep,
- 0);
- }
-
- list_add_tail(&rq->link, &timeline->requests);
+ if (likely(!is_parallel_rq(rq)))
+ prev = __i915_request_ensure_ordering(rq, timeline);
+ else
+ prev = __i915_request_ensure_parallel_ordering(rq, timeline);
/*
* Make sure that no request gazumped us - if it was allocated after
@@ -1129,7 +1730,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
- __i915_active_request_set(&timeline->last_request, rq);
return prev;
}
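/*
 * The "later than ours" reasoning above, and the ordering GEM_BUG_ON in
 * __i915_request_ensure_ordering(), depend on seqno comparisons staying
 * correct across u32 wraparound. A worked sketch of the signed-difference
 * idiom along the lines of i915_seqno_passed() (hypothetical helper name):
 */
static inline bool example_seqno_is_later(u32 seq1, u32 seq2)
{
	/* e.g. seq1 = 2 just past wraparound vs seq2 = 0xfffffffe: (s32)4 >= 0 */
	return (s32)(seq1 - seq2) >= 0;
}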
@@ -1143,11 +1743,9 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_ring *ring = rq->ring;
- struct i915_request *prev;
u32 *cs;
- GEM_TRACE("%s fence %llx:%lld\n",
- engine->name, rq->fence.context, rq->fence.seqno);
+ RQ_TRACE(rq, "\n");
/*
* To ensure that this call will not fail, space for its emissions
@@ -1156,6 +1754,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
*/
GEM_BUG_ON(rq->reserved_space > ring->space);
rq->reserved_space = 0;
+ rq->emitted_jiffies = jiffies;
/*
* Record the position of the start of the breadcrumb so that
@@ -1167,13 +1766,18 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
GEM_BUG_ON(IS_ERR(cs));
rq->postfix = intel_ring_offset(rq, cs);
- prev = __i915_request_add_to_timeline(rq);
+ return __i915_request_add_to_timeline(rq);
+}
- list_add_tail(&rq->ring_link, &ring->request_list);
- if (list_is_first(&rq->ring_link, &ring->request_list))
- list_add(&ring->active_link, &rq->i915->gt.active_rings);
- rq->emitted_jiffies = jiffies;
+void __i915_request_queue_bh(struct i915_request *rq)
+{
+ i915_sw_fence_commit(&rq->semaphore);
+ i915_sw_fence_commit(&rq->submit);
+}
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
/*
* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
@@ -1185,80 +1789,39 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- local_bh_disable();
- i915_sw_fence_commit(&rq->semaphore);
- rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule) {
- struct i915_sched_attr attr = rq->gem_context->sched;
-
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
-
- engine->schedule(rq, &attr);
- }
- rcu_read_unlock();
- i915_sw_fence_commit(&rq->submit);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+ if (attr && rq->engine->sched_engine->schedule)
+ rq->engine->sched_engine->schedule(rq, attr);
- return prev;
+ local_bh_disable();
+ __i915_request_queue_bh(rq);
+ local_bh_enable(); /* kick tasklets */
}
void i915_request_add(struct i915_request *rq)
{
- struct i915_request *prev;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
+ struct i915_gem_context *ctx;
- lockdep_assert_held(&rq->timeline->mutex);
- lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
+ __i915_request_commit(rq);
- prev = __i915_request_commit(rq);
+ /* XXX placeholder for selftests */
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ attr = ctx->sched;
+ rcu_read_unlock();
- /*
- * In typical scenarios, we do not expect the previous request on
- * the timeline to be still tracked by timeline->last_request if it
- * has been completed. If the completed request is still here, that
- * implies that request retirement is a long way behind submission,
- * suggesting that we haven't been retiring frequently enough from
- * the combination of retire-before-alloc, waiters and the background
- * retirement worker. So if the last request on this timeline was
- * already completed, do a catch up pass, flushing the retirement queue
- * up to this client. Since we have now moved the heaviest operations
- * during retirement onto secondary workers, such as freeing objects
- * or contexts, retiring a bunch of requests is mostly list management
- * (and cache misses), and so we should not be overly penalizing this
- * client by performing excess work, though we may still performing
- * work on behalf of others -- but instead we should benefit from
- * improved resource management. (Well, that's the theory at least.)
- */
- if (prev && i915_request_completed(prev))
- i915_request_retire_upto(prev);
+ __i915_request_queue(rq, &attr);
- mutex_unlock(&rq->timeline->mutex);
+ mutex_unlock(&tl->mutex);
}
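/*
 * A sketch of the typical caller flow around i915_request_add(), assuming
 * an intel_context 'ce' owned by the caller and with error handling
 * trimmed (illustrative only, not driver code). i915_request_create()
 * returns with the timeline mutex held and pinned, which is what the
 * lockdep assertions above rely on before the mutex is dropped.
 */
static int example_submit_nops(struct intel_context *ce)
{
	struct i915_request *rq;
	u32 *cs;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	i915_request_add(rq); /* commits, queues and drops the timeline mutex */
	return 0;
}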
-static unsigned long local_clock_us(unsigned int *cpu)
+static unsigned long local_clock_ns(unsigned int *cpu)
{
unsigned long t;
@@ -1275,7 +1838,7 @@ static unsigned long local_clock_us(unsigned int *cpu)
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
- t = local_clock() >> 10;
+ t = local_clock();
put_cpu();
return t;
@@ -1285,15 +1848,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
unsigned int this_cpu;
- if (time_after(local_clock_us(&this_cpu), timeout))
+ if (time_after(local_clock_ns(&this_cpu), timeout))
return true;
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request * const rq,
- int state, unsigned long timeout_us)
+static bool __i915_spin_request(struct i915_request * const rq, int state)
{
+ unsigned long timeout_ns;
unsigned int cpu;
/*
@@ -1321,15 +1884,16 @@ static bool __i915_spin_request(const struct i915_request * const rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- timeout_us += local_clock_us(&cpu);
+ timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+ timeout_ns += local_clock_ns(&cpu);
do {
- if (i915_request_completed(rq))
+ if (dma_fence_is_signaled(&rq->fence))
return true;
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout_us, cpu))
+ if (busywait_stop(timeout_ns, cpu))
break;
cpu_relax();
@@ -1347,27 +1911,31 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct request_wait *wait = container_of(cb, typeof(*wait), cb);
- wake_up_process(wait->tsk);
+ wake_up_process(fetch_and_zero(&wait->tsk));
}
/**
- * i915_request_wait - wait until execution of request has finished
+ * i915_request_wait_timeout - wait until execution of request has finished
* @rq: the request to wait upon
* @flags: how to wait
* @timeout: how long to wait in jiffies
*
- * i915_request_wait() waits for the request to be completed, for a
+ * i915_request_wait_timeout() waits for the request to be completed, for a
* maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
* unbounded wait).
*
* Returns the remaining time (in jiffies) if the request completed, which may
- * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * be zero if the request is unfinished after the timeout expires.
+ * If the timeout is 0, it will return 1 if the fence is signaled.
+ *
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
* pending before the request completes.
+ *
+ * NOTE: This function has the same wait semantics as dma-fence.
*/
-long i915_request_wait(struct i915_request *rq,
- unsigned int flags,
- long timeout)
+long i915_request_wait_timeout(struct i915_request *rq,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1377,7 +1945,7 @@ long i915_request_wait(struct i915_request *rq,
GEM_BUG_ON(timeout < 0);
if (dma_fence_is_signaled(&rq->fence))
- return timeout;
+ return timeout ?: 1;
if (!timeout)
return -ETIME;
@@ -1390,8 +1958,7 @@ long i915_request_wait(struct i915_request *rq,
* serialise wait/reset with an explicit lock, we do want
* lockdep to detect potential dependency cycles.
*/
- mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
- 0, 0, _THIS_IP_);
+ mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
/*
* Optimistic spin before touching IRQs.
@@ -1416,11 +1983,9 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (CONFIG_DRM_I915_SPIN_REQUEST &&
- __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
- dma_fence_signal(&rq->fence);
+ if (CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT &&
+ __i915_spin_request(rq, state))
goto out;
- }
/*
* This client is about to stall waiting for the GPU. In many cases
@@ -1434,20 +1999,35 @@ long i915_request_wait(struct i915_request *rq,
* but at a cost of spending more power processing the workload
* (bad for battery).
*/
- if (flags & I915_WAIT_PRIORITY) {
- if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq);
- i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
- }
+ if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
+ intel_rps_boost(rq);
wait.tsk = current;
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
goto out;
+ /*
+ * Flush the submission tasklet, but only if it may help this request.
+ *
+ * We sometimes experience some latency between the HW interrupts and
+ * tasklet execution (mostly due to ksoftirqd latency, but it can also
+ * be due to lazy CS events), so let's run the tasklet manually if there
+ * is a chance it may submit this request. If the request is not ready
+ * to run, as it is waiting for other fences to be signaled, flushing
+ * the tasklet is busy work without any advantage for this client.
+ *
+ * If the HW is being lazy, this is the last chance before we go to
+ * sleep to catch any pending events. We will check periodically in
+ * the heartbeat to flush the submission tasklets as a last resort
+ * for unhappy HW.
+ */
+ if (i915_request_is_ready(rq))
+ __intel_engine_flush_submission(rq->engine, false);
+
for (;;) {
set_current_state(state);
- if (i915_request_completed(rq))
+ if (dma_fence_is_signaled(&rq->fence))
break;
if (signal_pending_state(state, current)) {
@@ -1464,28 +2044,188 @@ long i915_request_wait(struct i915_request *rq,
}
__set_current_state(TASK_RUNNING);
- dma_fence_remove_callback(&rq->fence, &wait.cb);
+ if (READ_ONCE(wait.tsk))
+ dma_fence_remove_callback(&rq->fence, &wait.cb);
+ GEM_BUG_ON(!list_empty(&wait.cb.node));
out:
- mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
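/*
 * A sketch of a caller interpreting the dma-fence style return values
 * documented above (hypothetical helper; a one second budget is assumed
 * purely for illustration).
 */
static bool example_completed_within_a_second(struct i915_request *rq)
{
	long ret;

	ret = i915_request_wait_timeout(rq, I915_WAIT_INTERRUPTIBLE, HZ);
	if (ret < 0)		/* e.g. -EINTR, interrupted by a signal */
		return false;
	if (ret == 0)		/* the (nonzero) timeout expired first */
		return false;

	return true;		/* completed, with 'ret' jiffies to spare */
}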
-bool i915_retire_requests(struct drm_i915_private *i915)
+/**
+ * i915_request_wait - wait until execution of request has finished
+ * @rq: the request to wait upon
+ * @flags: how to wait
+ * @timeout: how long to wait in jiffies
+ *
+ * i915_request_wait() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
+ *
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero, or -ETIME if the request is unfinished after the timeout expires.
+ * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
+ *
+ * NOTE: This function behaves differently from dma-fence wait semantics for
+ * timeout = 0. It returns 0 on success, and -ETIME if not signaled.
+ */
+long i915_request_wait(struct i915_request *rq,
+ unsigned int flags,
+ long timeout)
+{
+ long ret = i915_request_wait_timeout(rq, flags, timeout);
+
+ if (!ret)
+ return -ETIME;
+
+ if (ret > 0 && !timeout)
+ return 0;
+
+ return ret;
+}
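/*
 * A sketch of the timeout == 0 "poll" case under both conventions, showing
 * the mapping the wrapper above performs (hypothetical helper, not driver
 * code).
 */
static bool example_poll_request(struct i915_request *rq)
{
	/* dma-fence style: 1 if already signaled, -ETIME otherwise */
	long ret = i915_request_wait_timeout(rq, 0, 0);

	/* legacy style: i915_request_wait(rq, 0, 0) gives 0 or -ETIME here */
	return ret > 0;
}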
+
+static int print_sched_attr(const struct i915_sched_attr *attr,
+ char *buf, int x, int len)
+{
+ if (attr->priority == I915_PRIORITY_INVALID)
+ return x;
+
+ x += snprintf(buf + x, len - x,
+ " prio=%d", attr->priority);
+
+ return x;
+}
+
+static char queue_status(const struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return 'E';
+
+ if (i915_request_is_ready(rq))
+ return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
+
+ return 'U';
+}
+
+static const char *run_status(const struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return "!";
+
+ if (__i915_request_has_started(rq))
+ return "*";
+
+ if (!i915_sw_fence_signaled(&rq->semaphore))
+ return "&";
+
+ return "";
+}
+
+static const char *fence_status(const struct i915_request *rq)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return "+";
+
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ return "-";
+
+ return "";
+}
+
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ char buf[80] = "";
+ int x = 0;
+
+ /*
+ * The prefix is used to show the queue status, for which we use
+ * the following flags:
+ *
+ * U [Unready]
+ * - initial status upon being submitted by the user
+ *
+ * - the request is not ready for execution as it is waiting
+ * for external fences
+ *
+ * R [Ready]
+ * - all fences the request was waiting on have been signaled,
+ * and the request is now ready for execution and will be
+ * in a backend queue
+ *
+ * - a ready request may still need to wait on semaphores
+ * [internal fences]
+ *
+ * V [Ready/virtual]
+ * - same as ready, but queued over multiple backends
+ *
+ * E [Executing]
+ * - the request has been transferred from the backend queue and
+ * submitted for execution on HW
+ *
+ * - a completed request may still be regarded as executing, its
+ * status may not be updated until it is retired and removed
+ * from the lists
+ */
+
+ x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+
+ drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
+ prefix, indent, " ",
+ queue_status(rq),
+ rq->fence.context, rq->fence.seqno,
+ run_status(rq),
+ fence_status(rq),
+ buf,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ name);
+}
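/*
 * A sketch of dumping a single request with the annotations described in
 * the comment above, assuming the caller holds a reference to 'rq'. The
 * printer choice, prefix and indent are illustrative, not driver policy.
 */
static void example_dump_request(struct i915_request *rq)
{
	struct drm_printer p = drm_info_printer(rq->engine->i915->drm.dev);

	/* one line: queue status, fence ctx:seqno, priority and age in ms */
	i915_request_show(&p, rq, "rq: ", 2);
}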
+
+static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{
- struct intel_ring *ring, *tmp;
+ u32 ring = ENGINE_READ(engine, RING_START);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ return ring == i915_ggtt_offset(rq->ring->vma);
+}
- list_for_each_entry_safe(ring, tmp,
- &i915->gt.active_rings, active_link) {
- intel_ring_get(ring); /* last rq holds reference! */
- ring_retire_requests(ring);
- intel_ring_put(ring);
+static bool match_ring(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine;
+ bool found;
+ int i;
+
+ if (!intel_engine_is_virtual(rq->engine))
+ return engine_match_ring(rq->engine, rq);
+
+ found = false;
+ i = 0;
+ while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
+ found = engine_match_ring(engine, rq);
+ if (found)
+ break;
}
- return !list_empty(&i915->gt.active_rings);
+ return found;
+}
+
+enum i915_request_state i915_test_request_state(struct i915_request *rq)
+{
+ if (i915_request_completed(rq))
+ return I915_REQUEST_COMPLETE;
+
+ if (!i915_request_started(rq))
+ return I915_REQUEST_PENDING;
+
+ if (match_ring(rq))
+ return I915_REQUEST_ACTIVE;
+
+ return I915_REQUEST_QUEUED;
}
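/*
 * Sketch of a consumer of i915_test_request_state(), e.g. labelling a
 * request while dumping engine state (hypothetical helper; the strings
 * are illustrative).
 */
static const char *example_request_state_name(struct i915_request *rq)
{
	switch (i915_test_request_state(rq)) {
	case I915_REQUEST_COMPLETE:
		return "complete";
	case I915_REQUEST_PENDING:
		return "pending";	/* not yet started on HW */
	case I915_REQUEST_QUEUED:
		return "queued";	/* started, but not the ring on this engine */
	case I915_REQUEST_ACTIVE:
		return "active";	/* started and its ring is programmed */
	default:
		return "unknown";
	}
}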
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -1493,53 +2233,35 @@ bool i915_retire_requests(struct drm_i915_private *i915)
#include "selftests/i915_request.c"
#endif
-static void i915_global_request_shrink(void)
+void i915_request_module_exit(void)
{
- kmem_cache_shrink(global.slab_dependencies);
- kmem_cache_shrink(global.slab_execute_cbs);
- kmem_cache_shrink(global.slab_requests);
+ kmem_cache_destroy(slab_execute_cbs);
+ kmem_cache_destroy(slab_requests);
}
-static void i915_global_request_exit(void)
+int __init i915_request_module_init(void)
{
- kmem_cache_destroy(global.slab_dependencies);
- kmem_cache_destroy(global.slab_execute_cbs);
- kmem_cache_destroy(global.slab_requests);
-}
-
-static struct i915_global_request global = { {
- .shrink = i915_global_request_shrink,
- .exit = i915_global_request_exit,
-} };
-
-int __init i915_global_request_init(void)
-{
- global.slab_requests = KMEM_CACHE(i915_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!global.slab_requests)
+ slab_requests =
+ kmem_cache_create("i915_request",
+ sizeof(struct i915_request),
+ __alignof__(struct i915_request),
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU,
+ __i915_request_ctor);
+ if (!slab_requests)
return -ENOMEM;
- global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+ slab_execute_cbs = KMEM_CACHE(execute_cb,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
- if (!global.slab_execute_cbs)
+ if (!slab_execute_cbs)
goto err_requests;
- global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!global.slab_dependencies)
- goto err_execute_cbs;
-
- i915_global_register(&global.base);
return 0;
-err_execute_cbs:
- kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
- kmem_cache_destroy(global.slab_requests);
+ kmem_cache_destroy(slab_requests);
return -ENOMEM;
}