author	Chris Wilson <chris@chris-wilson.co.uk>	2021-01-20 12:17:18 +0000
committer	Chris Wilson <chris@chris-wilson.co.uk>	2021-01-21 12:34:13 +0000
commit	a2dd2ff50cde3cbbeecec72225bb18582b291f14 (patch)
tree	76c5ec921aea2b358d5ec8363d482b484bb10997 /drivers/gpu/drm/i915/gt/intel_execlists_submission.c
parent	aba738262b91ea17e2e44024fedbfb9c63760df6 (diff)
drm/i915/gt: Skip over completed active execlists, again
Now that we are careful to always force-restore contexts upon rewinding
(where necessary), we can restore our optimisation to skip over completed
active execlists when dequeuing.

References: 35f3fd8182ba ("drm/i915/execlists: Workaround switching back to a completed context")
References: 8ab3a3812aa9 ("drm/i915/gt: Incrementally check for rewinding")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210120121718.26435-2-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_execlists_submission.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_execlists_submission.c	34
1 file changed, 16 insertions(+), 18 deletions(-)
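For readers outside the i915 codebase, here is a minimal, self-contained userspace C sketch of the idea the patch below restores. It is not part of the patch: struct request, its fields, and the scenario in main() are hypothetical stand-ins for the i915 types. The dequeue path may step over requests that have already completed, but never over a sentinel request, which must still be processed even though it reads as complete.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct i915_request. */
struct request {
	bool complete;
	bool sentinel;	/* must not be skipped even when complete */
};

/* Models the completed() helper added by this patch. */
static bool completed(const struct request *rq)
{
	if (rq->sentinel)
		return false;

	return rq->complete;
}

int main(void)
{
	struct request a = { .complete = true };
	struct request b = { .complete = true, .sentinel = true };
	struct request c = { .complete = false };
	struct request *ports[] = { &a, &b, &c, NULL };
	struct request *const *active = ports;
	struct request *last;

	/* The restored optimisation: skip completed, non-sentinel entries. */
	while ((last = *active) && completed(last))
		active++;

	/* Stops at 'b': complete, but a sentinel, so it is not skipped. */
	printf("stopped at slot %td\n", (ptrdiff_t)(active - ports));
	return 0;
}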
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 524c8b54d220..ac1be7a632d3 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1224,12 +1224,20 @@ static void set_preempt_timeout(struct intel_engine_cs *engine,
 		     active_preempt_timeout(engine, rq));
 }
 
+static bool completed(const struct i915_request *rq)
+{
+	if (i915_request_has_sentinel(rq))
+		return false;
+
+	return __i915_request_is_complete(rq);
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
-	struct i915_request *last = *execlists->active;
+	struct i915_request *last, * const *active;
 	struct virtual_engine *ve;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1266,21 +1274,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
-	 *
-	 * In theory we can skip over completed contexts that have not
-	 * yet been processed by events (as those events are in flight):
-	 *
-	 * while ((last = *active) && i915_request_completed(last))
-	 *   active++;
-	 *
-	 * However, the GPU cannot handle this as it will ultimately
-	 * find itself trying to jump back into a context it has just
-	 * completed and barf.
 	 */
+	active = execlists->active;
+	while ((last = *active) && completed(last))
+		active++;
+
 	if (last) {
-		if (__i915_request_is_complete(last)) {
-			goto check_secondary;
-		} else if (need_preempt(engine, last)) {
+		if (need_preempt(engine, last)) {
 			ENGINE_TRACE(engine,
 				     "preempting last=%llx:%lld, prio=%d, hint=%d\n",
 				     last->fence.context,
@@ -1359,9 +1359,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * we hopefully coalesce several updates into a single
 			 * submission.
 			 */
-check_secondary:
-			if (!list_is_last(&last->sched.link,
-					  &engine->active.requests)) {
+			if (active[1]) {
 				/*
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
@@ -1562,7 +1560,7 @@ done:
 	 * of ordered contexts.
 	 */
 	if (submit &&
-	    memcmp(execlists->active,
+	    memcmp(active,
 		   execlists->pending,
 		   (port - execlists->pending) * sizeof(*port))) {
 		*port = NULL;
@@ -1570,7 +1568,7 @@ done:
 			execlists_schedule_in(*port, port - execlists->pending);
 
 		WRITE_ONCE(execlists->yield, -1);
-		set_preempt_timeout(engine, *execlists->active);
+		set_preempt_timeout(engine, *active);
 		execlists_submit_ports(engine);
 	} else {
 		ring_set_paused(engine, 0);
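With active advanced past completed requests, the active[1] test above becomes a direct check for a second context still in flight, replacing the list_is_last() walk over engine->active.requests, and the final memcmp() and set_preempt_timeout() now start from the first request that is actually still executing rather than from a stale ELSP entry.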