author     Matthew Brost <matthew.brost@intel.com>    2021-06-17 18:06:36 -0700
committer  Matt Roper <matthew.d.roper@intel.com>     2021-06-18 15:14:09 -0700
commit     71ed60112d5d3bc90df704c1db2b655a9f4a7b66 (patch)
tree       a731d2be22fb920918ae2812d089b46baa0956bf /drivers/gpu/drm/i915/gt/intel_execlists_submission.c
parent     3f623e06cd56573d57660ce02d63aaf0a09d3fbb (diff)
drm/i915: Add kick_backend function to i915_sched_engine
Not all back-ends require a kick after a scheduling update, so make the
kick a call-back function that the back-end can opt-in to. Also move the
current kick function from the scheduler to the execlists file as it is
specific to that back-end.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210618010638.98941-7-matthew.brost@intel.com
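The change described above is an opt-in callback on i915_sched_engine: the scheduler core only kicks a back-end that registered a kick_backend hook, and the execlists back-end registers kick_execlists (shown in the diff below). The following is a minimal standalone sketch of that opt-in pattern, not kernel code: apart from the kick_backend/kick_execlists names taken from the patch, the types and the scheduler-side call site are simplified, hypothetical stand-ins for illustration.

	/* Sketch of the opt-in "kick" callback pattern from this patch.
	 * Simplified stand-in types; not the real i915 structures. */
	#include <stddef.h>
	#include <stdio.h>

	struct sched_engine;

	struct request {
		int priority;
		struct sched_engine *sched_engine;
	};

	struct sched_engine {
		int queue_priority_hint;
		/* Optional: only back-ends that need a kick after a
		 * scheduling update set this. */
		void (*kick_backend)(const struct request *rq, int prio);
	};

	/* An execlists-style back-end opts in with its own kick. */
	static void kick_execlists_sketch(const struct request *rq, int prio)
	{
		printf("kick: request prio %d bumped to %d\n",
		       rq->priority, prio);
	}

	/* Scheduler core: after a priority update, kick the back-end
	 * only if it registered a callback. */
	static void schedule_request(struct request *rq, int prio)
	{
		rq->priority = prio;
		if (rq->sched_engine->kick_backend)
			rq->sched_engine->kick_backend(rq, prio);
	}

	int main(void)
	{
		struct sched_engine engine = {
			.queue_priority_hint = 0,
			.kick_backend = kick_execlists_sketch,	/* opt in */
		};
		struct request rq = { .priority = 0, .sched_engine = &engine };

		schedule_request(&rq, 2);	/* back-end is kicked */

		engine.kick_backend = NULL;	/* a back-end that opts out */
		schedule_request(&rq, 3);	/* no kick */
		return 0;
	}
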
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_execlists_submission.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c  52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8a3d4014fd2c..9487d9e0be62 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3116,10 +3116,61 @@ static bool can_preempt(struct intel_engine_cs *engine)
return engine->class != RENDER_CLASS;
}
+static void kick_execlists(const struct i915_request *rq, int prio)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_sched_engine *sched_engine = engine->sched_engine;
+ const struct i915_request *inflight;
+
+ /*
+ * We only need to kick the tasklet once for the high priority
+ * new context we add into the queue.
+ */
+ if (prio <= sched_engine->queue_priority_hint)
+ return;
+
+ rcu_read_lock();
+
+ /* Nothing currently active? We're overdue for a submission! */
+ inflight = execlists_active(&engine->execlists);
+ if (!inflight)
+ goto unlock;
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves.
+ */
+ if (inflight->context == rq->context)
+ goto unlock;
+
+ ENGINE_TRACE(engine,
+ "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+ prio,
+ rq->fence.context, rq->fence.seqno,
+ inflight->fence.context, inflight->fence.seqno,
+ inflight->sched.attr.priority);
+
+ sched_engine->queue_priority_hint = prio;
+
+ /*
+ * Allow preemption of low -> normal -> high, but we do
+ * not allow low priority tasks to preempt other low priority
+ * tasks under the impression that latency for low priority
+ * tasks does not matter (as much as background throughput),
+ * so kiss.
+ */
+ if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+ rcu_read_unlock();
+}
+
static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->sched_engine->schedule = i915_schedule;
+ engine->sched_engine->kick_backend = kick_execlists;
engine->execlists.tasklet.callback = execlists_submission_tasklet;
}
@@ -3702,6 +3753,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
ve->base.request_alloc = execlists_request_alloc;
ve->base.sched_engine->schedule = i915_schedule;
+ ve->base.sched_engine->kick_backend = kick_execlists;
ve->base.submit_request = virtual_submit_request;
ve->base.bond_execute = virtual_bond_execute;