author     Chris Wilson <chris@chris-wilson.co.uk>    2021-02-01 08:56:22 +0000
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2021-03-24 19:30:36 +0100
commit     c10e4a7960f3032b0313c4b684e0b4025b4c138d
tree       8461d400de755aadba6c9dcf67c58cc81147a003 /drivers/gpu/drm/i915/gt/intel_execlists_submission.c
parent     d712f4ce25d3935d6e124a84f7f8d640ab0da611
drm/i915: Protect against request freeing during cancellation on wedging
As soon as we mark a request as completed, it may be retired. So when
cancelling a request and marking it complete, make sure we first keep a
reference to the request.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210201085715.27435-4-chris@chris-wilson.co.uk
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
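The changed call sites in the diff below only work if i915_request_mark_eio() now returns the request with an extra reference held, or NULL when the request was already complete; the one-line form i915_request_put(i915_request_mark_eio(rq)) further implies that i915_request_put() tolerates that NULL return. Below is a minimal sketch of that implied contract, using existing i915 request helpers (i915_request_completed, i915_request_get, i915_request_set_error_once, i915_request_mark_complete); it is an illustration inferred from the callers, not necessarily the exact upstream implementation.

/*
 * Sketch only: the contract the callers in this patch rely on.
 * Take a reference *before* marking the request complete, because a
 * completed request may be retired (and freed) at any point afterwards.
 * The caller drops the returned reference with i915_request_put();
 * a NULL return means the request was already complete.
 */
struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{
        if (i915_request_completed(rq))
                return NULL;

        /* Hold a reference so the request outlives being marked complete */
        rq = i915_request_get(rq);

        i915_request_set_error_once(rq, -EIO);
        i915_request_mark_complete(rq);

        return rq;
}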
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_execlists_submission.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8b61c958c031..08a7f5b671e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2954,7 +2954,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
         /* Mark all executing requests as skipped. */
         list_for_each_entry(rq, &engine->active.requests, sched.link)
-                i915_request_mark_eio(rq);
+                i915_request_put(i915_request_mark_eio(rq));
         intel_engine_signal_breadcrumbs(engine);
         /* Flush the queued requests to the timeline list (for retiring). */
@@ -2962,8 +2962,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                 struct i915_priolist *p = to_priolist(rb);
                 priolist_for_each_request_consume(rq, rn, p) {
-                        i915_request_mark_eio(rq);
-                        __i915_request_submit(rq);
+                        if (i915_request_mark_eio(rq)) {
+                                __i915_request_submit(rq);
+                                i915_request_put(rq);
+                        }
                 }
                 rb_erase_cached(&p->node, &execlists->queue);
@@ -2972,7 +2974,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
         /* On-hold requests will be flushed to timeline upon their release */
         list_for_each_entry(rq, &engine->active.hold, sched.link)
-                i915_request_mark_eio(rq);
+                i915_request_put(i915_request_mark_eio(rq));
         /* Cancel all attached virtual engines */
         while ((rb = rb_first_cached(&execlists->virtual))) {
@@ -2985,10 +2987,11 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                 spin_lock(&ve->base.active.lock);
                 rq = fetch_and_zero(&ve->request);
                 if (rq) {
-                        i915_request_mark_eio(rq);
-
-                        rq->engine = engine;
-                        __i915_request_submit(rq);
+                        if (i915_request_mark_eio(rq)) {
+                                rq->engine = engine;
+                                __i915_request_submit(rq);
+                                i915_request_put(rq);
+                        }
                         i915_request_put(rq);
                         ve->base.execlists.queue_priority_hint = INT_MIN;