author     Chris Wilson <chris@chris-wilson.co.uk>    2019-11-20 12:54:33 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>    2019-11-20 15:59:23 +0000
commit     07779a76ee1f93f930cf697b22be73d16e14f50c (patch)
tree       06ccd469c0fa100c2dda52a0ad99c0bce5ebe376 /drivers/gpu/drm/i915/i915_active.c
parent     8a126392b7d7b95cffbf164b0f85974df7d70852 (diff)
drm/i915: Mark up the calling context for intel_wakeref_put()
Previously, we assumed we could use mutex_trylock() within an atomic context, falling back to a worker if contended. However, such trickery is illegal inside interrupt context, and so we need to always use a worker under such circumstances. As we normally are in process context, we can typically use a plain mutex, and only defer to a work when we know we are being called from an interrupt path.

Fixes: 51fbd8de87dc ("drm/i915/pmu: Atomically acquire the gt_pm wakeref")
References: a0855d24fc22d ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191120125433.3767149-1-chris@chris-wilson.co.uk
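The shape of the fix the message describes can be sketched as follows. This is a minimal illustration of the pattern, not the actual intel_wakeref code; the struct, flag, and function names (wakeref, WAKEREF_PUT_ASYNC, wakeref_put(), ...) are hypothetical.

    /*
     * Minimal sketch of the deferral pattern: callers that know they are
     * in an interrupt path pass a flag, and the final release is then
     * always punted to a worker rather than attempting mutex_trylock().
     */
    #include <linux/atomic.h>
    #include <linux/bits.h>
    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct wakeref {
            atomic_t count;
            struct mutex mutex;
            struct work_struct work; /* runs the release in process context */
    };

    static void wakeref_release_locked(struct wakeref *wf)
    {
            /* ... drop the hardware wakeref ... */
            mutex_unlock(&wf->mutex);
    }

    static void wakeref_put_work(struct work_struct *work)
    {
            struct wakeref *wf = container_of(work, struct wakeref, work);

            mutex_lock(&wf->mutex);
            wakeref_release_locked(wf);
    }

    /* Hypothetical flag set by callers on an interrupt path. */
    #define WAKEREF_PUT_ASYNC BIT(0)

    static void wakeref_put(struct wakeref *wf, unsigned long flags)
    {
            if (!atomic_dec_and_test(&wf->count))
                    return;

            /*
             * From interrupt context we must not touch the mutex at all,
             * not even via mutex_trylock(); always punt to the worker.
             * In process context, take the mutex directly, falling back
             * to the worker only if it is contended.
             */
            if (flags & WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex))
                    schedule_work(&wf->work);
            else
                    wakeref_release_locked(wf);
    }

The key point is the short-circuit in wakeref_put(): when the caller flags an interrupt context, the mutex is never touched and the release always happens in process context via the worker.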
Diffstat (limited to 'drivers/gpu/drm/i915/i915_active.c')
-rw-r--r--    drivers/gpu/drm/i915/i915_active.c    5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 5448f37c8102..dca15ace88f6 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -672,12 +672,13 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 	 * populated by i915_request_add_active_barriers() to point to the
 	 * request that will eventually release them.
 	 */
-	spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
 	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
 		struct active_node *node = barrier_from_ll(pos);
 		struct intel_engine_cs *engine = barrier_to_engine(node);
 		struct rb_node **p, *parent;
 
+		spin_lock_irqsave_nested(&ref->tree_lock, flags,
+					 SINGLE_DEPTH_NESTING);
 		parent = NULL;
 		p = &ref->tree.rb_node;
 		while (*p) {
@@ -693,12 +694,12 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		}
 		rb_link_node(&node->node, parent, p);
 		rb_insert_color(&node->node, &ref->tree);
+		spin_unlock_irqrestore(&ref->tree_lock, flags);
 
 		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
 		intel_engine_pm_put(engine);
 	}
-	spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
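The hunks above narrow the scope of ref->tree_lock from the whole loop to just the rbtree insertion, so that intel_engine_pm_put() at the end of each iteration runs with the spinlock dropped; with this patch that put may take a mutex or schedule a worker, neither of which is permitted under an irqsave spinlock. A distilled sketch of the resulting loop body (declarations and the tree insertion elided):

    /* Per-iteration locking after the patch: the critical section covers
     * only the tree insertion, never the engine-pm put. */
    llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
            struct active_node *node = barrier_from_ll(pos);
            struct intel_engine_cs *engine = barrier_to_engine(node);

            spin_lock_irqsave_nested(&ref->tree_lock, flags,
                                     SINGLE_DEPTH_NESTING);
            /* ... rb_link_node() + rb_insert_color() into ref->tree ... */
            spin_unlock_irqrestore(&ref->tree_lock, flags);

            /* Lock dropped: the put may now sleep or defer to a worker. */
            llist_add(barrier_to_ll(node), &engine->barrier_tasks);
            intel_engine_pm_put(engine);
    }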