author     Christian König <christian.koenig@amd.com>   2021-10-21 12:35:39 +0200
committer  Christian König <christian.koenig@amd.com>   2021-11-17 14:26:24 +0100
commit     2cbb8d4d67700b4ea7373a307676fe312251b257 (patch)
tree       2e714b255b7798b9fc45f6c62c20a92dbc57147b /drivers/gpu/drm/i915/gem/i915_gem_wait.c
parent     7e2e69ed4678a4c660c4727e625a396b06c0c372 (diff)
drm/i915: use new iterator in i915_gem_object_wait_reservation
Simplifying the code a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
[mlankhorst: Handle timeout = 0 correctly, use new i915_request_wait_timeout.]
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20211116102431.198905-7-christian.koenig@amd.com
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_wait.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c  |  63
1 file changed, 19 insertions(+), 44 deletions(-)
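For readers who have not yet seen the iterator that replaces the open-coded dma_resv_get_fences() loop below, here is a minimal sketch of the pattern against the dma-resv API as it stands at this point in the tree (dma_resv_iter_begin() still takes a plain bool meaning "include all shared fences"). The helper name and the direct dma_fence_wait_timeout() call are illustrative only; the patch itself routes each fence through i915_gem_object_wait_fence() instead.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Illustrative helper: wait on every fence tracked in a reservation
 * object without taking the dma_resv lock. */
static long example_wait_resv(struct dma_resv *resv, bool wait_all,
			      long timeout)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long ret = timeout ?: 1;	/* a 0 timeout means "poll"; with no fences, still report success */

	/* The unlocked iterator takes its own fence references and restarts
	 * automatically if the fence list changes while we sleep. */
	dma_resv_iter_begin(&cursor, resv, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait_timeout(fence, true, timeout);
		if (ret <= 0)
			break;			/* error or timed out */

		if (timeout)
			timeout = ret;		/* carry the remaining time to the next fence */
	}
	dma_resv_iter_end(&cursor);

	return ret;
}

The explicit begin/end pair is what lets the loop drop and re-acquire fence references safely, which is also why the patch can delete the manual dma_fence_put()/kfree() bookkeeping.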
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 34e46134326b..f11325484110 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -25,7 +25,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 		return timeout;
 
 	if (dma_fence_is_i915(fence))
-		return i915_request_wait(to_request(fence), flags, timeout);
+		return i915_request_wait_timeout(to_request(fence), flags, timeout);
 
 	return dma_fence_wait_timeout(fence,
 				      flags & I915_WAIT_INTERRUPTIBLE,
@@ -37,58 +37,29 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 				 unsigned int flags,
 				 long timeout)
 {
-	struct dma_fence *excl;
-	bool prune_fences = false;
-
-	if (flags & I915_WAIT_ALL) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-		int ret;
-
-		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			timeout = i915_gem_object_wait_fence(shared[i],
-							     flags, timeout);
-			if (timeout < 0)
-				break;
-
-			dma_fence_put(shared[i]);
-		}
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+	long ret = timeout ?: 1;
 
-		for (; i < count; i++)
-			dma_fence_put(shared[i]);
-		kfree(shared);
+	dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		ret = i915_gem_object_wait_fence(fence, flags, timeout);
+		if (ret <= 0)
+			break;
 
-		/*
-		 * If both shared fences and an exclusive fence exist,
-		 * then by construction the shared fences must be later
-		 * than the exclusive fence. If we successfully wait for
-		 * all the shared fences, we know that the exclusive fence
-		 * must all be signaled. If all the shared fences are
-		 * signaled, we can prune the array and recover the
-		 * floating references on the fences/requests.
-		 */
-		prune_fences = count && timeout >= 0;
-	} else {
-		excl = dma_resv_get_excl_unlocked(resv);
+		if (timeout)
+			timeout = ret;
 	}
-
-	if (excl && timeout >= 0)
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
-
-	dma_fence_put(excl);
+	dma_resv_iter_end(&cursor);
 
 	/*
 	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled.
 	 */
-	if (prune_fences)
+	if (timeout > 0)
 		dma_resv_prune(resv);
 
-	return timeout;
+	return ret;
 }
 
 static void fence_set_priority(struct dma_fence *fence,
@@ -177,7 +148,11 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
 	timeout = i915_gem_object_wait_reservation(obj->base.resv,
 						   flags, timeout);
-	return timeout < 0 ? timeout : 0;
+
+	if (timeout < 0)
+		return timeout;
+
+	return !timeout ? -ETIME : 0;
 }
 
 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
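Before this change i915_gem_object_wait() squashed a timed-out wait and a completed wait into the same 0 return; with the hunk above, 0 means every selected fence signaled within the timeout, -ETIME means the timeout expired with work still pending, and other negative values (for example -ERESTARTSYS from an interruptible wait) pass through unchanged. A sketch of how a caller might consume that contract follows; the function name, flag combination and two-second bound are invented for illustration, only the i915_gem_object_wait() call itself comes from this file.

/* Hypothetical caller inside the i915 driver, where the usual gem/i915
 * headers are already available. */
static int example_sync_object(struct drm_i915_gem_object *obj)
{
	int err;

	/* Interruptible wait on all fences in the object's reservation,
	 * bounded to roughly two seconds (the timeout is in jiffies). */
	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   msecs_to_jiffies(2000));
	if (err == -ETIME)		/* fences still pending at the deadline */
		return -EBUSY;
	if (err)			/* e.g. -ERESTARTSYS, propagate as-is */
		return err;

	return 0;			/* everything signaled in time */
}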