author     Chris Wilson <chris@chris-wilson.co.uk>  2020-05-14 12:18:09 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2020-05-19 14:19:22 +0100
commit     2a677795335f0ed0a2ddb361b1bc0f81c2565f8e (patch)
tree       e82db907b92e410cc1ff168b34f5b3c6e2518e72 /tests
parent     f3072b262d01c1d2837b73c658faba77686dd154 (diff)
i915/gem_exec_balancer: Force timeslicing of the virtual request
Investigate the impact of timeslicing on the virtual request, with both independent and dependent workloads.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/i915/gem_exec_balancer.c | 83
1 file changed, 83 insertions, 0 deletions
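In brief, the new subtest submits a spinner to the virtual engine (engine 0 of the balanced set) plus one spinner per physical engine, then checks that the virtual request still completes. The only difference between the two variants is the IGT_SPIN_FENCE_IN flag, which turns the virtual spinner's out-fence from an ignored argument into an input dependency for the physical-engine spinners. A condensed sketch of that distinction, not part of the patch itself; i915 is an open DRM fd and ctx is assumed to be a load-balanced context such as the one created by this test's load_balancer_create() helper:

#include "igt.h"
#include "igt_dummyload.h"

/* Sketch only: the two load variants exercised by __sliced() below. */
static void sliced_sketch(int i915, uint32_t ctx)
{
	igt_spin_t *virtual, *load;

	/* Spinner on the virtual engine (engine 0 of the balanced set);
	 * FENCE_OUT exports its completion as a sync fence, POLL_RUN lets
	 * us busywait until it is actually executing. */
	virtual = igt_spin_new(i915, ctx, .engine = 0,
			       .flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_POLL_RUN);

	/* Independent load: no fence flag, so .fence is ignored and the
	 * spinner on physical engine 1 runs regardless of the virtual
	 * request. */
	load = __igt_spin_new(i915, ctx, .engine = 1,
			      .fence = virtual->out_fence, .flags = 0);
	igt_spin_free(i915, load);

	/* Dependent load: IGT_SPIN_FENCE_IN turns the same fence into an
	 * input dependency, so this spinner cannot start until the virtual
	 * request completes; the scheduler must not let it starve the
	 * virtual request. */
	load = __igt_spin_new(i915, ctx, .engine = 1,
			      .fence = virtual->out_fence,
			      .flags = IGT_SPIN_FENCE_IN);
	igt_spin_free(i915, load);

	igt_spin_end(virtual);
	igt_spin_free(i915, virtual);
}

In the patch below, a single flags argument selects which of the two variants __sliced() runs.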
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index d4944e3f..2ff93437 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -1531,6 +1531,86 @@ static void full(int i915, unsigned int flags)
gem_quiescent_gpu(i915);
}
+static void __sliced(int i915,
+ uint32_t ctx, unsigned int count,
+ unsigned int flags)
+{
+ igt_spin_t *load[count];
+ igt_spin_t *virtual;
+
+ virtual = igt_spin_new(i915, ctx, .engine = 0,
+ .flags = (IGT_SPIN_FENCE_OUT |
+ IGT_SPIN_POLL_RUN));
+ for (int i = 0; i < count; i++)
+ load[i] = __igt_spin_new(i915, ctx,
+ .engine = i + 1,
+ .fence = virtual->out_fence,
+ .flags = flags);
+
+ /* Wait long enough for the virtual timeslice [1 ms] to expire */
+ igt_spin_busywait_until_started(virtual);
+ usleep(50 * 1000); /* 50ms */
+
+ igt_spin_end(virtual);
+ igt_assert_eq(sync_fence_wait(virtual->out_fence, 1000), 0);
+ igt_assert_eq(sync_fence_status(virtual->out_fence), 1);
+
+ for (int i = 0; i < count; i++)
+ igt_spin_free(i915, load[i]);
+ igt_spin_free(i915, virtual);
+}
+
+static void sliced(int i915)
+{
+ /*
+ * Let's investigate what happens when the virtual request is
+ * timesliced away.
+ *
+ * If the engine is busy with independent work, we want the virtual
+ * request to hop over to an idle engine (within its balancing set).
+ * However, if the work is dependent upon the virtual request,
+ * we most certainly do not want to reschedule that work ahead of
+ * the virtual request. [If we did, we should still have the saving
+ * grace of being able to move the virtual request to another engine
+ * and so run both in parallel.] If we do neither, and get stuck
+ * on the dependent work and never run the virtual request, we hang.
+ */
+
+ igt_require(gem_scheduler_has_preemption(i915));
+ igt_require(gem_scheduler_has_semaphores(i915));
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ if (count < 2) {
+ free(ci);
+ continue;
+ }
+
+ igt_fork(child, count) {
+ uint32_t ctx = load_balancer_create(i915, ci, count);
+
+ /* Independent load */
+ __sliced(i915, ctx, count, 0);
+
+ /* Dependent load */
+ __sliced(i915, ctx, count, IGT_SPIN_FENCE_IN);
+
+ gem_context_destroy(i915, ctx);
+ }
+ igt_waitchildren();
+
+ free(ci);
+ }
+
+ gem_quiescent_gpu(i915);
+}
+
static void nop(int i915)
{
struct drm_i915_gem_exec_object2 batch = {
@@ -2014,6 +2094,9 @@ igt_main
igt_subtest("semaphore")
semaphore(i915);
+ igt_subtest("sliced")
+ sliced(i915);
+
igt_subtest("smoke")
smoketest(i915, 20);
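A closing note on the 1ms figure in __sliced(): that quantum comes from the kernel's CONFIG_DRM_I915_TIMESLICE_DURATION default (1ms), so the 50ms sleep comfortably spans many timeslices. On kernels that expose the per-engine sysfs controls, the active value can be read back at runtime; a rough sketch, assuming such a kernel and using a hypothetical helper name:

#include <stdio.h>

/* Hypothetical helper, not part of this patch: read an engine's current
 * timeslice quantum, assuming the kernel exposes
 * /sys/class/drm/card0/engine/<name>/timeslice_duration_ms. Falls back to
 * the 1ms default assumed by __sliced(). */
static unsigned int read_timeslice_ms(const char *engine) /* e.g. "rcs0" */
{
	char path[128];
	unsigned int ms = 1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/drm/card0/engine/%s/timeslice_duration_ms",
		 engine);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &ms) != 1)
			ms = 1;
		fclose(f);
	}
	return ms;
}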