author    Chris Wilson <chris@chris-wilson.co.uk>    2018-02-24 18:19:26 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>    2018-04-18 14:04:25 +0100
commit    b00eb18e900577354e509968d9fcd6c83cf48ab7 (patch)
tree      3994b0f7e8c6a858f45c7bf84a0a277b2d699754
parent    83ba5b7d3bde48b383df41792fc9c955a5a23bdb (diff)
igt/gem_exec_schedule: Exercise "deep" preemption

In investigating the issue with having to force preemption within the
executing ELSP[], we want to trigger preemption between all elements of
that array. To that end, we issue a series of requests with different
priorities to fill the in-flight ELSP[] and then demand preemption into
the middle of that series. One can think of even more complicated
reordering requirements of ELSP[], trying to switch between every
possible combination of permutations. Rather than check all 2 billion
combinations, be content with a few.

v2: Add a different pattern for queued requests. Not only do we need to
inject a request into the middle of a single context with a queue of
different priority contexts, but we also want a queue of different
contexts, as they have different patterns of ELSP[] behaviour.

v3: Fixup the naming clash from copy'n'pasting

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
-rw-r--r--  tests/gem_exec_schedule.c | 188
1 file changed, 169 insertions(+), 19 deletions(-)
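
For orientation, a minimal sketch (not part of the patch) of the ladder
that the new __preempt_queue() below builds: spinners are queued at
descending priorities and the target request is injected into the middle
of that series, so completing it requires preempting only the
lower-priority half. It borrows the helpers from tests/gem_exec_schedule.c
(__noise, store_dword, MAX_ELSP_QLEN, MAX_PRIO) and elides the
result-buffer ordering checks.

static void deep_preemption_sketch(int fd, unsigned int engine,
                                   unsigned int depth)
{
        uint32_t result = gem_create(fd, 4096);
        igt_spin_t *above = NULL, *below = NULL;
        uint32_t noise = gem_context_create(fd);
        uint32_t hi = gem_context_create(fd);
        int prio = MAX_PRIO;
        unsigned int n;

        /* Spinners that outrank the target; they must not be preempted. */
        for (n = 0; n < depth; n++)
                above = __noise(fd, noise, prio--, above);

        /* The target context sits in the middle of the priority ladder. */
        gem_context_set_priority(fd, hi, prio--);

        /* Spinners below the target; these must yield for it to run. */
        for (; n < MAX_ELSP_QLEN; n++)
                below = __noise(fd, noise, prio--, below);

        store_dword(fd, hi, engine, result, 0, 1, 0, I915_GEM_DOMAIN_RENDER);

        /*
         * The higher-priority spinners must still be running; release
         * them, then wait for the target's write to land.
         */
        if (above) {
                igt_assert(gem_bo_busy(fd, above->handle));
                igt_spin_batch_free(fd, above);
        }
        gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

        /* The lower-priority spinners were preempted, not completed. */
        if (below) {
                igt_assert(gem_bo_busy(fd, below->handle));
                igt_spin_batch_free(fd, below);
        }

        gem_context_destroy(fd, noise);
        gem_context_destroy(fd, hi);
        gem_close(fd, result);
}

The actual subtests additionally interleave low-priority stores and vary
the queue shape with the CHAIN and CONTEXTS flags introduced below.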
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index d2f040ab..5d0f215b 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -373,13 +373,78 @@ static void preempt(int fd, unsigned ring, unsigned flags)
gem_close(fd, result);
}
-static void preempt_other(int fd, unsigned ring)
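+/*
+ * CHAIN: queue the dependent low-priority stores on every engine, not
+ * just the primary; CONTEXTS: use a fresh noise context for each
+ * priority step.
+ */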
+#define CHAIN 0x1
+#define CONTEXTS 0x2
+
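+/*
+ * Keep every physical engine busy from ctx at the given priority: a
+ * spinning batch is created on the first engine (or reused, if one is
+ * passed in) and resubmitted to each of the others.
+ */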
+static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
+{
+ unsigned other;
+
+ gem_context_set_priority(fd, ctx, prio);
+
+ for_each_physical_engine(fd, other) {
+ if (spin == NULL) {
+ spin = __igt_spin_batch_new(fd, ctx, other, 0);
+ } else {
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = spin->handle,
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffer_count = 1,
+ .buffers_ptr = to_user_pointer(&obj),
+ .rsvd1 = ctx,
+ .flags = other,
+ };
+ gem_execbuf(fd, &eb);
+ }
+ }
+
+ return spin;
+}
+
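+/*
+ * Queue a low-priority store to the shared result (and, with CHAIN,
+ * one on every engine), then issue a high-priority store from the
+ * target engine; the write hazard forces the earlier stores to be
+ * promoted ahead of the noise for the high-priority write to complete.
+ */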
+static void __preempt_other(int fd,
+ uint32_t *ctx,
+ unsigned int target, unsigned int primary,
+ unsigned flags)
{
uint32_t result = gem_create(fd, 4096);
uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
- igt_spin_t *spin[MAX_ENGINES];
- unsigned int other;
- unsigned int n, i;
+ unsigned int n, i, other;
+
+ n = 0;
+ store_dword(fd, ctx[LO], primary,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+ n++;
+
+ if (flags & CHAIN) {
+ for_each_physical_engine(fd, other) {
+ store_dword(fd, ctx[LO], other,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+ n++;
+ }
+ }
+
+ store_dword(fd, ctx[HI], target,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+
+ igt_debugfs_dump(fd, "i915_engine_info");
+ gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
+ n++;
+ for (i = 0; i <= n; i++)
+ igt_assert_eq_u32(ptr[i], i);
+
+ munmap(ptr, 4096);
+ gem_close(fd, result);
+}
+
+static void preempt_other(int fd, unsigned ring, unsigned int flags)
+{
+ unsigned int primary;
+ igt_spin_t *spin = NULL;
uint32_t ctx[3];
/* On each engine, insert
@@ -396,36 +461,97 @@ static void preempt_other(int fd, unsigned ring)
gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
ctx[NOISE] = gem_context_create(fd);
+ spin = __noise(fd, ctx[NOISE], 0, NULL);
ctx[HI] = gem_context_create(fd);
gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
+ for_each_physical_engine(fd, primary) {
+ igt_debug("Primary engine: %s\n", e__->name);
+ __preempt_other(fd, ctx, ring, primary, flags);
+ }
+
+ igt_assert(gem_bo_busy(fd, spin->handle));
+ igt_spin_batch_free(fd, spin);
+
+ gem_context_destroy(fd, ctx[LO]);
+ gem_context_destroy(fd, ctx[NOISE]);
+ gem_context_destroy(fd, ctx[HI]);
+}
+
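+/*
+ * Build a ladder of spinners at descending priorities with the
+ * high-priority context in the middle: the spinners above it must
+ * still be running when its store lands, while the spinners below it
+ * must have been preempted.
+ */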
+static void __preempt_queue(int fd,
+ unsigned target, unsigned primary,
+ unsigned depth, unsigned flags)
+{
+ uint32_t result = gem_create(fd, 4096);
+ uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
+ igt_spin_t *above = NULL, *below = NULL;
+ unsigned int other, n, i;
+ int prio = MAX_PRIO;
+ uint32_t ctx[3] = {
+ gem_context_create(fd),
+ gem_context_create(fd),
+ gem_context_create(fd),
+ };
+
+ for (n = 0; n < depth; n++) {
+ if (flags & CONTEXTS) {
+ gem_context_destroy(fd, ctx[NOISE]);
+ ctx[NOISE] = gem_context_create(fd);
+ }
+ above = __noise(fd, ctx[NOISE], prio--, above);
+ }
+
+ gem_context_set_priority(fd, ctx[HI], prio--);
+
+ for (; n < MAX_ELSP_QLEN; n++) {
+ if (flags & CONTEXTS) {
+ gem_context_destroy(fd, ctx[NOISE]);
+ ctx[NOISE] = gem_context_create(fd);
+ }
+ below = __noise(fd, ctx[NOISE], prio--, below);
+ }
+
+ gem_context_set_priority(fd, ctx[LO], prio--);
+
n = 0;
- for_each_physical_engine(fd, other) {
- igt_assert(n < ARRAY_SIZE(spin));
+ store_dword(fd, ctx[LO], primary,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+ n++;
- spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
- store_dword(fd, ctx[LO], other,
- result, (n + 1)*sizeof(uint32_t), n + 1,
- 0, I915_GEM_DOMAIN_RENDER);
- n++;
+ if (flags & CHAIN) {
+ for_each_physical_engine(fd, other) {
+ store_dword(fd, ctx[LO], other,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+ n++;
+ }
}
- store_dword(fd, ctx[HI], ring,
+
+ store_dword(fd, ctx[HI], target,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
igt_debugfs_dump(fd, "i915_engine_info");
- gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
- for (i = 0; i < n; i++) {
- igt_assert(gem_bo_busy(fd, spin[i]->handle));
- igt_spin_batch_free(fd, spin[i]);
+ if (above) {
+ igt_assert(gem_bo_busy(fd, above->handle));
+ igt_spin_batch_free(fd, above);
}
+ gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
n++;
for (i = 0; i <= n; i++)
igt_assert_eq_u32(ptr[i], i);
+ if (below) {
+ igt_assert(gem_bo_busy(fd, below->handle));
+ igt_spin_batch_free(fd, below);
+ }
+
gem_context_destroy(fd, ctx[LO]);
gem_context_destroy(fd, ctx[NOISE]);
gem_context_destroy(fd, ctx[HI]);
@@ -434,6 +560,16 @@ static void preempt_other(int fd, unsigned ring)
gem_close(fd, result);
}
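+/* Exercise the deep-preemption ladder on every primary engine, for every depth up to MAX_ELSP_QLEN. */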
+static void preempt_queue(int fd, unsigned ring, unsigned int flags)
+{
+ unsigned other;
+
+ for_each_physical_engine(fd, other) {
+ for (unsigned depth = 0; depth <= MAX_ELSP_QLEN; depth++)
+ __preempt_queue(fd, ring, other, depth, flags);
+ }
+}
+
static void preempt_self(int fd, unsigned ring)
{
uint32_t result = gem_create(fd, 4096);
@@ -981,12 +1117,26 @@ igt_main
igt_subtest_f("preempt-contexts-%s", e->name)
preempt(fd, e->exec_id | e->flags, NEW_CTX);
- igt_subtest_f("preempt-other-%s", e->name)
- preempt_other(fd, e->exec_id | e->flags);
-
igt_subtest_f("preempt-self-%s", e->name)
preempt_self(fd, e->exec_id | e->flags);
+ igt_subtest_f("preempt-other-%s", e->name)
+ preempt_other(fd, e->exec_id | e->flags, 0);
+
+ igt_subtest_f("preempt-other-chain-%s", e->name)
+ preempt_other(fd, e->exec_id | e->flags, CHAIN);
+
+ igt_subtest_f("preempt-queue-%s", e->name)
+ preempt_queue(fd, e->exec_id | e->flags, 0);
+
+ igt_subtest_f("preempt-queue-chain-%s", e->name)
+ preempt_queue(fd, e->exec_id | e->flags, CHAIN);
+
+ igt_subtest_f("preempt-queue-contexts-%s", e->name)
+ preempt_queue(fd, e->exec_id | e->flags, CONTEXTS);
+
+ igt_subtest_f("preempt-queue-contexts-chain-%s", e->name)
+ preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);
+
igt_subtest_group {
igt_hang_t hang;