author    Chris Wilson <chris@chris-wilson.co.uk>    2017-07-16 16:28:41 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2017-09-07 19:10:28 +0100
commit    a380134e5e553661ffa06e92460465ec9a3de29d (patch)
tree      6943c8aaf4d2959f9c5da59a55e0b616cf4f0365 /tests
parent    61f8de7d04e6b69d1cb43770bbef522311ebc3cc (diff)
igt/gem_exec_schedule: Basic tests for preemption
We queue N low priority hanging batches across the engines and check that our high priority write overtakes them.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
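In condensed form, each iteration of the new preempt() subtest performs the following check (a sketch assembled from the helpers visible in the diff below — store_dword(), ctx_set_priority() and MAX_PRIO are pre-existing pieces of gem_exec_schedule.c; treat this as illustrative rather than a drop-in subtest):

    uint32_t result = gem_create(fd, 4096);
    uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
    uint32_t lo = gem_context_create(fd);
    uint32_t hi = gem_context_create(fd);

    ctx_set_priority(fd, lo, -MAX_PRIO);
    ctx_set_priority(fd, hi, MAX_PRIO);

    /* A low priority batch that spins forever, hogging the engine... */
    igt_spin_t *spin = __igt_spin_batch_new(fd, lo, ring, 0);

    /* ...must not block a high priority write to the result page. */
    store_dword(fd, hi, ring, result, 0, 1, 0, I915_GEM_DOMAIN_RENDER);

    /* Waiting on the result only succeeds if the write overtook the spinner. */
    gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
    igt_assert_eq_u32(ptr[0], 1);
    igt_assert(gem_bo_busy(fd, spin->handle)); /* preempted, still pending */

    igt_spin_batch_free(fd, spin);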
Diffstat (limited to 'tests')
-rw-r--r--tests/gem_busy.c4
-rw-r--r--tests/gem_exec_fence.c10
-rw-r--r--tests/gem_exec_reloc.c6
-rw-r--r--tests/gem_exec_schedule.c163
-rw-r--r--tests/gem_shrink.c4
-rw-r--r--tests/gem_spin_batch.c4
-rw-r--r--tests/gem_wait.c2
-rw-r--r--tests/kms_busy.c8
-rw-r--r--tests/kms_flip.c4
-rw-r--r--tests/pm_rps.c2
10 files changed, 186 insertions, 21 deletions
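Most of the churn below is mechanical: the spinner constructors grow a context argument so the new tests can attach a priority to the spinning batch. The implied new signatures (inferred from the call sites in this diff; the corresponding lib/igt_dummyload change is not shown here) are:

    igt_spin_t *igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep);
    igt_spin_t *__igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep);

Existing callers pass 0 to keep the default context,

    -	spin = igt_spin_batch_new(fd, I915_EXEC_RENDER, 0);
    +	spin = igt_spin_batch_new(fd, 0, I915_EXEC_RENDER, 0);

while the new preemption subtests pass a real context so its priority applies:

    spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);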
diff --git a/tests/gem_busy.c b/tests/gem_busy.c
index 8702dd7e..58aa5ee5 100644
--- a/tests/gem_busy.c
+++ b/tests/gem_busy.c
@@ -460,7 +460,7 @@ static bool has_semaphores(int fd)
static bool has_extended_busy_ioctl(int fd)
{
- igt_spin_t *spin = igt_spin_batch_new(fd, I915_EXEC_RENDER, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, 0, I915_EXEC_RENDER, 0);
uint32_t read, write;
__gem_busy(fd, spin->handle, &read, &write);
@@ -471,7 +471,7 @@ static bool has_extended_busy_ioctl(int fd)
static void basic(int fd, unsigned ring, unsigned flags)
{
- igt_spin_t *spin = igt_spin_batch_new(fd, ring, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, 0, ring, 0);
struct timespec tv;
int timeout;
bool busy;
diff --git a/tests/gem_exec_fence.c b/tests/gem_exec_fence.c
index 76d9a7ea..4a6c7082 100644
--- a/tests/gem_exec_fence.c
+++ b/tests/gem_exec_fence.c
@@ -438,7 +438,7 @@ static void test_parallel(int fd, unsigned int master)
/* Fill the queue with many requests so that the next one has to
* wait before it can be executed by the hardware.
*/
- spin = igt_spin_batch_new(fd, master, c.handle);
+ spin = igt_spin_batch_new(fd, 0, master, c.handle);
resubmit(fd, spin->handle, master, 16);
/* Now queue the master request and its secondaries */
@@ -954,7 +954,7 @@ static void test_syncobj_unused_fence(int fd)
struct local_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
/* sanity check our syncobj_to_sync_file interface */
igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1018,7 +1018,7 @@ static void test_syncobj_signal(int fd)
struct local_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1062,7 +1062,7 @@ static void test_syncobj_wait(int fd)
gem_quiescent_gpu(fd);
- spin = igt_spin_batch_new(fd, 0, 0);
+ spin = igt_spin_batch_new(fd, 0, 0, 0);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1132,7 +1132,7 @@ static void test_syncobj_import(int fd)
.handle = syncobj_create(fd),
};
int export[2];
- igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0);
+ igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
for (int n = 0; n < ARRAY_SIZE(export); n++)
export[n] = syncobj_export(fd, fence.handle);
diff --git a/tests/gem_exec_reloc.c b/tests/gem_exec_reloc.c
index edbc0f13..432a42a9 100644
--- a/tests/gem_exec_reloc.c
+++ b/tests/gem_exec_reloc.c
@@ -388,7 +388,7 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
}
if (flags & ACTIVE) {
- spin = igt_spin_batch_new(fd, I915_EXEC_DEFAULT, obj.handle);
+ spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -454,7 +454,7 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
}
if (flags & ACTIVE) {
- spin = igt_spin_batch_new(fd, I915_EXEC_DEFAULT, obj.handle);
+ spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -581,7 +581,7 @@ static void basic_range(int fd, unsigned flags)
execbuf.buffer_count = n + 1;
if (flags & ACTIVE) {
- spin = igt_spin_batch_new(fd, 0, obj[n].handle);
+ spin = igt_spin_batch_new(fd, 0, 0, obj[n].handle);
if (!(flags & HANG))
igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
igt_assert(gem_bo_busy(fd, obj[n].handle));
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index c151d487..8f906c9c 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -373,6 +373,157 @@ static void promotion(int fd, unsigned ring)
munmap(ptr, 4096);
}
+#define NEW_CTX 0x1
+static void preempt(int fd, unsigned ring, unsigned flags)
+{
+ uint32_t result = gem_create(fd, 4096);
+ uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
+ igt_spin_t *spin[16];
+ uint32_t ctx[2];
+
+ ctx[LO] = gem_context_create(fd);
+ ctx_set_priority(fd, ctx[LO], -MAX_PRIO);
+
+ ctx[HI] = gem_context_create(fd);
+ ctx_set_priority(fd, ctx[HI], MAX_PRIO);
+
+ for (int n = 0; n < 16; n++) {
+ if (flags & NEW_CTX) {
+ gem_context_destroy(fd, ctx[LO]);
+ ctx[LO] = gem_context_create(fd);
+ ctx_set_priority(fd, ctx[LO], -MAX_PRIO);
+ }
+ spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
+ igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
+
+ store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
+
+ gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+ igt_assert_eq_u32(ptr[0], n + 1);
+ igt_assert(gem_bo_busy(fd, spin[0]->handle));
+ }
+
+ for (int n = 0; n < 16; n++)
+ igt_spin_batch_free(fd, spin[n]);
+
+ gem_context_destroy(fd, ctx[LO]);
+ gem_context_destroy(fd, ctx[HI]);
+
+ munmap(ptr, 4096);
+ gem_close(fd, result);
+}
+
+static void preempt_other(int fd, unsigned ring)
+{
+ uint32_t result = gem_create(fd, 4096);
+ uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
+ igt_spin_t *spin[16];
+ unsigned int other;
+ unsigned int n, i;
+ uint32_t ctx[3];
+
+ /* On each engine, insert
+ * [NOISE] spinner,
+ * [LOW] write
+ *
+ * Then on our target engine do a [HIGH] write which should then
+ * prompt its dependent LOW writes in front of the spinner on
+ * each engine. The purpose of this test is to check that preemption
+ * can cross engines.
+ */
+
+ ctx[LO] = gem_context_create(fd);
+ ctx_set_priority(fd, ctx[LO], -MAX_PRIO);
+
+ ctx[NOISE] = gem_context_create(fd);
+
+ ctx[HI] = gem_context_create(fd);
+ ctx_set_priority(fd, ctx[HI], MAX_PRIO);
+
+ n = 0;
+ for_each_engine(fd, other) {
+ spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
+ store_dword(fd, ctx[LO], other,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+ n++;
+ }
+ store_dword(fd, ctx[HI], ring,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+
+ gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
+ for (i = 0; i < n; i++) {
+ igt_assert(gem_bo_busy(fd, spin[i]->handle));
+ igt_spin_batch_free(fd, spin[i]);
+ }
+
+ n++;
+ for (i = 0; i <= n; i++)
+ igt_assert_eq_u32(ptr[i], i);
+
+ gem_context_destroy(fd, ctx[LO]);
+ gem_context_destroy(fd, ctx[NOISE]);
+ gem_context_destroy(fd, ctx[HI]);
+
+ munmap(ptr, 4096);
+ gem_close(fd, result);
+}
+
+static void preempt_self(int fd, unsigned ring)
+{
+ uint32_t result = gem_create(fd, 4096);
+ uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
+ igt_spin_t *spin[16];
+ unsigned int other;
+ unsigned int n, i;
+ uint32_t ctx[3];
+
+ /* On each engine, insert
+ * [NOISE] spinner,
+ * [self/LOW] write
+ *
+ * Then on our target engine do a [self/HIGH] write which should then
+ * preempt its own lower priority task on any engine.
+ */
+
+ ctx[NOISE] = gem_context_create(fd);
+
+ ctx[HI] = gem_context_create(fd);
+
+ n = 0;
+ ctx_set_priority(fd, ctx[HI], -MAX_PRIO);
+ for_each_engine(fd, other) {
+ spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
+ store_dword(fd, ctx[HI], other,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+ n++;
+ }
+ ctx_set_priority(fd, ctx[HI], MAX_PRIO);
+ store_dword(fd, ctx[HI], ring,
+ result, (n + 1)*sizeof(uint32_t), n + 1,
+ 0, I915_GEM_DOMAIN_RENDER);
+
+ gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
+ for (i = 0; i < n; i++) {
+ igt_assert(gem_bo_busy(fd, spin[i]->handle));
+ igt_spin_batch_free(fd, spin[i]);
+ }
+
+ n++;
+ for (i = 0; i <= n; i++)
+ igt_assert_eq_u32(ptr[i], i);
+
+ gem_context_destroy(fd, ctx[NOISE]);
+ gem_context_destroy(fd, ctx[HI]);
+
+ munmap(ptr, 4096);
+ gem_close(fd, result);
+}
+
static void deep(int fd, unsigned ring)
{
#define XS 8
@@ -726,6 +877,18 @@ igt_main
igt_subtest_f("promotion-%s", e->name)
promotion(fd, e->exec_id | e->flags);
+ igt_subtest_f("preempt-%s", e->name)
+ preempt(fd, e->exec_id | e->flags, 0);
+
+ igt_subtest_f("preempt-contexts-%s", e->name)
+ preempt(fd, e->exec_id | e->flags, NEW_CTX);
+
+ igt_subtest_f("preempt-other-%s", e->name)
+ preempt_other(fd, e->exec_id | e->flags);
+
+ igt_subtest_f("preempt-self-%s", e->name)
+ preempt_self(fd, e->exec_id | e->flags);
+
igt_subtest_f("deep-%s", e->name)
deep(fd, e->exec_id | e->flags);
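As a reading aid for the two cross-engine subtests above, the expected contents of the shared result page once the HIGH write lands (with n the number of engines iterated by for_each_engine()) are:

    /* ptr[0]            == 0      never written, the page starts zeroed
     * ptr[1] .. ptr[n]  == 1 .. n one LOW (or self/LOW) write per engine
     * ptr[n + 1]        == n + 1  the HIGH write on the target ring
     */

which is exactly the sequence the final igt_assert_eq_u32() loop walks after the n++.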
diff --git a/tests/gem_shrink.c b/tests/gem_shrink.c
index 8b09fc80..06f7a301 100644
--- a/tests/gem_shrink.c
+++ b/tests/gem_shrink.c
@@ -311,9 +311,9 @@ static void reclaim(unsigned engine, int timeout)
} while (!*shared);
}
- spin = igt_spin_batch_new(fd, engine, 0);
+ spin = igt_spin_batch_new(fd, 0, engine, 0);
igt_until_timeout(timeout) {
- igt_spin_t *next = __igt_spin_batch_new(fd, engine, 0);
+ igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
igt_spin_batch_set_timeout(spin, timeout_100ms);
gem_sync(fd, spin->handle);
diff --git a/tests/gem_spin_batch.c b/tests/gem_spin_batch.c
index 941aa139..24c5d9b7 100644
--- a/tests/gem_spin_batch.c
+++ b/tests/gem_spin_batch.c
@@ -41,9 +41,9 @@ static void spin(int fd, unsigned int engine, unsigned int timeout_sec)
struct timespec itv = { };
uint64_t elapsed;
- spin = igt_spin_batch_new(fd, engine, 0);
+ spin = igt_spin_batch_new(fd, 0, engine, 0);
while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
- igt_spin_t *next = __igt_spin_batch_new(fd, engine, 0);
+ igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
igt_spin_batch_set_timeout(spin,
timeout_100ms - igt_nsec_elapsed(&itv));
diff --git a/tests/gem_wait.c b/tests/gem_wait.c
index 591690ad..cf8c8154 100644
--- a/tests/gem_wait.c
+++ b/tests/gem_wait.c
@@ -110,7 +110,7 @@ static void unplug(struct cork *c)
static void basic(int fd, unsigned engine, unsigned flags)
{
struct cork cork = plug(fd, flags);
- igt_spin_t *spin = igt_spin_batch_new(fd, engine, cork.handle);
+ igt_spin_t *spin = igt_spin_batch_new(fd, 0, engine, cork.handle);
struct drm_i915_gem_wait wait = {
flags & WRITE ? cork.handle : spin->handle
};
diff --git a/tests/kms_busy.c b/tests/kms_busy.c
index ecf0b2eb..7e31c2c8 100644
--- a/tests/kms_busy.c
+++ b/tests/kms_busy.c
@@ -91,7 +91,8 @@ static void flip_to_fb(igt_display_t *dpy, int pipe,
struct timespec tv = { 1, 0 };
struct drm_event_vblank ev;
- igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd, ring, fb->gem_handle);
+ igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
+ 0, ring, fb->gem_handle);
if (modeset) {
/*
@@ -203,7 +204,8 @@ static void test_flip(igt_display_t *dpy, unsigned ring, int pipe, bool modeset)
static void test_atomic_commit_hang(igt_display_t *dpy, igt_plane_t *primary,
struct igt_fb *busy_fb, unsigned ring)
{
- igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd, ring, busy_fb->gem_handle);
+ igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
+ 0, ring, busy_fb->gem_handle);
struct pollfd pfd = { .fd = dpy->drm_fd, .events = POLLIN };
unsigned flags = 0;
struct drm_event_vblank ev;
@@ -290,7 +292,7 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy,
igt_display_commit2(dpy, dpy->is_atomic ? COMMIT_ATOMIC : COMMIT_LEGACY);
- t = igt_spin_batch_new(dpy->drm_fd, ring, fb.gem_handle);
+ t = igt_spin_batch_new(dpy->drm_fd, 0, ring, fb.gem_handle);
do_or_die(drmModePageFlip(dpy->drm_fd, dpy->pipes[pipe].crtc_id, fb.fb_id, DRM_MODE_PAGE_FLIP_EVENT, &fb));
diff --git a/tests/kms_flip.c b/tests/kms_flip.c
index 837ebecd..5ac6438d 100644
--- a/tests/kms_flip.c
+++ b/tests/kms_flip.c
@@ -694,14 +694,14 @@ static unsigned int run_test_step(struct test_output *o)
o->current_fb_id = !o->current_fb_id;
if (o->flags & TEST_WITH_DUMMY_BCS) {
- spin_bcs = igt_spin_batch_new(drm_fd, I915_EXEC_BLT,
+ spin_bcs = igt_spin_batch_new(drm_fd, 0, I915_EXEC_BLT,
o->fb_info[o->current_fb_id].gem_handle);
igt_spin_batch_set_timeout(spin_bcs,
NSEC_PER_SEC);
}
if (o->flags & TEST_WITH_DUMMY_RCS) {
- spin_rcs = igt_spin_batch_new(drm_fd, I915_EXEC_RENDER,
+ spin_rcs = igt_spin_batch_new(drm_fd, 0, I915_EXEC_RENDER,
o->fb_info[o->current_fb_id].gem_handle);
igt_spin_batch_set_timeout(spin_rcs,
NSEC_PER_SEC);
diff --git a/tests/pm_rps.c b/tests/pm_rps.c
index e79f0ea7..5eb969a5 100644
--- a/tests/pm_rps.c
+++ b/tests/pm_rps.c
@@ -579,7 +579,7 @@ static void boost_freq(int fd, int *boost_freqs)
engine = I915_EXEC_RENDER;
if (intel_gen(lh.devid) >= 6)
engine = I915_EXEC_BLT;
- load = igt_spin_batch_new(fd, engine, 0);
+ load = igt_spin_batch_new(fd, 0, engine, 0);
/* Waiting will grant us a boost to maximum */
gem_wait(fd, load->handle, &timeout);