author	Antonio Argenziano <antonio.argenziano@intel.com>	2018-06-11 14:43:43 -0700
committer	Antonio Argenziano <antonio.argenziano@intel.com>	2018-06-12 09:01:36 -0700
commit	e94ce40798e35d2e3c4494f50b617908066bbf8b (patch)
tree	400b5b4e342d0e9ca1b4dd1a67416a4afd42a488
parent	95bfb2902473b9f4e644c3eb831fdf110d87ed4f (diff)
tests/gem_exec_basic: Wait on outstanding work before returning from test
All subtests send a workload to the engines and then return without waiting on it. While this is not a problem for the tests themselves, since they only target the API, it makes the hang detector pointless: the driver would only declare a hang long after the test has completed.

v2:
- Use common functions to create/terminate a batch. (Chris)

Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--	tests/gem_exec_basic.c	39
1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/tests/gem_exec_basic.c b/tests/gem_exec_basic.c
index 2f057ef0..dcb83864 100644
--- a/tests/gem_exec_basic.c
+++ b/tests/gem_exec_basic.c
@@ -25,37 +25,52 @@
IGT_TEST_DESCRIPTION("Basic sanity check of execbuf-ioctl rings.");
+static uint32_t batch_create(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ uint32_t handle;
+
+ handle = gem_create(fd, 4096);
+ gem_write(fd, handle, 0, &bbe, sizeof(bbe));
+
+ return handle;
+}
+
+static void batch_fini(int fd, uint32_t handle)
+{
+ gem_sync(fd, handle); /* catch any GPU hang */
+ gem_close(fd, handle);
+}
+
static void noop(int fd, unsigned ring)
{
- uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 exec;
gem_require_ring(fd, ring);
memset(&exec, 0, sizeof(exec));
- exec.handle = gem_create(fd, 4096);
- gem_write(fd, exec.handle, 0, &bbe, sizeof(bbe));
+
+ exec.handle = batch_create(fd);
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&exec);
execbuf.buffer_count = 1;
execbuf.flags = ring;
gem_execbuf(fd, &execbuf);
- gem_close(fd, exec.handle);
+
+ batch_fini(fd, exec.handle);
}
static void readonly(int fd, unsigned ring)
{
- uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 *execbuf;
struct drm_i915_gem_exec_object2 exec;
gem_require_ring(fd, ring);
memset(&exec, 0, sizeof(exec));
- exec.handle = gem_create(fd, 4096);
- gem_write(fd, exec.handle, 0, &bbe, sizeof(bbe));
+ exec.handle = batch_create(fd);
execbuf = mmap(NULL, 4096, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
igt_assert(execbuf != NULL);
@@ -66,13 +81,14 @@ static void readonly(int fd, unsigned ring)
igt_assert(mprotect(execbuf, 4096, PROT_READ) == 0);
gem_execbuf(fd, execbuf);
+
munmap(execbuf, 4096);
- gem_close(fd, exec.handle);
+
+ batch_fini(fd, exec.handle);
}
static void gtt(int fd, unsigned ring)
{
- uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 *execbuf;
struct drm_i915_gem_exec_object2 *exec;
uint32_t handle;
@@ -86,16 +102,15 @@ static void gtt(int fd, unsigned ring)
exec = (struct drm_i915_gem_exec_object2 *)(execbuf + 1);
gem_close(fd, handle);
- exec->handle = gem_create(fd, 4096);
- gem_write(fd, exec->handle, 0, &bbe, sizeof(bbe));
+ exec->handle = batch_create(fd);
execbuf->buffers_ptr = to_user_pointer(exec);
execbuf->buffer_count = 1;
execbuf->flags = ring;
gem_execbuf(fd, execbuf);
- gem_close(fd, exec->handle);
+ batch_fini(fd, exec->handle);
munmap(execbuf, 4096);
}
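
For reference, below is a minimal sketch (not part of the patch) of the create/execute/sync/close pattern the commit adopts. It only uses the IGT helpers already visible in the diff (gem_create, gem_write, gem_execbuf, gem_sync, gem_close, to_user_pointer); the function name submit_and_wait is illustrative, and engine selection is assumed to be passed in via execbuf.flags exactly as in the noop() subtest.

#include "igt.h"

/* Sketch of the pattern introduced by this patch: submit a trivial
 * batch and then wait on it, so that any GPU hang is reported while
 * the test is still running rather than after it has returned.
 */
static void submit_and_wait(int fd, unsigned ring)
{
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_exec_object2 exec;
	struct drm_i915_gem_execbuffer2 execbuf;

	/* batch_create(): a 4K object containing just MI_BATCH_BUFFER_END */
	memset(&exec, 0, sizeof(exec));
	exec.handle = gem_create(fd, 4096);
	gem_write(fd, exec.handle, 0, &bbe, sizeof(bbe));

	/* point the execbuffer at our single object and pick the engine */
	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(&exec);
	execbuf.buffer_count = 1;
	execbuf.flags = ring;
	gem_execbuf(fd, &execbuf);

	/* batch_fini(): wait for completion (catches a hang), then close */
	gem_sync(fd, exec.handle);
	gem_close(fd, exec.handle);
}

With this in place the hang detector has a chance to fire before the subtest exits, since gem_sync() blocks until the last request on the batch object has completed (or the driver declares a hang).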