diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-07-10 15:34:56 +0200 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-07-10 15:34:56 +0200 |
commit | 727440c48a6d247591d9cfc3af53e6f2e2b1b7f9 (patch) | |
tree | fc9af798f7365616e8f1d4265c9e8bceef661ad7 /tests | |
parent | 21c0ab30a144a2d236221be86a8947c5ce19101c (diff) |
tests: actually add gem_write_read_ring_switch
Duh ...
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'tests')
-rw-r--r-- | tests/.gitignore                   |   1 |
-rw-r--r-- | tests/gem_write_read_ring_switch.c | 200 |
2 files changed, 201 insertions, 0 deletions
diff --git a/tests/.gitignore b/tests/.gitignore index 1f7c691f..d2566951 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -76,6 +76,7 @@ gem_unfence_active_buffers gem_unref_active_buffers gem_vmap_blits gem_wait_render_timeout +gem_write_read_ring_switch gen3_mixed_blits gen3_render_linear_blits gen3_render_mixed_blits diff --git a/tests/gem_write_read_ring_switch.c b/tests/gem_write_read_ring_switch.c new file mode 100644 index 00000000..702b37c4 --- /dev/null +++ b/tests/gem_write_read_ring_switch.c @@ -0,0 +1,200 @@ +/* + * Copyright © 2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_gpu_tools.h"
#include "i830_reg.h"

/* Local copy of the VEBOX ring-selection execbuffer flag — presumably not yet
 * present in the installed i915_drm.h when this was written; TODO confirm
 * against the kernel uapi header. */
#define LOCAL_I915_EXEC_VEBOX (4<<0)

/* Set to false by run_test(); main() turns a still-true value into the
 * "skipped" exit status. */
bool skipped_all = true;

static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;
/* NOTE(review): dummy_bo is allocated in main() but never referenced by any
 * batch in this file. */
static drm_intel_bo *load_bo, *target_bo, *dummy_bo;
int fd;

/* Testcase: check read/write syncpoints when switching rings
 *
 * We've had a bug where the syncpoint for the last write was mangled after a
 * ring switch using semaphores. This resulted in cpu reads returning before the
 * write actually completed. This test exercises this.
 */

#define COLOR 0xffffffff

/* Run one blt-write -> other-ring-read -> cpu-read cycle.
 *
 * ring:     execbuffer ring-selection flag the read-only batch is flushed to
 *           (I915_EXEC_RENDER, I915_EXEC_BSD or LOCAL_I915_EXEC_VEBOX).
 * testname: subtest name, used only for the progress printout.
 *
 * Exits the process on buffer-allocation failure; asserts if the cpu read
 * observes stale (pre-write) data.
 */
static void run_test(int ring, const char *testname)
{
	uint32_t *ptr;
	int i;

	skipped_all = false;

	printf("running subtest %s\n", testname);

	target_bo = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_bo) {
		fprintf(stderr, "failed to alloc target buffer\n");
		exit(-1);
	}

	/* Need to map first so that we can do our own domain management with
	 * set_domain.
	 */
	drm_intel_bo_map(target_bo, 0);
	ptr = target_bo->virtual;
	/* Freshly allocated GEM bos are zeroed, so a non-zero read here would
	 * already be a bug. */
	assert(*ptr == 0);

	/* put some load onto the gpu to keep the light buffers active for long
	 * enough */
	for (i = 0; i < 1000; i++) {
		BEGIN_BATCH(8);
		OUT_BATCH(XY_SRC_COPY_BLT_CMD |
			  XY_SRC_COPY_BLT_WRITE_ALPHA |
			  XY_SRC_COPY_BLT_WRITE_RGB);
		OUT_BATCH((3 << 24) | /* 32 bits */
			  (0xcc << 16) | /* copy ROP */
			  4096);
		OUT_BATCH(0); /* dst x1,y1 */
		OUT_BATCH((1024 << 16) | 512);
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
		OUT_BATCH((0 << 16) | 512); /* src x1, y1 */
		OUT_BATCH(4096);
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		ADVANCE_BATCH();
	}

	/* The write under test: a solid-color blt fill of the first pixel of
	 * target_bo on the blt ring. */
	BEGIN_BATCH(6);
	OUT_BATCH(XY_COLOR_BLT_CMD |
		  XY_COLOR_BLT_WRITE_ALPHA |
		  XY_COLOR_BLT_WRITE_RGB);
	OUT_BATCH((3 << 24) | /* 32 bits */
		  (0xff << 16) |
		  128);
	OUT_BATCH(0); /* dst x1,y1 */
	OUT_BATCH((1 << 16) | 1);
	OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(COLOR);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);

	/* Emit an empty batch so that signalled seqno on the target ring >
	 * signalled seqno on the blt ring. This is required to hit the bug. */
	BEGIN_BATCH(2);
	OUT_BATCH(MI_NOOP);
	OUT_BATCH(MI_NOOP);
	ADVANCE_BATCH();
	intel_batchbuffer_flush_on_ring(batch, ring);

	/* For the ring->ring sync it's important to only emit a read reloc, for
	 * otherwise the obj->last_write_seqno will be updated.
	 */
	if (ring == I915_EXEC_RENDER) {
		BEGIN_BATCH(4);
		OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
		OUT_BATCH(0xffffffff); /* compare dword */
		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		OUT_BATCH(MI_NOOP);
		ADVANCE_BATCH();
	} else {
		BEGIN_BATCH(4);
		OUT_BATCH(MI_FLUSH_DW | 1);
		OUT_BATCH(0); /* reserved */
		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
		ADVANCE_BATCH();
	}
	intel_batchbuffer_flush_on_ring(batch, ring);

	/* CPU readback: with correct write tracking this set_domain must stall
	 * until the blt fill has completed; the bug made it return early, so a
	 * stale zero here is the failure mode being tested. */
	gem_set_domain(fd, target_bo->handle, I915_GEM_DOMAIN_GTT, 0);
	assert(*ptr == COLOR);
	drm_intel_bo_unmap(target_bo);

	drm_intel_bo_unreference(target_bo);
}

/* Entry point: set up libdrm, allocate the shared buffers and run each
 * subtest that the hardware supports. */
int main(int argc, char **argv)
{
	uint32_t devid;

	drmtest_subtest_init(argc, argv);

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	/* don't enable buffer reuse!! */
	//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
	assert(batch);

	dummy_bo = drm_intel_bo_alloc(bufmgr, "dummy bo", 4096, 4096);
	if (!dummy_bo) {
		fprintf(stderr, "failed to alloc dummy buffer\n");
		exit(-1);
	}

	load_bo = drm_intel_bo_alloc(bufmgr, "load bo", 1024*4096, 4096);
	if (!load_bo) {
		fprintf(stderr, "failed to alloc load buffer\n");
		exit(-1);
	}

	/* test only makes sense with separate blitter */
	if (drmtest_run_subtest("blt2render"))
		if (HAS_BLT_RING(devid))
			run_test(I915_EXEC_RENDER, "blt2render");

	if (drmtest_run_subtest("blt2bsd"))
		if (HAS_BSD_RING(devid))
			run_test(I915_EXEC_BSD, "blt2bsd");

	if (drmtest_run_subtest("blt2vebox"))
		if (gem_has_vebox(fd))
			run_test(LOCAL_I915_EXEC_VEBOX, "blt2vebox");

	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	/* 77 is the automake "skipped" exit status — NOTE(review): presumably
	 * the convention the igt test runner of this era relied on. */
	return skipped_all ? 77 : 0;
}