author    Chris Wilson <chris@chris-wilson.co.uk>  2016-01-08 10:51:09 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>  2016-01-19 14:03:15 +0000
commit    1c61c0f75097983d1a19e03e2c6dfd631307d1ef (patch)
tree      37af5960fc1e512909f777451e1f93a677ff2172 /tests
parent    42291f25100cd01653b3fb3a2a1fbe6975e9c1bf (diff)
gem_concurrent_blit: Add a pread/pwrite variant to only set part of the buffer
The idea is to check partial cacheline reads/writes.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'tests')
-rw-r--r--  tests/gem_concurrent_all.c | 45
1 file changed, 38 insertions(+), 7 deletions(-)
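
As an aside (not part of the patch), the access pattern described in the commit
message can be reproduced standalone against libdrm_intel: write and read back a
single dword per row through drm_intel_bo_subdata()/drm_intel_bo_get_subdata(),
so each ioctl touches only part of a cacheline. The device node, buffer size and
the near-absence of error handling below are assumptions made for brevity.

/* Illustrative sketch only -- not from the patch. */
#include <assert.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <intel_bufmgr.h>

#define WIDTH  512
#define HEIGHT 512

static int pass;
/* one dword per row, walking a diagonal that shifts by one column per pass */
#define pixel(y, width) ((y)*(width) + (((y) + pass)%(width)))

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "partial",
					      4*WIDTH*HEIGHT, 4096);
	uint32_t val = 0xdeadbeef, buf;
	int y;

	assert(fd >= 0 && bufmgr && bo);

	for (pass = 0; pass < 10; pass++) {
		for (y = 0; y < HEIGHT; y++) {
			/* partial-cacheline pwrite: 4 bytes at a varying offset */
			drm_intel_bo_subdata(bo, 4*pixel(y, WIDTH), 4, &val);
			/* partial-cacheline pread of the same dword */
			drm_intel_bo_get_subdata(bo, 4*pixel(y, WIDTH), 4, &buf);
			assert(buf == val);
		}
	}

	drm_intel_bo_unreference(bo);
	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}

In the test itself the same offsets are driven by the new global pass counter,
which run_interruptible() and __run_forked() advance each loop, so successive
iterations land on a different byte offset within the cacheline.
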
diff --git a/tests/gem_concurrent_all.c b/tests/gem_concurrent_all.c
index a7db23cb..e333d443 100644
--- a/tests/gem_concurrent_all.c
+++ b/tests/gem_concurrent_all.c
@@ -56,6 +56,7 @@ IGT_TEST_DESCRIPTION("Test of pread/pwrite/mmap behavior when writing to active"
int fd, devid, gen;
struct intel_batchbuffer *batch;
int all;
+int pass;
static void
nop_release_bo(drm_intel_bo *bo)
@@ -95,6 +96,29 @@ prw_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *
drm_intel_bo_unmap(tmp);
}
+#define pixel(y, width) ((y)*(width) + (((y) + pass)%(width)))
+
+static void
+partial_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
+{
+ int y;
+
+ for (y = 0; y < height; y++)
+ do_or_die(drm_intel_bo_subdata(bo, 4*pixel(y, width), 4, &val));
+}
+
+static void
+partial_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
+{
+ int y;
+
+ for (y = 0; y < height; y++) {
+ uint32_t buf;
+ do_or_die(drm_intel_bo_get_subdata(bo, 4*pixel(y, width), 4, &buf));
+ igt_assert_eq_u32(buf, val);
+ }
+}
+
static drm_intel_bo *
create_normal_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
{
@@ -200,7 +224,7 @@ gtt_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *
/* GTT access is slow. So we just compare a few points */
drm_intel_gem_bo_start_gtt_access(bo, false);
for (y = 0; y < height; y++)
- igt_assert_eq_u32(vaddr[y*width+y], val);
+ igt_assert_eq_u32(vaddr[pixel(y, width)], val);
}
static drm_intel_bo *
@@ -389,6 +413,13 @@ const struct access_mode {
.release_bo = nop_release_bo,
},
{
+ .name = "partial",
+ .set_bo = partial_set_bo,
+ .cmp_bo = partial_cmp_bo,
+ .create_bo = unmapped_create_bo,
+ .release_bo = nop_release_bo,
+ },
+ {
.name = "cpu",
.require = create_cpu_require,
.set_bo = cpu_set_bo,
@@ -901,7 +932,7 @@ static void do_read_read_bcs(struct buffers *buffers,
do_copy_func(buffers->dst[i], buffers->src[i]);
blt_copy_bo(buffers->spare, buffers->src[i]);
}
- cpu_cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, NULL);
+ buffers->mode->cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, buffers->dummy);
hang = do_hang_func();
for (i = buffers->count; i--; )
buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
@@ -942,7 +973,7 @@ static void do_read_read_rcs(struct buffers *buffers,
do_copy_func(buffers->dst[i], buffers->src[i]);
render_copy_bo(buffers->spare, buffers->src[i]);
}
- cpu_cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, NULL);
+ buffers->mode->cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, buffers->dummy);
hang = do_hang_func();
for (i = buffers->count; i--; )
buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
@@ -1012,10 +1043,9 @@ static void run_interruptible(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
- int loop;
-
- for (loop = 0; loop < 10; loop++)
+ for (pass = 0; pass < 10; pass++)
do_test_func(buffers, do_copy_func, do_hang_func);
+ pass = 0;
check_gpu();
}
@@ -1039,8 +1069,9 @@ static void __run_forked(struct buffers *buffers,
batch = buffers_init(buffers, buffers->mode, fd);
buffers_create(buffers, num_buffers);
- for (int loop = 0; loop < loops; loop++)
+ for (pass = 0; pass < loops; pass++)
do_test_func(buffers, do_copy_func, do_hang_func);
+ pass = 0;
buffers_fini(buffers);
}
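
For reference, a tiny standalone program (again illustrative, not from the
patch; the 4x4 size is arbitrary) showing which dwords pixel() selects as pass
advances: pass 0 walks the main diagonal, and each later pass shifts the column
by one, wrapping at the row width, which is how repeated test loops end up
exercising different offsets within a cacheline.

#include <stdio.h>

static int pass;
#define pixel(y, width) ((y)*(width) + (((y) + pass)%(width)))

int main(void)
{
	int y;

	for (pass = 0; pass < 3; pass++)
		for (y = 0; y < 4; y++)
			printf("pass %d, row %d -> dword %d (byte offset %d)\n",
			       pass, y, pixel(y, 4), 4*pixel(y, 4));

	return 0;
}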