author    Ville Syrjälä <ville.syrjala@linux.intel.com>    2015-10-09 19:11:39 +0300
committer Ville Syrjälä <ville.syrjala@linux.intel.com>    2015-10-12 19:57:58 +0300
commit    f52e7ec7876603b4edb0bff412255f8f1624ecc4 (patch)
tree      c8d668fd83f6f30c52c6ee0f89921299f0ad6eed /tests/gem_streaming_writes.c
parent    b8a77dd6c8e9f73493b2f86f293ff9c06fc08049 (diff)
Replace __gem_mmap__{cpu,gtt,wc}() + igt_assert() with gem_mmap__{cpu,gtt,wc}()
gem_mmap__{cpu,gtt,wc}() already has the assert built in, so replace
__gem_mmap__{cpu,gtt,wc}() + igt_assert() with it.

Mostly done with coccinelle, with some manual help:

@@
identifier I;
expression E1, E2, E3, E4, E5, E6;
@@
(
- I = __gem_mmap__gtt(E1, E2, E3, E4);
+ I = gem_mmap__gtt(E1, E2, E3, E4);
  ...
- igt_assert(I);
|
- I = __gem_mmap__cpu(E1, E2, E3, E4, E5);
+ I = gem_mmap__cpu(E1, E2, E3, E4, E5);
  ...
- igt_assert(I);
|
- I = __gem_mmap__wc(E1, E2, E3, E4, E5);
+ I = gem_mmap__wc(E1, E2, E3, E4, E5);
  ...
- igt_assert(I);
)

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Stochastically-reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
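For context on why the bare igt_assert() calls can simply be dropped: the gem_mmap__{cpu,gtt,wc}() wrappers assert on the mapping result themselves (their real implementation lives in IGT's lib/ioctl_wrappers.c). Below is a minimal self-contained sketch of that wrapper pattern, using a stub mapper and plain assert() as stand-ins for the IGT helpers; fake_mmap_cpu and checked_mmap_cpu are hypothetical names, not IGT API:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Stand-in for __gem_mmap__cpu(): may return NULL on failure. */
static void *fake_mmap_cpu(int fd, uint32_t handle, uint64_t offset,
			   uint64_t size, unsigned prot)
{
	(void)fd; (void)handle; (void)offset; (void)prot;
	return malloc(size);
}

/* Stand-in for gem_mmap__cpu(): same arguments, but it asserts on
 * failure itself, so callers no longer need a separate igt_assert(). */
static void *checked_mmap_cpu(int fd, uint32_t handle, uint64_t offset,
			      uint64_t size, unsigned prot)
{
	void *ptr = fake_mmap_cpu(fd, handle, offset, size, prot);
	assert(ptr);
	return ptr;
}

int main(void)
{
	/* Before: s = __gem_mmap__cpu(...); igt_assert(s);
	 * After:  s = gem_mmap__cpu(...);                  */
	uint32_t *s = checked_mmap_cpu(3, 1, 0, 4096, 0);
	s[0] = 0;
	free(s);
	return 0;
}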
Diffstat (limited to 'tests/gem_streaming_writes.c')
-rw-r--r--  tests/gem_streaming_writes.c | 36
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/tests/gem_streaming_writes.c b/tests/gem_streaming_writes.c
index 6664582a..5b365c9b 100644
--- a/tests/gem_streaming_writes.c
+++ b/tests/gem_streaming_writes.c
@@ -88,22 +88,21 @@ static void test_streaming(int fd, int mode, int sync)
switch (mode) {
case 0: /* cpu/snoop */
gem_set_caching(fd, src, I915_CACHING_CACHED);
- s = __gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(s);
+ s = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE,
+ PROT_READ | PROT_WRITE);
break;
case 1: /* gtt */
- s = __gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(s);
+ s = gem_mmap__gtt(fd, src, OBJECT_SIZE,
+ PROT_READ | PROT_WRITE);
break;
case 2: /* wc */
- s = __gem_mmap__wc(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(s);
+ s = gem_mmap__wc(fd, src, 0, OBJECT_SIZE,
+ PROT_READ | PROT_WRITE);
break;
}
*s = 0; /* fault the object into the mappable range first (for GTT) */
- d = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
- igt_assert(d);
+ d = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
gem_write(fd, dst, 0, tmp, sizeof(tmp));
memset(&execbuf, 0, sizeof(execbuf));
@@ -154,8 +153,7 @@ static void test_streaming(int fd, int mode, int sync)
batch[i].handle = gem_create(fd, 4096);
batch[i].offset = 0;
- base = __gem_mmap__cpu(fd, batch[i].handle, 0, 4096, PROT_WRITE);
- igt_assert(base);
+ base = gem_mmap__cpu(fd, batch[i].handle, 0, 4096, PROT_WRITE);
for (int j = 0; j < 64; j++) {
unsigned x = (n * CHUNK_SIZE) % 4096 >> 2;
@@ -254,11 +252,9 @@ static void test_batch(int fd, int mode, int reverse)
exec[DST].handle = gem_create(fd, OBJECT_SIZE);
exec[SRC].handle = gem_create(fd, OBJECT_SIZE);
- s = __gem_mmap__wc(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(s);
+ s = gem_mmap__wc(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- d = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
- igt_assert(d);
+ d = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
memset(reloc, 0, sizeof(reloc));
reloc[0].offset = 4 * sizeof(uint32_t);
@@ -285,16 +281,16 @@ static void test_batch(int fd, int mode, int reverse)
switch (mode) {
case 0: /* cpu/snoop */
igt_require(gem_has_llc(fd));
- base = __gem_mmap__cpu(fd, exec[BATCH].handle, 0, batch_size, PROT_READ | PROT_WRITE);
- igt_assert(base);
+ base = gem_mmap__cpu(fd, exec[BATCH].handle, 0, batch_size,
+ PROT_READ | PROT_WRITE);
break;
case 1: /* gtt */
- base = __gem_mmap__gtt(fd, exec[BATCH].handle, batch_size, PROT_READ | PROT_WRITE);
- igt_assert(base);
+ base = gem_mmap__gtt(fd, exec[BATCH].handle, batch_size,
+ PROT_READ | PROT_WRITE);
break;
case 2: /* wc */
- base = __gem_mmap__wc(fd, exec[BATCH].handle, 0, batch_size, PROT_READ | PROT_WRITE);
- igt_assert(base);
+ base = gem_mmap__wc(fd, exec[BATCH].handle, 0, batch_size,
+ PROT_READ | PROT_WRITE);
break;
}
*base = 0; /* fault the object into the mappable range first */