summaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
author: Chris Wilson <chris@chris-wilson.co.uk> 2016-02-25 21:43:01 +0000
committer: Chris Wilson <chris@chris-wilson.co.uk> 2016-03-01 13:25:07 +0000
commit: aed69b56d4c63a19594440be6679307b2781ae2c (patch)
tree: c378ad1952669e651c11a89e1e2136a70db06fb7 /tests
parent: 925e5e1caef9b56bd53df457735514b644c7a399 (diff)
lib: Add read/write direction support for dmabuf synchronisation
Allow read-only synchronisation on dmabuf mmaps, useful to allow concurrent read-read testing between the CPU and GPU.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'tests')
-rw-r--r--  tests/gem_concurrent_all.c   | 8
-rw-r--r--  tests/kms_mmap_write_crc.c   | 6
-rw-r--r--  tests/prime_mmap_coherency.c | 4
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/tests/gem_concurrent_all.c b/tests/gem_concurrent_all.c
index f62901c9..8718f169 100644
--- a/tests/gem_concurrent_all.c
+++ b/tests/gem_concurrent_all.c
@@ -365,10 +365,10 @@ dmabuf_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
uint32_t *v;
int size;
- prime_sync_start(dmabuf->fd);
+ prime_sync_start(dmabuf->fd, true);
for (v = dmabuf->map, size = b->size; size--; v++)
*v = val;
- prime_sync_end(dmabuf->fd);
+ prime_sync_end(dmabuf->fd, true);
}
static void
@@ -378,10 +378,10 @@ dmabuf_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
uint32_t *v;
int size;
- prime_sync_start(dmabuf->fd);
+ prime_sync_start(dmabuf->fd, false);
for (v = dmabuf->map, size = b->size; size--; v++)
igt_assert_eq_u32(*v, val);
- prime_sync_end(dmabuf->fd);
+ prime_sync_end(dmabuf->fd, false);
}
static void
diff --git a/tests/kms_mmap_write_crc.c b/tests/kms_mmap_write_crc.c
index 6984bbd1..ae026b61 100644
--- a/tests/kms_mmap_write_crc.c
+++ b/tests/kms_mmap_write_crc.c
@@ -110,7 +110,7 @@ static void test(data_t *data)
* firstly demonstrate the need for DMA_BUF_SYNC_START ("begin_cpu_access")
*/
if (ioctl_sync)
- prime_sync_start(dma_buf_fd);
+ prime_sync_start(dma_buf_fd, true);
/* use dmabuf pointer to make the other fb all white too */
buf = malloc(fb->size);
@@ -142,7 +142,7 @@ static void test(data_t *data)
/* sync start, to move to CPU domain */
if (ioctl_sync)
- prime_sync_start(dma_buf_fd);
+ prime_sync_start(dma_buf_fd, true);
/* use dmabuf pointer in the same fb to make it all white */
buf = malloc(fb->size);
@@ -154,7 +154,7 @@ static void test(data_t *data)
/* if we don't change to the GTT domain again, the whites won't get flushed
* and therefore we demonstrates the need for sync end here */
if (ioctl_sync)
- prime_sync_end(dma_buf_fd);
+ prime_sync_end(dma_buf_fd, true);
/* check that the crc is as expected, which requires that caches got flushed */
igt_pipe_crc_collect_crc(data->pipe_crc, &crc);
diff --git a/tests/prime_mmap_coherency.c b/tests/prime_mmap_coherency.c
index a9a2664a..180d8a4d 100644
--- a/tests/prime_mmap_coherency.c
+++ b/tests/prime_mmap_coherency.c
@@ -97,7 +97,7 @@ static void test_read_flush(bool expect_stale_cache)
* until we try to read them again in step #4. This behavior could be fixed
* by flush CPU read right before accessing the CPU pointer */
if (!expect_stale_cache)
- prime_sync_start(dma_buf_fd);
+ prime_sync_start(dma_buf_fd, false);
for (i = 0; i < (width * height) / 4; i++)
if (ptr_cpu[i] != 0x11111111) {
@@ -149,7 +149,7 @@ static void test_write_flush(bool expect_stale_cache)
/* This is the main point of this test: !llc hw requires a cache write
* flush right here (explained in step #4). */
if (!expect_stale_cache)
- prime_sync_start(dma_buf_fd);
+ prime_sync_start(dma_buf_fd, true);
memset(ptr_cpu, 0x11, width * height);