summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2019-04-13 14:25:42 +0100
committerChris Wilson <chris@chris-wilson.co.uk>2019-04-16 09:17:07 +0100
commit2bd82477a9ee6aa308e0f1045df4901f766683ea (patch)
tree19dfe2cd712cd93ea16629706f5843b495b9a16a
parentcf27a37b867bf31dccbe5f1b3bd84a2e606544f0 (diff)
i915/gem_mmap_gtt: Markup a couple of GTT set-domains
We have to control the cache domains; this is especially important before first writing into the object. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
-rw-r--r--tests/i915/gem_mmap_gtt.c26
1 file changed, 16 insertions, 10 deletions
diff --git a/tests/i915/gem_mmap_gtt.c b/tests/i915/gem_mmap_gtt.c
index 58922ee3..ab7d3f2d 100644
--- a/tests/i915/gem_mmap_gtt.c
+++ b/tests/i915/gem_mmap_gtt.c
@@ -678,27 +678,26 @@ test_huge_copy(int fd, int huge, int tiling_a, int tiling_b, int ncpus)
igt_fork(child, ncpus) {
uint64_t valid_size = huge_object_size;
- uint32_t bo;
+ uint32_t bo[2];
char *a, *b;
- bo = gem_create(fd, huge_object_size);
+ bo[0] = gem_create(fd, huge_object_size);
if (tiling_a) {
- igt_require(__gem_set_tiling(fd, bo, abs(tiling_a), min_tile_width(devid, tiling_a)) == 0);
+ igt_require(__gem_set_tiling(fd, bo[0], abs(tiling_a), min_tile_width(devid, tiling_a)) == 0);
valid_size = rounddown(valid_size, tile_row_size(tiling_a, min_tile_width(devid, tiling_a)));
}
- a = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
+ a = __gem_mmap__gtt(fd, bo[0], huge_object_size, PROT_READ | PROT_WRITE);
igt_require(a);
- gem_close(fd, bo);
- bo = gem_create(fd, huge_object_size);
+ bo[1] = gem_create(fd, huge_object_size);
if (tiling_b) {
- igt_require(__gem_set_tiling(fd, bo, abs(tiling_b), max_tile_width(devid, tiling_b)) == 0);
+ igt_require(__gem_set_tiling(fd, bo[1], abs(tiling_b), max_tile_width(devid, tiling_b)) == 0);
valid_size = rounddown(valid_size, tile_row_size(tiling_b, max_tile_width(devid, tiling_b)));
}
- b = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
+ b = __gem_mmap__gtt(fd, bo[1], huge_object_size, PROT_READ | PROT_WRITE);
igt_require(b);
- gem_close(fd, bo);
+ gem_set_domain(fd, bo[0], I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
for (i = 0; i < valid_size / PAGE_SIZE; i++) {
uint32_t *ptr = (uint32_t *)(a + PAGE_SIZE*i);
for (int j = 0; j < PAGE_SIZE/4; j++)
@@ -706,7 +705,7 @@ test_huge_copy(int fd, int huge, int tiling_a, int tiling_b, int ncpus)
igt_progress("Writing a ", i, valid_size / PAGE_SIZE);
}
-
+ gem_set_domain(fd, bo[1], I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
for (i = 0; i < valid_size / PAGE_SIZE; i++) {
uint32_t *ptr = (uint32_t *)(b + PAGE_SIZE*i);
for (int j = 0; j < PAGE_SIZE/4; j++)
@@ -727,12 +726,19 @@ test_huge_copy(int fd, int huge, int tiling_a, int tiling_b, int ncpus)
A_tmp[j] = B_tmp[j];
else
B_tmp[j] = A_tmp[j];
+
+ gem_set_domain(fd, bo[0], I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
memcpy(A, A_tmp, PAGE_SIZE);
+
+ gem_set_domain(fd, bo[1], I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
memcpy(B, B_tmp, PAGE_SIZE);
igt_progress("Copying a<->b ", i, valid_size / PAGE_SIZE);
}
+ gem_close(fd, bo[0]);
+ gem_close(fd, bo[1]);
+
for (i = 0; i < valid_size / PAGE_SIZE; i++) {
uint32_t page[PAGE_SIZE/sizeof(uint32_t)];
copy_wc_page(page, a + PAGE_SIZE*i);