diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2019-09-30 11:30:52 +0100 |
---|---|---|
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2019-12-08 12:20:29 +0000 |
commit | d45e11f8e10a4c5775372002ec89974ce3d97b27 (patch) | |
tree | c4d81579d24eeac22be5e2160150d5bd17241b20 /tests/i915/gem_mmap_gtt.c | |
parent | babef7436c5f8adfa3c13d3d032493d4924f8d7a (diff) |
i915/gem_mmap_gtt: Reduce RSS for fault-concurrent
The intent of exercising parallel page fault is not necessarily to
exercise parallel swap-in (we can safely rely on that being well tested
and orthogonal to page faulting), but to make sure that our object
and GGTT locking is exercised. We can safely reduce our RSS without loss
of coverage. Furthermore, by using varying sizes we can exercise
different code paths within page faulting, rather than all being
serviced as partial mmaps. Instead of allocating 32 surfaces, each of
16MiB, we allocate 32 surfaces in incremental 512KiB sizes; halving the
memory requirement.
References: https://bugs.freedesktop.org/show_bug.cgi?id=111864
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Diffstat (limited to 'tests/i915/gem_mmap_gtt.c')
-rw-r--r-- | tests/i915/gem_mmap_gtt.c | 40 |
1 file changed, 28 insertions, 12 deletions
diff --git a/tests/i915/gem_mmap_gtt.c b/tests/i915/gem_mmap_gtt.c index 4ea54ef1..af87ebc3 100644 --- a/tests/i915/gem_mmap_gtt.c +++ b/tests/i915/gem_mmap_gtt.c @@ -57,30 +57,36 @@ set_domain_gtt(int fd, uint32_t handle) } static void * -mmap_bo(int fd, uint32_t handle) +mmap_bo(int fd, uint32_t handle, uint64_t size) { void *ptr; - ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE); + ptr = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE); return ptr; } static void * -create_pointer(int fd) +create_pointer_size(int fd, uint64_t size) { uint32_t handle; void *ptr; - handle = gem_create(fd, OBJECT_SIZE); + handle = gem_create(fd, size); - ptr = mmap_bo(fd, handle); + ptr = mmap_bo(fd, handle, size); gem_close(fd, handle); return ptr; } +static void * +create_pointer(int fd) +{ + return create_pointer_size(fd, OBJECT_SIZE); +} + static void test_access(int fd) { @@ -482,7 +488,7 @@ test_write_gtt(int fd) dst = gem_create(fd, OBJECT_SIZE); /* prefault object into gtt */ - dst_gtt = mmap_bo(fd, dst); + dst_gtt = mmap_bo(fd, dst, OBJECT_SIZE); set_domain_gtt(fd, dst); memset(dst_gtt, 0, OBJECT_SIZE); munmap(dst_gtt, OBJECT_SIZE); @@ -972,10 +978,16 @@ thread_fault_concurrent(void *closure) int n; for (n = 0; n < 32; n++) { + unsigned int id = (n + t->id) % 32; + uint32_t sz = *t->ptr[id] - 1; + int idx = rand() % sz + 1; + if (n & 1) - *t->ptr[(n + t->id) % 32] = val; + t->ptr[id][idx] = val; else - val = *t->ptr[(n + t->id) % 32]; + val = t->ptr[id][idx]; + + val++; } return NULL; @@ -989,7 +1001,10 @@ test_fault_concurrent(int fd) int n; for (n = 0; n < 32; n++) { - ptr[n] = create_pointer(fd); + uint32_t sz = (n + 1) << 19; /* 512KiB increments */ + + ptr[n] = create_pointer_size(fd, sz); + *ptr[n] = sz / sizeof(uint32_t); /* num_elems for convenience */ } for (n = 0; n < 64; n++) { @@ -998,12 +1013,13 @@ test_fault_concurrent(int fd) pthread_create(&thread[n].thread, NULL, thread_fault_concurrent, &thread[n]); } + sleep(2); + for (n = 0; n 
< 64; n++) pthread_join(thread[n].thread, NULL); - for (n = 0; n < 32; n++) { - munmap(ptr[n], OBJECT_SIZE); - } + for (n = 0; n < 32; n++) + munmap(ptr[n], *ptr[n] * sizeof(uint32_t)); } static void |