author     Tvrtko Ursulin <tvrtko.ursulin@intel.com>  2017-12-22 13:13:48 +0000
committer  Tvrtko Ursulin <tvrtko.ursulin@intel.com>  2017-12-22 17:04:59 +0000
commit     e2ae8b9793d058d29c8e8452cd7d60961034629b (patch)
tree       56c1baf9db729151a69db12b08b45e59acd85e7a /tests/perf_pmu.c
parent     05690ad570a4501f0d0263ec05399660bd163117 (diff)
tests/perf_pmu: Simplify interrupt testing
Rather than calibrate and emit nop batches, use a manually signalled
chain of spinners to generate the desired interrupts.

v2: Two flavours of interrupt generation. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
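For reference, the core of the spinner-chain technique reads roughly as the
sketch below. It is illustrative only, not the literal test code: it assumes
an open DRM fd in gem_fd and reuses the IGT helpers visible in the diff
(igt_spin_batch_new_fence, sync_fence_merge, igt_spin_batch_set_timeout,
igt_spin_batch_free); the standalone framing and includes are assumptions.

	/* Minimal sketch of the spinner-chain idea; assumes IGT helpers
	 * from lib/igt.h and lib/sw_sync.h as used in the diff below. */
	#include <poll.h>
	#include <unistd.h>
	#include "igt.h"
	#include "sw_sync.h"

	static void spinner_chain_sketch(int gem_fd)
	{
		const unsigned int duration_ms = 1000;
		const int target = 30;
		igt_spin_t *spin[target];
		struct pollfd pfd = { .events = POLLIN };
		int fence_fd = -1;

		/* Queue spinning batches, merging each out-fence into a
		 * single fd we can wait on once. */
		for (int i = 0; i < target; i++) {
			spin[i] = igt_spin_batch_new_fence(gem_fd, 0,
							   I915_EXEC_RENDER);
			if (fence_fd < 0) {
				fence_fd = spin[i]->out_fence;
			} else {
				int old_fd = fence_fd;

				fence_fd = sync_fence_merge(old_fd,
							    spin[i]->out_fence);
				close(old_fd);
			}
		}

		/* Stagger the expiry of each spinner across the test
		 * duration (timeout is in nanoseconds)... */
		for (int i = 0; i < target; i++)
			igt_spin_batch_set_timeout(spin[i],
						   (i + 1) * duration_ms *
						   1e6 / target);

		/* ...and wait on the merged fence for the last one. */
		pfd.fd = fence_fd;
		igt_assert_eq(poll(&pfd, 1, 2 * duration_ms), 1);
		close(fence_fd);

		for (int i = 0; i < target; i++)
			igt_spin_batch_free(gem_fd, spin[i]);
	}

Each expiring spinner signals its fence, and the premise of the test is that
this raises a user interrupt, so the I915_PMU_INTERRUPTS counter sampled
around this sequence should advance by at least target.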
Diffstat (limited to 'tests/perf_pmu.c')
-rw-r--r--  tests/perf_pmu.c | 141
1 file changed, 69 insertions(+), 72 deletions(-)
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index eb8791cd..85ce1739 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -816,94 +816,85 @@ static void cpu_hotplug(int gem_fd)
assert_within_epsilon(val, ref, tolerance);
}
-static unsigned long calibrate_nop(int fd, const uint64_t calibration_us)
+static void
+test_interrupts(int gem_fd)
{
- const uint64_t cal_min_us = calibration_us * 3;
- const unsigned int tolerance_pct = 10;
- const uint32_t bbe = MI_BATCH_BUFFER_END;
- const unsigned int loops = 17;
- struct drm_i915_gem_exec_object2 obj = {};
- struct drm_i915_gem_execbuffer2 eb = {
- .buffer_count = 1, .buffers_ptr = to_user_pointer(&obj),
- };
- struct timespec t_begin = { };
- uint64_t size, last_size, ns;
-
- igt_nsec_elapsed(&t_begin);
-
- size = 256 * 1024;
- do {
- struct timespec t_start = { };
+ const unsigned int test_duration_ms = 1000;
+ const int target = 30;
+ igt_spin_t *spin[target];
+ struct pollfd pfd;
+ uint64_t idle, busy;
+ int fence_fd;
+ int fd;
+
+ gem_quiescent_gpu(gem_fd);
- obj.handle = gem_create(fd, size);
- gem_write(fd, obj.handle, size - sizeof(bbe), &bbe,
- sizeof(bbe));
- gem_execbuf(fd, &eb);
- gem_sync(fd, obj.handle);
+ fd = open_pmu(I915_PMU_INTERRUPTS);
- igt_nsec_elapsed(&t_start);
+ /* Queue spinning batches. */
+ for (int i = 0; i < target; i++) {
+ spin[i] = igt_spin_batch_new_fence(gem_fd, 0, I915_EXEC_RENDER);
+ if (i == 0) {
+ fence_fd = spin[i]->out_fence;
+ } else {
+ int old_fd = fence_fd;
- for (int loop = 0; loop < loops; loop++)
- gem_execbuf(fd, &eb);
- gem_sync(fd, obj.handle);
+ fence_fd = sync_fence_merge(old_fd,
+ spin[i]->out_fence);
+ close(old_fd);
+ }
- ns = igt_nsec_elapsed(&t_start);
+ igt_assert(fence_fd >= 0);
+ }
- gem_close(fd, obj.handle);
+ /* Wait for idle state. */
+ idle = pmu_read_single(fd);
+ do {
+ busy = idle;
+ usleep(1e3);
+ idle = pmu_read_single(fd);
+ } while (idle != busy);
- last_size = size;
- size = calibration_us * 1000 * size * loops / ns;
- size = ALIGN(size, sizeof(uint32_t));
- } while (igt_nsec_elapsed(&t_begin) / 1000 < cal_min_us ||
- abs(size - last_size) > (size * tolerance_pct / 100));
+ /* Arm batch expiration. */
+ for (int i = 0; i < target; i++)
+ igt_spin_batch_set_timeout(spin[i],
+ (i + 1) * test_duration_ms * 1e6
+ / target);
- return size;
+ /* Wait for last batch to finish. */
+ pfd.events = POLLIN;
+ pfd.fd = fence_fd;
+ igt_assert_eq(poll(&pfd, 1, 2 * test_duration_ms), 1);
+ close(fence_fd);
+
+ /* Free batches. */
+ for (int i = 0; i < target; i++)
+ igt_spin_batch_free(gem_fd, spin[i]);
+
+ /* Check at least as many interrupts have been generated. */
+ busy = pmu_read_single(fd) - idle;
+ close(fd);
+
+ igt_assert_lte(target, busy);
}
static void
-test_interrupts(int gem_fd)
+test_interrupts_sync(int gem_fd)
{
- const uint32_t bbe = MI_BATCH_BUFFER_END;
const unsigned int test_duration_ms = 1000;
- struct drm_i915_gem_exec_object2 obj = { };
- struct drm_i915_gem_execbuffer2 eb = {
- .buffers_ptr = to_user_pointer(&obj),
- .buffer_count = 1,
- .flags = I915_EXEC_FENCE_OUT,
- };
- unsigned long sz;
- igt_spin_t *spin;
const int target = 30;
+ igt_spin_t *spin[target];
struct pollfd pfd;
uint64_t idle, busy;
int fd;
- sz = calibrate_nop(gem_fd, test_duration_ms * 1000 / target);
gem_quiescent_gpu(gem_fd);
fd = open_pmu(I915_PMU_INTERRUPTS);
- spin = igt_spin_batch_new(gem_fd, 0, 0, 0);
- obj.handle = gem_create(gem_fd, sz);
- gem_write(gem_fd, obj.handle, sz - sizeof(bbe), &bbe, sizeof(bbe));
-
- pfd.events = POLLIN;
- pfd.fd = -1;
- for (int i = 0; i < target; i++) {
- int new;
-
- /* Merge all the fences together so we can wait on them all */
- gem_execbuf_wr(gem_fd, &eb);
- new = eb.rsvd2 >> 32;
- if (pfd.fd == -1) {
- pfd.fd = new;
- } else {
- int old = pfd.fd;
- pfd.fd = sync_fence_merge(old, new);
- close(old);
- close(new);
- }
- }
+ /* Queue spinning batches. */
+ for (int i = 0; i < target; i++)
+ spin[i] = __igt_spin_batch_new_fence(gem_fd, 0, 0);
/* Wait for idle state. */
idle = pmu_read_single(fd);
@@ -913,13 +904,16 @@ test_interrupts(int gem_fd)
idle = pmu_read_single(fd);
} while (idle != busy);
- /* Install the fences and enable signaling */
- igt_assert_eq(poll(&pfd, 1, 10), 0);
+ /* Process the batch queue. */
+ pfd.events = POLLIN;
+ for (int i = 0; i < target; i++) {
+ const unsigned int timeout_ms = test_duration_ms / target;
- /* Unplug the calibrated queue and wait for all the fences */
- igt_spin_batch_free(gem_fd, spin);
- igt_assert_eq(poll(&pfd, 1, 2 * test_duration_ms), 1);
- close(pfd.fd);
+ pfd.fd = spin[i]->out_fence;
+ igt_spin_batch_set_timeout(spin[i], timeout_ms * 1e6);
+ igt_assert_eq(poll(&pfd, 1, 2 * timeout_ms), 1);
+ igt_spin_batch_free(gem_fd, spin[i]);
+ }
/* Check at least as many interrupts have been generated. */
busy = pmu_read_single(fd) - idle;
@@ -1208,6 +1202,9 @@ igt_main
igt_subtest("interrupts")
test_interrupts(fd);
+ igt_subtest("interrupts-sync")
+ test_interrupts_sync(fd);
+
/**
* Test RC6 residency reporting.
*/
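The two flavours referenced in v2 differ in how completion is observed:
"interrupts" arms all the spinner timeouts up front and waits once on the
merged fence, so the completions land back to back, while "interrupts-sync"
polls each spinner's out-fence in turn and frees it before arming the next.
Assuming the standard IGT subtest runner conventions, either flavour can be
exercised directly, e.g.:

	./perf_pmu --run-subtest interrupts
	./perf_pmu --run-subtest interrupts-sync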
*/