author		Tvrtko Ursulin <tvrtko.ursulin@intel.com>	2018-02-02 18:37:46 +0000
committer	Tvrtko Ursulin <tvrtko.ursulin@intel.com>	2018-02-05 18:02:36 +0000
commit		c9be7aa6cfcc4b2c9de20f062393547dd4a49ab7 (patch)
tree		3b83c92132532b36a95ab6686f0b6b7b61755e65 /tests/perf_pmu.c
parent		1dd291976c25685ce72a4efed454fc97982b83c0 (diff)
tests/perf_pmu: Tighten busy measurement
In cases where we manually terminate the busy batch, we always want to sample busyness while the batch is still running, just before we terminate it, and not the other way around. This shrinks the window in which unwanted idleness can be sampled.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
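The pattern, in code: read the PMU counter first, while the spinner still occupies the engine, and only then terminate it. Below is a minimal sketch of the new ordering, meant to be read in the context of tests/perf_pmu.c; it reuses only helpers visible in the hunks that follow (e2ring(), pmu_read_single(), measured_usleep(), batch_duration_ns and the igt_spin_batch_* API). The wrapper name measure_busy() is hypothetical, not part of the patch.

	/*
	 * Hypothetical wrapper illustrating the tightened ordering; every
	 * call below appears in tests/perf_pmu.c or IGT at this commit.
	 */
	static uint64_t measure_busy(int gem_fd, int pmu_fd,
				     const struct intel_execution_engine2 *e)
	{
		igt_spin_t *spin;
		uint64_t val;

		spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);

		measured_usleep(batch_duration_ns / 1000);

		val = pmu_read_single(pmu_fd);	/* sample while the engine is busy */
		igt_spin_batch_end(spin);	/* terminate only after sampling */

		igt_spin_batch_free(gem_fd, spin);

		return val;
	}

Ending the batch after the read means the engine cannot go idle before the sample is taken, so the only remaining slack is the gap between measured_usleep() returning and the read itself.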
Diffstat (limited to 'tests/perf_pmu.c')
-rw-r--r--	tests/perf_pmu.c | 28 +++++++++++++---------------
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 2f7d3341..bf16e5e8 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -146,10 +146,9 @@ single(int gem_fd, const struct intel_execution_engine2 *e, bool busy)
 		spin = NULL;
 
 	slept = measured_usleep(batch_duration_ns / 1000);
-	igt_spin_batch_end(spin);
-
 	val = pmu_read_single(fd);
+	igt_spin_batch_end(spin);
 
 	igt_spin_batch_free(gem_fd, spin);
 	close(fd);
@@ -256,7 +255,7 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	gem_quiescent_gpu(gem_fd);
 }
 
-static void log_busy(int fd, unsigned int num_engines, uint64_t *val)
+static void log_busy(unsigned int num_engines, uint64_t *val)
 {
 	char buf[1024];
 	int rem = sizeof(buf);
@@ -303,14 +302,14 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 	spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
 	slept = measured_usleep(batch_duration_ns / 1000);
-	igt_spin_batch_end(spin);
-
 	pmu_read_multi(fd[0], num_engines, val);
-	log_busy(fd[0], num_engines, val);
+	igt_spin_batch_end(spin);
 
 	igt_spin_batch_free(gem_fd, spin);
 	close(fd[0]);
 
+	log_busy(num_engines, val);
+
 	assert_within_epsilon(val[busy_idx], slept, tolerance);
 
 	for (i = 0; i < num_engines; i++) {
 		if (i == busy_idx)
@@ -364,14 +363,14 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 		fd[i] = open_group(val[i], fd[0]);
 
 	slept = measured_usleep(batch_duration_ns / 1000);
-	igt_spin_batch_end(spin);
-
 	pmu_read_multi(fd[0], num_engines, val);
-	log_busy(fd[0], num_engines, val);
+	igt_spin_batch_end(spin);
 
 	igt_spin_batch_free(gem_fd, spin);
 	close(fd[0]);
 
+	log_busy(num_engines, val);
+
 	for (i = 0; i < num_engines; i++) {
 		if (i == idle_idx)
 			assert_within_epsilon(val[i], 0.0f, tolerance);
@@ -420,14 +419,14 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines)
 		fd[i] = open_group(val[i], fd[0]);
 
 	slept = measured_usleep(batch_duration_ns / 1000);
-	igt_spin_batch_end(spin);
-
 	pmu_read_multi(fd[0], num_engines, val);
-	log_busy(fd[0], num_engines, val);
+	igt_spin_batch_end(spin);
 
 	igt_spin_batch_free(gem_fd, spin);
 	close(fd[0]);
 
+	log_busy(num_engines, val);
+
 	for (i = 0; i < num_engines; i++)
 		assert_within_epsilon(val[i], slept, tolerance);
 	gem_quiescent_gpu(gem_fd);
@@ -903,12 +902,11 @@ static void cpu_hotplug(int gem_fd)
 	igt_waitchildren();
 
-	igt_spin_batch_end(spin);
-	gem_sync(gem_fd, spin->handle);
-
 	ref = igt_nsec_elapsed(&start);
 	val = pmu_read_single(fd);
+	igt_spin_batch_end(spin);
+	gem_sync(gem_fd, spin->handle);
 
 	igt_spin_batch_free(gem_fd, spin);
 	close(fd);