author    Chris Wilson <chris@chris-wilson.co.uk>    2018-02-19 11:08:19 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>    2018-02-19 15:32:38 +0000
commit    a68b3060dec0c59d3c6403f551fecf3bc4a1fd64 (patch)
tree      9fbe4fce83cb9487a7c1390732bf2ca56628163d /tests/perf_pmu.c
parent    f6820d90eb6853c7f4ce8ba7e5668d4ff9bf8984 (diff)
igt/perf_pmu: Retain original GTT offset when resubmitting the spinner
Since the spin batch contains a relocation to itself, when we resubmit the spinner we must ensure that it is executed at the same location. While the spinner is busy, resubmitting will reuse the same location, but if it is idle, the kernel may move it between executions. In this case, we need to record the previous location (in obj.offset) and then demand that the kernel reuse it via EXEC_OBJECT_PINNED.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
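For reference, a minimal sketch of the record-then-pin pattern the message describes, assuming an open i915 DRM fd, a batch handle such as one returned by igt_spin_batch_new(), and IGT's gem_execbuf()/to_user_pointer() helpers; submit_twice_pinned() is a hypothetical name for illustration, not part of this patch:

	#include <drm/i915_drm.h>
	#include "igt.h"	/* gem_execbuf(), to_user_pointer() */

	static void submit_twice_pinned(int gem_fd, uint32_t handle,
					unsigned int ring)
	{
		struct drm_i915_gem_exec_object2 obj = { .handle = handle };
		struct drm_i915_gem_execbuffer2 eb = {
			.buffer_count = 1,
			.buffers_ptr = to_user_pointer(&obj),
			.flags = ring,
		};

		/* First submission: the kernel picks a GTT offset and
		 * writes it back into obj.offset on return. */
		gem_execbuf(gem_fd, &eb);

		/* Later submissions: demand the recorded placement so the
		 * batch's self-relocation stays valid even if the object
		 * idled in between. */
		obj.flags |= EXEC_OBJECT_PINNED;
		gem_execbuf(gem_fd, &eb);
	}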
Diffstat (limited to 'tests/perf_pmu.c')
-rw-r--r--    tests/perf_pmu.c    26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 82053416..7613a619 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -387,15 +387,13 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
}
static void
-__submit_spin_batch(int gem_fd, igt_spin_t *spin,
+__submit_spin_batch(int gem_fd,
+ struct drm_i915_gem_exec_object2 *obj,
const struct intel_execution_engine2 *e)
{
- struct drm_i915_gem_exec_object2 obj = {
- .handle = spin->handle
- };
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
- .buffers_ptr = to_user_pointer(&obj),
+ .buffers_ptr = to_user_pointer(obj),
.flags = e2ring(gem_fd, e),
};
@@ -406,6 +404,7 @@ static void
most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
const unsigned int num_engines, unsigned int flags)
{
+ struct drm_i915_gem_exec_object2 obj = {};
const struct intel_execution_engine2 *e_;
uint64_t tval[2][num_engines];
uint64_t val[num_engines];
@@ -422,10 +421,11 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
if (e == e_) {
idle_idx = i;
} else if (spin) {
- __submit_spin_batch(gem_fd, spin, e_);
+ __submit_spin_batch(gem_fd, &obj, e_);
} else {
spin = igt_spin_batch_new(gem_fd, 0,
e2ring(gem_fd, e_), 0);
+ obj.handle = spin->handle;
}
val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
@@ -464,6 +464,7 @@ static void
all_busy_check_all(int gem_fd, const unsigned int num_engines,
unsigned int flags)
{
+ struct drm_i915_gem_exec_object2 obj = {};
const struct intel_execution_engine2 *e;
uint64_t tval[2][num_engines];
uint64_t val[num_engines];
@@ -478,10 +479,11 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
continue;
if (spin) {
- __submit_spin_batch(gem_fd, spin, e);
+ __submit_spin_batch(gem_fd, &obj, e);
} else {
spin = igt_spin_batch_new(gem_fd, 0,
e2ring(gem_fd, e), 0);
+ obj.handle = spin->handle;
}
val[i++] = I915_PMU_ENGINE_BUSY(e->class, e->instance);
@@ -1455,6 +1457,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
test_us * 2 * 1000 };
unsigned long sleep_busy = busy_us;
unsigned long sleep_idle = idle_us;
+ struct drm_i915_gem_exec_object2 obj = {};
igt_spin_t *spin;
int ret;
@@ -1467,8 +1470,11 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
/* Allocate our spin batch and idle it. */
spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+ obj.handle = spin->handle;
+ __submit_spin_batch(gem_fd, &obj, e); /* record its location */
igt_spin_batch_end(spin);
- gem_sync(gem_fd, spin->handle);
+ gem_sync(gem_fd, obj.handle);
+ obj.flags |= EXEC_OBJECT_PINNED;
/* 1st pass is calibration, second pass is the test. */
for (int pass = 0; pass < ARRAY_SIZE(timeout); pass++) {
@@ -1485,10 +1491,10 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
/* Restart the spinbatch. */
__rearm_spin_batch(spin);
- __submit_spin_batch(gem_fd, spin, e);
+ __submit_spin_batch(gem_fd, &obj, e);
measured_usleep(sleep_busy);
igt_spin_batch_end(spin);
- gem_sync(gem_fd, spin->handle);
+ gem_sync(gem_fd, obj.handle);
busy_ns += igt_nsec_elapsed(&t_busy);
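Note the ordering in accuracy(): the spinner is submitted once immediately after creation purely to latch a GTT offset into obj.offset, and only then is EXEC_OBJECT_PINNED set, so every resubmission in the calibration and test passes reuses that recorded location even though the batch idles between them.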