author    Lionel Landwerlin <lionel.g.landwerlin@intel.com>  2017-04-14 02:49:19 +0100
committer Lionel Landwerlin <lionel.g.landwerlin@intel.com>  2017-10-04 13:44:00 +0100
commit    0e41ce59d71031a21aa9ae83e46ccd54b7601dc8 (patch)
tree      fefe757a76b01ae1c47e4bb277cb0c7e54487e65 /tests/perf.c
parent    55d3c3b00838f8ad103233a75c12453b9733fd37 (diff)
tests/perf: make buffer-fill more reliable
The buffer filling-rate measurement must discard context-switch reports: they do not depend on the sampling periodicity, but rather on the number of different applications running concurrently on the system.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Tested-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
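The fix, in short: when estimating how fast the OA buffer fills, count only timer-triggered (periodic) samples and skip context-switch reports. Below is a standalone sketch of that filtering, not code from the patch; is_periodic_report() is a hypothetical predicate standing in for the test's oa_report_is_periodic() helper.

	#include <stdbool.h>
	#include <stdint.h>
	#include <drm/i915_drm.h>

	/* Hypothetical stand-in for the test's oa_report_is_periodic()
	 * helper, which inspects the report's reason/timestamp fields;
	 * assumed to be provided by the caller. */
	extern bool is_periodic_report(const uint32_t *report);

	/* Walk a buffer returned by read() on the perf stream and count
	 * only the timer-triggered samples. Context-switch reports are
	 * skipped: their rate tracks how many applications run
	 * concurrently, not the sampling period. */
	static uint32_t count_periodic_samples(const uint8_t *buf, int len)
	{
		uint32_t n_periodic = 0;
		int offset = 0;

		while (offset < len) {
			const struct drm_i915_perf_record_header *header =
				(const void *)(buf + offset);
			const uint32_t *report = (const void *)(header + 1);

			if (header->type == DRM_I915_PERF_RECORD_SAMPLE &&
			    is_periodic_report(report))
				n_periodic++;

			offset += header->size;
		}

		return n_periodic;
	}

The real test additionally tracks report timestamps and GPU ticks for debugging; the sketch keeps only the counting logic.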
Diffstat (limited to 'tests/perf.c')
-rw-r--r--  tests/perf.c  120
1 file changed, 103 insertions, 17 deletions
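For context on the 45%/55% bounds checked in the hunks below: the test derives the expected fill time of the 16MiB OA buffer from the report size and the sampling period decoded from the OA exponent. A sketch of that arithmetic follows; timestamp_frequency and the helper name oa_exponent_to_ns() are illustrative assumptions (IGT carries a similar conversion helper).

	#include <stddef.h>
	#include <stdint.h>

	/* The OA unit emits a periodic report every (2 << exponent)
	 * timestamp ticks; timestamp_frequency (ticks per second) is
	 * assumed to come from the platform. */
	static uint64_t oa_exponent_to_ns(uint64_t timestamp_frequency,
					  int exponent)
	{
		return 1000000000ULL * (2ULL << exponent) / timestamp_frequency;
	}

	/* Time to fill the whole OA buffer with periodic reports only,
	 * mirroring the n_full_oa_reports / fill_duration math in the
	 * first hunk. */
	static uint64_t buffer_fill_duration_ns(size_t oa_buf_size,
						size_t report_size,
						uint64_t oa_period_ns)
	{
		uint64_t n_full_oa_reports = oa_buf_size / report_size;

		return n_full_oa_reports * oa_period_ns;
	}

For example, with (say) 256-byte reports the 16MiB buffer holds 65536 of them, so a period near 10µs fills it in roughly 0.65s, inside the one-second bound the test asserts on fill_duration.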
diff --git a/tests/perf.c b/tests/perf.c
index 1f6251e1..c8537f00 100644
--- a/tests/perf.c
+++ b/tests/perf.c
@@ -2704,22 +2704,30 @@ test_buffer_fill(void)
.num_properties = sizeof(properties) / 16,
.properties_ptr = to_user_pointer(properties),
};
+ struct drm_i915_perf_record_header *header;
int buf_size = 65536 * (256 + sizeof(struct drm_i915_perf_record_header));
uint8_t *buf = malloc(buf_size);
+ int len;
size_t oa_buf_size = 16 * 1024 * 1024;
size_t report_size = oa_formats[test_oa_format].size;
int n_full_oa_reports = oa_buf_size / report_size;
uint64_t fill_duration = n_full_oa_reports * oa_period;
+ load_helper_init();
+ load_helper_run(HIGH);
+
igt_assert(fill_duration < 1000000000);
stream_fd = __perf_open(drm_fd, &param);
for (int i = 0; i < 5; i++) {
- struct drm_i915_perf_record_header *header;
bool overflow_seen;
- int offset = 0;
- int len;
+ uint32_t n_periodic_reports;
+ uint32_t first_timestamp = 0, last_timestamp = 0;
+ uint32_t last_periodic_report[64];
+ double tick_per_period = 0.0;
+
+ do_ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
nanosleep(&(struct timespec){ .tv_sec = 0,
.tv_nsec = fill_duration * 1.25 },
@@ -2731,7 +2739,7 @@ test_buffer_fill(void)
igt_assert_neq(len, -1);
overflow_seen = false;
- for (offset = 0; offset < len; offset += header->size) {
+ for (int offset = 0; offset < len; offset += header->size) {
header = (void *)(buf + offset);
if (header->type == DRM_I915_PERF_RECORD_OA_BUFFER_LOST)
@@ -2740,32 +2748,110 @@ test_buffer_fill(void)
igt_assert_eq(overflow_seen, true);
+ do_ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
+
+ igt_debug("fill_duration = %luns, oa_exponent = %u\n",
+ fill_duration, oa_exponent);
+
+ do_ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
+
nanosleep(&(struct timespec){ .tv_sec = 0,
- .tv_nsec = fill_duration / 2 },
- NULL);
+ .tv_nsec = fill_duration / 2 },
+ NULL);
- while ((len = read(stream_fd, buf, buf_size)) == -1 && errno == EINTR)
- ;
+ n_periodic_reports = 0;
- igt_assert_neq(len, -1);
+ /* Because of the race condition between notification of new
+ * reports and reports landing in memory, we need to rely on
+ * timestamps to figure out whether we've read enough of them.
+ */
+ while (((last_timestamp - first_timestamp) * oa_period) < (fill_duration / 2)) {
- igt_assert(len > report_size * n_full_oa_reports * 0.45);
- igt_assert(len < report_size * n_full_oa_reports * 0.55);
+ igt_debug("dts=%u elapsed=%lu duration=%lu\n",
+ last_timestamp - first_timestamp,
+ (last_timestamp - first_timestamp) * oa_period,
+ fill_duration / 2);
- overflow_seen = false;
- for (offset = 0; offset < len; offset += header->size) {
- header = (void *)(buf + offset);
+ while ((len = read(stream_fd, buf, buf_size)) == -1 && errno == EINTR)
+ ;
- if (header->type == DRM_I915_PERF_RECORD_OA_BUFFER_LOST)
- overflow_seen = true;
+ igt_assert_neq(len, -1);
+
+ for (int offset = 0; offset < len; offset += header->size) {
+ uint32_t *report;
+ double previous_tick_per_period;
+
+ header = (void *) (buf + offset);
+ report = (void *) (header + 1);
+
+ switch (header->type) {
+ case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
+ igt_debug("report loss, trying again\n");
+ break;
+ case DRM_I915_PERF_RECORD_SAMPLE:
+ igt_debug(" > report ts=%u"
+ " ts_delta_last_periodic=%8u is_timer=%i ctx_id=%8x gpu_ticks=%u nb_periodic=%u\n",
+ report[1],
+ n_periodic_reports > 0 ? report[1] - last_periodic_report[1] : 0,
+ oa_report_is_periodic(oa_exponent, report),
+ oa_report_get_ctx_id(report),
+ n_periodic_reports > 0 ? report[3] - last_periodic_report[3] : 0,
+ n_periodic_reports);
+
+ if (first_timestamp == 0)
+ first_timestamp = report[1];
+ last_timestamp = report[1];
+
+ previous_tick_per_period = tick_per_period;
+
+ if (n_periodic_reports > 1 &&
+ oa_report_is_periodic(oa_exponent, report)) {
+ tick_per_period =
+ oa_reports_tick_per_period(last_periodic_report,
+ report);
+
+ if (!double_value_within(previous_tick_per_period,
+ tick_per_period, 5))
+ igt_debug("clock change!\n");
+
+ memcpy(last_periodic_report, report,
+ sizeof(last_periodic_report));
+ }
+
+ /* We want to measure only the periodic
+ * reports; ctx-switch reports might inflate
+ * the content of the buffer and skew our
+ * measurement.
+ */
+ n_periodic_reports +=
+ oa_report_is_periodic(oa_exponent, report);
+ break;
+ case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
+ igt_assert(!"unexpected overflow");
+ break;
+ }
+ }
}
- igt_assert_eq(overflow_seen, false);
+ do_ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
+
+ igt_debug("%f < %lu < %f\n",
+ report_size * n_full_oa_reports * 0.45,
+ n_periodic_reports * report_size,
+ report_size * n_full_oa_reports * 0.55);
+
+ igt_assert(n_periodic_reports * report_size >
+ report_size * n_full_oa_reports * 0.45);
+ igt_assert(n_periodic_reports * report_size <
+ report_size * n_full_oa_reports * 0.55);
}
free(buf);
__perf_close(stream_fd);
+
+ load_helper_stop();
+ load_helper_fini();
}
static void
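Stepping back from the diff: because of the race the in-diff comment describes (new reports can be signalled before they land in memory), the patch reads in a loop until the span covered by report timestamps accounts for half the expected fill duration, retrying reads interrupted by signals. A condensed sketch of that reading pattern, under the same assumptions as above (is_periodic_report() is hypothetical; report[1] is treated as the raw OA timestamp, as in the patch):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <drm/i915_drm.h>

	extern bool is_periodic_report(const uint32_t *report);

	/* Keep reading until the report timestamps, scaled by the
	 * sampling period as in the patch, cover at least target_ns.
	 * Returns the number of periodic samples seen; error handling
	 * is reduced to the essentials. */
	static uint32_t read_for_duration(int stream_fd, uint8_t *buf,
					  int buf_size,
					  uint64_t oa_period_ns,
					  uint64_t target_ns)
	{
		uint32_t first_timestamp = 0, last_timestamp = 0;
		uint32_t n_periodic = 0;

		while ((uint64_t)(last_timestamp - first_timestamp) *
		       oa_period_ns < target_ns) {
			ssize_t len;

			/* read() on the perf stream may be interrupted. */
			while ((len = read(stream_fd, buf, buf_size)) == -1 &&
			       errno == EINTR)
				;
			if (len < 0)
				break;

			for (ssize_t offset = 0; offset < len; ) {
				const struct drm_i915_perf_record_header *header =
					(const void *)(buf + offset);
				const uint32_t *report =
					(const void *)(header + 1);

				if (header->type == DRM_I915_PERF_RECORD_SAMPLE) {
					if (first_timestamp == 0)
						first_timestamp = report[1];
					last_timestamp = report[1];

					n_periodic += is_periodic_report(report);
				}

				offset += header->size;
			}
		}

		return n_periodic;
	}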