author	Marius Vlad <marius.c.vlad@intel.com>	2015-11-27 20:08:33 +0200
committer	Imre Deak <imre.deak@intel.com>	2015-11-27 20:29:19 +0200
commit	4cfcea4056ab78a8097b41e26748c940936cd411 (patch)
tree	e56d517cf99d8af354d7b66d2a1ff3d044020b54 /tests
parent	d2a17f4bc4d466ac19fd00ae7fe7998d6626460e (diff)
tests/pm_rpm: tests for set_caching and set_tiling ioctl(s)
v4: Re-bind the gem objects each time before calling disable_all_screens_and_wait().
v3: Use smaller sizes when allocating gem objects for caching tests.
v2: Use mmap to GTT instead of CPU, plus various style changes.

Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
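(As an aside for reviewers: a minimal sketch, not part of this patch, of the raw i915 ioctls that the IGT wrappers gem_set_tiling()/gem_get_tiling() used below boil down to, assuming the standard i915_drm.h UAPI; the helper name set_and_check_tiling() is hypothetical and error handling is reduced to a return code.)

    #include <stdint.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static int set_and_check_tiling(int fd, uint32_t handle,
                                    uint32_t tiling, uint32_t stride)
    {
            struct drm_i915_gem_set_tiling set = {
                    .handle = handle,
                    .tiling_mode = tiling,
                    .stride = stride,
            };
            struct drm_i915_gem_get_tiling get = { .handle = handle };

            /* drmIoctl() retries interrupted ioctls for us */
            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set))
                    return -1;
            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get))
                    return -1;

            /* check that the kernel reports back the requested mode */
            return get.tiling_mode == tiling ? 0 : -1;
    }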
Diffstat (limited to 'tests')
-rw-r--r-- tests/pm_rpm.c | 114
1 file changed, 114 insertions(+), 0 deletions(-)
diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
index c4fb19ce..e9ba9ea2 100644
--- a/tests/pm_rpm.c
+++ b/tests/pm_rpm.c
@@ -1729,6 +1729,114 @@ static void planes_subtest(bool universal, bool dpms)
}
}
+static void pm_test_tiling(void)
+{
+ uint32_t *handles;
+ uint8_t **gem_bufs;
+
+ int max_gem_objs = 0;
+ uint8_t off_bit = 14;
+ uint32_t gtt_obj_max_size = (256 * 1024);
+
+ uint32_t i, j, k, tiling_modes[3] = {
+ I915_TILING_NONE,
+ I915_TILING_X,
+ I915_TILING_Y,
+ };
+ uint32_t ti, sw;
+
+ /* default stride value */
+ uint32_t stride = 512;
+
+ /* calculate how many objects we can map */
+ for (i = 1 << off_bit; i <= gtt_obj_max_size; i <<= 1, max_gem_objs++)
+ ;
+
+ gem_bufs = calloc(max_gem_objs, sizeof(*gem_bufs));
+ handles = calloc(max_gem_objs, sizeof(*handles));
+
+ /* try to set different tiling for each handle */
+ for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
+
+ for (j = 0, k = 1 << off_bit;
+ k <= gtt_obj_max_size; k <<= 1, j++) {
+ handles[j] = gem_create(drm_fd, k);
+ gem_bufs[j] = gem_mmap__gtt(drm_fd, handles[j],
+ k, PROT_WRITE);
+ memset(gem_bufs[j], 0x0, k);
+ }
+
+ disable_all_screens_and_wait(&ms_data);
+
+ for (j = 0; j < max_gem_objs; j++) {
+ gem_set_tiling(drm_fd, handles[j],
+ tiling_modes[i], stride);
+ gem_get_tiling(drm_fd, handles[j], &ti, &sw);
+ igt_assert(tiling_modes[i] == ti);
+ }
+
+ enable_one_screen_and_wait(&ms_data);
+
+ for (j = 0, k = 1 << off_bit;
+ k <= gtt_obj_max_size; k <<= 1, j++) {
+ igt_assert(munmap(gem_bufs[j], k) == 0);
+ gem_close(drm_fd, handles[j]);
+ }
+ }
+
+ free(gem_bufs);
+ free(handles);
+}
+
+static void pm_test_caching(void)
+{
+ uint32_t handle;
+ uint8_t *gem_buf;
+
+ uint32_t i, got_caching;
+ uint32_t gtt_obj_max_size = (16 * 1024);
+ uint32_t cache_levels[3] = {
+ I915_CACHING_NONE,
+ I915_CACHING_CACHED, /* LLC caching */
+ I915_CACHING_DISPLAY, /* eDRAM caching */
+ };
+
+
+ handle = gem_create(drm_fd, gtt_obj_max_size);
+ gem_buf = gem_mmap__gtt(drm_fd, handle, gtt_obj_max_size, PROT_WRITE);
+
+ for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
+ memset(gem_buf, 16 << i, gtt_obj_max_size);
+
+ disable_all_screens_and_wait(&ms_data);
+
+ igt_debug("Setting cache level %u\n", cache_levels[i]);
+
+ gem_set_caching(drm_fd, handle, cache_levels[i]);
+
+ got_caching = gem_get_caching(drm_fd, handle);
+
+ igt_debug("Got back %u\n", got_caching);
+
+ /*
+ * Allow fall-back to CACHING_NONE in case the platform does
+ * not support it.
+ */
+ if (cache_levels[i] == I915_CACHING_DISPLAY)
+ igt_assert(got_caching == I915_CACHING_NONE ||
+ got_caching == I915_CACHING_DISPLAY);
+ else
+ igt_assert(got_caching == cache_levels[i]);
+
+ enable_one_screen_and_wait(&ms_data);
+ }
+
+ igt_assert(munmap(gem_buf, gtt_obj_max_size) == 0);
+ gem_close(drm_fd, handle);
+}
+
+
+
static void fences_subtest(bool dpms)
{
int i;
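(Similarly, a sketch, again not part of the patch, of the set_caching/get_caching ioctls behind gem_set_caching()/gem_get_caching(), mirroring the DISPLAY-to-NONE fallback that pm_test_caching() above tolerates; set_and_check_caching() is a hypothetical helper name.)

    static int set_and_check_caching(int fd, uint32_t handle, uint32_t level)
    {
            struct drm_i915_gem_caching arg = {
                    .handle = handle,
                    .caching = level,
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
                    return -1;

            arg.caching = 0;
            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
                    return -1;

            /* platforms without a separate display cache may fall back to NONE */
            if (level == I915_CACHING_DISPLAY)
                    return (arg.caching == I915_CACHING_DISPLAY ||
                            arg.caching == I915_CACHING_NONE) ? 0 : -1;

            return arg.caching == level ? 0 : -1;
    }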
@@ -1927,6 +2035,12 @@ int main(int argc, char *argv[])
igt_subtest("gem-execbuf-stress-extra-wait")
gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
+ /* power-wake reference tests */
+ igt_subtest("pm-tiling")
+ pm_test_tiling();
+ igt_subtest("pm-caching")
+ pm_test_caching();
+
igt_fixture
teardown_environment();
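(Usage note: once applied, the new subtests go through igt_core like any other, so they should be listable with ./pm_rpm --list-subtests and runnable individually with ./pm_rpm --run-subtest pm-tiling or --run-subtest pm-caching, assuming the usual IGT command-line handling.)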