author    Matthew Auld <matthew.auld@intel.com>    2020-12-24 10:29:05 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>  2020-12-24 14:53:23 +0000
commit    334f0c326bb2812e7a2764dc63ff83c83b6daf58
tree      11dc189f1555f306482dd16a7a89257f28ea6289 /tests/i915/gen9_exec_parse.c
parent    194bc45e54e8bfe727d9709e60c3ba0dbe78481d
i915/gen9_exec_parse: shadow peek
The shadow batch needs to be in the user visible ppGTT, so make sure we
are not leaking anything, if we can guess where the shadow will be
placed.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
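For orientation, a minimal standalone sketch (illustrative only, not part of
the patch) of the gen8+ canonical-address rule the test relies on: a 48b
ppGTT offset with bit 47 set must be sign-extended into bits 63:48 before it
is handed to execbuf, which is what the sign_extend()/gen8_canonical_address()
helpers in the diff below implement.

    #include <assert.h>
    #include <stdint.h>

    /* Same shape as the helper added in the patch. */
    static inline uint64_t sign_extend(uint64_t x, int index)
    {
        int shift = 63 - index;

        return (int64_t)(x << shift) >> shift;
    }

    int main(void)
    {
        /* Offsets below bit 47 pass through unchanged... */
        assert(sign_extend(0x1000, 47) == 0x1000);

        /* ...while bit 47 propagates into bits 63:48. */
        assert(sign_extend(1ull << 47, 47) == 0xffff800000000000ull);

        return 0;
    }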
Diffstat (limited to 'tests/i915/gen9_exec_parse.c')
-rw-r--r--    tests/i915/gen9_exec_parse.c    129
1 file changed, 129 insertions(+), 0 deletions(-)
diff --git a/tests/i915/gen9_exec_parse.c b/tests/i915/gen9_exec_parse.c
index 087d6f35..6f54c4e1 100644
--- a/tests/i915/gen9_exec_parse.c
+++ b/tests/i915/gen9_exec_parse.c
@@ -1051,6 +1051,132 @@ static void test_rejected(int i915, uint32_t handle, bool ctx_param)
}
}
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1ULL << 12)
+
+static inline uint32_t fill_and_copy_shadow(uint32_t *batch, uint32_t len,
+ uintptr_t src, uintptr_t dst)
+{
+ unsigned int i = 0;
+
+#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
+#define BLT_WRITE_ALPHA (1<<21)
+#define BLT_WRITE_RGB (1<<20)
+ batch[i++] = XY_COLOR_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB | (7 - 2);
+ batch[i++] = 0xf0 << 16 | 1 << 25 | 1 << 24 | PAGE_SIZE;
+ batch[i++] = 0;
+ batch[i++] = len >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ batch[i++] = lower_32_bits(dst);
+ batch[i++] = upper_32_bits(dst);
+
+ batch[i++] = 0xdeadbeaf;
+ batch[i++] = 0;
+
+#define COPY_BLT_CMD (2<<29|0x53<<22)
+ batch[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB | 8;
+ batch[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | PAGE_SIZE;
+ batch[i++] = 0;
+ batch[i++] = len >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ batch[i++] = lower_32_bits(dst);
+ batch[i++] = upper_32_bits(dst);
+ batch[i++] = 0;
+ batch[i++] = PAGE_SIZE;
+ batch[i++] = lower_32_bits(src);
+ batch[i++] = upper_32_bits(src);
+
+ batch[i++] = MI_BATCH_BUFFER_END;
+ batch[i++] = 0;
+
+ return i * sizeof(uint32_t);
+}
+
+static inline uint64_t sign_extend(uint64_t x, int index)
+{
+ int shift = 63 - index;
+ return (int64_t)(x << shift) >> shift;
+}
+
+static uint64_t gen8_canonical_address(uint64_t address)
+{
+ return sign_extend(address, 47);
+}
+
+static void test_shadow_peek(int fd)
+{
+ uint64_t size = PAGE_SIZE;
+ struct drm_i915_gem_exec_object2 exec[2] = {};
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(exec),
+ .buffer_count = 2,
+ };
+ uint32_t *vaddr;
+ uint32_t len;
+ int i;
+
+ exec[0].handle = gem_create(fd, size); /* scratch for shadow */
+ exec[0].flags = EXEC_OBJECT_PINNED |
+ EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
+ EXEC_OBJECT_PAD_TO_SIZE;
+ exec[0].offset = 0;
+ /*
+ * Ensure the shadow has no place to hide, if say it were placed
+ * randomly within the address space. We leave enough space for our
+ * batch, which leaves exactly one perfect sized hole for the shadow to
+ * occupy later.
+ *
+ * Note that pad_to_size is just the node.size for the vma, which means
+ * we can easily occupy the entire 48b ppGTT, if we want, without
+ * needing an insane amount of physical memory.
+ */
+ exec[0].pad_to_size = gem_aperture_size(fd) - 2 * size;
+
+ exec[1].handle = gem_create(fd, size); /* batch */
+ exec[1].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ exec[1].offset = gen8_canonical_address(exec[0].pad_to_size);
+
+ vaddr = gem_mmap__wc(fd, exec[1].handle, 0, size, PROT_WRITE);
+
+ len = fill_and_copy_shadow(vaddr,
+ size,
+ exec[0].pad_to_size + size, /* shadow location */
+ exec[0].offset);
+
+ munmap(vaddr, size);
+
+ execbuf.flags = I915_EXEC_BLT;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
+ gem_sync(fd, exec[1].handle);
+
+ gem_set_domain(fd, exec[0].handle,
+ I915_GEM_DOMAIN_CPU,
+ I915_GEM_DOMAIN_CPU);
+
+ vaddr = gem_mmap__cpu(fd, exec[0].handle, 0, size, PROT_READ);
+
+
+ /* Shadow batch is meant to be read-only */
+ for (i = 0; i < len / sizeof(uint32_t); i++) {
+ if (i != 6)
+ igt_assert_neq_u32(vaddr[i], 0xdeadbeaf);
+ }
+
+ /*
+ * Since batch_len is smaller than PAGE_SIZE, we should expect the extra
+ * dwords to be zeroed. Even though this doesn't affect execution, we
+ * don't want to be leaking stuff by accident.
+ */
+ for (i = len / sizeof(uint32_t); i < size / sizeof(uint32_t); i++)
+ igt_assert_eq_u32(vaddr[i], 0);
+
+ munmap(vaddr, size);
+
+ for (i = 0; i < ARRAY_SIZE(exec); i++)
+ gem_close(fd, exec[i].handle);
+}
+
igt_main
{
uint32_t handle;
@@ -1138,6 +1264,9 @@ igt_main
igt_subtest("bb-oversize")
test_bb_oversize(i915);
+ igt_subtest("shadow-peek")
+ test_shadow_peek(i915);
+
igt_fixture {
igt_stop_hang_detector();
gem_close(i915, handle);
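
As a footnote to the pad_to_size comment in test_shadow_peek() above, a
standalone sketch of the address-space arithmetic, assuming a 48b ppGTT
(i.e. gem_aperture_size() returning 1ULL << 48; the sketch and its names are
illustrative, not IGT code): pinning scratch at 0, padded to
aperture - 2 * PAGE_SIZE, with the batch pinned immediately after it, leaves
exactly one page-sized hole at the top of the address space, so the kernel
has nowhere else to bind the shadow. The resulting guess matches the src
address the test feeds to fill_and_copy_shadow().

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE (1ULL << 12)

    int main(void)
    {
        uint64_t aperture = 1ULL << 48;              /* assumed 48b ppGTT */
        uint64_t scratch = 0;                        /* exec[0].offset */
        uint64_t pad = aperture - 2 * PAGE_SIZE;     /* exec[0].pad_to_size */
        uint64_t batch = pad;                        /* exec[1].offset, pre-canonical */
        uint64_t shadow = pad + PAGE_SIZE;           /* the only hole left */

        /* scratch [0, pad) and batch [pad, pad + PAGE_SIZE) tile the
         * address space except for the final page. */
        assert(scratch + pad == batch);
        assert(batch + PAGE_SIZE == shadow);
        assert(shadow + PAGE_SIZE == aperture);

        printf("guessed shadow location: 0x%" PRIx64 "\n", shadow);
        return 0;
    }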