summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorJordan Justen <jordan.l.justen@intel.com>2018-02-20 03:05:56 -0800
committerJordan Justen <jordan.l.justen@intel.com>2018-03-19 11:28:18 -0700
commitb09e979a67817a9b068f841bda81940b9d208850 (patch)
tree440bd169421bd4f95734c043e5abe71b75a47099 /tools
parent3e0eca2f7a7935a6726c43823726d42c4b0d80f9 (diff)
tools/aubdump: For gen10+, support addresses up to 4GB
For gen10, we now add mappings for buffers as they are needed. Instead of doing this dynamically, we could always map the entire 4GB. With 4KB pages, the tables would take up 8MB in every AUB. AUBs are often quite huge compared to 8MB, but they can also be just a few hundred KB. This should allow the AUB to create up to about 4GB of allocated buffers, whereas before we were limited to about 64MB. While it is unlikely that we'll try to capture AUBs that generate buffers up to 4GB in size, this change should also allow pinned buffers to be used anywhere in the first 4GB. (I tested a pinned buffer at 0xf0000000.) Signed-off-by: Jordan Justen <jordan.l.justen@intel.com> Reviewed-by: Scott D Phillips <scott.d.phillips@intel.com>
Diffstat (limited to 'tools')
-rw-r--r--tools/aubdump.c20
1 file changed, 18 insertions, 2 deletions
diff --git a/tools/aubdump.c b/tools/aubdump.c
index fbd8bf8f..267061b0 100644
--- a/tools/aubdump.c
+++ b/tools/aubdump.c
@@ -505,6 +505,12 @@ gen8_map_ggtt_range(uint64_t start, uint64_t end)
}
static void
+gen8_map_base_size(uint64_t base, uint64_t size)
+{
+ gen8_map_ggtt_range(base, base + size);
+}
+
+static void
gen10_write_header(void)
{
char app_name[8 * 4];
@@ -524,15 +530,16 @@ gen10_write_header(void)
dword_out(0); /* version */
data_out(app_name, app_name_len);
- gen8_map_ggtt_range(0, MEMORY_MAP_SIZE);
-
/* RENDER_RING */
+ gen8_map_base_size(RENDER_RING_ADDR, RING_SIZE);
mem_trace_memory_write_header_out(RENDER_RING_ADDR, RING_SIZE,
AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
dword_out(0);
/* RENDER_PPHWSP */
+ gen8_map_base_size(RENDER_CONTEXT_ADDR,
+ PPHWSP_SIZE + sizeof(render_context_init));
mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR,
PPHWSP_SIZE +
sizeof(render_context_init),
@@ -544,12 +551,15 @@ gen10_write_header(void)
data_out(render_context_init, sizeof(render_context_init));
/* BLITTER_RING */
+ gen8_map_base_size(BLITTER_RING_ADDR, RING_SIZE);
mem_trace_memory_write_header_out(BLITTER_RING_ADDR, RING_SIZE,
AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
dword_out(0);
/* BLITTER_PPHWSP */
+ gen8_map_base_size(BLITTER_CONTEXT_ADDR,
+ PPHWSP_SIZE + sizeof(blitter_context_init));
mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR,
PPHWSP_SIZE +
sizeof(blitter_context_init),
@@ -561,12 +571,15 @@ gen10_write_header(void)
data_out(blitter_context_init, sizeof(blitter_context_init));
/* VIDEO_RING */
+ gen8_map_base_size(VIDEO_RING_ADDR, RING_SIZE);
mem_trace_memory_write_header_out(VIDEO_RING_ADDR, RING_SIZE,
AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
dword_out(0);
/* VIDEO_PPHWSP */
+ gen8_map_base_size(VIDEO_CONTEXT_ADDR,
+ PPHWSP_SIZE + sizeof(video_context_init));
mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR,
PPHWSP_SIZE +
sizeof(video_context_init),
@@ -961,6 +974,9 @@ dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
if (bo->map == NULL && bo->size > 0)
bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
fail_if(bo->map == MAP_FAILED, "intel_aubdump: bo mmap failed\n");
+
+ if (gen >= 10)
+ gen8_map_ggtt_range(bo->offset, bo->offset + bo->size);
}
batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :