author	Dominik Zeromski <dominik.zeromski@intel.com>	2015-07-17 11:25:44 +0200
committer	Thomas Wood <thomas.wood@intel.com>	2015-07-20 18:13:37 +0100
commit	a017c2905a615d514ae38c8c624d70ef0b11be91 (patch)
tree	ba9f8a677958e1d15501a3865b859267e51c98a7 /lib/gpgpu_fill.c
parent	ed816d560ce5a1d80a005a452ee0e4295ac1698f (diff)
lib/gpgpu_fill: Add BDW support
BDW changed the structure of the surface state and interface descriptors. Commands like state base address and gpgpu walker were extended.

Cc: Thomas Wood <thomas.wood@intel.com>
Signed-off-by: Dominik Zeromski <dominik.zeromski@intel.com>
Signed-off-by: Thomas Wood <thomas.wood@intel.com>
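For context, this is how a test picks up the new entry point. A minimal sketch, assuming the usual IGT dispatch helper igt_get_gpgpu_fillfunc() in lib/intel_batchbuffer.c; the dispatcher wiring itself is not part of this diff:

igt_fillfunc_t igt_get_gpgpu_fillfunc(int devid)
{
	igt_fillfunc_t fill = NULL;

	if (IS_GEN7(devid))
		fill = gen7_gpgpu_fillfunc;
	else if (IS_GEN8(devid))
		fill = gen8_gpgpu_fillfunc;

	return fill;
}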
Diffstat (limited to 'lib/gpgpu_fill.c')
-rwxr-xr-x	lib/gpgpu_fill.c	296
1 file changed, 290 insertions, 6 deletions
diff --git a/lib/gpgpu_fill.c b/lib/gpgpu_fill.c
index f0911e68..c98f1211 100755
--- a/lib/gpgpu_fill.c
+++ b/lib/gpgpu_fill.c
@@ -32,6 +32,7 @@
#include "drmtest.h"
#include "intel_batchbuffer.h"
#include "gen7_media.h"
+#include "gen8_media.h"
#include "gpgpu_fill.h"
/* shaders/gpgpu/gpgpu_fill.gxa */
@@ -48,6 +49,19 @@ static const uint32_t gen7_gpgpu_kernel[][4] = {
{ 0x07800031, 0x20001ca8, 0x00000e00, 0x82000010 },
};
+static const uint32_t gen8_gpgpu_kernel[][4] = {
+ { 0x00400001, 0x20202288, 0x00000020, 0x00000000 },
+ { 0x00000041, 0x20400208, 0x06000004, 0x00000010 },
+ { 0x00000001, 0x20440208, 0x00000018, 0x00000000 },
+ { 0x00600001, 0x20800208, 0x008d0000, 0x00000000 },
+ { 0x00200001, 0x20800208, 0x00450040, 0x00000000 },
+ { 0x00000001, 0x20880608, 0x00000000, 0x0000000f },
+ { 0x00800001, 0x20a00208, 0x00000020, 0x00000000 },
+ { 0x0c800031, 0x24000a40, 0x0e000080, 0x060a8000 },
+ { 0x00600001, 0x2e000208, 0x008d0000, 0x00000000 },
+ { 0x07800031, 0x20000a40, 0x0e000e00, 0x82000010 },
+};
+
static uint32_t
batch_used(struct intel_batchbuffer *batch)
{
@@ -97,8 +111,7 @@ gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
}
static uint32_t
-gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
- uint8_t color)
+gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch, uint8_t color)
{
uint8_t *curbe_buffer;
uint32_t offset;
@@ -160,6 +173,58 @@ gen7_fill_surface_state(struct intel_batchbuffer *batch,
}
static uint32_t
+gen8_fill_surface_state(struct intel_batchbuffer *batch,
+ struct igt_buf *buf,
+ uint32_t format,
+ int is_dst)
+{
+ struct gen8_surface_state *ss;
+ uint32_t write_domain, read_domain, offset;
+ int ret;
+
+ if (is_dst) {
+ write_domain = read_domain = I915_GEM_DOMAIN_RENDER;
+ } else {
+ write_domain = 0;
+ read_domain = I915_GEM_DOMAIN_SAMPLER;
+ }
+
+ ss = batch_alloc(batch, sizeof(*ss), 64);
+ offset = batch_offset(batch, ss);
+
+ ss->ss0.surface_type = GEN8_SURFACE_2D;
+ ss->ss0.surface_format = format;
+ ss->ss0.render_cache_read_write = 1;
+ ss->ss0.vertical_alignment = 1; /* align 4 */
+ ss->ss0.horizontal_alignment = 1; /* align 4 */
+
+ if (buf->tiling == I915_TILING_X)
+ ss->ss0.tiled_mode = 2;
+ else if (buf->tiling == I915_TILING_Y)
+ ss->ss0.tiled_mode = 3;
+
+ ss->ss8.base_addr = buf->bo->offset;
+
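+	/* have libdrm patch dword 8 (the surface base address) at exec time if the bo moves */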
+ ret = drm_intel_bo_emit_reloc(batch->bo,
+ batch_offset(batch, ss) + 8 * 4,
+ buf->bo, 0,
+ read_domain, write_domain);
+ igt_assert_eq(ret, 0);
+
+ ss->ss2.height = igt_buf_height(buf) - 1;
+ ss->ss2.width = igt_buf_width(buf) - 1;
+ ss->ss3.pitch = buf->stride - 1;
+
+ ss->ss7.shader_chanel_select_r = 4;
+ ss->ss7.shader_chanel_select_g = 5;
+ ss->ss7.shader_chanel_select_b = 6;
+ ss->ss7.shader_chanel_select_a = 7;
+
+	return offset;
+}
+
+static uint32_t
gen7_fill_binding_table(struct intel_batchbuffer *batch,
struct igt_buf *dst)
{
@@ -174,6 +239,20 @@ gen7_fill_binding_table(struct intel_batchbuffer *batch,
}
static uint32_t
+gen8_fill_binding_table(struct intel_batchbuffer *batch,
+ struct igt_buf *dst)
+{
+ uint32_t *binding_table, offset;
+
+ binding_table = batch_alloc(batch, 32, 64);
+ offset = batch_offset(batch, binding_table);
+
+	binding_table[0] = gen8_fill_surface_state(batch, dst,
+					GEN8_SURFACEFORMAT_R8_UNORM, 1);
+
+ return offset;
+}
+
+static uint32_t
gen7_fill_gpgpu_kernel(struct intel_batchbuffer *batch,
const uint32_t kernel[][4],
size_t size)
@@ -216,6 +295,37 @@ gen7_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *
return offset;
}
+static uint32_t
+gen8_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *dst,
+ const uint32_t kernel[][4], size_t size)
+{
+ struct gen8_interface_descriptor_data *idd;
+ uint32_t offset;
+ uint32_t binding_table_offset, kernel_offset;
+
+ binding_table_offset = gen8_fill_binding_table(batch, dst);
+ kernel_offset = gen7_fill_gpgpu_kernel(batch, kernel, size);
+
+ idd = batch_alloc(batch, sizeof(*idd), 64);
+ offset = batch_offset(batch, idd);
+
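+	/* kernel start pointer is in 64-byte units; the kernel was copied into the batch with 64-byte alignment */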
+ idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
+
+ idd->desc2.single_program_flow = 1;
+ idd->desc2.floating_point_mode = GEN8_FLOATING_POINT_IEEE_754;
+
+ idd->desc3.sampler_count = 0; /* 0 samplers used */
+ idd->desc3.sampler_state_pointer = 0;
+
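+	/* binding table pointer is in 32-byte units, relative to the surface state base address */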
+ idd->desc4.binding_table_entry_count = 0;
+ idd->desc4.binding_table_pointer = (binding_table_offset >> 5);
+
+ idd->desc5.constant_urb_entry_read_offset = 0;
+ idd->desc5.constant_urb_entry_read_length = 1; /* grf 1 */
+
+ return offset;
+}
+
static void
gen7_emit_state_base_address(struct intel_batchbuffer *batch)
{
@@ -244,6 +354,42 @@ gen7_emit_state_base_address(struct intel_batchbuffer *batch)
}
static void
+gen8_emit_state_base_address(struct intel_batchbuffer *batch)
+{
+ OUT_BATCH(GEN8_STATE_BASE_ADDRESS | (16 - 2));
+
+ /* general */
+ OUT_BATCH(0 | (0x78 << 4) | (0 << 1) | BASE_ADDRESS_MODIFY);
+ OUT_BATCH(0);
+
+ /* stateless data port */
+ OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
+
+ /* surface */
+ OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY);
+
+ /* dynamic */
+ OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION,
+ 0, BASE_ADDRESS_MODIFY);
+
+ /* indirect */
+ OUT_BATCH(0);
+	OUT_BATCH(0);
+
+ /* instruction */
+ OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
+
+ /* general state buffer size */
+ OUT_BATCH(0xfffff000 | 1);
+ /* dynamic state buffer size */
+ OUT_BATCH(1 << 12 | 1);
+ /* indirect object buffer size */
+ OUT_BATCH(0xfffff000 | 1);
+	/* instruction buffer size; the modify-enable bit must be set, otherwise the GPU may hang */
+ OUT_BATCH(1 << 12 | 1);
+}
+
+static void
gen7_emit_vfe_state_gpgpu(struct intel_batchbuffer *batch)
{
OUT_BATCH(GEN7_MEDIA_VFE_STATE | (8 - 2));
@@ -269,6 +415,29 @@ gen7_emit_vfe_state_gpgpu(struct intel_batchbuffer *batch)
}
static void
+gen8_emit_vfe_state_gpgpu(struct intel_batchbuffer *batch)
+{
+ OUT_BATCH(GEN8_MEDIA_VFE_STATE | (9 - 2));
+
+ /* scratch buffer */
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+
+ /* number of threads & urb entries */
+ OUT_BATCH(1 << 16 | 1 << 8);
+
+ OUT_BATCH(0);
+
+ /* urb entry size & curbe size */
+ OUT_BATCH(0 << 16 | 1);
+
+ /* scoreboard */
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+}
+
+static void
gen7_emit_curbe_load(struct intel_batchbuffer *batch, uint32_t curbe_buffer)
{
OUT_BATCH(GEN7_MEDIA_CURBE_LOAD | (4 - 2));
@@ -291,6 +460,17 @@ gen7_emit_interface_descriptor_load(struct intel_batchbuffer *batch, uint32_t in
}
static void
+gen8_emit_interface_descriptor_load(struct intel_batchbuffer *batch, uint32_t interface_descriptor)
+{
+ OUT_BATCH(GEN8_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
+ OUT_BATCH(0);
+ /* interface descriptor data length */
+ OUT_BATCH(sizeof(struct gen8_interface_descriptor_data));
+	/* interface descriptor address, relative to the dynamic state base address */
+ OUT_BATCH(interface_descriptor);
+}
+
+static void
gen7_emit_gpgpu_walk(struct intel_batchbuffer *batch,
unsigned x, unsigned y,
unsigned width, unsigned height)
@@ -347,6 +527,66 @@ gen7_emit_gpgpu_walk(struct intel_batchbuffer *batch,
OUT_BATCH(0xffffffff);
}
+static void
+gen8_emit_gpgpu_walk(struct intel_batchbuffer *batch,
+ unsigned x, unsigned y,
+ unsigned width, unsigned height)
+{
+ uint32_t x_dim, y_dim, tmp, right_mask;
+
+	/*
+	 * Simply do SIMD16 based dispatch, so every thread uses
+	 * SIMD16 channels.
+	 *
+	 * Define our own thread group size, e.g. 16x1 for every group, so
+	 * each group has exactly 1 thread in SIMD16 dispatch. Thread
+	 * width/height/depth are therefore all 1.
+	 *
+	 * Then thread group X = width / 16 (rounded up)
+	 *      thread group Y = height
+	 */
+ x_dim = (width + 15) / 16;
+ y_dim = height;
+
+ tmp = width & 15;
+ if (tmp == 0)
+ right_mask = (1 << 16) - 1;
+ else
+ right_mask = (1 << tmp) - 1;
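+	/*
+	 * Worked example with illustrative numbers: width = 20 gives
+	 * x_dim = (20 + 15) / 16 = 2 thread groups, tmp = 20 & 15 = 4,
+	 * so right_mask = (1 << 4) - 1 = 0xf, enabling only the 4 live
+	 * channels of the last SIMD16 group in each row.
+	 */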
+
+ OUT_BATCH(GEN7_GPGPU_WALKER | 13);
+
+ OUT_BATCH(0); /* kernel offset */
+ OUT_BATCH(0); /* indirect data length */
+ OUT_BATCH(0); /* indirect data offset */
+
+ /* SIMD size, thread w/h/d */
+ OUT_BATCH(1 << 30 | /* SIMD16 */
+ 0 << 16 | /* depth:1 */
+ 0 << 8 | /* height:1 */
+ 0); /* width:1 */
+
+ /* thread group X */
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(x_dim);
+
+ /* thread group Y */
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(y_dim);
+
+ /* thread group Z */
+ OUT_BATCH(0);
+ OUT_BATCH(1);
+
+ /* right mask */
+ OUT_BATCH(right_mask);
+
+ /* bottom mask, height 1, always 0xffffffff */
+ OUT_BATCH(0xffffffff);
+}
+
/*
* This sets up the gpgpu pipeline,
*
@@ -403,13 +643,9 @@ gen7_gpgpu_fillfunc(struct intel_batchbuffer *batch,
OUT_BATCH(GEN7_PIPELINE_SELECT | PIPELINE_SELECT_GPGPU);
gen7_emit_state_base_address(batch);
-
gen7_emit_vfe_state_gpgpu(batch);
-
gen7_emit_curbe_load(batch, curbe_buffer);
-
gen7_emit_interface_descriptor_load(batch, interface_descriptor);
-
gen7_emit_gpgpu_walk(batch, x, y, width, height);
OUT_BATCH(MI_BATCH_BUFFER_END);
@@ -420,3 +656,51 @@ gen7_gpgpu_fillfunc(struct intel_batchbuffer *batch,
gen7_render_flush(batch, batch_end);
intel_batchbuffer_reset(batch);
}
+
+void
+gen8_gpgpu_fillfunc(struct intel_batchbuffer *batch,
+ struct igt_buf *dst,
+ unsigned x, unsigned y,
+ unsigned width, unsigned height,
+ uint8_t color)
+{
+ uint32_t curbe_buffer, interface_descriptor;
+ uint32_t batch_end;
+
+ intel_batchbuffer_flush(batch);
+
+ /* setup states */
+ batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
+
+	/*
+	 * The constant buffer needs to be filled for every thread, but as
+	 * we have just 1 thread per group, only one set of curbe data is
+	 * needed.
+	 *
+	 * Each thread then uses its thread group ID as the buffer offset.
+	 */
+ curbe_buffer = gen7_fill_curbe_buffer_data(batch, color);
+
+ interface_descriptor = gen8_fill_interface_descriptor(batch, dst,
+ gen8_gpgpu_kernel,
+ sizeof(gen8_gpgpu_kernel));
+ igt_assert(batch->ptr < &batch->buffer[4095]);
+
+ batch->ptr = batch->buffer;
+
+ /* GPGPU pipeline */
+ OUT_BATCH(GEN7_PIPELINE_SELECT | PIPELINE_SELECT_GPGPU);
+
+ gen8_emit_state_base_address(batch);
+ gen8_emit_vfe_state_gpgpu(batch);
+ gen7_emit_curbe_load(batch, curbe_buffer);
+ gen8_emit_interface_descriptor_load(batch, interface_descriptor);
+ gen8_emit_gpgpu_walk(batch, x, y, width, height);
+
+ OUT_BATCH(MI_BATCH_BUFFER_END);
+
+ batch_end = batch_align(batch, 8);
+ igt_assert(batch_end < BATCH_STATE_SPLIT);
+
+ gen7_render_flush(batch, batch_end);
+ intel_batchbuffer_reset(batch);
+}
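For reference, a hypothetical caller on a BDW device. This is only a sketch: the example_fill() wrapper and its arguments are assumptions for illustration, while igt_get_gpgpu_fillfunc(), igt_fillfunc_t and batch->devid (stored by intel_batchbuffer_alloc()) are existing IGT interfaces:

/* Fill a 64x64 region of dst with the byte 0xaa. */
static void example_fill(struct intel_batchbuffer *batch, struct igt_buf *dst)
{
	igt_fillfunc_t fill = igt_get_gpgpu_fillfunc(batch->devid);

	igt_assert(fill);	/* NULL on gens without a gpgpu fill func */
	fill(batch, dst, 0, 0, 64, 64, 0xaa);
}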