diff options
author | Katarzyna Dec <katarzyna.dec@intel.com> | 2018-05-04 15:02:14 +0200 |
---|---|---|
committer | Michał Winiarski <michal.winiarski@intel.com> | 2018-05-15 10:51:31 +0200 |
commit | 92e89da237960726bcd90879d4a11ab1d553937d (patch) | |
tree | 6115b087a56ffeb3dbfde4bb8aa5475fe11bd65d /lib | |
parent | 18fa0c11012da6aa9a5511dc9b1736064a02b429 (diff) |
lib/media_spin: Move helper functions to gpu_fill library
Let's remove duplications introduced by moving media_spin helper
functions to gpu_fill. These were mainly the same functions
as for Gen8 media/gpgpu fill. gen8_render_flush from media_spin
was replaced by gen7_render_flush. The only functions that were
left intact are gen8_spin_curbe_buffer_data, gen8_emit_vfe_state_spin,
gen8_emit_media_objects_spin and gen8lp_emit_media_objects_spin.
v2: squashed patches 1 and 2 from v1
v3: updated commit msg
Signed-off-by: Katarzyna Dec <katarzyna.dec@intel.com>
Cc: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/gpu_fill.c | 81 | ||||
-rw-r--r-- | lib/gpu_fill.h | 13 | ||||
-rw-r--r-- | lib/media_spin.c | 347 |
3 files changed, 113 insertions, 328 deletions
diff --git a/lib/gpu_fill.c b/lib/gpu_fill.c index f05d4eca..f5fc61bb 100644 --- a/lib/gpu_fill.c +++ b/lib/gpu_fill.c @@ -352,6 +352,20 @@ gen7_emit_gpgpu_walk(struct intel_batchbuffer *batch, } uint32_t +gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch, + uint32_t iters) +{ + uint32_t *curbe_buffer; + uint32_t offset; + + curbe_buffer = intel_batchbuffer_subdata_alloc(batch, 64, 64); + offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer); + *curbe_buffer = iters; + + return offset; +} + +uint32_t gen8_fill_surface_state(struct intel_batchbuffer *batch, struct igt_buf *buf, uint32_t format, @@ -526,6 +540,30 @@ gen8_emit_vfe_state_gpgpu(struct intel_batchbuffer *batch) } void +gen8_emit_vfe_state_spin(struct intel_batchbuffer *batch) +{ + OUT_BATCH(GEN8_MEDIA_VFE_STATE | (9 - 2)); + + /* scratch buffer */ + OUT_BATCH(0); + OUT_BATCH(0); + + /* number of threads & urb entries */ + OUT_BATCH(2 << 8); + + OUT_BATCH(0); + + /* urb entry size & curbe size */ + OUT_BATCH(2 << 16 | + 2); + + /* scoreboard */ + OUT_BATCH(0); + OUT_BATCH(0); + OUT_BATCH(0); +} + +void gen8_emit_gpgpu_walk(struct intel_batchbuffer *batch, unsigned x, unsigned y, unsigned width, unsigned height) @@ -586,6 +624,49 @@ gen8_emit_gpgpu_walk(struct intel_batchbuffer *batch, } void +gen8_emit_media_objects_spin(struct intel_batchbuffer *batch) +{ + OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2)); + + /* interface descriptor offset */ + OUT_BATCH(0); + + /* without indirect data */ + OUT_BATCH(0); + OUT_BATCH(0); + + /* scoreboard */ + OUT_BATCH(0); + OUT_BATCH(0); + + /* inline data (xoffset, yoffset) */ + OUT_BATCH(0); + OUT_BATCH(0); + gen8_emit_media_state_flush(batch); +} + +void +gen8lp_emit_media_objects_spin(struct intel_batchbuffer *batch) +{ + OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2)); + + /* interface descriptor offset */ + OUT_BATCH(0); + + /* without indirect data */ + OUT_BATCH(0); + OUT_BATCH(0); + + /* scoreboard */ + OUT_BATCH(0); + OUT_BATCH(0); + + /* inline data 
(xoffset, yoffset) */ + OUT_BATCH(0); + OUT_BATCH(0); +} + +void gen9_emit_state_base_address(struct intel_batchbuffer *batch) { OUT_BATCH(GEN8_STATE_BASE_ADDRESS | (19 - 2)); diff --git a/lib/gpu_fill.h b/lib/gpu_fill.h index 067d4987..5335fe3f 100644 --- a/lib/gpu_fill.h +++ b/lib/gpu_fill.h @@ -89,6 +89,10 @@ gen7_emit_gpgpu_walk(struct intel_batchbuffer *batch, unsigned width, unsigned height); uint32_t +gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch, + uint32_t iters); + +uint32_t gen8_fill_surface_state(struct intel_batchbuffer *batch, struct igt_buf *buf, uint32_t format, @@ -110,11 +114,20 @@ void gen8_emit_vfe_state_gpgpu(struct intel_batchbuffer *batch); void +gen8_emit_vfe_state_spin(struct intel_batchbuffer *batch); + +void gen8_emit_gpgpu_walk(struct intel_batchbuffer *batch, unsigned x, unsigned y, unsigned width, unsigned height); void +gen8_emit_media_objects_spin(struct intel_batchbuffer *batch); + +void +gen8lp_emit_media_objects_spin(struct intel_batchbuffer *batch); + +void gen9_emit_state_base_address(struct intel_batchbuffer *batch); #endif /* GPU_FILL_H */ diff --git a/lib/media_spin.c b/lib/media_spin.c index d9e058b1..16ea8483 100644 --- a/lib/media_spin.c +++ b/lib/media_spin.c @@ -31,6 +31,7 @@ #include "intel_batchbuffer.h" #include "gen8_media.h" #include "media_spin.h" +#include "gpu_fill.h" static const uint32_t spin_kernel[][4] = { { 0x00600001, 0x20800208, 0x008d0000, 0x00000000 }, /* mov (8)r4.0<1>:ud r0.0<8;8;1>:ud */ @@ -45,316 +46,6 @@ static const uint32_t spin_kernel[][4] = { { 0x07800031, 0x20000a40, 0x0e000e00, 0x82000010 }, /* send.ts (16)null<1> r112<0;1;0>:d 0x82000010 */ }; -static void -gen8_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end) -{ - int ret; - - ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); - if (ret == 0) - ret = drm_intel_gem_bo_context_exec(batch->bo, NULL, - batch_end, 0); - igt_assert_eq(ret, 0); -} - -static uint32_t -gen8_spin_curbe_buffer_data(struct 
intel_batchbuffer *batch, - uint32_t iters) -{ - uint32_t *curbe_buffer; - uint32_t offset; - - curbe_buffer = intel_batchbuffer_subdata_alloc(batch, 64, 64); - offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer); - *curbe_buffer = iters; - - return offset; -} - -static uint32_t -gen8_spin_surface_state(struct intel_batchbuffer *batch, - struct igt_buf *buf, - uint32_t format, - int is_dst) -{ - struct gen8_surface_state *ss; - uint32_t write_domain, read_domain, offset; - int ret; - - if (is_dst) { - write_domain = read_domain = I915_GEM_DOMAIN_RENDER; - } else { - write_domain = 0; - read_domain = I915_GEM_DOMAIN_SAMPLER; - } - - ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64); - offset = intel_batchbuffer_subdata_offset(batch, ss); - - ss->ss0.surface_type = GEN8_SURFACE_2D; - ss->ss0.surface_format = format; - ss->ss0.render_cache_read_write = 1; - ss->ss0.vertical_alignment = 1; /* align 4 */ - ss->ss0.horizontal_alignment = 1; /* align 4 */ - - if (buf->tiling == I915_TILING_X) - ss->ss0.tiled_mode = 2; - else if (buf->tiling == I915_TILING_Y) - ss->ss0.tiled_mode = 3; - - ss->ss8.base_addr = buf->bo->offset; - - ret = drm_intel_bo_emit_reloc(batch->bo, - intel_batchbuffer_subdata_offset(batch, ss) + 8 * 4, - buf->bo, 0, - read_domain, write_domain); - igt_assert_eq(ret, 0); - - ss->ss2.height = igt_buf_height(buf) - 1; - ss->ss2.width = igt_buf_width(buf) - 1; - ss->ss3.pitch = buf->stride - 1; - - ss->ss7.shader_chanel_select_r = 4; - ss->ss7.shader_chanel_select_g = 5; - ss->ss7.shader_chanel_select_b = 6; - ss->ss7.shader_chanel_select_a = 7; - - return offset; -} - -static uint32_t -gen8_spin_binding_table(struct intel_batchbuffer *batch, - struct igt_buf *dst) -{ - uint32_t *binding_table, offset; - - binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64); - offset = intel_batchbuffer_subdata_offset(batch, binding_table); - - binding_table[0] = gen8_spin_surface_state(batch, dst, - GEN8_SURFACEFORMAT_R8_UNORM, 1); - - 
return offset; -} - -static uint32_t -gen8_spin_media_kernel(struct intel_batchbuffer *batch, - const uint32_t kernel[][4], - size_t size) -{ - uint32_t offset; - - offset = intel_batchbuffer_copy_data(batch, kernel, size, 64); - - return offset; -} - -static uint32_t -gen8_spin_interface_descriptor(struct intel_batchbuffer *batch, - struct igt_buf *dst) -{ - struct gen8_interface_descriptor_data *idd; - uint32_t offset; - uint32_t binding_table_offset, kernel_offset; - - binding_table_offset = gen8_spin_binding_table(batch, dst); - kernel_offset = gen8_spin_media_kernel(batch, spin_kernel, - sizeof(spin_kernel)); - - idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64); - offset = intel_batchbuffer_subdata_offset(batch, idd); - - idd->desc0.kernel_start_pointer = (kernel_offset >> 6); - - idd->desc2.single_program_flow = 1; - idd->desc2.floating_point_mode = GEN8_FLOATING_POINT_IEEE_754; - - idd->desc3.sampler_count = 0; /* 0 samplers used */ - idd->desc3.sampler_state_pointer = 0; - - idd->desc4.binding_table_entry_count = 0; - idd->desc4.binding_table_pointer = (binding_table_offset >> 5); - - idd->desc5.constant_urb_entry_read_offset = 0; - idd->desc5.constant_urb_entry_read_length = 1; /* grf 1 */ - - return offset; -} - -static void -gen8_emit_state_base_address(struct intel_batchbuffer *batch) -{ - OUT_BATCH(GEN8_STATE_BASE_ADDRESS | (16 - 2)); - - /* general */ - OUT_BATCH(0 | BASE_ADDRESS_MODIFY); - OUT_BATCH(0); - - /* stateless data port */ - OUT_BATCH(0 | BASE_ADDRESS_MODIFY); - - /* surface */ - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY); - - /* dynamic */ - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, - 0, BASE_ADDRESS_MODIFY); - - /* indirect */ - OUT_BATCH(0); - OUT_BATCH(0); - - /* instruction */ - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); - - /* general state buffer size */ - OUT_BATCH(0xfffff000 | 1); - /* dynamic state buffer size */ - OUT_BATCH(1 
<< 12 | 1); - /* indirect object buffer size */ - OUT_BATCH(0xfffff000 | 1); - /* intruction buffer size, must set modify enable bit, otherwise it may result in GPU hang */ - OUT_BATCH(1 << 12 | 1); -} - -static void -gen9_emit_state_base_address(struct intel_batchbuffer *batch) -{ - OUT_BATCH(GEN8_STATE_BASE_ADDRESS | (19 - 2)); - - /* general */ - OUT_BATCH(0 | BASE_ADDRESS_MODIFY); - OUT_BATCH(0); - - /* stateless data port */ - OUT_BATCH(0 | BASE_ADDRESS_MODIFY); - - /* surface */ - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY); - - /* dynamic */ - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, - 0, BASE_ADDRESS_MODIFY); - - /* indirect */ - OUT_BATCH(0); - OUT_BATCH(0); - - /* instruction */ - OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); - - /* general state buffer size */ - OUT_BATCH(0xfffff000 | 1); - /* dynamic state buffer size */ - OUT_BATCH(1 << 12 | 1); - /* indirect object buffer size */ - OUT_BATCH(0xfffff000 | 1); - /* intruction buffer size, must set modify enable bit, otherwise it may result in GPU hang */ - OUT_BATCH(1 << 12 | 1); - - /* Bindless surface state base address */ - OUT_BATCH(0 | BASE_ADDRESS_MODIFY); - OUT_BATCH(0); - OUT_BATCH(0xfffff000); -} - -static void -gen8_emit_vfe_state(struct intel_batchbuffer *batch) -{ - OUT_BATCH(GEN8_MEDIA_VFE_STATE | (9 - 2)); - - /* scratch buffer */ - OUT_BATCH(0); - OUT_BATCH(0); - - /* number of threads & urb entries */ - OUT_BATCH(2 << 8); - - OUT_BATCH(0); - - /* urb entry size & curbe size */ - OUT_BATCH(2 << 16 | - 2); - - /* scoreboard */ - OUT_BATCH(0); - OUT_BATCH(0); - OUT_BATCH(0); -} - -static void -gen8_emit_curbe_load(struct intel_batchbuffer *batch, uint32_t curbe_buffer) -{ - OUT_BATCH(GEN8_MEDIA_CURBE_LOAD | (4 - 2)); - OUT_BATCH(0); - /* curbe total data length */ - OUT_BATCH(64); - /* curbe data start address, is relative to the dynamics base address */ - OUT_BATCH(curbe_buffer); -} - -static void 
-gen8_emit_interface_descriptor_load(struct intel_batchbuffer *batch, - uint32_t interface_descriptor) -{ - OUT_BATCH(GEN8_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2)); - OUT_BATCH(0); - /* interface descriptor data length */ - OUT_BATCH(sizeof(struct gen8_interface_descriptor_data)); - /* interface descriptor address, is relative to the dynamics base address */ - OUT_BATCH(interface_descriptor); -} - -static void -gen8_emit_media_state_flush(struct intel_batchbuffer *batch) -{ - OUT_BATCH(GEN8_MEDIA_STATE_FLUSH | (2 - 2)); - OUT_BATCH(0); -} - -static void -gen8_emit_media_objects(struct intel_batchbuffer *batch) -{ - OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2)); - - /* interface descriptor offset */ - OUT_BATCH(0); - - /* without indirect data */ - OUT_BATCH(0); - OUT_BATCH(0); - - /* scoreboard */ - OUT_BATCH(0); - OUT_BATCH(0); - - /* inline data (xoffset, yoffset) */ - OUT_BATCH(0); - OUT_BATCH(0); - gen8_emit_media_state_flush(batch); -} - -static void -gen8lp_emit_media_objects(struct intel_batchbuffer *batch) -{ - OUT_BATCH(GEN8_MEDIA_OBJECT | (8 - 2)); - - /* interface descriptor offset */ - OUT_BATCH(0); - - /* without indirect data */ - OUT_BATCH(0); - OUT_BATCH(0); - - /* scoreboard */ - OUT_BATCH(0); - OUT_BATCH(0); - - /* inline data (xoffset, yoffset) */ - OUT_BATCH(0); - OUT_BATCH(0); -} - /* * This sets up the media pipeline, * @@ -390,7 +81,7 @@ gen8_media_spinfunc(struct intel_batchbuffer *batch, batch->ptr = &batch->buffer[BATCH_STATE_SPLIT]; curbe_buffer = gen8_spin_curbe_buffer_data(batch, spins); - interface_descriptor = gen8_spin_interface_descriptor(batch, dst); + interface_descriptor = gen8_fill_interface_descriptor(batch, dst, spin_kernel, sizeof(spin_kernel)); igt_assert(batch->ptr < &batch->buffer[4095]); /* media pipeline */ @@ -398,20 +89,20 @@ gen8_media_spinfunc(struct intel_batchbuffer *batch, OUT_BATCH(GEN8_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); gen8_emit_state_base_address(batch); - gen8_emit_vfe_state(batch); + 
gen8_emit_vfe_state_spin(batch); - gen8_emit_curbe_load(batch, curbe_buffer); + gen7_emit_curbe_load(batch, curbe_buffer); - gen8_emit_interface_descriptor_load(batch, interface_descriptor); + gen7_emit_interface_descriptor_load(batch, interface_descriptor); - gen8_emit_media_objects(batch); + gen8_emit_media_objects_spin(batch); OUT_BATCH(MI_BATCH_BUFFER_END); batch_end = intel_batchbuffer_align(batch, 8); igt_assert(batch_end < BATCH_STATE_SPLIT); - gen8_render_flush(batch, batch_end); + gen7_render_flush(batch, batch_end); intel_batchbuffer_reset(batch); } @@ -428,7 +119,7 @@ gen8lp_media_spinfunc(struct intel_batchbuffer *batch, batch->ptr = &batch->buffer[BATCH_STATE_SPLIT]; curbe_buffer = gen8_spin_curbe_buffer_data(batch, spins); - interface_descriptor = gen8_spin_interface_descriptor(batch, dst); + interface_descriptor = gen8_fill_interface_descriptor(batch, dst, spin_kernel, sizeof(spin_kernel)); igt_assert(batch->ptr < &batch->buffer[4095]); /* media pipeline */ @@ -436,20 +127,20 @@ gen8lp_media_spinfunc(struct intel_batchbuffer *batch, OUT_BATCH(GEN8_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); gen8_emit_state_base_address(batch); - gen8_emit_vfe_state(batch); + gen8_emit_vfe_state_spin(batch); - gen8_emit_curbe_load(batch, curbe_buffer); + gen7_emit_curbe_load(batch, curbe_buffer); - gen8_emit_interface_descriptor_load(batch, interface_descriptor); + gen7_emit_interface_descriptor_load(batch, interface_descriptor); - gen8lp_emit_media_objects(batch); + gen8lp_emit_media_objects_spin(batch); OUT_BATCH(MI_BATCH_BUFFER_END); batch_end = intel_batchbuffer_align(batch, 8); igt_assert(batch_end < BATCH_STATE_SPLIT); - gen8_render_flush(batch, batch_end); + gen7_render_flush(batch, batch_end); intel_batchbuffer_reset(batch); } @@ -466,7 +157,7 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch, batch->ptr = &batch->buffer[BATCH_STATE_SPLIT]; curbe_buffer = gen8_spin_curbe_buffer_data(batch, spins); - interface_descriptor = 
gen8_spin_interface_descriptor(batch, dst); + interface_descriptor = gen8_fill_interface_descriptor(batch, dst, spin_kernel, sizeof(spin_kernel)); igt_assert(batch->ptr < &batch->buffer[4095]); /* media pipeline */ @@ -479,13 +170,13 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch, GEN9_FORCE_MEDIA_AWAKE_MASK); gen9_emit_state_base_address(batch); - gen8_emit_vfe_state(batch); + gen8_emit_vfe_state_spin(batch); - gen8_emit_curbe_load(batch, curbe_buffer); + gen7_emit_curbe_load(batch, curbe_buffer); - gen8_emit_interface_descriptor_load(batch, interface_descriptor); + gen7_emit_interface_descriptor_load(batch, interface_descriptor); - gen8_emit_media_objects(batch); + gen8_emit_media_objects_spin(batch); OUT_BATCH(GEN8_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA | GEN9_FORCE_MEDIA_AWAKE_DISABLE | @@ -499,6 +190,6 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch, batch_end = intel_batchbuffer_align(batch, 8); igt_assert(batch_end < BATCH_STATE_SPLIT); - gen8_render_flush(batch, batch_end); + gen7_render_flush(batch, batch_end); intel_batchbuffer_reset(batch); } |