summary | refs | log | tree | commit | diff
path: root/tools/null_state_gen/intel_renderstate_gen6.c
diff options
context:
space:
mode:
author: Mika Kuoppala <mika.kuoppala@linux.intel.com> 2014-08-01 21:19:56 +0300
committer: Mika Kuoppala <mika.kuoppala@intel.com> 2014-09-05 18:04:14 +0300
commit: 4a604dee6003d3c377a2984ce288f7e9b8fdf85b (patch)
tree: bd51011d7c6b4fbb04e5ebcc23f2a14c4b288dcb /tools/null_state_gen/intel_renderstate_gen6.c
parent: b77145dd489ef4e05799aa734927b9f9e77710d2 (diff)
tools/null_state_gen: build cmd and state space separately
Instead of building batch directly to memory, build into cmd and state arrays. This representation allows us more flexibility in batch state expression and batch generation/relocation. As a bonus, we can also attach the line information that produced the batch data to help debugging. There is no change in the output states produced. This can be considered as a preparatory patch to help introduce gen8 golden state. Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Diffstat (limited to 'tools/null_state_gen/intel_renderstate_gen6.c')
-rw-r--r--  tools/null_state_gen/intel_renderstate_gen6.c  136
1 file changed, 55 insertions, 81 deletions
diff --git a/tools/null_state_gen/intel_renderstate_gen6.c b/tools/null_state_gen/intel_renderstate_gen6.c
index f169d022..5f922f79 100644
--- a/tools/null_state_gen/intel_renderstate_gen6.c
+++ b/tools/null_state_gen/intel_renderstate_gen6.c
@@ -33,31 +33,23 @@ static const uint32_t ps_kernel_nomask_affine[][4] = {
static uint32_t
gen6_bind_buf_null(struct intel_batchbuffer *batch)
{
- struct gen6_surface_state *ss;
- int ret;
+ struct gen6_surface_state ss;
+ memset(&ss, 0, sizeof(ss));
- ss = intel_batch_state_alloc(batch, sizeof(*ss), 32);
- if (ss == NULL)
- return -1;
-
- memset(ss, 0, sizeof(*ss));
-
- return intel_batch_offset(batch, ss);
+ return OUT_STATE_STRUCT(ss, 32);
}
static uint32_t
gen6_bind_surfaces(struct intel_batchbuffer *batch)
{
- uint32_t *binding_table;
+ unsigned offset;
- binding_table = intel_batch_state_alloc(batch, 32, 32);
- if (binding_table == NULL)
- return -1;
+ offset = intel_batch_state_alloc(batch, 32, 32, "bind surfaces");
- binding_table[0] = gen6_bind_buf_null(batch);
- binding_table[1] = gen6_bind_buf_null(batch);
+ bb_area_emit_offset(batch->state, offset, gen6_bind_buf_null(batch), STATE_OFFSET, "bind 1");
+ bb_area_emit_offset(batch->state, offset + 4, gen6_bind_buf_null(batch), STATE_OFFSET, "bind 2");
- return intel_batch_offset(batch, binding_table);
+ return offset;
}
static void
@@ -108,7 +100,7 @@ gen6_emit_viewports(struct intel_batchbuffer *batch, uint32_t cc_vp)
(4 - 2));
OUT_BATCH(0);
OUT_BATCH(0);
- OUT_BATCH(cc_vp);
+ OUT_BATCH_STATE_OFFSET(cc_vp);
}
static void
@@ -202,7 +194,7 @@ static void
gen6_emit_cc(struct intel_batchbuffer *batch, uint32_t blend)
{
OUT_BATCH(GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
- OUT_BATCH(blend | 1);
+ OUT_BATCH_STATE_OFFSET(blend | 1);
OUT_BATCH(1024 | 1);
OUT_BATCH(1024 | 1);
}
@@ -215,7 +207,7 @@ gen6_emit_sampler(struct intel_batchbuffer *batch, uint32_t state)
(4 - 2));
OUT_BATCH(0); /* VS */
OUT_BATCH(0); /* GS */
- OUT_BATCH(state);
+ OUT_BATCH_STATE_OFFSET(state);
}
static void
@@ -249,7 +241,7 @@ static void
gen6_emit_wm(struct intel_batchbuffer *batch, int kernel)
{
OUT_BATCH(GEN6_3DSTATE_WM | (9 - 2));
- OUT_BATCH(kernel);
+ OUT_BATCH_STATE_OFFSET(kernel);
OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT |
2 << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
OUT_BATCH(0);
@@ -271,7 +263,7 @@ gen6_emit_binding_table(struct intel_batchbuffer *batch, uint32_t wm_table)
(4 - 2));
OUT_BATCH(0); /* vs */
OUT_BATCH(0); /* gs */
- OUT_BATCH(wm_table);
+ OUT_BATCH_STATE_OFFSET(wm_table);
}
static void
@@ -325,36 +317,32 @@ gen6_emit_vertex_elements(struct intel_batchbuffer *batch)
static uint32_t
gen6_create_cc_viewport(struct intel_batchbuffer *batch)
{
- struct gen6_cc_viewport *vp;
+ struct gen6_cc_viewport vp;
- vp = intel_batch_state_alloc(batch, sizeof(*vp), 32);
- if (vp == NULL)
- return -1;
+ memset(&vp, 0, sizeof(vp));
- vp->min_depth = -1.e35;
- vp->max_depth = 1.e35;
+ vp.min_depth = -1.e35;
+ vp.max_depth = 1.e35;
- return intel_batch_offset(batch, vp);
+ return OUT_STATE_STRUCT(vp, 32);
}
static uint32_t
gen6_create_cc_blend(struct intel_batchbuffer *batch)
{
- struct gen6_blend_state *blend;
+ struct gen6_blend_state blend;
- blend = intel_batch_state_alloc(batch, sizeof(*blend), 64);
- if (blend == NULL)
- return -1;
+ memset(&blend, 0, sizeof(blend));
- blend->blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
- blend->blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
- blend->blend0.blend_func = GEN6_BLENDFUNCTION_ADD;
- blend->blend0.blend_enable = 1;
+ blend.blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
+ blend.blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
+ blend.blend0.blend_func = GEN6_BLENDFUNCTION_ADD;
+ blend.blend0.blend_enable = 1;
- blend->blend1.post_blend_clamp_enable = 1;
- blend->blend1.pre_blend_clamp_enable = 1;
+ blend.blend1.post_blend_clamp_enable = 1;
+ blend.blend1.pre_blend_clamp_enable = 1;
- return intel_batch_offset(batch, blend);
+ return OUT_STATE_STRUCT(blend, 64);
}
static uint32_t
@@ -362,7 +350,7 @@ gen6_create_kernel(struct intel_batchbuffer *batch)
{
return intel_batch_state_copy(batch, ps_kernel_nomask_affine,
sizeof(ps_kernel_nomask_affine),
- 64);
+ 64, "ps_kernel");
}
static uint32_t
@@ -370,70 +358,64 @@ gen6_create_sampler(struct intel_batchbuffer *batch,
sampler_filter_t filter,
sampler_extend_t extend)
{
- struct gen6_sampler_state *ss;
+ struct gen6_sampler_state ss;
- ss = intel_batch_state_alloc(batch, sizeof(*ss), 32);
- if (ss == NULL)
- return -1;
+ memset(&ss, 0, sizeof(ss));
- ss->ss0.lod_preclamp = 1; /* GL mode */
+ ss.ss0.lod_preclamp = 1; /* GL mode */
/* We use the legacy mode to get the semantics specified by
* the Render extension. */
- ss->ss0.border_color_mode = GEN6_BORDER_COLOR_MODE_LEGACY;
+ ss.ss0.border_color_mode = GEN6_BORDER_COLOR_MODE_LEGACY;
switch (filter) {
default:
case SAMPLER_FILTER_NEAREST:
- ss->ss0.min_filter = GEN6_MAPFILTER_NEAREST;
- ss->ss0.mag_filter = GEN6_MAPFILTER_NEAREST;
+ ss.ss0.min_filter = GEN6_MAPFILTER_NEAREST;
+ ss.ss0.mag_filter = GEN6_MAPFILTER_NEAREST;
break;
case SAMPLER_FILTER_BILINEAR:
- ss->ss0.min_filter = GEN6_MAPFILTER_LINEAR;
- ss->ss0.mag_filter = GEN6_MAPFILTER_LINEAR;
+ ss.ss0.min_filter = GEN6_MAPFILTER_LINEAR;
+ ss.ss0.mag_filter = GEN6_MAPFILTER_LINEAR;
break;
}
switch (extend) {
default:
case SAMPLER_EXTEND_NONE:
- ss->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER;
- ss->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER;
- ss->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER;
+ ss.ss1.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER;
+ ss.ss1.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER;
+ ss.ss1.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER;
break;
case SAMPLER_EXTEND_REPEAT:
- ss->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_WRAP;
- ss->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_WRAP;
- ss->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_WRAP;
+ ss.ss1.r_wrap_mode = GEN6_TEXCOORDMODE_WRAP;
+ ss.ss1.s_wrap_mode = GEN6_TEXCOORDMODE_WRAP;
+ ss.ss1.t_wrap_mode = GEN6_TEXCOORDMODE_WRAP;
break;
case SAMPLER_EXTEND_PAD:
- ss->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
- ss->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
- ss->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
+ ss.ss1.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
+ ss.ss1.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
+ ss.ss1.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP;
break;
case SAMPLER_EXTEND_REFLECT:
- ss->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_MIRROR;
- ss->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_MIRROR;
- ss->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_MIRROR;
+ ss.ss1.r_wrap_mode = GEN6_TEXCOORDMODE_MIRROR;
+ ss.ss1.s_wrap_mode = GEN6_TEXCOORDMODE_MIRROR;
+ ss.ss1.t_wrap_mode = GEN6_TEXCOORDMODE_MIRROR;
break;
}
- return intel_batch_offset(batch, ss);
+ return OUT_STATE_STRUCT(ss, 32);
}
static uint32_t
gen6_create_vertex_buffer(struct intel_batchbuffer *batch)
{
- uint16_t *v;
-
- v = intel_batch_state_alloc(batch, 2 * sizeof(uint16_t), 8);
- if (v == NULL)
- return -1;
+ uint16_t v[2];
v[0] = 0;
v[1] = 0;
- return intel_batch_offset(batch, v);
+ return intel_batch_state_copy(batch, v, sizeof(v), 8, "vertex buffer");
}
static void gen6_emit_vertex_buffer(struct intel_batchbuffer *batch)
@@ -447,17 +429,15 @@ static void gen6_emit_vertex_buffer(struct intel_batchbuffer *batch)
0 << VB0_BUFFER_INDEX_SHIFT |
VB0_NULL_VERTEX_BUFFER |
0 << VB0_BUFFER_PITCH_SHIFT);
- OUT_RELOC(batch, I915_GEM_DOMAIN_VERTEX, 0, offset);
- OUT_RELOC(batch, I915_GEM_DOMAIN_VERTEX, 0, offset);
+ OUT_RELOC_STATE(batch, I915_GEM_DOMAIN_VERTEX, 0, offset);
+ OUT_RELOC_STATE(batch, I915_GEM_DOMAIN_VERTEX, 0, offset);
OUT_BATCH(0);
}
-int gen6_setup_null_render_state(struct intel_batchbuffer *batch)
+void gen6_setup_null_render_state(struct intel_batchbuffer *batch)
{
uint32_t wm_state, wm_kernel, wm_table;
- uint32_t cc_vp, cc_blend, offset;
- uint32_t batch_end;
- int ret;
+ uint32_t cc_vp, cc_blend;
wm_table = gen6_bind_surfaces(batch);
wm_kernel = gen6_create_kernel(batch);
@@ -492,10 +472,4 @@ int gen6_setup_null_render_state(struct intel_batchbuffer *batch)
gen6_emit_vertex_buffer(batch);
OUT_BATCH(MI_BATCH_BUFFER_END);
-
- ret = intel_batch_error(batch);
- if (ret == 0)
- ret = intel_batch_total_used(batch);
-
- return ret;
}