author    Chris Wilson <chris@chris-wilson.co.uk>    2020-06-01 08:24:13 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2020-06-02 10:42:15 +0100
commit    4e408a720a0530ae880378cb5830e7e812fefa4b (patch)
tree      7760adf541d460b362d018921e1a72c22ca0ce13 /drivers/gpu/drm/i915/gt
parent    c1f8587870602274e1de97aca89361cf91bc12d2 (diff)
drm/i915/gt: Move legacy context wa to intel_workarounds
Use the central mechanism for recording and verifying that we restore
the w/a for the older devices as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200601072446.19548-3-chris@chris-wilson.co.uk
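For readers unfamiliar with the "central mechanism" referred to above: intel_workarounds.c keeps a per-engine list of register writes so they can be re-applied after reset/resume and then checked by reading the registers back. The sketch below is a minimal, self-contained model of that record-and-verify pattern, not the driver's actual API; every name in it (wa_entry, wa_record, wa_apply, wa_verify, the fake MMIO helpers) is an illustrative stand-in.

/*
 * Minimal, self-contained model of a "record and verify" workaround
 * list. All names here (wa_entry, wa_record, wa_apply, wa_verify,
 * mmio_read, mmio_write) are illustrative stand-ins, not the i915 API.
 */
#include <stdint.h>
#include <stdio.h>

struct wa_entry {
	uint32_t reg;		/* register offset */
	uint32_t clr;		/* bits to clear */
	uint32_t set;		/* bits to set */
	uint32_t readback;	/* bits expected to read back; 0 = don't verify */
};

#define MAX_WA 16
static struct wa_entry wa_list[MAX_WA];
static unsigned int wa_count;

/* Fake MMIO space standing in for the GPU registers. */
static uint32_t fake_mmio[0x10000 / 4];
static uint32_t mmio_read(uint32_t reg) { return fake_mmio[reg / 4]; }
static void mmio_write(uint32_t reg, uint32_t val) { fake_mmio[reg / 4] = val; }

/* Record a workaround on the list instead of writing the register directly. */
static void wa_record(uint32_t reg, uint32_t clr, uint32_t set, uint32_t readback)
{
	if (wa_count < MAX_WA)
		wa_list[wa_count++] = (struct wa_entry){ reg, clr, set, readback };
}

/* (Re)apply every recorded workaround, e.g. after engine reset or resume. */
static void wa_apply(void)
{
	for (unsigned int i = 0; i < wa_count; i++) {
		const struct wa_entry *wa = &wa_list[i];

		mmio_write(wa->reg, (mmio_read(wa->reg) & ~wa->clr) | wa->set);
	}
}

/* Read the registers back and report any workaround the hardware lost. */
static int wa_verify(const char *when)
{
	int lost = 0;

	for (unsigned int i = 0; i < wa_count; i++) {
		const struct wa_entry *wa = &wa_list[i];

		if ((mmio_read(wa->reg) & wa->readback) != (wa->set & wa->readback)) {
			printf("workaround 0x%04x lost on %s\n", wa->reg, when);
			lost++;
		}
	}
	return lost;
}

int main(void)
{
	/* Hypothetical register offset and bit, for illustration only. */
	wa_record(0x20c0, 0, 1u << 7, 1u << 7);

	wa_apply();			/* e.g. at resume */
	return wa_verify("resume") ? 1 : 0;
}

The per-entry read mask is what lets verification tolerate bits that are known not to stick; this mirrors the new ECOSKPD entry in the patch below, whose final argument is 0 because the bit is noted not to stick on Broadwater.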
Diffstat (limited to 'drivers/gpu/drm/i915/gt')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c  28
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c      31
2 files changed, 31 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 96881cd8b17b..d9c1701061b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -429,32 +429,6 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
-static int rcs_resume(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
-
- /*
- * Disable CONSTANT_BUFFER before it is loaded from the context
- * image. For as it is loaded, it is executed and the stored
- * address may no longer be valid, leading to a GPU hang.
- *
- * This imposes the requirement that userspace reload their
- * CONSTANT_BUFFER on every batch, fortunately a requirement
- * they are already accustomed to from before contexts were
- * enabled.
- */
- if (IS_GEN(i915, 4))
- intel_uncore_write(uncore, ECOSKPD,
- _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
-
- if (IS_GEN_RANGE(i915, 6, 7))
- intel_uncore_write(uncore, INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
- return xcs_resume(engine);
-}
-
static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
@@ -1139,8 +1113,6 @@ static void setup_rcs(struct intel_engine_cs *engine)
if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
-
- engine->resume = rcs_resume;
}
static void setup_vcs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index fa1e15657663..94d66a9d760d 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -199,6 +199,18 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
#define WA_SET_FIELD_MASKED(addr, mask, value) \
wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+}
+
+static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+}
+
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
@@ -638,6 +650,10 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
bdw_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 7))
+ gen7_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 6))
+ gen6_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
return;
else
@@ -1583,6 +1599,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
+
+ if (IS_GEN(i915, 4))
+ /*
+ * Disable CONSTANT_BUFFER before it is loaded from the context
+ * image. For as it is loaded, it is executed and the stored
+ * address may no longer be valid, leading to a GPU hang.
+ *
+ * This imposes the requirement that userspace reload their
+ * CONSTANT_BUFFER on every batch, fortunately a requirement
+ * they are already accustomed to from before contexts were
+ * enabled.
+ */
+ wa_add(wal, ECOSKPD,
+ 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
+ 0 /* XXX bit doesn't stick on Broadwater */);
}
static void
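A side note on the register writes moved by this patch: both ECOSKPD and INSTPM are written through _MASKED_BIT_ENABLE(), i.e. as masked registers where the upper 16 bits of the value select which of the lower 16 bits the write may change. Below is a minimal sketch of that encoding using an illustrative helper rather than the driver's macro (which lives in i915_reg.h); the bit position used is hypothetical.

#include <assert.h>
#include <stdint.h>

/*
 * Illustrative equivalent of _MASKED_BIT_ENABLE(): put the bit to set
 * in the low 16 bits and repeat it in the high 16 bits so the hardware
 * knows this bit (and only this bit) may be modified by the write.
 */
static inline uint32_t masked_bit_enable(uint32_t bit)
{
	return (bit << 16) | bit;
}

int main(void)
{
	uint32_t bit = 1u << 4;	/* hypothetical bit position */

	/* 0x0010 in both halves: enable bit 4, leave all other bits alone. */
	assert(masked_bit_enable(bit) == 0x00100010u);
	return 0;
}

Because unmasked bits are left untouched by the hardware, these writes do not need a read-modify-write sequence in the driver.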