author	Dave Airlie <airlied@redhat.com>	2016-07-15 13:50:58 +1000
committer	Dave Airlie <airlied@redhat.com>	2016-07-15 13:50:58 +1000
commit	ff37c05a996bb96eccc21f4fb1b32ba0e24f3443 (patch)
tree	c09b09b37521f2f8f3f7a9bb3b0a33a2b3bde1a1 /drivers/gpu/drm/i915/intel_ringbuffer.h
parent	6c181c82106e12dced317e93a7a396cbb8c64f75 (diff)
parent	0b2c0582f1570bfc95aa9ac1cd340a215d8e8335 (diff)
Merge tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- select igt testing dependencies for CONFIG_DRM_I915_DEBUG (Chris)
- track outputs in crtc state and clean up all our ad-hoc connector/encoder walking in modeset code (Ville)
- demidlayer drm_device/drm_i915_private (Chris Wilson)
- thundering herd fix from Chris Wilson, with lots of help from Tvrtko Ursulin
- piles of assorted cleanups and fallout from the thundering herd fix
- documentation and more tuning for waitboosting (Chris)
- pooled EU support on bxt (Arun Siluvery)
- bxt support is no longer considered preliminary!
- ring/engine vfunc cleanup from Tvrtko
- introduce intel_wait_for_register helper (Chris)
- opregion updates (Jani Nikula)
- tuning and fixes for wait_for macros (Tvrtko & Imre)
- more kabylake pci ids (Rodrigo)
- pps cleanup and fixes for bxt (Imre)
- move sink crc support over to atomic state (Maarten)
- fix up async fbdev init ordering (Chris)
- fbc fixes from Paulo and Chris

* tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel: (223 commits)
  drm/i915: Update DRIVER_DATE to 20160711
  drm/i915: Select DRM_VGEM for igt
  drm/i915: Select X86_MSR for igt
  drm/i915: Fill unused GGTT with scratch pages for VT-d
  drm/i915: Introduce Kabypoint PCH for Kabylake H/DT.
  drm/i915:gen9: implement WaMediaPoolStateCmdInWABB
  drm/i915: Check for invalid cloning earlier during modeset
  drm/i915: Simplify hdmi_12bpc_possible()
  drm/i915: Kill has_dsi_encoder
  drm/i915: s/INTEL_OUTPUT_DISPLAYPORT/INTEL_OUTPUT_DP/
  drm/i915: Replace some open coded intel_crtc_has_dp_encoder()s
  drm/i915: Kill has_dp_encoder from pipe_config
  drm/i915: Replace manual lvds and sdvo/hdmi counting with intel_crtc_has_type()
  drm/i915: Unify intel_pipe_has_type() and intel_pipe_will_have_type()
  drm/i915: Add output_types bitmask into the crtc state
  drm/i915: Remove encoder type checks from MST suspend/resume
  drm/i915: Don't mark eDP encoders as MST capable
  drm/i915: avoid wait_for_atomic() in non-atomic host2guc_action()
  drm/i915: Group the irq breadcrumb variables into the same cacheline
  drm/i915: Wake up the bottom-half if we steal their interrupt
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	135
1 file changed, 105 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b33c876fed20..12cb7ed90014 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -62,18 +62,6 @@ struct intel_hw_status_page {
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-#define GEN8_RING_SEMAPHORE_INIT(e) do { \
- if (!dev_priv->semaphore_obj) { \
- break; \
- } \
- (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
- (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
- (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
- (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
- (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
- (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
- } while(0)
-
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
struct intel_ring_hangcheck {
u64 acthd;
+ unsigned long user_interrupts;
u32 seqno;
- unsigned user_interrupts;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
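The widened user_interrupts field shadows the breadcrumbs.irq_wakeups counter introduced further down, so hangcheck can tell whether any user interrupts arrived since its last sample. A hedged sketch of that comparison (example_missed_irq() is hypothetical; the real sampling lives in the hangcheck code, not this header):

/* Hedged sketch: a missed user interrupt is suspected when the engine
 * still has a waiter but the wakeup count has not moved since the
 * previous hangcheck sample.
 */
static bool example_missed_irq(struct intel_engine_cs *engine)
{
	unsigned long wakeups = READ_ONCE(engine->breadcrumbs.irq_wakeups);
	bool missed = intel_engine_has_waiter(engine) &&
		      wakeups == engine->hangcheck.user_interrupts;

	engine->hangcheck.user_interrupts = wakeups;
	return missed;
}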
@@ -141,6 +129,8 @@ struct i915_ctx_workarounds {
struct drm_i915_gem_object *obj;
};
+struct drm_i915_gem_request;
+
struct intel_engine_cs {
struct drm_i915_private *i915;
const char *name;
@@ -160,6 +150,39 @@ struct intel_engine_cs {
struct intel_ringbuffer *buffer;
struct list_head buffers;
+ /* Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, reducing
+ * the overhead of waking that client is much preferred.
+ */
+ struct intel_breadcrumbs {
+ struct task_struct *irq_seqno_bh; /* bh for user interrupts */
+ unsigned long irq_wakeups;
+ bool irq_posted;
+
+ spinlock_t lock; /* protects the lists of requests */
+ struct rb_root waiters; /* sorted by retirement, priority */
+ struct rb_root signals; /* sorted by retirement */
+ struct intel_wait *first_wait; /* oldest waiter by retirement */
+ struct task_struct *signaler; /* used for fence signalling */
+ struct drm_i915_gem_request *first_signal;
+ struct timer_list fake_irq; /* used after a missed interrupt */
+
+ bool irq_enabled : 1;
+ bool rpm_wakelock : 1;
+ } breadcrumbs;
+
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
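To make the delegation scheme described in the comment above concrete, here is a minimal sketch of the client side, built from the intel_wait helpers declared at the bottom of this header. example_wait_for_seqno() is hypothetical; interrupt/error handling and the intel_engine_add_wait() return value (which tells the caller whether it became the bottom-half) are elided, and i915_seqno_passed() is the existing wrap-safe seqno comparison.

/* Hedged sketch: one client waiting on its seqno via the breadcrumbs. */
static int example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct intel_wait wait;

	intel_wait_init(&wait, seqno);
	intel_engine_add_wait(engine, &wait);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		schedule(); /* woken by the interrupt, via the bottom-half chain */
	}
	__set_current_state(TASK_RUNNING);

	intel_engine_remove_wait(engine, &wait);
	return 0;
}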
@@ -170,11 +193,10 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- unsigned irq_refcount; /* protected by dev_priv->irq_lock */
- u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- struct drm_i915_gem_request *trace_irq_req;
- bool __must_check (*irq_get)(struct intel_engine_cs *ring);
- void (*irq_put)(struct intel_engine_cs *ring);
+ u32 irq_keep_mask; /* always keep these interrupts */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ void (*irq_enable)(struct intel_engine_cs *ring);
+ void (*irq_disable)(struct intel_engine_cs *ring);
int (*init_hw)(struct intel_engine_cs *ring);
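Note that the refcounted irq_get()/irq_put() pair is gone: with a single bottom-half per engine, only the breadcrumbs code needs to arm the user interrupt, and a plain flag under its own lock suffices. A hedged sketch of the intended pattern (example_enable_irq() is hypothetical; the field names follow the breadcrumbs struct above):

/* Hypothetical sketch: arm the user interrupt at most once, under
 * breadcrumbs.lock, instead of per-waiter refcounting.
 */
static void example_enable_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	assert_spin_locked(&b->lock);
	if (!b->irq_enabled) {
		b->irq_enabled = true;
		engine->irq_enable(engine);
	}
}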
@@ -193,9 +215,6 @@ struct intel_engine_cs {
* monotonic, even if not coherent.
*/
void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
- u32 (*get_seqno)(struct intel_engine_cs *ring);
- void (*set_seqno)(struct intel_engine_cs *ring,
- u32 seqno);
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags);
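With the get_seqno()/set_seqno() vfuncs removed, a seqno is read straight from the status page via intel_engine_get_seqno() (added further down), with irq_seqno_barrier() supplying the coherency dance where the hardware needs it. A hedged sketch of a fully coherent read:

/* Hedged sketch: pair the optional per-engine barrier with the plain
 * status-page read to obtain a coherent seqno.
 */
static u32 example_coherent_seqno(struct intel_engine_cs *engine)
{
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);
	return intel_engine_get_seqno(engine);
}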
@@ -272,7 +291,6 @@ struct intel_engine_cs {
unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
- u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
u32 invalidate_domains,
@@ -304,12 +322,9 @@ struct intel_engine_cs {
* inspecting request list.
*/
u32 last_submitted_seqno;
- unsigned user_interrupts;
bool gpu_caches_dirty;
- wait_queue_head_t irq_queue;
-
struct i915_gem_context *last_context;
struct intel_ring_hangcheck hangcheck;
@@ -317,7 +332,6 @@ struct intel_engine_cs {
struct {
struct drm_i915_gem_object *obj;
u32 gtt_offset;
- volatile u32 *cpu_page;
} scratch;
bool needs_cmd_parser;
@@ -348,13 +362,13 @@ struct intel_engine_cs {
};
static inline bool
-intel_engine_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(const struct intel_engine_cs *engine)
{
return engine->i915 != NULL;
}
static inline unsigned
-intel_engine_flag(struct intel_engine_cs *engine)
+intel_engine_flag(const struct intel_engine_cs *engine)
{
return 1 << engine->id;
}
@@ -456,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-bool intel_engine_stopped(struct intel_engine_cs *engine);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);
-int intel_init_pipe_control(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -473,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
int init_workarounds_ring(struct intel_engine_cs *engine);
@@ -495,4 +512,62 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
}
+/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
+struct intel_wait {
+ struct rb_node node;
+ struct task_struct *tsk;
+ u32 seqno;
+};
+
+struct intel_signal_node {
+ struct rb_node node;
+ struct intel_wait wait;
+};
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+
+static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+{
+ wait->tsk = current;
+ wait->seqno = seqno;
+}
+
+static inline bool intel_wait_complete(const struct intel_wait *wait)
+{
+ return RB_EMPTY_NODE(&wait->node);
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait);
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait);
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+
+static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+}
+
+static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+{
+ bool wakeup = false;
+ struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+ /* Note that for this not to dangerously chase a dangling pointer,
+ * the caller is responsible for ensuring that the task remains valid
+ * for wake_up_process(), i.e. that the RCU grace period cannot expire.
+ *
+ * Also note that tsk is likely to be in !TASK_RUNNING state so an
+ * early test for tsk->state != TASK_RUNNING before wake_up_process()
+ * is unlikely to be beneficial.
+ */
+ if (tsk)
+ wakeup = wake_up_process(tsk);
+ return wakeup;
+}
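Since irq_seqno_bh is read locklessly, a caller that does not otherwise pin the task must bracket the wakeup in an RCU read-side section, for example:

/* Hedged sketch: keep the bottom-half task valid across the wakeup. */
rcu_read_lock();
if (intel_engine_has_waiter(engine))
	intel_engine_wakeup(engine);
rcu_read_unlock();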
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+unsigned int intel_kick_waiters(struct drm_i915_private *i915);
+unsigned int intel_kick_signalers(struct drm_i915_private *i915);
+
#endif /* _INTEL_RINGBUFFER_H_ */