Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |  46
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            |  37
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c     |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            |  51
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c           |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 228
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c            |  36
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c          |  39
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c          |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c      |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c         |  37
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h    |  16
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c          | 112
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c            |  43
22 files changed, 618 insertions, 263 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c550..4ff9b6cc973 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
int max_freq;
/* RPSTAT1 is in the GT power well */
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
max_freq * 100);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else {
seq_printf(m, "no P-state info available\n");
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c972b0..e33d9be7df3 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ int ret;
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (ring->obj != NULL) {
+ if (LP_RING(dev_priv)->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- ring->size = init->ring_size;
-
- ring->map.offset = init->ring_start;
- ring->map.size = init->ring_size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
-
- if (ring->map.handle == NULL) {
+ ret = intel_render_ring_init_dri(dev,
+ init->ring_start,
+ init->ring_size);
+ if (ret) {
i915_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
+ return ret;
}
}
- ring->virtual_start = ring->map.handle;
-
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
- /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+ /* If we have > 1 VGA cards, then we need to arbitrate access
+ * to the common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and the
+ * vga_client_register() fails with -ENODEV.
+ */
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
- if (ret)
+ if (ret && ret != -ENODEV)
goto cleanup_ringbuffer;
intel_register_dsm_handler();
@@ -1900,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
mmio_bar = IS_GEN2(dev) ? 1 : 0;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 72fea2bcfc4..22ec066adae 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,12 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -60,7 +66,7 @@ extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
- .class_mask = 0xffff00, \
+ .class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
.subvendor = PCI_ANY_ID, \
@@ -251,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
}
}
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -267,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo < 20 && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -354,12 +370,13 @@ static int i915_drm_thaw(struct drm_device *dev)
error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ drm_mode_config_reset(dev);
drm_irq_install(dev);
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
- if (dev_priv->renderctx && dev_priv->pwrctx)
+ if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
}
@@ -542,6 +559,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
+ drm_mode_config_reset(dev);
drm_irq_install(dev);
mutex_lock(&dev->struct_mutex);
}
@@ -566,6 +584,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ /* Only bind to function 0 of the device. Early generations
+ * used function 1 as a placeholder for multi-head. This causes
+ * us confusion instead, especially on the systems where both
+ * functions have the same PCI-ID!
+ */
+ if (PCI_FUNC(pdev->devfn))
+ return -ENODEV;
+
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -752,6 +778,9 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
+ if (!(driver.driver_features & DRIVER_MODESET))
+ driver.get_vblank_timestamp = NULL;
+
return drm_init(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5969f46ac2d..456f4048483 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head gtt_list;
- /** End of mappable part of GTT */
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long gtt_start;
unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
@@ -953,8 +956,10 @@ extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
+extern unsigned int i915_enable_rc6;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1173,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1349,22 +1357,32 @@ __i915_write(64, q)
* must be set to prevent GT core from power down and stale values being
* returned.
*/
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
if (dev_priv->info->gen >= 6) {
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
val = I915_READ(reg);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else
val = I915_READ(reg);
return val;
}
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 val)
+{
+ if (dev_priv->info->gen >= 6)
+ __gen6_gt_wait_for_fifo(dev_priv);
+ I915_WRITE(reg, val);
+}
+
static inline void
i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
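The i915_gt_read()/i915_gt_write() wrappers added above are consumed by the ring register macros later in the series (see the intel_ringbuffer.h hunks below). As a rough illustration of the calling convention -- not part of the patch, and the sample_* helper names are made up -- a direct caller on gen6 would look roughly like this:

/* Illustrative sketch only, not part of the patch: hypothetical callers of
 * the GT accessors declared above.  RING_HEAD() and the ring's mmio_base
 * field are the existing i915 definitions.
 */
static u32 sample_ring_head_read(struct drm_i915_private *dev_priv,
				 struct intel_ring_buffer *ring)
{
	/* i915_gt_read() holds the gen6 force-wake reference around the
	 * MMIO read so the GT power well cannot hand back stale values. */
	return i915_gt_read(dev_priv, RING_HEAD(ring->mmio_base));
}

static void sample_ring_head_write(struct drm_i915_private *dev_priv,
				   struct intel_ring_buffer *ring, u32 val)
{
	/* i915_gt_write() only waits for free GT FIFO entries before
	 * posting the write; no force-wake reference is taken. */
	i915_gt_write(dev_priv, RING_HEAD(ring->mmio_base), val);
}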
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848ff75..36e66cc5225 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
- drm_mm_init(&dev_priv->mm.gtt_space, start,
- end - start);
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
+
+ /* Take over this portion of the GTT */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
int
@@ -1394,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
-static uint32_t
+uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
seqno = ring->get_seqno(ring);
- for (i = 0; i < I915_NUM_RINGS; i++)
+ for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
ring->sync_seqno[i] = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dcfdf4151b6..50ab1614571 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (from == NULL || to == from)
return 0;
- /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+ /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+ if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
return i915_gem_object_wait_rendering(obj, true);
idx = intel_ring_sync_index(from, to);
@@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
seqno = i915_gem_next_request_seqno(dev, ring);
- for (i = 0; i < I915_NUM_RINGS-1; i++) {
+ for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
* so every billion or so execbuffers, we need to stall
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae50ac..b0abdc64aa9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ /* First fill our portion of the GTT with scratch pages */
+ intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c..d64843e18df 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj->tiling_changed = true;
- obj->tiling_mode = args->tiling_mode;
- obj->stride = args->stride;
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(obj);
+ if (obj->gtt_offset & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj);
+ }
+
+ if (ret == 0) {
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+ }
}
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b8e509ae065..8a9e08bf1cf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
return ret;
}
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
{
- struct drm_crtc *drmcrtc;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
- if (crtc < 0 || crtc >= dev->num_crtcs) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
/* Get drm_crtc to timestamp: */
- drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc == NULL) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ if (!crtc->enabled) {
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ return -EBUSY;
+ }
/* Helper routine in DRM core does all the work: */
- return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
- vblank_time, flags, drmcrtc);
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+ vblank_time, flags,
+ crtc);
}
/*
@@ -305,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -348,8 +361,12 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno = ring->get_seqno(ring);
+ u32 seqno;
+ if (ring->obj == NULL)
+ return;
+
+ seqno = ring->get_seqno(ring);
trace_i915_gem_request_complete(dev, seqno);
ring->irq_seqno = seqno;
@@ -831,6 +848,8 @@ static void i915_capture_error_state(struct drm_device *dev)
i++;
error->pinned_bo_count = i - error->active_bo_count;
+ error->active_bo = NULL;
+ error->pinned_bo = NULL;
if (i) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
GFP_ATOMIC);
@@ -1179,18 +1198,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
- if (pipea_stats & vblank_status) {
+ if (pipea_stats & vblank_status &&
+ drm_handle_vblank(dev, 0)) {
vblank++;
- drm_handle_vblank(dev, 0);
if (!dev_priv->flip_pending_is_done) {
i915_pageflip_stall_check(dev, 0);
intel_finish_page_flip(dev, 0);
}
}
- if (pipeb_stats & vblank_status) {
+ if (pipeb_stats & vblank_status &&
+ drm_handle_vblank(dev, 1)) {
vblank++;
- drm_handle_vblank(dev, 1);
if (!dev_priv->flip_pending_is_done) {
i915_pageflip_stall_check(dev, 1);
intel_finish_page_flip(dev, 1);
@@ -1278,12 +1297,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- ret = -ENODEV;
if (ring->irq_get(ring)) {
DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
ring->irq_put(ring);
- }
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1632,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
- I915_WRITE(FDI_RXA_IMR, 0);
- I915_WRITE(FDI_RXB_IMR, 0);
+ hotplug_mask |= SDE_AUX_MASK;
}
dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f41f6..3e6f486f460 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
* address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_INVALIDATE_BSD (1<<7)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -513,6 +515,10 @@
#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
@@ -1547,17 +1553,7 @@
/* Backlight control */
#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -2626,6 +2622,8 @@
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
#define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
@@ -3263,6 +3261,8 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define GT_FIFO_FREE_ENTRIES 0x120008
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b87ee4..8a77ff4a723 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
return 0;
}
+static void intel_crt_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+
+ if (HAS_PCH_SPLIT(dev))
+ crt->force_hotplug_required = 1;
+}
+
/*
* Routines for controlling stuff on the analog port
*/
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .reset = intel_crt_reset,
.dpms = drm_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 98967f3b772..49fb54fd9a1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ __gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ __gen6_gt_force_wake_put(dev_priv);
+}
+
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
}
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -1609,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj, false);
- if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ (void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2024,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
+}
+
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2032,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+ bool is_pch_port = false;
if (intel_crtc->active)
return;
@@ -2045,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- ironlake_fdi_enable(crtc);
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else {
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
+ }
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -2079,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
+ /* Skip the PCH stuff if possible */
+ if (!is_pch_port)
+ goto done;
+
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@@ -2163,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -5530,6 +5630,16 @@ cleanup_work:
return ret;
}
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+ intel_crtc->dpms_mode = -1;
+}
+
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
@@ -5541,6 +5651,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .reset = intel_crtc_reset,
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
@@ -5631,8 +5742,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = -1;
+ intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
if (HAS_PCH_SPLIT(dev)) {
@@ -6172,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6270,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
}
void intel_enable_clock_gating(struct drm_device *dev)
@@ -6286,7 +6396,9 @@ void intel_enable_clock_gating(struct drm_device *dev)
if (IS_GEN5(dev)) {
/* Required for FBC */
- dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
@@ -6429,52 +6541,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
}
}
-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
- I915_WRITE(CCID, 0);
- POSTING_READ(CCID);
-
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
dev_priv->renderctx = NULL;
}
if (dev_priv->pwrctx) {
- struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
I915_WRITE(PWRCTXA, 0);
POSTING_READ(PWRCTXA);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
- dev_priv->pwrctx = NULL;
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
}
+
+ ironlake_teardown_rc6(dev);
}
-static void ironlake_disable_rc6(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 10);
- POSTING_READ(CCID);
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
}
void ironlake_enable_rc6(struct drm_device *dev)
@@ -6482,15 +6602,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!i915_enable_rc6)
+ return;
+
+ ret = ironlake_setup_rc6(dev);
+ if (ret)
+ return;
+
/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
ret = BEGIN_LP_RING(6);
if (ret) {
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
return;
}
+
OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
OUT_RING(MI_SET_CONTEXT);
OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6507,6 +6638,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
+
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -6749,21 +6881,9 @@ void intel_modeset_init(struct drm_device *dev)
if (IS_GEN6(dev))
gen6_enable_rps(dev_priv);
- if (IS_IRONLAKE_M(dev)) {
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
- goto skip_rc6;
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
- goto skip_rc6;
- }
+ if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
- }
-skip_rc6:
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b682c..51cb4e36997 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1639,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ return has_audio;
+}
+
static int
intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -1652,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector,
return ret;
if (property == intel_dp->force_audio_property) {
- if (val == intel_dp->force_audio)
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
return 0;
- intel_dp->force_audio = val;
+ intel_dp->force_audio = i;
- if (val > 0 && intel_dp->has_audio)
- return 0;
- if (val < 0 && !intel_dp->has_audio)
+ if (i == 0)
+ has_audio = intel_dp_detect_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_dp->has_audio)
return 0;
- intel_dp->has_audio = val > 0;
+ intel_dp->has_audio = has_audio;
goto done;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db2557d64..2c431049963 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -298,7 +298,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void intel_disable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b02..c635c9e357b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
}
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = drm_get_edid(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ return has_audio;
+}
+
static int
intel_hdmi_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
return ret;
if (property == intel_hdmi->force_audio_property) {
- if (val == intel_hdmi->force_audio)
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_hdmi->force_audio)
return 0;
- intel_hdmi->force_audio = val;
+ intel_hdmi->force_audio = i;
- if (val > 0 && intel_hdmi->has_audio)
- return 0;
- if (val < 0 && !intel_hdmi->has_audio)
+ if (i == 0)
+ has_audio = intel_hdmi_detect_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_hdmi->has_audio)
return 0;
- intel_hdmi->has_audio = val > 0;
+ intel_hdmi->has_audio = has_audio;
goto done;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d30dd..bcdba7bd5cf 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return true;
}
- /* Make sure pre-965s set dither correctly */
- if (INTEL_INFO(dev)->gen < 4) {
- if (dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
-
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
@@ -374,10 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
+ /* If not enabling scaling, be consistent and always use 0. */
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
+
+ /* Make sure pre-965 set dither correctly */
+ if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
if (pfit_control != intel_lvds->pfit_control ||
pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
intel_lvds->pfit_control = pfit_control;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7aaadf..64fd64443ca 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
*/
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <acpi/video.h>
#include "drmP.h"
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
return -ENOTSUPP;
}
- base = ioremap(asls, OPREGION_SIZE);
+ base = acpi_os_ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458..d860abeda70 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
#include "intel_drv.h"
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
dev_priv->pch_pf_size = (width << 16) | height;
}
-static int is_backlight_combination_mode(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 4)
- return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
-
- if (IS_GEN2(dev))
- return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
-
- return 0;
-}
-
static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 4)
max &= ~1;
}
-
- if (is_backlight_combination_mode(dev))
- max *= 0xff;
}
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (IS_PINEVIEW(dev))
val >>= 1;
-
- if (is_backlight_combination_mode(dev)){
- u8 lbpc;
-
- val &= ~1;
- pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
- val *= lbpc;
- val >>= 1;
- }
}
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
-
- if (is_backlight_combination_mode(dev)){
- u32 max = intel_panel_get_max_backlight(dev);
- u8 lpbc;
-
- lpbc = level * 0xfe / max + 1;
- level /= lpbc;
- pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
- }
-
tmp = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f6b9baa6a63..445f27efe67 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
#include "i915_trace.h"
#include "intel_drv.h"
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ if (space < 0)
+ space += ring->size;
+ return space;
+}
+
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
i915_kernel_lost_context(ring->dev);
else {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
+ ring->space = ring_space(ring);
}
return 0;
@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
}
ring->tail = 0;
- ring->space = ring->head - 8;
+ ring->space = ring_space(ring);
return 0;
}
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
- int reread = 0;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
u32 head;
+ /* If the reported head position has wrapped or hasn't advanced,
+ * fallback to the slow and accurate path.
+ */
+ head = intel_read_status_page(ring, 4);
+ if (head > ring->head) {
+ ring->head = head;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- /* If the reported head position has wrapped or hasn't advanced,
- * fallback to the slow and accurate path.
- */
- head = intel_read_status_page(ring, 4);
- if (reread)
- head = I915_READ_HEAD(ring);
- ring->head = head & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
+ ring->head = I915_READ_HEAD(ring);
+ ring->space = ring_space(ring);
if (ring->space >= n) {
trace_i915_ring_wait_end(dev);
return 0;
@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
msleep(1);
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
- reread = 1;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
@@ -1052,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+ u32 invalidate, u32 flush)
{
+ uint32_t cmd;
int ret;
- if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
return 0;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_FLUSH_DW);
- intel_ring_emit(ring, 0);
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
@@ -1223,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
}
static int blt_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+ u32 invalidate, u32 flush)
{
+ uint32_t cmd;
int ret;
- if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
return 0;
ret = blt_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_FLUSH_DW);
- intel_ring_emit(ring, 0);
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB;
+ intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
@@ -1292,6 +1305,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->irq_get = gen6_render_ring_get_irq;
+ ring->irq_put = gen6_render_ring_put_irq;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+
+ ring->size = size;
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev))
+ ring->effective_size -= 128;
+
+ ring->map.offset = start;
+ ring->map.size = size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ ring->virtual_start = (void __force __iomem *)ring->map.handle;
+ return 0;
+}
+
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
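The ring_space() helper introduced at the top of this file replaces the open-coded head/tail arithmetic in init_ring_common(), intel_wrap_ring_buffer() and intel_wait_ring_buffer(). A quick worked example with made-up numbers (not taken from the patch): on a 64 KiB ring with head at 0x100 and tail at 0x8000, the raw difference is negative, so the ring size is added back.

/* Hypothetical values, for illustration only.  The "+ 8" mirrors the
 * "tail + 8" in ring_space(): the tail is never allowed to advance right
 * up to the head, so a full ring is never mistaken for an empty one
 * (head == tail means empty).
 */
static int example_ring_space(void)
{
	int space = 0x100 - (0x8000 + 8);	/* -0x7f08: tail is ahead of head */
	if (space < 0)
		space += 0x10000;		/* wrap by the ring size: 0x80f8 bytes usable */
	return space;
}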
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5b0abfa881f..34306865a5d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
struct drm_i915_gem_object *obj;
};
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
@@ -166,4 +167,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 45cd37652a3..7c50cdce84f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
SDVO_TV_MASK)
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
@@ -473,20 +474,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
return false;
}
- i = 3;
- while (status == SDVO_CMD_STATUS_PENDING && i--) {
- if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_CMD_STATUS,
- &status))
- return false;
- }
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("command returns response %s [%d]\n",
- status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
- status);
- return false;
- }
-
return true;
}
@@ -497,6 +484,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
u8 status;
int i;
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
/*
* The documentation states that all commands will be
* processed within 15µs, and that we need only poll
@@ -505,14 +494,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
*
* Check 5 times in case the hardware failed to read the docs.
*/
- do {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ udelay(15);
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
- return false;
- } while (status == SDVO_CMD_STATUS_PENDING && --retry);
+ goto log_fail;
+ }
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
DRM_LOG_KMS("(%s)", cmd_status_names[status]);
else
@@ -533,7 +527,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
return true;
log_fail:
- DRM_LOG_KMS("\n");
+ DRM_LOG_KMS("... failed\n");
return false;
}
@@ -550,6 +544,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
u8 ddc_bus)
{
+ /* This must be the immediately preceding write before the i2c xfer */
return intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&ddc_bus, 1);
@@ -557,7 +552,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
}
static bool
@@ -859,18 +857,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
intel_dip_infoframe_csum(&avi_if);
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
for (i = 0; i < sizeof(avi_if); i += 8) {
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
data, 8))
return false;
data++;
}
- return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
&tx_rate, 1);
}
@@ -1359,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
}
- }
+ } else
+ status = connector_status_disconnected;
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1407,10 +1409,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
- else if (response & SDVO_TMDS_MASK)
+ else if (IS_TMDS(intel_sdvo_connector))
ret = intel_sdvo_hdmi_sink_detect(connector);
- else
- ret = connector_status_connected;
+ else {
+ struct edid *edid;
+
+ /* if we have an edid check it matches the connection */
+ edid = intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
+ }
/* May update encoder flag for like clock for SDVO TV, etc.*/
if (ret == connector_status_connected) {
@@ -1446,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+ if (connector_is_digital == monitor_is_digital) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
+
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1668,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
kfree(connector);
}
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!intel_sdvo->is_hdmi)
+ return false;
+
+ edid = intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ return has_audio;
+}
+
static int
intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -1684,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
return ret;
if (property == intel_sdvo_connector->force_audio_property) {
- if (val == intel_sdvo_connector->force_audio)
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_sdvo_connector->force_audio)
return 0;
- intel_sdvo_connector->force_audio = val;
+ intel_sdvo_connector->force_audio = i;
- if (val > 0 && intel_sdvo->has_hdmi_audio)
- return 0;
- if (val < 0 && !intel_sdvo->has_hdmi_audio)
+ if (i == 0)
+ has_audio = intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_sdvo->has_hdmi_audio)
return 0;
- intel_sdvo->has_hdmi_audio = val > 0;
+ intel_sdvo->has_hdmi_audio = has_audio;
goto done;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4eaa6..fe4a53a50b8 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+ struct drm_connector *connector)
{
struct drm_encoder *encoder = &intel_tv->base.base;
struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
int type;
/* Disable TV interrupts around load detect or we'll recurse */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, 0,
- PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
save_tv_dac = tv_dac = I915_READ(TV_DAC);
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
I915_WRITE(TV_CTL, save_tv_ctl);
/* Restore interrupt config */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0,
- PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
return type;
}
@@ -1356,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- type = intel_tv_detect_type(intel_tv);
+ type = intel_tv_detect_type(intel_tv, connector);
} else if (force) {
struct drm_crtc *crtc;
int dpms_mode;
@@ -1364,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(intel_tv);
+ type = intel_tv_detect_type(intel_tv, connector);
intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
@@ -1658,6 +1663,18 @@ intel_tv_init(struct drm_device *dev)
intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
+ /* The documentation, for the older chipsets at least, recommend
+ * using a polling method rather than hotplug detection for TVs.
+ * This is because in order to perform the hotplug detection, the PLLs
+ * for the TV must be kept alive increasing power drain and starving
+ * bandwidth from other encoders. Notably for instance, it causes
+ * pipe underruns on Crestline when this encoder is supposedly idle.
+ *
+ * More recent chipsets favour HDMI rather than integrated S-Video.
+ */
+ connector->polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);