Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 22
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/ps8622.c | 684
-rw-r--r--  drivers/gpu/drm/bridge/ptn3460.c | 2
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 27
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 150
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 9
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 1
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 60
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 8
-rw-r--r--  drivers/gpu/drm/drm_of.c | 10
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 182
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 245
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.h | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 101
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 27
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 40
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 274
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.h | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 44
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 80
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 136
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 259
-rw-r--r--  drivers/gpu/drm/exynos/regs-mixer.h | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 517
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 103
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 709
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 127
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 335
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 167
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 842
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 264
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 429
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 20
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 212
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 117
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 418
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 1993
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 705
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_phy.c | 352
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 34
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 399
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 102
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 18
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 343
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 86
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 315
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 75
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 83
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 200
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 75
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 64
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 100
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 29
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 25
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | 85
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 12
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 622
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_priv.h | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 80
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.h | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 241
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 23
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 66
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 57
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 20
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 146
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 256
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/vce_v2_0.c | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 25
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 30
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 25
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 99
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 7
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 18
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 4
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 202
-rw-r--r--  drivers/gpu/drm/vgem/Makefile | 4
-rw-r--r--  drivers/gpu/drm/vgem/vgem_dma_buf.c | 94
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 364
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.h | 57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-di.c | 9
-rw-r--r--  drivers/gpu/ipu-v3/ipu-ic.c | 4
155 files changed, 11305 insertions(+), 3880 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 151a050129e7..47f2ce81b412 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -165,6 +165,15 @@ config DRM_SAVAGE
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
+config DRM_VGEM
+ tristate "Virtual GEM provider"
+ depends on DRM
+ help
+ Choose this option to get a virtual graphics memory manager,
+ as used by Mesa's software renderer for enhanced performance.
+ If M is selected the module will be called vgem.
+
+
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2c239b99de64..7d4944e1a60c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
+obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d7174300f501..69af73f15310 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -649,6 +649,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+ init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0) {
@@ -656,7 +657,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
- init_sdma_vm(dqm, q, qpd);
+ retval = mqd->load_mqd(mqd, q->mqd, 0,
+ 0, NULL);
+ if (retval != 0) {
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ return retval;
+ }
+
return 0;
}
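The hunk above moves init_sdma_vm() ahead of MQD creation and adds unwind when load_mqd() fails, so a half-built queue is never left behind. A minimal sketch of the same allocate/init/load pattern with goto-based rollback — the sdma_* names below are hypothetical, not the amdkfd API:

struct sdma_ctx;
struct sdma_queue;

int sdma_alloc(struct sdma_ctx *c, struct sdma_queue *q);
int sdma_init_mqd(struct sdma_ctx *c, struct sdma_queue *q);
int sdma_load_mqd(struct sdma_ctx *c, struct sdma_queue *q);
void sdma_uninit_mqd(struct sdma_ctx *c, struct sdma_queue *q);
void sdma_dealloc(struct sdma_ctx *c, struct sdma_queue *q);

static int sdma_create_queue(struct sdma_ctx *c, struct sdma_queue *q)
{
	int ret;

	ret = sdma_alloc(c, q);		/* reserve the HW queue */
	if (ret)
		return ret;

	ret = sdma_init_mqd(c, q);	/* build the descriptor */
	if (ret)
		goto err_dealloc;

	ret = sdma_load_mqd(c, q);	/* hand it to the HW */
	if (ret)
		goto err_uninit;

	return 0;

err_uninit:
	sdma_uninit_mqd(c, q);		/* undo completed steps */
err_dealloc:
	sdma_dealloc(c, q);
	return ret;
}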
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index e415a2a9207e..c7d298e62c96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
BUG_ON(!kq || !dev);
BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
- pr_debug("kfd: In func %s initializing queue type %d size %d\n",
+ pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
nop.opcode = IT_NOP;
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
- if (prop.doorbell_ptr == NULL)
+ if (prop.doorbell_ptr == NULL) {
+ pr_err("amdkfd: error init doorbell");
goto err_get_kernel_doorbell;
+ }
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
- if (retval != 0)
+ if (retval != 0) {
+ pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
goto err_pq_allocate_vidmem;
+ }
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem:
err_eop_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem:
- pr_err("kfd: error init pq\n");
kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
err_get_kernel_doorbell:
- pr_err("kfd: error init doorbell");
return false;
}
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq)
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
+ kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
kq->ops_asic_specific.uninitialize(kq);
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
queue_address = (unsigned int *)kq->pq_kernel_addr;
queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
- pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
+ pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
__func__, rptr, wptr, queue_address);
available_size = (rptr - 1 - wptr + queue_size_dwords) %
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
}
if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
- pr_err("kfd: failed to init kernel queue\n");
+ pr_err("amdkfd: failed to init kernel queue\n");
kfree(kq);
return NULL;
}
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
BUG_ON(!dev);
- pr_err("kfd: starting kernel queue test\n");
+ pr_err("amdkfd: starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
BUG_ON(!kq);
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq);
- pr_err("kfd: ending kernel queue test\n");
+ pr_err("amdkfd: ending kernel queue test\n");
}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index 460389702d31..a39b0343c197 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -164,6 +164,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+ bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index f38bbcdf929b..acef3223772c 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -11,3 +11,14 @@ config DRM_PTN3460
select DRM_PANEL
---help---
ptn3460 eDP-LVDS bridge chip driver.
+
+config DRM_PS8622
+ tristate "Parade eDP/LVDS bridge"
+ depends on DRM
+ depends on OF
+ select DRM_PANEL
+ select DRM_KMS_HELPER
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ Parade eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d8a8cfd12fbb..8dfebd984370 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Iinclude/drm
+obj-$(CONFIG_DRM_PS8622) += ps8622.o
obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/ps8622.c
new file mode 100644
index 000000000000..e895aa7ea353
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ps8622.c
@@ -0,0 +1,684 @@
+/*
+ * Parade PS8622 eDP/LVDS bridge driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_panel.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/* Brightness scale on the Parade chip */
+#define PS8622_MAX_BRIGHTNESS 0xff
+
+/* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */
+#define PS8622_POWER_RISE_T1_MIN_US 10
+#define PS8622_POWER_RISE_T1_MAX_US 10000
+#define PS8622_RST_HIGH_T2_MIN_US 3000
+#define PS8622_RST_HIGH_T2_MAX_US 30000
+#define PS8622_PWMO_END_T12_MS 200
+#define PS8622_POWER_FALL_T16_MAX_US 10000
+#define PS8622_POWER_OFF_T17_MS 500
+
+#if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \
+ (PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US))
+#error "T2.min + T1.max must be less than T2.max + T1.min"
+#endif
+
+struct ps8622_bridge {
+ struct drm_connector connector;
+ struct i2c_client *client;
+ struct drm_bridge bridge;
+ struct drm_panel *panel;
+ struct regulator *v12;
+ struct backlight_device *bl;
+
+ struct gpio_desc *gpio_slp;
+ struct gpio_desc *gpio_rst;
+
+ u32 max_lane_count;
+ u32 lane_count;
+
+ bool enabled;
+};
+
+static inline struct ps8622_bridge *
+ bridge_to_ps8622(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ps8622_bridge, bridge);
+}
+
+static inline struct ps8622_bridge *
+ connector_to_ps8622(struct drm_connector *connector)
+{
+ return container_of(connector, struct ps8622_bridge, connector);
+}
+
+static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val)
+{
+ int ret;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ u8 data[] = {reg, val};
+
+ msg.addr = client->addr + page;
+ msg.flags = 0;
+ msg.len = sizeof(data);
+ msg.buf = data;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ if (ret != 1)
+ pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n",
+ client->addr + page, reg, val, ret);
+ return !(ret == 1);
+}
+
+static int ps8622_send_config(struct ps8622_bridge *ps8622)
+{
+ struct i2c_client *cl = ps8622->client;
+ int err = 0;
+
+ /* HPD low */
+ err = ps8622_set(cl, 0x02, 0xa1, 0x01);
+ if (err)
+ goto error;
+
+ /* SW setting: [1:0] SW output 1.2V voltage is lowered to 96% */
+ err = ps8622_set(cl, 0x04, 0x14, 0x01);
+ if (err)
+ goto error;
+
+ /* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */
+ err = ps8622_set(cl, 0x04, 0xe3, 0x20);
+ if (err)
+ goto error;
+
+ /* [7] RCO SS enable */
+ err = ps8622_set(cl, 0x04, 0xe2, 0x80);
+ if (err)
+ goto error;
+
+ /* RPHY Setting
+ * [3:2] CDR tune wait cycle before measure for fine tune
+ * b00: 1us b01: 0.5us b10:2us, b11: 4us
+ */
+ err = ps8622_set(cl, 0x04, 0x8a, 0x0c);
+ if (err)
+ goto error;
+
+ /* [3] RFD always on */
+ err = ps8622_set(cl, 0x04, 0x89, 0x08);
+ if (err)
+ goto error;
+
+ /* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. */
+ err = ps8622_set(cl, 0x04, 0x71, 0x2d);
+ if (err)
+ goto error;
+
+ /* 2.7G CDR settings: NOF=40LSB for HBR CDR setting */
+ err = ps8622_set(cl, 0x04, 0x7d, 0x07);
+ if (err)
+ goto error;
+
+ /* [1:0] Fmin=+4bands */
+ err = ps8622_set(cl, 0x04, 0x7b, 0x00);
+ if (err)
+ goto error;
+
+ /* [7:5] DCO_FTRNG=+-40% */
+ err = ps8622_set(cl, 0x04, 0x7a, 0xfd);
+ if (err)
+ goto error;
+
+ /* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */
+ err = ps8622_set(cl, 0x04, 0xc0, 0x12);
+ if (err)
+ goto error;
+
+ /* Gitune=-37% */
+ err = ps8622_set(cl, 0x04, 0xc1, 0x92);
+ if (err)
+ goto error;
+
+ /* Fbstep=100% */
+ err = ps8622_set(cl, 0x04, 0xc2, 0x1c);
+ if (err)
+ goto error;
+
+ /* [7] LOS signal disable */
+ err = ps8622_set(cl, 0x04, 0x32, 0x80);
+ if (err)
+ goto error;
+
+ /* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */
+ err = ps8622_set(cl, 0x04, 0x00, 0xb0);
+ if (err)
+ goto error;
+
+ /* [7:6] Right-bar GPIO output strength is 8mA */
+ err = ps8622_set(cl, 0x04, 0x15, 0x40);
+ if (err)
+ goto error;
+
+ /* EQ Training State Machine Setting, RCO calibration start */
+ err = ps8622_set(cl, 0x04, 0x54, 0x10);
+ if (err)
+ goto error;
+
+ /* Logic, needs more than 10 I2C commands */
+ /* [4:0] MAX_LANE_COUNT set to max supported lanes */
+ err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count);
+ if (err)
+ goto error;
+
+ /* [4:0] LANE_COUNT_SET set to chosen lane count */
+ err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count);
+ if (err)
+ goto error;
+
+ err = ps8622_set(cl, 0x00, 0x52, 0x20);
+ if (err)
+ goto error;
+
+ /* HPD CP toggle enable */
+ err = ps8622_set(cl, 0x00, 0xf1, 0x03);
+ if (err)
+ goto error;
+
+ err = ps8622_set(cl, 0x00, 0x62, 0x41);
+ if (err)
+ goto error;
+
+ /* Counter number, add 1ms counter delay */
+ err = ps8622_set(cl, 0x00, 0xf6, 0x01);
+ if (err)
+ goto error;
+
+ /* [6]PWM function control by DPCD0040f[7], default is PWM block */
+ err = ps8622_set(cl, 0x00, 0x77, 0x06);
+ if (err)
+ goto error;
+
+ /* 04h Adjust VTotal tolerance to fix the 30Hz no display issue */
+ err = ps8622_set(cl, 0x00, 0x4c, 0x04);
+ if (err)
+ goto error;
+
+ /* DPCD00400='h00, Parade OUI ='h001cf8 */
+ err = ps8622_set(cl, 0x01, 0xc0, 0x00);
+ if (err)
+ goto error;
+
+ /* DPCD00401='h1c */
+ err = ps8622_set(cl, 0x01, 0xc1, 0x1c);
+ if (err)
+ goto error;
+
+ /* DPCD00402='hf8 */
+ err = ps8622_set(cl, 0x01, 0xc2, 0xf8);
+ if (err)
+ goto error;
+
+ /* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */
+ err = ps8622_set(cl, 0x01, 0xc3, 0x44);
+ if (err)
+ goto error;
+
+ /* DPCD404 */
+ err = ps8622_set(cl, 0x01, 0xc4, 0x32);
+ if (err)
+ goto error;
+
+ /* DPCD405 */
+ err = ps8622_set(cl, 0x01, 0xc5, 0x53);
+ if (err)
+ goto error;
+
+ /* DPCD406 */
+ err = ps8622_set(cl, 0x01, 0xc6, 0x4c);
+ if (err)
+ goto error;
+
+ /* DPCD407 */
+ err = ps8622_set(cl, 0x01, 0xc7, 0x56);
+ if (err)
+ goto error;
+
+ /* DPCD408 */
+ err = ps8622_set(cl, 0x01, 0xc8, 0x35);
+ if (err)
+ goto error;
+
+ /* DPCD40A, Initial Code major revision '01' */
+ err = ps8622_set(cl, 0x01, 0xca, 0x01);
+ if (err)
+ goto error;
+
+ /* DPCD40B, Initial Code minor revision '05' */
+ err = ps8622_set(cl, 0x01, 0xcb, 0x05);
+ if (err)
+ goto error;
+
+
+ if (ps8622->bl) {
+ /* DPCD720, internal PWM */
+ err = ps8622_set(cl, 0x01, 0xa5, 0xa0);
+ if (err)
+ goto error;
+
+ /* FFh for 100% brightness, 0h for 0% brightness */
+ err = ps8622_set(cl, 0x01, 0xa7,
+ ps8622->bl->props.brightness);
+ if (err)
+ goto error;
+ } else {
+ /* DPCD720, external PWM */
+ err = ps8622_set(cl, 0x01, 0xa5, 0x80);
+ if (err)
+ goto error;
+ }
+
+ /* Set LVDS output as 6bit-VESA mapping, single LVDS channel */
+ err = ps8622_set(cl, 0x01, 0xcc, 0x13);
+ if (err)
+ goto error;
+
+ /* Enable SSC set by register */
+ err = ps8622_set(cl, 0x02, 0xb1, 0x20);
+ if (err)
+ goto error;
+
+ /* Set SSC enabled and +/-1% central spreading */
+ err = ps8622_set(cl, 0x04, 0x10, 0x16);
+ if (err)
+ goto error;
+
+ /* Logic end */
+ /* MPU Clock source: LC => RCO */
+ err = ps8622_set(cl, 0x04, 0x59, 0x60);
+ if (err)
+ goto error;
+
+ /* LC -> RCO */
+ err = ps8622_set(cl, 0x04, 0x54, 0x14);
+ if (err)
+ goto error;
+
+ /* HPD high */
+ err = ps8622_set(cl, 0x02, 0xa1, 0x91);
+
+error:
+ return err ? -EIO : 0;
+}
+
+static int ps8622_backlight_update(struct backlight_device *bl)
+{
+ struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev);
+ int ret, brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ if (!ps8622->enabled)
+ return -EINVAL;
+
+ ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness);
+
+ return ret;
+}
+
+static const struct backlight_ops ps8622_backlight_ops = {
+ .update_status = ps8622_backlight_update,
+};
+
+static void ps8622_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+ int ret;
+
+ if (ps8622->enabled)
+ return;
+
+ gpiod_set_value(ps8622->gpio_rst, 0);
+
+ if (ps8622->v12) {
+ ret = regulator_enable(ps8622->v12);
+ if (ret)
+ DRM_ERROR("fails to enable ps8622->v12");
+ }
+
+ if (drm_panel_prepare(ps8622->panel)) {
+ DRM_ERROR("failed to prepare panel\n");
+ return;
+ }
+
+ gpiod_set_value(ps8622->gpio_slp, 1);
+
+ /*
+ * T1 is the range of time that it takes for the power to rise after we
+ * enable the lcd/ps8622 fet. T2 is the range of time in which the
+ * data sheet specifies we should deassert the reset pin.
+ *
+ * If it takes T1.max for the power to rise, we need to wait at least
+ * T2.min before deasserting the reset pin. If it takes T1.min for the
+ * power to rise, we need to wait at most T2.max before deasserting the
+ * reset pin.
+ */
+ usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US,
+ PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US);
+
+ gpiod_set_value(ps8622->gpio_rst, 1);
+
+ /* wait 20ms after RST high */
+ usleep_range(20000, 30000);
+
+ ret = ps8622_send_config(ps8622);
+ if (ret) {
+ DRM_ERROR("Failed to send config to bridge (%d)\n", ret);
+ return;
+ }
+
+ ps8622->enabled = true;
+}
+
+static void ps8622_enable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (drm_panel_enable(ps8622->panel)) {
+ DRM_ERROR("failed to enable panel\n");
+ return;
+ }
+}
+
+static void ps8622_disable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (drm_panel_disable(ps8622->panel)) {
+ DRM_ERROR("failed to disable panel\n");
+ return;
+ }
+ msleep(PS8622_PWMO_END_T12_MS);
+}
+
+static void ps8622_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+ if (!ps8622->enabled)
+ return;
+
+ ps8622->enabled = false;
+
+ /*
+ * This doesn't matter if the regulators are turned off, but something
+ * else might keep them on. In that case, we want to assert the slp gpio
+ * to lower power.
+ */
+ gpiod_set_value(ps8622->gpio_slp, 0);
+
+ if (drm_panel_unprepare(ps8622->panel)) {
+ DRM_ERROR("failed to unprepare panel\n");
+ return;
+ }
+
+ if (ps8622->v12)
+ regulator_disable(ps8622->v12);
+
+ /*
+ * Sleep for at least the amount of time that it takes the power rail to
+ * fall to prevent asserting the rst gpio from doing anything.
+ */
+ usleep_range(PS8622_POWER_FALL_T16_MAX_US,
+ 2 * PS8622_POWER_FALL_T16_MAX_US);
+ gpiod_set_value(ps8622->gpio_rst, 0);
+
+ msleep(PS8622_POWER_OFF_T17_MS);
+}
+
+static int ps8622_get_modes(struct drm_connector *connector)
+{
+ struct ps8622_bridge *ps8622;
+
+ ps8622 = connector_to_ps8622(connector);
+
+ return drm_panel_get_modes(ps8622->panel);
+}
+
+static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
+{
+ struct ps8622_bridge *ps8622;
+
+ ps8622 = connector_to_ps8622(connector);
+
+ return ps8622->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
+ .get_modes = ps8622_get_modes,
+ .best_encoder = ps8622_best_encoder,
+};
+
+static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
+ bool force)
+{
+ return connector_status_connected;
+}
+
+static void ps8622_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs ps8622_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ps8622_detect,
+ .destroy = ps8622_connector_destroy,
+};
+
+static int ps8622_attach(struct drm_bridge *bridge)
+{
+ struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(bridge->dev, &ps8622->connector,
+ &ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&ps8622->connector,
+ &ps8622_connector_helper_funcs);
+ drm_connector_register(&ps8622->connector);
+ drm_mode_connector_attach_encoder(&ps8622->connector,
+ bridge->encoder);
+
+ if (ps8622->panel)
+ drm_panel_attach(ps8622->panel, &ps8622->connector);
+
+ drm_helper_hpd_irq_event(ps8622->connector.dev);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8622_bridge_funcs = {
+ .pre_enable = ps8622_pre_enable,
+ .enable = ps8622_enable,
+ .disable = ps8622_disable,
+ .post_disable = ps8622_post_disable,
+ .attach = ps8622_attach,
+};
+
+static const struct of_device_id ps8622_devices[] = {
+ {.compatible = "parade,ps8622",},
+ {.compatible = "parade,ps8625",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ps8622_devices);
+
+static int ps8622_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *endpoint, *panel_node;
+ struct ps8622_bridge *ps8622;
+ int ret;
+
+ ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
+ if (!ps8622)
+ return -ENOMEM;
+
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (panel_node) {
+ ps8622->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!ps8622->panel)
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ps8622->client = client;
+
+ ps8622->v12 = devm_regulator_get(dev, "vdd12");
+ if (IS_ERR(ps8622->v12)) {
+ dev_info(dev, "no 1.2v regulator found for PS8622\n");
+ ps8622->v12 = NULL;
+ }
+
+ ps8622->gpio_slp = devm_gpiod_get(dev, "sleep");
+ if (IS_ERR(ps8622->gpio_slp)) {
+ ret = PTR_ERR(ps8622->gpio_slp);
+ dev_err(dev, "cannot get gpio_slp %d\n", ret);
+ return ret;
+ }
+ ret = gpiod_direction_output(ps8622->gpio_slp, 1);
+ if (ret) {
+ dev_err(dev, "cannot configure gpio_slp\n");
+ return ret;
+ }
+
+ ps8622->gpio_rst = devm_gpiod_get(dev, "reset");
+ if (IS_ERR(ps8622->gpio_rst)) {
+ ret = PTR_ERR(ps8622->gpio_rst);
+ dev_err(dev, "cannot get gpio_rst %d\n", ret);
+ return ret;
+ }
+ /*
+ * Assert the reset pin high to avoid the bridge being
+ * initialized prematurely
+ */
+ ret = gpiod_direction_output(ps8622->gpio_rst, 1);
+ if (ret) {
+ dev_err(dev, "cannot configure gpio_rst\n");
+ return ret;
+ }
+
+ ps8622->max_lane_count = id->driver_data;
+
+ if (of_property_read_u32(dev->of_node, "lane-count",
+ &ps8622->lane_count)) {
+ ps8622->lane_count = ps8622->max_lane_count;
+ } else if (ps8622->lane_count > ps8622->max_lane_count) {
+ dev_info(dev, "lane-count property is too high, using max_lane_count\n");
+ ps8622->lane_count = ps8622->max_lane_count;
+ }
+
+ if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) {
+ ps8622->bl = backlight_device_register("ps8622-backlight",
+ dev, ps8622, &ps8622_backlight_ops,
+ NULL);
+ if (IS_ERR(ps8622->bl)) {
+ DRM_ERROR("failed to register backlight\n");
+ ret = PTR_ERR(ps8622->bl);
+ ps8622->bl = NULL;
+ return ret;
+ }
+ ps8622->bl->props.max_brightness = PS8622_MAX_BRIGHTNESS;
+ ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
+ }
+
+ ps8622->bridge.funcs = &ps8622_bridge_funcs;
+ ps8622->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&ps8622->bridge);
+ if (ret) {
+ DRM_ERROR("Failed to add bridge\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, ps8622);
+
+ return 0;
+}
+
+static int ps8622_remove(struct i2c_client *client)
+{
+ struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
+
+ if (ps8622->bl)
+ backlight_device_unregister(ps8622->bl);
+
+ drm_bridge_remove(&ps8622->bridge);
+
+ return 0;
+}
+
+static const struct i2c_device_id ps8622_i2c_table[] = {
+ /* Device type, max_lane_count */
+ {"ps8622", 1},
+ {"ps8625", 2},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
+
+static struct i2c_driver ps8622_driver = {
+ .id_table = ps8622_i2c_table,
+ .probe = ps8622_probe,
+ .remove = ps8622_remove,
+ .driver = {
+ .name = "ps8622",
+ .owner = THIS_MODULE,
+ .of_match_table = ps8622_devices,
+ },
+};
+module_i2c_driver(ps8622_driver);
+
+MODULE_AUTHOR("Vincent Palatin <vpalatin@chromium.org>");
+MODULE_DESCRIPTION("Parade ps8622/ps8625 eDP-LVDS converter driver");
+MODULE_LICENSE("GPL v2");
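ps8622_set() above reaches the bridge's register pages by offsetting the I2C slave address (msg.addr = client->addr + page), so every (page, reg, val) triple becomes one two-byte write. With the datasheet timings defined at the top of the file, the pre-enable wait resolves to usleep_range(13000, 30010) microseconds (T2.min + T1.max, T2.max + T1.min), and the #error guard keeps that lower bound below the upper. As an illustration only — not the in-tree code — the long ps8622_send_config() sequence could equally be driven from a table on top of ps8622_set():

struct ps8622_reg_setting {
	u8 page;	/* offset added to the base I2C address */
	u8 reg;
	u8 val;
};

static const struct ps8622_reg_setting ps8622_init_seq[] = {
	{ 0x02, 0xa1, 0x01 },	/* HPD low */
	{ 0x04, 0x14, 0x01 },	/* 1.2V output lowered to 96% */
	/* ... remaining (page, reg, val) settings elided ... */
	{ 0x02, 0xa1, 0x91 },	/* HPD high */
};

/* relies on ps8622_set() from the file above */
static int ps8622_send_table(struct i2c_client *cl)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ps8622_init_seq); i++) {
		const struct ps8622_reg_setting *s = &ps8622_init_seq[i];

		if (ps8622_set(cl, s->page, s->reg, s->val))
			return -EIO;	/* abort on the first failed write */
	}

	return 0;
}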
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 826833e396f0..9d2f053382e1 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -265,7 +265,7 @@ static struct drm_connector_funcs ptn3460_connector_funcs = {
.destroy = ptn3460_connector_destroy,
};
-int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index a6caaae40b9e..57efdbeff008 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -134,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
connector->funcs->atomic_destroy_state(connector,
state->connector_states[i]);
+ state->connectors[i] = NULL;
state->connector_states[i] = NULL;
}
@@ -145,6 +146,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
crtc->funcs->atomic_destroy_state(crtc,
state->crtc_states[i]);
+ state->crtcs[i] = NULL;
state->crtc_states[i] = NULL;
}
@@ -156,6 +158,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
plane->funcs->atomic_destroy_state(plane,
state->plane_states[i]);
+ state->planes[i] = NULL;
state->plane_states[i] = NULL;
}
}
@@ -170,6 +173,9 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
*/
void drm_atomic_state_free(struct drm_atomic_state *state)
{
+ if (!state)
+ return;
+
drm_atomic_state_clear(state);
DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
@@ -248,11 +254,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_mode_config *config = &dev->mode_config;
/* FIXME: Mode prop is missing, which also controls ->enable. */
- if (property == config->prop_active) {
+ if (property == config->prop_active)
state->active = val;
- } else if (crtc->funcs->atomic_set_property)
+ else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
- return -EINVAL;
+ else
+ return -EINVAL;
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
@@ -266,9 +275,17 @@ int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property, uint64_t *val)
{
- if (crtc->funcs->atomic_get_property)
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_active)
+ *val = state->active;
+ else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
- return -EINVAL;
+ else
+ return -EINVAL;
+
+ return 0;
}
/**
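The reworked set/get dispatch above returns 0 once the core handles a property and only reports -EINVAL when the driver hook is absent or declines. A sketch of the driver-side hook it delegates to — the foo_* names and the watermark property are hypothetical:

#include <drm/drm_crtc.h>

struct foo_private {
	struct drm_property *watermark_prop;	/* hypothetical custom property */
};

struct foo_crtc_state {
	struct drm_crtc_state base;
	uint64_t watermark;
};

static int foo_crtc_atomic_set_property(struct drm_crtc *crtc,
					struct drm_crtc_state *state,
					struct drm_property *property,
					uint64_t val)
{
	struct foo_private *priv = crtc->dev->dev_private;
	struct foo_crtc_state *fstate =
		container_of(state, struct foo_crtc_state, base);

	if (property == priv->watermark_prop) {
		fstate->watermark = val;
		return 0;		/* handled */
	}

	return -EINVAL;			/* unknown property */
}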
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d9ed9a54fd1e..41c38edade74 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -587,7 +587,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
- if (!old_crtc_state->active)
+ if (!old_crtc_state->active ||
+ !needs_modeset(old_conn_state->crtc->state))
continue;
encoder = old_conn_state->best_encoder;
@@ -847,7 +848,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
if (!connector || !connector->state->best_encoder)
continue;
- if (!connector->state->crtc->state->active)
+ if (!connector->state->crtc->state->active ||
+ !needs_modeset(connector->state->crtc->state))
continue;
encoder = connector->state->best_encoder;
@@ -2067,6 +2069,26 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ memcpy(state, crtc->state, sizeof(*state));
+
+ state->mode_changed = false;
+ state->active_changed = false;
+ state->planes_changed = false;
+ state->event = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
+/**
* drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
* @crtc: drm CRTC
*
@@ -2081,20 +2103,35 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
if (WARN_ON(!crtc->state))
return NULL;
- state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
-
- if (state) {
- state->mode_changed = false;
- state->active_changed = false;
- state->planes_changed = false;
- state->event = NULL;
- }
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_crtc_duplicate_state(crtc, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @crtc: CRTC object
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ /*
+ * This is currently a placeholder so that drivers that subclass the
+ * state will automatically do the right thing if code is ever added
+ * to this function.
+ */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
+/**
* drm_atomic_helper_crtc_destroy_state - default state destroy hook
* @crtc: drm CRTC
* @state: CRTC state object to release
@@ -2105,6 +2142,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ __drm_atomic_helper_crtc_destroy_state(crtc, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
@@ -2130,6 +2168,24 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ memcpy(state, plane->state, sizeof(*state));
+
+ if (state->fb)
+ drm_framebuffer_reference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
+/**
* drm_atomic_helper_plane_duplicate_state - default state duplicate hook
* @plane: drm plane
*
@@ -2144,16 +2200,32 @@ drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
if (WARN_ON(!plane->state))
return NULL;
- state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
-
- if (state && state->fb)
- drm_framebuffer_reference(state->fb);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @plane: plane object
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
+/**
* drm_atomic_helper_plane_destroy_state - default state destroy hook
* @plane: drm plane
* @state: plane state object to release
@@ -2164,9 +2236,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- if (state->fb)
- drm_framebuffer_unreference(state->fb);
-
+ __drm_atomic_helper_plane_destroy_state(plane, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
@@ -2190,6 +2260,22 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ memcpy(state, connector->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
* drm_atomic_helper_connector_duplicate_state - default state duplicate hook
* @connector: drm connector
*
@@ -2199,14 +2285,41 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
{
+ struct drm_connector_state *state;
+
if (WARN_ON(!connector->state))
return NULL;
- return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_connector_duplicate_state(connector, state);
+
+ return state;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @connector: connector object
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ /*
+ * This is currently a placeholder so that drivers that subclass the
+ * state will automatically do the right thing if code is ever added
+ * to this function.
+ */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
* drm_atomic_helper_connector_destroy_state - default state destroy hook
* @connector: drm connector
* @state: connector state object to release
@@ -2217,6 +2330,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
+ __drm_atomic_helper_connector_destroy_state(connector, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
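The new __drm_atomic_helper_*_duplicate_state()/__drm_atomic_helper_*_destroy_state() pairs exist so a driver can embed the core state in a larger structure and still reuse the common copy and cleanup logic. A minimal sketch for a subclassed CRTC state — the foo_* names are hypothetical:

#include <drm/drm_atomic_helper.h>
#include <linux/slab.h>

struct foo_crtc_state {
	struct drm_crtc_state base;	/* must be the first member */
	u32 foo_setting;		/* hypothetical driver-private field */
};

static inline struct foo_crtc_state *to_foo_state(struct drm_crtc_state *s)
{
	return container_of(s, struct foo_crtc_state, base);
}

static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *copy;

	if (WARN_ON(!crtc->state))
		return NULL;

	copy = kzalloc(sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	/* copies the base state and clears mode_changed, event, ... */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
	/* the driver-private part must be carried over by hand */
	copy->foo_setting = to_foo_state(crtc->state)->foo_setting;

	return &copy->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(crtc, state);
	kfree(to_foo_state(state));
}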
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d576a4dea64f..b3989e23195e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5599,6 +5599,7 @@ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
return NULL;
}
+EXPORT_SYMBOL(drm_mode_get_tile_group);
/**
* drm_mode_create_tile_group - create a tile group from a displayid description
@@ -5637,3 +5638,4 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
+EXPORT_SYMBOL(drm_mode_create_tile_group);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3053aab968f9..dd895c409ca3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -270,7 +270,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
@@ -292,6 +292,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
saved_mode = crtc->mode;
+ saved_hwmode = crtc->hwmode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -334,6 +335,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ crtc->hwmode = *adjusted_mode;
+
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -396,9 +399,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
encoder->bridge->funcs->enable(encoder->bridge);
}
- /* Store real post-adjustment hardware mode. */
- crtc->hwmode = *adjusted_mode;
-
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
@@ -411,6 +411,7 @@ done:
if (!ret) {
crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
+ crtc->hwmode = saved_hwmode;
crtc->x = saved_x;
crtc->y = saved_y;
}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index d5368ea56a0f..71dcbc64ae98 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -462,7 +462,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
break;
case DP_AUX_NATIVE_REPLY_NACK:
- DRM_DEBUG_KMS("native nack\n");
+ DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size);
return -EREMOTEIO;
case DP_AUX_NATIVE_REPLY_DEFER:
@@ -493,7 +493,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
case DP_AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("I2C nack\n");
+ DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu\n", ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 732cb6f8e653..4c0aa97aaf03 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
return ret;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1a20db7c971f..309b9476fe96 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1283,12 +1283,12 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
int width, int height)
{
struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *mode;
bool prefer_non_interlace;
cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->specified == false)
- return mode;
+ return NULL;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
@@ -1297,7 +1297,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
goto create_mode;
prefer_non_interlace = !cmdline_mode->interlace;
- again:
+again:
list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index a6d773a61c2d..266dcd6cdf3b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -524,8 +524,13 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = { \
+ .cmd = ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -663,39 +668,29 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
- unsigned int usize, asize;
+ unsigned int usize, asize, drv_size;
dev = file_priv->minor->dev;
if (drm_device_is_unplugged(dev))
return -ENODEV;
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
+ if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) {
+ /* driver ioctl */
+ if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ goto err_i1;
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- cmd = ioctl->cmd_drv;
- }
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- u32 drv_size;
-
+ } else {
+ /* core ioctl */
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ goto err_i1;
ioctl = &drm_ioctls[nr];
+ }
- drv_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
-
- cmd = ioctl->cmd;
- } else
- goto err_i1;
+ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = _IOC_SIZE(cmd);
+ asize = max(usize, drv_size);
+ cmd = ioctl->cmd;
DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
task_pid_nr(current),
@@ -776,12 +771,13 @@ EXPORT_SYMBOL(drm_ioctl);
*/
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
- if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
- (nr < DRM_COMMAND_BASE)) {
- *flags = drm_ioctls[nr].flags;
- return true;
- }
+ if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END)
+ return false;
+
+ if (nr >= DRM_CORE_IOCTL_COUNT)
+ return false;
- return false;
+ *flags = drm_ioctls[nr].flags;
+ return true;
}
EXPORT_SYMBOL(drm_ioctl_flags);
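The rewritten dispatch reconciles two struct sizes: the one userspace encoded in the ioctl number and the one the core or driver table declares. A condensed restatement of that logic — not a helper that exists in the tree:

#include <linux/ioctl.h>
#include <linux/kernel.h>

static unsigned int ioctl_copy_size(unsigned int cmd, unsigned int table_cmd)
{
	unsigned int usize = _IOC_SIZE(cmd);		/* from userspace */
	unsigned int drv_size = _IOC_SIZE(table_cmd);	/* from the table */

	/*
	 * e.g. usize = 24 (old userspace), drv_size = 32 (the struct has
	 * since grown): the kernel buffer is 32 bytes, and drm_ioctl()
	 * zero-fills the 8 bytes userspace did not supply before calling
	 * the handler.
	 */
	return max(usize, drv_size);
}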
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 2cca85f23138..213b11ea69b5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -903,6 +903,12 @@ EXPORT_SYMBOL(drm_mode_duplicate);
*/
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
+ if (!mode1 && !mode2)
+ return true;
+
+ if (!mode1 || !mode2)
+ return false;
+
/* do clock check convert to PICOS so fb modes get matched
* the same */
if (mode1->clock && mode2->clock) {
@@ -1148,7 +1154,7 @@ EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
- * @merge_type_bits: whether to merge or overright type bits.
+ * @merge_type_bits: whether to merge or overwrite type bits
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
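The NULL checks added to drm_mode_equal() above make the comparison total, so callers no longer have to guard either argument. In short (illustrative comment, not new kernel code):

/*
 *   drm_mode_equal(NULL, NULL) -> true   (two absent modes match)
 *   drm_mode_equal(mode, NULL) -> false  (a mode never matches nothing)
 *   drm_mode_equal(NULL, mode) -> false
 */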
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 16150a00c237..aaa130736bf8 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -43,14 +43,10 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
- struct device_node *remote_port, *ep = NULL;
+ struct device_node *remote_port, *ep;
uint32_t possible_crtcs = 0;
- do {
- ep = of_graph_get_next_endpoint(port, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(port, ep) {
remote_port = of_graph_get_remote_port(ep);
if (!remote_port) {
of_node_put(ep);
@@ -60,7 +56,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
of_node_put(remote_port);
- } while (1);
+ }
return possible_crtcs;
}
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6591d48c1b9d..3fee587bc284 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
count = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
} else
count = (*connector_funcs->get_modes)(connector);
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index a5e74612100e..0a6780367d28 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
- depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+ depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 63f02e2380ae..1f7e33f59de6 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -28,6 +28,7 @@
#include <video/exynos7_decon.h>
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
@@ -41,32 +42,16 @@
#define WINDOWS_NR 2
-struct decon_win_data {
- unsigned int ovl_x;
- unsigned int ovl_y;
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- unsigned int pixel_format;
- dma_addr_t dma_addr;
- bool enabled;
- bool resume;
-};
-
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct clk *pclk;
struct clk *aclk;
struct clk *eclk;
struct clk *vclk;
void __iomem *regs;
- struct decon_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
bool i80_if;
@@ -296,59 +281,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void decon_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct decon_context *ctx = crtc->ctx;
- struct decon_win_data *win_data;
- int win, padding;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
-
- win_data = &ctx->win_data[win];
-
- padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
- win_data->offset_x = plane->fb_x;
- win_data->offset_y = plane->fb_y;
- win_data->fb_width = plane->fb_width + padding;
- win_data->fb_height = plane->fb_height;
- win_data->ovl_x = plane->crtc_x;
- win_data->ovl_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->dma_addr = plane->dma_addr[0];
- win_data->bpp = plane->bpp;
- win_data->pixel_format = plane->pixel_format;
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
{
- struct decon_win_data *win_data = &ctx->win_data[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
+ int padding;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
@@ -397,7 +339,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -407,7 +349,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+ if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
@@ -435,7 +378,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
* @protect: 1 to protect (disable updates)
*/
static void decon_shadow_protect_win(struct decon_context *ctx,
- int win, bool protect)
+ unsigned int win, bool protect)
{
u32 bits, val;
@@ -449,12 +392,12 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
writel(val, ctx->regs + SHADOWCON);
}
-static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.mode;
- struct decon_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
+ int padding;
unsigned long val, alpha;
unsigned int last_x;
unsigned int last_y;
@@ -462,17 +405,14 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
/* If suspended, enable this on resume */
if (ctx->suspended) {
- win_data->resume = true;
+ plane->resume = true;
return;
}
@@ -490,39 +430,41 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
decon_shadow_protect_win(ctx, win, true);
/* buffer start address */
- val = (unsigned long)win_data->dma_addr;
+ val = (unsigned long)plane->dma_addr[0];
writel(val, ctx->regs + VIDW_BUF_START(win));
+ padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+
/* buffer size */
- writel(win_data->fb_width, ctx->regs + VIDW_WHOLE_X(win));
- writel(win_data->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+ writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
+ writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
- writel(win_data->offset_x, ctx->regs + VIDW_OFFSET_X(win));
- writel(win_data->offset_y, ctx->regs + VIDW_OFFSET_Y(win));
+ writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
+ writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win));
DRM_DEBUG_KMS("start addr = 0x%lx\n",
- (unsigned long)win_data->dma_addr);
+ (unsigned long)val);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
+ plane->crtc_width, plane->crtc_height);
/*
* OSD position.
 * In case the window layout goes outside the LCD layout, DECON fails.
*/
- if ((win_data->ovl_x + win_data->ovl_width) > mode->hdisplay)
- win_data->ovl_x = mode->hdisplay - win_data->ovl_width;
- if ((win_data->ovl_y + win_data->ovl_height) > mode->vdisplay)
- win_data->ovl_y = mode->vdisplay - win_data->ovl_height;
+ if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
+ plane->crtc_x = mode->hdisplay - plane->crtc_width;
+ if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
+ plane->crtc_y = mode->vdisplay - plane->crtc_height;
- val = VIDOSDxA_TOPLEFT_X(win_data->ovl_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->ovl_y);
+ val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = win_data->ovl_x + win_data->ovl_width;
+ last_x = plane->crtc_x + plane->crtc_width;
if (last_x)
last_x--;
- last_y = win_data->ovl_y + win_data->ovl_height;
+ last_y = plane->crtc_y + plane->crtc_height;
if (last_y)
last_y--;
@@ -531,7 +473,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->ovl_x, win_data->ovl_y, last_x, last_y);
+ plane->crtc_x, plane->crtc_y, last_x, last_y);
/* OSD alpha */
alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
@@ -565,27 +507,23 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
- win_data->enabled = true;
+ plane->enabled = true;
}
-static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct decon_context *ctx = crtc->ctx;
- struct decon_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
u32 val;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
if (ctx->suspended) {
 /* do not resume this window */
- win_data->resume = false;
+ plane->resume = false;
return;
}
@@ -604,42 +542,42 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
- win_data->enabled = false;
+ plane->enabled = false;
}
static void decon_window_suspend(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
+ if (plane->enabled)
decon_win_disable(ctx->crtc, i);
}
}
static void decon_window_resume(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
}
}
static void decon_apply(struct decon_context *ctx)
{
- struct decon_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ if (plane->enabled)
decon_win_commit(ctx->crtc, i);
else
decon_win_disable(ctx->crtc, i);
@@ -779,7 +717,6 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.wait_for_vblank = decon_wait_for_vblank,
- .win_mode_set = decon_win_mode_set,
.win_commit = decon_win_commit,
.win_disable = decon_win_disable,
};
@@ -818,6 +755,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ret = decon_ctx_initialize(ctx, drm_dev);
@@ -826,8 +766,18 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_LCD,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
&decon_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
decon_ctx_remove(ctx);
@@ -888,8 +838,8 @@ static int decon_probe(struct platform_device *pdev)
of_node_put(i80_if_timings);
ctx->regs = of_iomap(dev->of_node, 0);
- if (IS_ERR(ctx->regs)) {
- ret = PTR_ERR(ctx->regs);
+ if (!ctx->regs) {
+ ret = -ENOMEM;
goto err_del_component;
}
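
The padding computed in decon_win_set_pixfmt() and decon_win_commit() is the per-line alignment overhead implied by the framebuffer pitch: the pitch expressed in pixels, minus the visible width. A minimal worked sketch of the arithmetic, with hypothetical values (illustrative only, not taken from the patch):

    /* Sketch: deriving DECON line padding from pitch and bpp.
     * All values are hypothetical. */
    unsigned int pitch = 4224;     /* bytes per scanline, incl. alignment */
    unsigned int bpp = 32;         /* bits per pixel */
    unsigned int fb_width = 1024;  /* visible pixels per scanline */

    unsigned int line_pixels = pitch / (bpp >> 3);   /* 4224 / 4 = 1056 */
    int padding = line_pixels - fb_width;            /* 1056 - 1024 = 32 */

    /* VIDW_WHOLE_X is then programmed with fb_width + padding (1056),
     * so the DMA engine steps over the alignment pixels on each line. */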
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index bf17a60b40ed..1dbfba58f909 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,10 +32,16 @@
#include <drm/bridge/ptn3460.h>
#include "exynos_dp_core.h"
+#include "exynos_drm_fimd.h"
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)
+static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
+{
+ return to_exynos_crtc(dp->encoder->crtc);
+}
+
static inline struct exynos_dp_device *
display_to_dp(struct exynos_drm_display *d)
{
@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
}
}
+ fimd_dp_clock_enable(dp_to_crtc(dp), true);
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
+ fimd_dp_clock_enable(dp_to_crtc(dp), false);
+
if (dp->panel) {
if (drm_panel_unprepare(dp->panel))
DRM_ERROR("failed to turnoff the panel\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
deleted file mode 100644
index ba9b3d5ed672..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include <drm/exynos_drm.h>
-#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
-#include "exynos_drm_connector.h"
-
-#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
- drm_connector)
-
-struct exynos_drm_connector {
- struct drm_connector drm_connector;
- uint32_t encoder_id;
- struct exynos_drm_display *display;
-};
-
-static int exynos_drm_connector_get_modes(struct drm_connector *connector)
-{
- struct exynos_drm_connector *exynos_connector =
- to_exynos_connector(connector);
- struct exynos_drm_display *display = exynos_connector->display;
- struct edid *edid = NULL;
- unsigned int count = 0;
- int ret;
-
- /*
- * if get_edid() exists then get_edid() callback of hdmi side
- * is called to get edid data through i2c interface else
- * get timing from the FIMD driver(display controller).
- *
- * P.S. in case of lcd panel, count is always 1 if success
- * because lcd panel has only one mode.
- */
- if (display->ops->get_edid) {
- edid = display->ops->get_edid(display, connector);
- if (IS_ERR_OR_NULL(edid)) {
- ret = PTR_ERR(edid);
- edid = NULL;
- DRM_ERROR("Panel operation get_edid failed %d\n", ret);
- goto out;
- }
-
- count = drm_add_edid_modes(connector, edid);
- if (!count) {
- DRM_ERROR("Add edid modes failed %d\n", count);
- goto out;
- }
-
- drm_mode_connector_update_edid_property(connector, edid);
- } else {
- struct exynos_drm_panel_info *panel;
- struct drm_display_mode *mode = drm_mode_create(connector->dev);
- if (!mode) {
- DRM_ERROR("failed to create a new display mode.\n");
- return 0;
- }
-
- if (display->ops->get_panel)
- panel = display->ops->get_panel(display);
- else {
- drm_mode_destroy(connector->dev, mode);
- return 0;
- }
-
- drm_display_mode_from_videomode(&panel->vm, mode);
- mode->width_mm = panel->width_mm;
- mode->height_mm = panel->height_mm;
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
-
- count = 1;
- }
-
-out:
- kfree(edid);
- return count;
-}
-
-static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct exynos_drm_connector *exynos_connector =
- to_exynos_connector(connector);
- struct exynos_drm_display *display = exynos_connector->display;
- int ret = MODE_BAD;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (display->ops->check_mode)
- if (!display->ops->check_mode(display, mode))
- ret = MODE_OK;
-
- return ret;
-}
-
-static struct drm_encoder *exynos_drm_best_encoder(
- struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct exynos_drm_connector *exynos_connector =
- to_exynos_connector(connector);
- return drm_encoder_find(dev, exynos_connector->encoder_id);
-}
-
-static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
- .get_modes = exynos_drm_connector_get_modes,
- .mode_valid = exynos_drm_connector_mode_valid,
- .best_encoder = exynos_drm_best_encoder,
-};
-
-static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
- unsigned int max_width, unsigned int max_height)
-{
- struct exynos_drm_connector *exynos_connector =
- to_exynos_connector(connector);
- struct exynos_drm_display *display = exynos_connector->display;
- unsigned int width, height;
-
- width = max_width;
- height = max_height;
-
- /*
- * if specific driver want to find desired_mode using maxmum
- * resolution then get max width and height from that driver.
- */
- if (display->ops->get_max_resol)
- display->ops->get_max_resol(display, &width, &height);
-
- return drm_helper_probe_single_connector_modes(connector, width,
- height);
-}
-
-/* get detection status of display device. */
-static enum drm_connector_status
-exynos_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- struct exynos_drm_connector *exynos_connector =
- to_exynos_connector(connector);
- struct exynos_drm_display *display = exynos_connector->display;
- enum drm_connector_status status = connector_status_disconnected;
-
- if (display->ops->is_connected) {
- if (display->ops->is_connected(display))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
- }
-
- return status;
-}
-
-static void exynos_drm_connector_destroy(struct drm_connector *connector)
-{
- struct exynos_drm_connector *exynos_connector =
- to_exynos_connector(connector);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- kfree(exynos_connector);
-}
-
-static struct drm_connector_funcs exynos_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = exynos_drm_connector_fill_modes,
- .detect = exynos_drm_connector_detect,
- .destroy = exynos_drm_connector_destroy,
-};
-
-struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
- struct drm_encoder *encoder)
-{
- struct exynos_drm_connector *exynos_connector;
- struct exynos_drm_display *display = exynos_drm_get_display(encoder);
- struct drm_connector *connector;
- int type;
- int err;
-
- exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
- if (!exynos_connector)
- return NULL;
-
- connector = &exynos_connector->drm_connector;
-
- switch (display->type) {
- case EXYNOS_DISPLAY_TYPE_HDMI:
- type = DRM_MODE_CONNECTOR_HDMIA;
- connector->interlace_allowed = true;
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- break;
- case EXYNOS_DISPLAY_TYPE_VIDI:
- type = DRM_MODE_CONNECTOR_VIRTUAL;
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- break;
- default:
- type = DRM_MODE_CONNECTOR_Unknown;
- break;
- }
-
- drm_connector_init(dev, connector, &exynos_connector_funcs, type);
- drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
-
- err = drm_connector_register(connector);
- if (err)
- goto err_connector;
-
- exynos_connector->encoder_id = encoder->base.id;
- exynos_connector->display = display;
- connector->dpms = DRM_MODE_DPMS_OFF;
- connector->encoder = encoder;
-
- err = drm_mode_connector_attach_encoder(connector, encoder);
- if (err) {
- DRM_ERROR("failed to attach a connector to a encoder\n");
- goto err_sysfs;
- }
-
- DRM_DEBUG_KMS("connector has been created\n");
-
- return connector;
-
-err_sysfs:
- drm_connector_unregister(connector);
-err_connector:
- drm_connector_cleanup(connector);
- kfree(exynos_connector);
- return NULL;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
deleted file mode 100644
index 4eb20d78379a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_CONNECTOR_H_
-#define _EXYNOS_DRM_CONNECTOR_H_
-
-struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
- struct drm_encoder *encoder);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 48ccab7fdf63..eb49195cec5c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -34,9 +34,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
if (mode > DRM_MODE_DPMS_ON) {
/* wait for the completion of page flip. */
if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
- !atomic_read(&exynos_crtc->pending_flip),
- HZ/20))
- atomic_set(&exynos_crtc->pending_flip, 0);
+ (exynos_crtc->event == NULL), HZ/20))
+ exynos_crtc->event = NULL;
drm_crtc_vblank_off(crtc);
}
@@ -164,11 +163,10 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_framebuffer *old_fb = crtc->primary->fb;
unsigned int crtc_w, crtc_h;
- int ret = -EINVAL;
+ int ret;
/* when the page flip is requested, crtc's dpms should be on */
if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
@@ -176,48 +174,49 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
+ if (!event)
+ return -EINVAL;
- if (event) {
- /*
- * the pipe from user always is 0 so we can set pipe number
- * of current owner to event.
- */
- event->pipe = exynos_crtc->pipe;
+ spin_lock_irq(&dev->event_lock);
+ if (exynos_crtc->event) {
+ ret = -EBUSY;
+ goto out;
+ }
- ret = drm_vblank_get(dev, exynos_crtc->pipe);
- if (ret) {
- DRM_DEBUG("failed to acquire vblank counter\n");
+ ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter\n");
+ goto out;
+ }
- goto out;
- }
+ exynos_crtc->event = event;
+ spin_unlock_irq(&dev->event_lock);
+ /*
+ * the pipe from userspace is always 0, so set the pipe number
+ * of the current owner on the event.
+ */
+ event->pipe = exynos_crtc->pipe;
+
+ crtc->primary->fb = fb;
+ crtc_w = fb->width - crtc->x;
+ crtc_h = fb->height - crtc->y;
+ ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
+ crtc_w, crtc_h, crtc->x, crtc->y,
+ crtc_w, crtc_h);
+ if (ret) {
+ crtc->primary->fb = old_fb;
spin_lock_irq(&dev->event_lock);
- list_add_tail(&event->base.link,
- &dev_priv->pageflip_event_list);
- atomic_set(&exynos_crtc->pending_flip, 1);
+ exynos_crtc->event = NULL;
+ drm_vblank_put(dev, exynos_crtc->pipe);
spin_unlock_irq(&dev->event_lock);
-
- crtc->primary->fb = fb;
- crtc_w = fb->width - crtc->x;
- crtc_h = fb->height - crtc->y;
- ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
- crtc_w, crtc_h, crtc->x, crtc->y,
- crtc_w, crtc_h);
- if (ret) {
- crtc->primary->fb = old_fb;
-
- spin_lock_irq(&dev->event_lock);
- drm_vblank_put(dev, exynos_crtc->pipe);
- list_del(&event->base.link);
- atomic_set(&exynos_crtc->pending_flip, 0);
- spin_unlock_irq(&dev->event_lock);
-
- goto out;
- }
+ return ret;
}
+
+ return 0;
+
out:
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock_irq(&dev->event_lock);
return ret;
}
@@ -239,13 +238,13 @@ static struct drm_crtc_funcs exynos_crtc_funcs = {
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ struct drm_plane *plane,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
void *ctx)
{
struct exynos_drm_crtc *exynos_crtc;
- struct drm_plane *plane;
struct exynos_drm_private *private = drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
@@ -255,19 +254,12 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&exynos_crtc->pending_flip_queue);
- atomic_set(&exynos_crtc->pending_flip, 0);
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
exynos_crtc->pipe = pipe;
exynos_crtc->type = type;
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
- plane = exynos_plane_init(drm_dev, 1 << pipe,
- DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- goto err_plane;
- }
crtc = &exynos_crtc->base;
@@ -284,7 +276,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
err_crtc:
plane->funcs->destroy(plane);
-err_plane:
kfree(exynos_crtc);
return ERR_PTR(ret);
}
@@ -320,26 +311,20 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
{
struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
+ if (exynos_crtc->event) {
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (pipe != e->pipe)
- continue;
-
- list_del(&e->base.link);
- drm_send_vblank_event(dev, -1, e);
+ drm_send_vblank_event(dev, -1, exynos_crtc->event);
drm_vblank_put(dev, pipe);
- atomic_set(&exynos_crtc->pending_flip, 0);
wake_up(&exynos_crtc->pending_flip_queue);
+
}
+ exynos_crtc->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
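
With pageflip_event_list gone, each CRTC carries at most one pending flip in exynos_crtc->event, always accessed under dev->event_lock. The lifecycle, condensed from the hunks above:

    /* Queue side (page_flip, sketch): reject a second flip outright. */
    spin_lock_irq(&dev->event_lock);
    if (exynos_crtc->event) {
            ret = -EBUSY;                   /* one flip outstanding max */
    } else {
            ret = drm_vblank_get(dev, exynos_crtc->pipe);
            if (!ret)
                    exynos_crtc->event = event;
    }
    spin_unlock_irq(&dev->event_lock);

    /* Completion side (finish_pageflip, sketch): send and clear. */
    spin_lock_irqsave(&dev->event_lock, flags);
    if (exynos_crtc->event) {
            drm_send_vblank_event(dev, -1, exynos_crtc->event);
            drm_vblank_put(dev, pipe);
            wake_up(&exynos_crtc->pending_flip_queue);
    }
    exynos_crtc->event = NULL;
    spin_unlock_irqrestore(&dev->event_lock, flags);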
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6258b800aab8..0ecd8fc45cff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -18,6 +18,7 @@
#include "exynos_drm_drv.h"
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+ struct drm_plane *plane,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
@@ -27,12 +28,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
-void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
- struct exynos_drm_plane *plane);
-void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
-void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
-void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
-
/* This function gets pipe value to crtc device matched with out_type. */
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 90168d7cf66a..8ac465208eae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -55,13 +55,11 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
int ret;
- int nr;
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
if (!private)
return -ENOMEM;
- INIT_LIST_HEAD(&private->pageflip_event_list);
dev_set_drvdata(dev->dev, dev);
dev->dev_private = (void *)private;
@@ -81,19 +79,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_mode_config_init(dev);
- for (nr = 0; nr < MAX_PLANE; nr++) {
- struct drm_plane *plane;
- unsigned long possible_crtcs = (1 << MAX_CRTC) - 1;
-
- plane = exynos_plane_init(dev, possible_crtcs,
- DRM_PLANE_TYPE_OVERLAY);
- if (!IS_ERR(plane))
- continue;
-
- ret = PTR_ERR(plane);
- goto err_mode_config_cleanup;
- }
-
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
@@ -237,25 +222,13 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;
if (!file->driver_priv)
return;
- /* Release all events not unhandled by page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
- list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
- base.link) {
- if (v->base.file_priv == file) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
- }
-
/* Release all events handled by page flip handler but not freed. */
list_for_each_entry_safe(e, et, &file->event_list, link) {
list_del(&e->link);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 9afd390d4674..e12ecb5d5d9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -21,7 +21,6 @@
#define MAX_CRTC 3
#define MAX_PLANE 5
#define MAX_FB_BUFFER 4
-#define DEFAULT_ZPOS -1
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc, base)
#define to_exynos_plane(x) container_of(x, struct exynos_drm_plane, base)
@@ -48,20 +47,22 @@ enum exynos_drm_output_type {
* Exynos drm common overlay structure.
*
* @base: plane object
- * @fb_x: offset x on a framebuffer to be displayed.
+ * @src_x: offset x on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed.
+ * @src_y: offset y on a framebuffer to be displayed.
* - the unit is screen coordinates.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
* @src_width: width of a partial image to be displayed from framebuffer.
* @src_height: height of a partial image to be displayed from framebuffer.
+ * @fb_width: width of a framebuffer.
+ * @fb_height: height of a framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_width: window width to be displayed (hardware screen).
* @crtc_height: window height to be displayed (hardware screen).
* @mode_width: width of screen mode.
* @mode_height: height of screen mode.
+ * @h_ratio: horizontal scaling ratio, 16.16 fixed point
+ * @v_ratio: vertical scaling ratio, 16.16 fixed point
* @refresh: refresh rate.
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
@@ -78,6 +79,7 @@ enum exynos_drm_output_type {
* @transparency: transparency on or off.
* @activated: activated or not.
* @enabled: enabled or not.
+ * @resume: to resume or not.
*
* this structure is common to exynos SoC and its contents would be copied
* to hardware specific overlay info.
@@ -85,25 +87,27 @@ enum exynos_drm_output_type {
struct exynos_drm_plane {
struct drm_plane base;
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int fb_width;
- unsigned int fb_height;
+ unsigned int src_x;
+ unsigned int src_y;
unsigned int src_width;
unsigned int src_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_width;
unsigned int crtc_height;
unsigned int mode_width;
unsigned int mode_height;
+ unsigned int h_ratio;
+ unsigned int v_ratio;
unsigned int refresh;
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- int zpos;
+ unsigned int zpos;
unsigned int index_color;
bool default_win:1;
@@ -112,6 +116,7 @@ struct exynos_drm_plane {
bool transparency:1;
bool activated:1;
bool enabled:1;
+ bool resume:1;
};
/*
@@ -172,9 +177,7 @@ struct exynos_drm_display {
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @wait_for_vblank: wait for vblank interrupt to make sure that
* hardware overlay is updated.
- * @win_mode_set: copy drm overlay info to hw specific overlay info.
* @win_commit: apply hardware specific overlay data to registers.
- * @win_enable: enable hardware specific overlay.
* @win_disable: disable hardware specific overlay.
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
@@ -189,11 +192,8 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
- void (*win_mode_set)(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane);
- void (*win_commit)(struct exynos_drm_crtc *crtc, int zpos);
- void (*win_enable)(struct exynos_drm_crtc *crtc, int zpos);
- void (*win_disable)(struct exynos_drm_crtc *crtc, int zpos);
+ void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+ void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
void (*te_handler)(struct exynos_drm_crtc *crtc);
};
@@ -210,6 +210,7 @@ struct exynos_drm_crtc_ops {
* we can refer to the crtc to current hardware interrupt occurred through
* this pipe value.
* @dpms: store the crtc dpms value
+ * @event: vblank event that is currently queued for flip
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the crtc's implementation specific context
*/
@@ -219,7 +220,7 @@ struct exynos_drm_crtc {
unsigned int pipe;
unsigned int dpms;
wait_queue_head_t pending_flip_queue;
- atomic_t pending_flip;
+ struct drm_pending_vblank_event *event;
struct exynos_drm_crtc_ops *ops;
void *ctx;
};
@@ -249,9 +250,6 @@ struct drm_exynos_file_private {
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
- /* list head for new event to be added. */
- struct list_head pageflip_event_list;
-
/*
* created crtc object would be contained at this array and
* this array is used to be aware of which crtc did it request vblank.
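
The new h_ratio/v_ratio fields hold 16.16 fixed-point scaling factors: ratio = (src << 16) / dst, and multiplying a destination extent by the ratio and shifting right by 16 recovers the matching source extent. A worked example with hypothetical sizes:

    /* 16.16 fixed-point scaling ratio (hypothetical sizes). */
    unsigned int src_w = 1920, crtc_w = 1280;

    unsigned int h_ratio = (src_w << 16) / crtc_w;      /* 0x18000 = 1.5 */
    unsigned int src_width = (crtc_w * h_ratio) >> 16;  /* 1920 again */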
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 05fe93dc57a8..04927153bf38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1473,12 +1473,6 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
return 0;
}
-static int exynos_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static struct drm_encoder *
exynos_dsi_best_encoder(struct drm_connector *connector)
{
@@ -1489,7 +1483,6 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.get_modes = exynos_dsi_get_modes,
- .mode_valid = exynos_dsi_mode_valid,
.best_encoder = exynos_dsi_best_encoder,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d346d1e6eda0..929cb03a8eab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -151,10 +151,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
- return ERR_PTR(-EINVAL);
- }
+ if (ret < 0)
+ return ERR_PTR(ret);
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb)
@@ -250,10 +248,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
+ if (ret < 0)
goto err_unreference;
- }
}
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 925fc69af1a0..9819fa6a9e2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,9 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
+#include "exynos_drm_fimd.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -54,6 +56,9 @@
/* size control register for hardware windows 1 ~ 2. */
#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
+#define VIDWnALPHA0(win) (VIDW_ALPHA + 0x00 + (win) * 8)
+#define VIDWnALPHA1(win) (VIDW_ALPHA + 0x04 + (win) * 8)
+
#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
@@ -140,31 +145,15 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
.has_vtsel = 1,
};
-struct fimd_win_data {
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- unsigned int pixel_format;
- dma_addr_t dma_addr;
- unsigned int buf_offsize;
- unsigned int line_size; /* bytes */
- bool enabled;
- bool resume;
-};
-
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
struct regmap *sysreg;
- struct fimd_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
u32 vidcon0;
@@ -284,14 +273,9 @@ static void fimd_clear_channel(struct fimd_context *ctx)
}
}
-static int fimd_ctx_initialize(struct fimd_context *ctx,
+static int fimd_iommu_attach_devices(struct fimd_context *ctx,
struct drm_device *drm_dev)
{
- struct exynos_drm_private *priv;
- priv = drm_dev->dev_private;
-
- ctx->drm_dev = drm_dev;
- ctx->pipe = priv->pipe++;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev)) {
@@ -313,7 +297,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx,
return 0;
}
-static void fimd_ctx_remove(struct fimd_context *ctx)
+static void fimd_iommu_detach_devices(struct fimd_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev))
@@ -506,58 +490,9 @@ static void fimd_disable_vblank(struct exynos_drm_crtc *crtc)
}
}
-static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win;
- unsigned long offset;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- offset = plane->fb_x * (plane->bpp >> 3);
- offset += plane->fb_y * plane->pitch;
-
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
-
- win_data = &ctx->win_data[win];
-
- win_data->offset_x = plane->crtc_x;
- win_data->offset_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->dma_addr = plane->dma_addr[0] + offset;
- win_data->bpp = plane->bpp;
- win_data->pixel_format = plane->pixel_format;
- win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
- (plane->bpp >> 3);
- win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
{
- struct fimd_win_data *win_data = &ctx->win_data[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
unsigned long val;
val = WINCONx_ENWIN;
@@ -567,11 +502,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* So the request format is ARGB8888 then change it to XRGB8888.
*/
if (ctx->driver_data->has_limited_fmt && !win) {
- if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
- win_data->pixel_format = DRM_FORMAT_XRGB8888;
+ if (plane->pixel_format == DRM_FORMAT_ARGB8888)
+ plane->pixel_format = DRM_FORMAT_XRGB8888;
}
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
@@ -607,7 +542,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+ DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -617,12 +552,30 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
* movement causes unstable DMA which results into iommu crash/tear.
*/
- if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
writel(val, ctx->regs + WINCON(win));
+
+ /* hardware window 0 doesn't support alpha channel. */
+ if (win != 0) {
+ /* OSD alpha */
+ val = VIDISD14C_ALPHA0_R(0xf) |
+ VIDISD14C_ALPHA0_G(0xf) |
+ VIDISD14C_ALPHA0_B(0xf) |
+ VIDISD14C_ALPHA1_R(0xf) |
+ VIDISD14C_ALPHA1_G(0xf) |
+ VIDISD14C_ALPHA1_B(0xf);
+
+ writel(val, ctx->regs + VIDOSD_C(win));
+
+ val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) |
+ VIDW_ALPHA_B(0xf);
+ writel(val, ctx->regs + VIDWnALPHA0(win));
+ writel(val, ctx->regs + VIDWnALPHA1(win));
+ }
}
static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
@@ -645,7 +598,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
* @protect: 1 to protect (disable updates)
*/
static void fimd_shadow_protect_win(struct fimd_context *ctx,
- int win, bool protect)
+ unsigned int win, bool protect)
{
u32 reg, bits, val;
@@ -665,29 +618,25 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
writel(val, ctx->regs + reg);
}
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win = zpos;
- unsigned long val, alpha, size;
- unsigned int last_x;
- unsigned int last_y;
+ struct exynos_drm_plane *plane;
+ dma_addr_t dma_addr;
+ unsigned long val, size, offset;
+ unsigned int last_x, last_y, buf_offsize, line_size;
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
/* If suspended, enable this on resume */
if (ctx->suspended) {
- win_data->resume = true;
+ plane->resume = true;
return;
}
@@ -704,38 +653,45 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
/* protect windows */
fimd_shadow_protect_win(ctx, win, true);
+
+ offset = plane->src_x * (plane->bpp >> 3);
+ offset += plane->src_y * plane->pitch;
+
/* buffer start address */
- val = (unsigned long)win_data->dma_addr;
+ dma_addr = plane->dma_addr[0] + offset;
+ val = (unsigned long)dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
- size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = (unsigned long)(win_data->dma_addr + size);
+ size = plane->pitch * plane->crtc_height;
+ val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->dma_addr, val, size);
+ (unsigned long)dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
+ plane->crtc_width, plane->crtc_height);
/* buffer size */
- val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
- VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
+ buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
+ line_size = plane->crtc_width * (plane->bpp >> 3);
+ val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
- val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
- VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
+ val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y(plane->crtc_y) |
+ VIDOSDxA_TOPLEFT_X_E(plane->crtc_x) |
+ VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = win_data->offset_x + win_data->ovl_width;
+ last_x = plane->crtc_x + plane->crtc_width;
if (last_x)
last_x--;
- last_y = win_data->offset_y + win_data->ovl_height;
+ last_y = plane->crtc_y + plane->crtc_height;
if (last_y)
last_y--;
@@ -745,24 +701,14 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y, last_x, last_y);
-
- /* hardware window 0 doesn't support alpha channel. */
- if (win != 0) {
- /* OSD alpha */
- alpha = VIDISD14C_ALPHA1_R(0xf) |
- VIDISD14C_ALPHA1_G(0xf) |
- VIDISD14C_ALPHA1_B(0xf);
-
- writel(alpha, ctx->regs + VIDOSD_C(win));
- }
+ plane->crtc_x, plane->crtc_y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C(win);
- val = win_data->ovl_width * win_data->ovl_height;
+ val = plane->crtc_width * plane->crtc_height;
writel(val, ctx->regs + offset);
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
@@ -782,29 +728,25 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
/* Enable DMA channel and unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- win_data->enabled = true;
+ plane->enabled = true;
if (ctx->i80_if)
atomic_set(&ctx->win_updated, 1);
}
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct fimd_context *ctx = crtc->ctx;
- struct fimd_win_data *win_data;
- int win = zpos;
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
+ struct exynos_drm_plane *plane;
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
if (ctx->suspended) {
 /* do not resume this window */
- win_data->resume = false;
+ plane->resume = false;
return;
}
@@ -819,42 +761,42 @@ static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos)
/* unprotect windows */
fimd_shadow_protect_win(ctx, win, false);
- win_data->enabled = false;
+ plane->enabled = false;
}
static void fimd_window_suspend(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
+ if (plane->enabled)
fimd_win_disable(ctx->crtc, i);
}
}
static void fimd_window_resume(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
}
}
static void fimd_apply(struct fimd_context *ctx)
{
- struct fimd_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ if (plane->enabled)
fimd_win_commit(ctx->crtc, i);
else
fimd_win_disable(ctx->crtc, i);
@@ -1011,7 +953,6 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.wait_for_vblank = fimd_wait_for_vblank,
- .win_mode_set = fimd_win_mode_set,
.win_commit = fimd_win_commit,
.win_disable = fimd_win_disable,
.te_handler = fimd_te_handler,
@@ -1056,25 +997,38 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
- ret = fimd_ctx_initialize(ctx, drm_dev);
- if (ret) {
- DRM_ERROR("fimd_ctx_initialize failed.\n");
- return ret;
+ ctx->drm_dev = drm_dev;
+ ctx->pipe = priv->pipe++;
+
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
}
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_LCD,
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
&fimd_crtc_ops, ctx);
- if (IS_ERR(ctx->crtc)) {
- fimd_ctx_remove(ctx);
+ if (IS_ERR(ctx->crtc))
return PTR_ERR(ctx->crtc);
- }
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
+ ret = fimd_iommu_attach_devices(ctx, drm_dev);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1086,10 +1040,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
+ fimd_iommu_detach_devices(ctx);
+
if (ctx->display)
exynos_dpi_remove(ctx->display);
-
- fimd_ctx_remove(ctx);
}
static const struct component_ops fimd_component_ops = {
@@ -1238,6 +1192,24 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}
+void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+{
+ struct fimd_context *ctx = crtc->ctx;
+ u32 val;
+
+ /*
+ * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
+ * clock. On these SoCs the bootloader may enable it, but any
+ * power domain off/on will reset it to the disabled state.
+ */
+ if (ctx->driver_data != &exynos5_fimd_driver_data)
+ return;
+
+ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+ writel(val, ctx->regs + DP_MIE_CLKCON);
+}
+EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
+
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
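
fimd_win_commit() now derives the DMA geometry from the framebuffer pitch at commit time instead of caching it in win_data at mode-set time. A worked sketch with hypothetical values:

    /* Sketch: FIMD per-line DMA geometry from pitch (hypothetical). */
    unsigned int pitch = 4096;      /* bytes per framebuffer scanline */
    unsigned int bpp = 32;
    unsigned int crtc_width = 800;  /* visible window width in pixels */

    unsigned int line_size = crtc_width * (bpp >> 3);  /* 3200 bytes shown */
    unsigned int buf_offsize = pitch - line_size;      /* 896 bytes skipped */

    /* VIDWx_BUF_SIZE takes both values, so after each displayed line the
     * DMA engine skips buf_offsize bytes to reach the next scanline. */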
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
new file mode 100644
index 000000000000..b4fcaa568456
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMD_H_
+#define _EXYNOS_DRM_FIMD_H_
+
+extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
+
+#endif /* _EXYNOS_DRM_FIMD_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d5ad17dfc24d..b7f1cbc46cc2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -476,6 +476,45 @@ err_clear:
return ret;
}
+static int ipp_validate_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_config *ipp_cfg;
+ unsigned int num_plane;
+ unsigned long min_size, size;
+ unsigned int bpp;
+ int i;
+
+ /* The property id should already be verified */
+ ipp_cfg = &c_node->property.config[m_node->prop_id];
+ num_plane = drm_format_num_planes(ipp_cfg->fmt);
+
+ /*
+ * This is a rather simplified validation of a memory node:
+ * it verifies the provided gem object handles and the buffer
+ * sizes against the current configuration. This is not the
+ * most thorough check possible, but it is sufficient here.
+ */
+ for (i = 0; i < num_plane; ++i) {
+ if (!m_node->buf_info.handles[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
+ min_size = (ipp_cfg->sz.hsize * ipp_cfg->sz.vsize * bpp) >> 3;
+ size = exynos_drm_gem_get_size(drm_dev,
+ m_node->buf_info.handles[i],
+ c_node->filp);
+ if (min_size > size) {
+ DRM_ERROR("invalid size for plane %d\n", i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int ipp_put_mem_node(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_mem_node *m_node)
@@ -552,6 +591,11 @@ static struct drm_exynos_ipp_mem_node
}
mutex_lock(&c_node->mem_lock);
+ if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+ }
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a5616872eee7..13ea3349363b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -92,7 +92,6 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
unsigned int actual_w;
unsigned int actual_h;
@@ -111,13 +110,17 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
crtc_y = 0;
}
+ /* set ratio */
+ exynos_plane->h_ratio = (src_w << 16) / crtc_w;
+ exynos_plane->v_ratio = (src_h << 16) / crtc_h;
+
/* set drm framebuffer data. */
- exynos_plane->fb_x = src_x;
- exynos_plane->fb_y = src_y;
+ exynos_plane->src_x = src_x;
+ exynos_plane->src_y = src_y;
+ exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
+ exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
exynos_plane->fb_width = fb->width;
exynos_plane->fb_height = fb->height;
- exynos_plane->src_width = src_w;
- exynos_plane->src_height = src_h;
exynos_plane->bpp = fb->bits_per_pixel;
exynos_plane->pitch = fb->pitches[0];
exynos_plane->pixel_format = fb->pixel_format;
@@ -139,9 +142,6 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
exynos_plane->crtc_width, exynos_plane->crtc_height);
plane->crtc = crtc;
-
- if (exynos_crtc->ops->win_mode_set)
- exynos_crtc->ops->win_mode_set(exynos_crtc, exynos_plane);
}
int
@@ -175,46 +175,21 @@ static int exynos_disable_plane(struct drm_plane *plane)
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
- if (exynos_crtc->ops->win_disable)
+ if (exynos_crtc && exynos_crtc->ops->win_disable)
exynos_crtc->ops->win_disable(exynos_crtc,
exynos_plane->zpos);
return 0;
}
-static void exynos_plane_destroy(struct drm_plane *plane)
-{
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
-
- exynos_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(exynos_plane);
-}
-
-static int exynos_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_private *dev_priv = dev->dev_private;
-
- if (property == dev_priv->plane_zpos_property) {
- exynos_plane->zpos = val;
- return 0;
- }
-
- return -EINVAL;
-}
-
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = exynos_update_plane,
.disable_plane = exynos_disable_plane,
- .destroy = exynos_plane_destroy,
- .set_property = exynos_plane_set_property,
+ .destroy = drm_plane_cleanup,
};
-static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
+static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_device *dev = plane->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
@@ -222,41 +197,36 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
prop = dev_priv->plane_zpos_property;
if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos", 0,
- MAX_PLANE - 1);
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
+ "zpos", 0, MAX_PLANE - 1);
if (!prop)
return;
dev_priv->plane_zpos_property = prop;
}
- drm_object_attach_property(&plane->base, prop, 0);
+ drm_object_attach_property(&plane->base, prop, zpos);
}
-struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned long possible_crtcs,
- enum drm_plane_type type)
+int exynos_plane_init(struct drm_device *dev,
+ struct exynos_drm_plane *exynos_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos)
{
- struct exynos_drm_plane *exynos_plane;
int err;
- exynos_plane = kzalloc(sizeof(struct exynos_drm_plane), GFP_KERNEL);
- if (!exynos_plane)
- return ERR_PTR(-ENOMEM);
-
err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats,
ARRAY_SIZE(formats), type);
if (err) {
DRM_ERROR("failed to initialize plane\n");
- kfree(exynos_plane);
- return ERR_PTR(err);
+ return err;
}
- if (type == DRM_PLANE_TYPE_PRIMARY)
- exynos_plane->zpos = DEFAULT_ZPOS;
- else
- exynos_plane_attach_zpos_property(&exynos_plane->base);
+ exynos_plane->zpos = zpos;
- return &exynos_plane->base;
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ exynos_plane_attach_zpos_property(&exynos_plane->base, zpos);
+
+ return 0;
}
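
Since a window's stacking position is now fixed by its hardware window index, the zpos property is created immutable and initialized to the window number, so userspace can read it but no longer change it. The pattern, as used above:

    /* Sketch: attaching a read-only zpos property to an overlay plane. */
    prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
                                     "zpos", 0, MAX_PLANE - 1);
    if (prop)
            drm_object_attach_property(&plane->base, prop, zpos);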
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 9d3c374e7b3e..f360590d1412 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -20,6 +20,7 @@ int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
-struct drm_plane *exynos_plane_init(struct drm_device *dev,
- unsigned long possible_crtcs,
- enum drm_plane_type type);
+int exynos_plane_init(struct drm_device *dev,
+ struct exynos_drm_plane *exynos_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b886972b5888..27e84ec21694 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -23,6 +23,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_vidi.h"
@@ -32,20 +33,6 @@
#define ctx_from_connector(c) container_of(c, struct vidi_context, \
connector)
-struct vidi_win_data {
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- dma_addr_t dma_addr;
- unsigned int buf_offsize;
- unsigned int line_size; /* bytes */
- bool enabled;
-};
-
struct vidi_context {
struct exynos_drm_display display;
struct platform_device *pdev;
@@ -53,7 +40,7 @@ struct vidi_context {
struct exynos_drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector connector;
- struct vidi_win_data win_data[WINDOWS_NR];
+ struct exynos_drm_plane planes[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
unsigned int default_win;
@@ -97,19 +84,6 @@ static const char fake_edid_info[] = {
0x00, 0x00, 0x00, 0x06
};
-static void vidi_apply(struct vidi_context *ctx)
-{
- struct exynos_drm_crtc_ops *crtc_ops = ctx->crtc->ops;
- struct vidi_win_data *win_data;
- int i;
-
- for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled && (crtc_ops && crtc_ops->win_commit))
- crtc_ops->win_commit(ctx->crtc, i);
- }
-}
-
static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -143,104 +117,46 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
ctx->vblank_on = false;
}
-static void vidi_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win;
- unsigned long offset;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win >= WINDOWS_NR)
- return;
-
- offset = plane->fb_x * (plane->bpp >> 3);
- offset += plane->fb_y * plane->pitch;
-
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, plane->pitch);
-
- win_data = &ctx->win_data[win];
-
- win_data->offset_x = plane->crtc_x;
- win_data->offset_y = plane->crtc_y;
- win_data->ovl_width = plane->crtc_width;
- win_data->ovl_height = plane->crtc_height;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->dma_addr = plane->dma_addr[0] + offset;
- win_data->bpp = plane->bpp;
- win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
- (plane->bpp >> 3);
- win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
-
- /*
- * some parts of win_data should be transferred to user side
- * through specific ioctl.
- */
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- plane->fb_width, plane->crtc_width);
-}
-
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win = zpos;
+ struct exynos_drm_plane *plane;
if (ctx->suspended)
return;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
- win_data->enabled = true;
+ plane->enabled = true;
- DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
if (ctx->vblank_on)
schedule_work(&ctx->work);
}
-static void vidi_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void vidi_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct vidi_context *ctx = crtc->ctx;
- struct vidi_win_data *win_data;
- int win = zpos;
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
+ struct exynos_drm_plane *plane;
if (win < 0 || win >= WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
- win_data->enabled = false;
+ plane = &ctx->planes[win];
+ plane->enabled = false;
/* TODO. */
}
static int vidi_power_on(struct vidi_context *ctx, bool enable)
{
+ struct exynos_drm_plane *plane;
+ int i;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
if (enable != false && enable != true)
@@ -253,7 +169,11 @@ static int vidi_power_on(struct vidi_context *ctx, bool enable)
if (test_and_clear_bit(0, &ctx->irq_flags))
vidi_enable_vblank(ctx->crtc);
- vidi_apply(ctx);
+ for (i = 0; i < WINDOWS_NR; i++) {
+ plane = &ctx->planes[i];
+ if (plane->enabled)
+ vidi_win_commit(ctx->crtc, i);
+ }
} else {
ctx->suspended = true;
}
@@ -301,7 +221,6 @@ static struct exynos_drm_crtc_ops vidi_crtc_ops = {
.dpms = vidi_dpms,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
- .win_mode_set = vidi_win_mode_set,
.win_commit = vidi_win_commit,
.win_disable = vidi_win_disable,
};
@@ -543,12 +462,25 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
vidi_ctx_initialize(ctx, drm_dev);
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_VIDI,
+ for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
+ type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[ctx->default_win];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI,
&vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
DRM_ERROR("failed to create crtc.\n");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 229b3613c60b..722cbf32192a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2007,7 +2007,7 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
m->hdisplay, m->vdisplay,
m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
- "INTERLACED" : "PROGERESSIVE");
+ "INTERLACED" : "PROGRESSIVE");
/* preserve mode information for later use. */
drm_mode_copy(&hdata->current_mode, mode);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 3518bc4654c5..fbec750574e6 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,34 +37,13 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
#include "exynos_mixer.h"
#define MIXER_WIN_NR 3
#define MIXER_DEFAULT_WIN 0
-struct hdmi_win_data {
- dma_addr_t dma_addr;
- dma_addr_t chroma_dma_addr;
- uint32_t pixel_format;
- unsigned int bpp;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_width;
- unsigned int crtc_height;
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int src_width;
- unsigned int src_height;
- unsigned int mode_width;
- unsigned int mode_height;
- unsigned int scan_flags;
- bool enabled;
- bool resume;
-};
-
struct mixer_resources {
int irq;
void __iomem *mixer_regs;
@@ -89,6 +68,7 @@ struct mixer_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
+ struct exynos_drm_plane planes[MIXER_WIN_NR];
int pipe;
bool interlace;
bool powered;
@@ -98,7 +78,6 @@ struct mixer_context {
struct mutex mixer_mutex;
struct mixer_resources mixer_res;
- struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -288,7 +267,7 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
/* choosing between interlace and progressive mode */
val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
- MXR_CFG_SCAN_PROGRASSIVE);
+ MXR_CFG_SCAN_PROGRESSIVE);
if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
/* choosing between proper HD and SD mode */
@@ -402,17 +381,16 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- struct hdmi_win_data *win_data;
- unsigned int x_ratio, y_ratio;
+ struct exynos_drm_plane *plane;
unsigned int buf_num = 1;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
bool crcb_mode = false;
u32 val;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
- switch (win_data->pixel_format) {
+ switch (plane->pixel_format) {
case DRM_FORMAT_NV12:
crcb_mode = false;
buf_num = 2;
@@ -420,35 +398,31 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
/* TODO: single buffer format NV12, NV21 */
default:
/* ignore pixel format at disable time */
- if (!win_data->dma_addr)
+ if (!plane->dma_addr[0])
break;
DRM_ERROR("pixel format for vp is wrong [%d].\n",
- win_data->pixel_format);
+ plane->pixel_format);
return;
}
- /* scaling feature: (src << 16) / dst */
- x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
- y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
-
if (buf_num == 2) {
- luma_addr[0] = win_data->dma_addr;
- chroma_addr[0] = win_data->chroma_dma_addr;
+ luma_addr[0] = plane->dma_addr[0];
+ chroma_addr[0] = plane->dma_addr[1];
} else {
- luma_addr[0] = win_data->dma_addr;
- chroma_addr[0] = win_data->dma_addr
- + (win_data->fb_width * win_data->fb_height);
+ luma_addr[0] = plane->dma_addr[0];
+ chroma_addr[0] = plane->dma_addr[0]
+ + (plane->pitch * plane->fb_height);
}
- if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+ if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
ctx->interlace = true;
if (tiled_mode) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + win_data->fb_width;
- chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+ luma_addr[1] = luma_addr[0] + plane->pitch;
+ chroma_addr[1] = chroma_addr[0] + plane->pitch;
}
} else {
ctx->interlace = false;
@@ -469,30 +443,30 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
- VP_IMG_VSIZE(win_data->fb_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
+ VP_IMG_VSIZE(plane->fb_height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
- VP_IMG_VSIZE(win_data->fb_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
+ VP_IMG_VSIZE(plane->fb_height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
- vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
+ vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
+ vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
vp_reg_write(res, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(win_data->fb_x));
- vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
+ VP_SRC_H_POSITION_VAL(plane->src_x));
+ vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
- vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
- vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
+ vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+ vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
- vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
- vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
+ vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+ vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
}
- vp_reg_write(res, VP_H_RATIO, x_ratio);
- vp_reg_write(res, VP_V_RATIO, y_ratio);
+ vp_reg_write(res, VP_H_RATIO, plane->h_ratio);
+ vp_reg_write(res, VP_V_RATIO, plane->v_ratio);
vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
@@ -502,8 +476,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, win_data->mode_height);
- mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_scan(ctx, plane->mode_height);
+ mixer_cfg_rgb_fmt(ctx, plane->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
@@ -520,25 +494,49 @@ static void mixer_layer_update(struct mixer_context *ctx)
mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
+static int mixer_setup_scale(const struct exynos_drm_plane *plane,
+ unsigned int *x_ratio, unsigned int *y_ratio)
+{
+ if (plane->crtc_width != plane->src_width) {
+ if (plane->crtc_width == 2 * plane->src_width)
+ *x_ratio = 1;
+ else
+ goto fail;
+ }
+
+ if (plane->crtc_height != plane->src_height) {
+ if (plane->crtc_height == 2 * plane->src_height)
+ *y_ratio = 1;
+ else
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ DRM_DEBUG_KMS("only 2x width/height scaling of plane supported\n");
+ return -ENOTSUPP;
+}
+
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- struct hdmi_win_data *win_data;
- unsigned int x_ratio, y_ratio;
+ struct exynos_drm_plane *plane;
+ unsigned int x_ratio = 0, y_ratio = 0;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
- win_data = &ctx->win_data[win];
+ plane = &ctx->planes[win];
#define RGB565 4
#define ARGB1555 5
#define ARGB4444 6
#define ARGB8888 7
- switch (win_data->bpp) {
+ switch (plane->bpp) {
case 16:
fmt = ARGB4444;
break;
@@ -549,21 +547,21 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
fmt = ARGB8888;
}
- /* 2x scaling feature */
- x_ratio = 0;
- y_ratio = 0;
+ /* check if mixer supports requested scaling setup */
+ if (mixer_setup_scale(plane, &x_ratio, &y_ratio))
+ return;
- dst_x_offset = win_data->crtc_x;
- dst_y_offset = win_data->crtc_y;
+ dst_x_offset = plane->crtc_x;
+ dst_y_offset = plane->crtc_y;
/* converting dma address base and source offset */
- dma_addr = win_data->dma_addr
- + (win_data->fb_x * win_data->bpp >> 3)
- + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+ dma_addr = plane->dma_addr[0]
+ + (plane->src_x * plane->bpp >> 3)
+ + (plane->src_y * plane->pitch);
src_x_offset = 0;
src_y_offset = 0;
- if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+ if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
ctx->interlace = true;
else
ctx->interlace = false;
@@ -576,18 +574,19 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
- mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+ plane->pitch / (plane->bpp >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
win == MIXER_DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(win_data->mode_height);
- val |= MXR_MXR_RES_WIDTH(win_data->mode_width);
+ val = MXR_MXR_RES_HEIGHT(plane->mode_height);
+ val |= MXR_MXR_RES_WIDTH(plane->mode_width);
mixer_reg_write(res, MXR_RESOLUTION, val);
}
- val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
- val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
+ val = MXR_GRP_WH_WIDTH(plane->src_width);
+ val |= MXR_GRP_WH_HEIGHT(plane->src_height);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -605,8 +604,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, win_data->mode_height);
- mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+ mixer_cfg_scan(ctx, plane->mode_height);
+ mixer_cfg_rgb_fmt(ctx, plane->mode_height);
mixer_cfg_layer(ctx, win, true);
/* layer update mandatory for mixer 16.0.33.0 */
@@ -918,62 +917,9 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
- struct exynos_drm_plane *plane)
-{
- struct mixer_context *mixer_ctx = crtc->ctx;
- struct hdmi_win_data *win_data;
- int win;
-
- if (!plane) {
- DRM_ERROR("plane is NULL\n");
- return;
- }
-
- DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
- plane->fb_width, plane->fb_height,
- plane->fb_x, plane->fb_y,
- plane->crtc_width, plane->crtc_height,
- plane->crtc_x, plane->crtc_y);
-
- win = plane->zpos;
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win >= MIXER_WIN_NR) {
- DRM_ERROR("mixer window[%d] is wrong\n", win);
- return;
- }
-
- win_data = &mixer_ctx->win_data[win];
-
- win_data->dma_addr = plane->dma_addr[0];
- win_data->chroma_dma_addr = plane->dma_addr[1];
- win_data->pixel_format = plane->pixel_format;
- win_data->bpp = plane->bpp;
-
- win_data->crtc_x = plane->crtc_x;
- win_data->crtc_y = plane->crtc_y;
- win_data->crtc_width = plane->crtc_width;
- win_data->crtc_height = plane->crtc_height;
-
- win_data->fb_x = plane->fb_x;
- win_data->fb_y = plane->fb_y;
- win_data->fb_width = plane->fb_width;
- win_data->fb_height = plane->fb_height;
- win_data->src_width = plane->src_width;
- win_data->src_height = plane->src_height;
-
- win_data->mode_width = plane->mode_width;
- win_data->mode_height = plane->mode_height;
-
- win_data->scan_flags = plane->scan_flag;
-}
-
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, int zpos)
+static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -989,14 +935,13 @@ static void mixer_win_commit(struct exynos_drm_crtc *crtc, int zpos)
else
mixer_graph_buffer(mixer_ctx, win);
- mixer_ctx->win_data[win].enabled = true;
+ mixer_ctx->planes[win].enabled = true;
}
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
+static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
{
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
- int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
unsigned long flags;
DRM_DEBUG_KMS("win: %d\n", win);
@@ -1004,7 +949,7 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
mutex_unlock(&mixer_ctx->mixer_mutex);
- mixer_ctx->win_data[win].resume = false;
+ mixer_ctx->planes[win].resume = false;
return;
}
mutex_unlock(&mixer_ctx->mixer_mutex);
@@ -1017,7 +962,7 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, int zpos)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
- mixer_ctx->win_data[win].enabled = false;
+ mixer_ctx->planes[win].enabled = false;
}
static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
@@ -1054,12 +999,12 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
static void mixer_window_suspend(struct mixer_context *ctx)
{
- struct hdmi_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->resume = win_data->enabled;
+ plane = &ctx->planes[i];
+ plane->resume = plane->enabled;
mixer_win_disable(ctx->crtc, i);
}
mixer_wait_for_vblank(ctx->crtc);
@@ -1067,14 +1012,14 @@ static void mixer_window_suspend(struct mixer_context *ctx)
static void mixer_window_resume(struct mixer_context *ctx)
{
- struct hdmi_win_data *win_data;
+ struct exynos_drm_plane *plane;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
- win_data = &ctx->win_data[i];
- win_data->enabled = win_data->resume;
- win_data->resume = false;
- if (win_data->enabled)
+ plane = &ctx->planes[i];
+ plane->enabled = plane->resume;
+ plane->resume = false;
+ if (plane->enabled)
mixer_win_commit(ctx->crtc, i);
}
}
@@ -1186,7 +1131,6 @@ static struct exynos_drm_crtc_ops mixer_crtc_ops = {
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.wait_for_vblank = mixer_wait_for_vblank,
- .win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
};
@@ -1250,15 +1194,28 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct exynos_drm_plane *exynos_plane;
+ enum drm_plane_type type;
+ unsigned int zpos;
int ret;
ret = mixer_initialize(ctx, drm_dev);
if (ret)
return ret;
- ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
- EXYNOS_DISPLAY_TYPE_HDMI,
- &mixer_crtc_ops, ctx);
+ for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+ type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
+ 1 << ctx->pipe, type, zpos);
+ if (ret)
+ return ret;
+ }
+
+ exynos_plane = &ctx->planes[MIXER_DEFAULT_WIN];
+ ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
+ ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI,
+ &mixer_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
mixer_ctx_remove(ctx);
ret = PTR_ERR(ctx->crtc);
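mixer_setup_scale() in the hunk above encodes the mixer's hardware restriction that graphics windows are either passed through unscaled or upscaled by exactly 2x: the ratio later written via MXR_GRP_WH_H_SCALE()/MXR_GRP_WH_V_SCALE() is 0 for 1:1 and 1 for 2x, and any other source/destination combination is rejected with -ENOTSUPP before any register is touched. A standalone userspace model of that rule, with names local to the sketch:

#include <assert.h>

/* ENOTSUPP is kernel-internal; define it for this userspace sketch. */
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

/* Local model of the rule in mixer_setup_scale() above: a window is
 * either unscaled (ratio 0) or exactly doubled (ratio 1); everything
 * else must be rejected before any mixer register is written. */
static int scale_ratio(unsigned int crtc, unsigned int src,
		       unsigned int *ratio)
{
	if (crtc == src)
		*ratio = 0;		/* 1:1 */
	else if (crtc == 2 * src)
		*ratio = 1;		/* exact 2x upscale */
	else
		return -ENOTSUPP;	/* e.g. 1.5x: unsupported */
	return 0;
}

int main(void)
{
	unsigned int r;

	assert(scale_ratio(1920, 1920, &r) == 0 && r == 0);
	assert(scale_ratio(3840, 1920, &r) == 0 && r == 1);
	assert(scale_ratio(2880, 1920, &r) == -ENOTSUPP);
	return 0;
}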
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 5f32e1a29411..ac60260c2389 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -101,7 +101,7 @@
#define MXR_CFG_GRP0_ENABLE (1 << 4)
#define MXR_CFG_VP_ENABLE (1 << 3)
#define MXR_CFG_SCAN_INTERLACE (0 << 2)
-#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_PROGRESSIVE (1 << 2)
#define MXR_CFG_SCAN_NTSC (0 << 1)
#define MXR_CFG_SCAN_PAL (1 << 1)
#define MXR_CFG_SCAN_SD (0 << 0)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d3ebaf204408..a69002e2257d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem.o \
+ i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1a52d6ab0f80..007c7d7d8295 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1200,6 +1200,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+ seq_printf(m, "Idle freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1214,6 +1217,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "min GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ seq_printf(m, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
@@ -2308,7 +2314,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
u8 crc[6];
drm_modeset_lock_all(dev);
- for_each_intel_encoder(dev, connector) {
+ for_each_intel_connector(dev, connector) {
if (connector->base.dpms != DRM_MODE_DPMS_ON)
continue;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d49ed68f041e..68e0c85a17cf 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1199,7 +1199,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ba7e1b7b733..e326ac9730cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150313"
+#define DRIVER_DATE "20150327"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -427,6 +427,8 @@ struct drm_i915_error_state {
u32 forcewake;
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
+ u32 fault_data0; /* gen8, gen9 */
+ u32 fault_data1; /* gen8, gen9 */
u32 done_reg;
u32 gac_eco;
u32 gam_ecochk;
@@ -544,7 +546,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
- struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
@@ -553,7 +555,7 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
- void (*modeset_global_resources)(struct drm_device *dev);
+ void (*modeset_global_resources)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -1025,13 +1027,12 @@ struct intel_gen6_power_mgmt {
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u32 cz_freq;
- u32 ei_interrupt_count;
-
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -2442,7 +2443,6 @@ extern int i915_resume_legacy(struct drm_device *dev);
struct i915_params {
int modeset;
int panel_ignore_lid;
- unsigned int powersave;
int semaphores;
unsigned int lvds_downclock;
int lvds_channel_mode;
@@ -2462,6 +2462,7 @@ struct i915_params {
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
+ bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
@@ -2601,12 +2602,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target,
- unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2623,20 +2618,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
-int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view);
-static inline
-int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
-{
- return i915_gem_object_pin_view(obj, vm, alignment, flags,
- &i915_ggtt_view_normal);
-}
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags);
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
@@ -2776,8 +2767,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2800,60 +2793,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
- return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+ return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}
+
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view);
-static inline
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
-}
+ struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-static inline
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
-}
-
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
- &i915_ggtt_view_normal);
-}
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->pin_count > 0)
- return true;
- return false;
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
@@ -2876,13 +2855,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
+ return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
static inline unsigned long
@@ -2906,7 +2879,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}
-void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
+static inline void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+}
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -2978,6 +2957,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target,
+ unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
+
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0fe313d0f609..d07c0b1fb498 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1,5 +1,5 @@
/*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,6 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -53,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
- struct shrink_control *sc);
-static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
- struct shrink_control *sc);
-static int i915_gem_shrinker_oom(struct notifier_block *nb,
- unsigned long event,
- void *ptr);
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
@@ -1936,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
- return obj->madv == I915_MADV_DONTNEED;
-}
-
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2047,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
- long target, unsigned flags)
-{
- const struct {
- struct list_head *list;
- unsigned int bit;
- } phases[] = {
- { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
- { NULL, 0 },
- }, *phase;
- unsigned long count = 0;
-
- /*
- * As we may completely rewrite the (un)bound list whilst unbinding
- * (due to retiring requests) we have to strictly process only
- * one element of the list at the time, and recheck the list
- * on every iteration.
- *
- * In particular, we must hold a reference whilst removing the
- * object as we may end up waiting for and/or retiring the objects.
- * This might release the final reference (held by the active list)
- * and result in the object being freed from under us. This is
- * similar to the precautions the eviction code must take whilst
- * removing objects.
- *
- * Also note that although these lists do not hold a reference to
- * the object we can safely grab one here: The final object
- * unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
- * object on the bound_list with a reference count equals 0.
- */
- for (phase = phases; phase->list; phase++) {
- struct list_head still_in_list;
-
- if ((flags & phase->bit) == 0)
- continue;
-
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(phase->list)) {
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma, *v;
-
- obj = list_first_entry(phase->list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
-
- if (flags & I915_SHRINK_PURGEABLE &&
- !i915_gem_object_is_purgeable(obj))
- continue;
-
- drm_gem_object_reference(&obj->base);
-
- /* For the unbound phase, this should be a no-op! */
- list_for_each_entry_safe(vma, v,
- &obj->vma_list, vma_link)
- if (i915_vma_unbind(vma))
- break;
-
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- drm_gem_object_unreference(&obj->base);
- }
- list_splice(&still_in_list, phase->list);
- }
-
- return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
- i915_gem_evict_everything(dev_priv->dev);
- return i915_gem_shrink(dev_priv, LONG_MAX,
- I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@@ -2755,24 +2660,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
WARN_ON(i915_verify_lists(ring->dev));
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate,
- * before we free the context associated with the requests.
+ /* Retire requests first as we use it above for the early return.
+ * If we retire requests last, we may use a later seqno and so clear
+ * the requests lists without clearing the active list, leading to
+ * confusion.
*/
- while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
-
- if (!i915_gem_request_completed(obj->last_read_req, true))
- break;
-
- i915_gem_object_move_to_inactive(obj);
- }
-
-
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2795,6 +2687,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
i915_gem_free_request(request);
}
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate,
+ * before we free the context associated with the requests.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_gem_request_completed(obj->last_read_req, true))
+ break;
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+
if (unlikely(ring->trace_irq_req &&
i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
@@ -3518,9 +3427,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
unsigned alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+ uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3532,6 +3441,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3570,7 +3482,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+ vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+ i915_gem_obj_lookup_or_create_vma(obj, vm);
+
if (IS_ERR(vma))
goto err_unpin;
@@ -3600,6 +3514,17 @@ search_free:
if (ret)
goto err_remove_node;
+ /* allocate before insert / bind */
+ if (vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+ VM_TO_TRACE_NAME(vma->vm));
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ goto err_remove_node;
+ }
+
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level,
flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
@@ -3952,7 +3877,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_engine_cs *pipelined)
+ struct intel_engine_cs *pipelined,
+ const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
bool was_pin_display;
@@ -3988,7 +3914,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+ ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+ view->type == I915_GGTT_VIEW_NORMAL ?
+ PIN_MAPPABLE : 0);
if (ret)
goto err_unpin_display;
@@ -4016,9 +3944,11 @@ err_unpin_display:
}
void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- i915_gem_object_ggtt_unpin(obj);
+ i915_gem_object_ggtt_unpin_view(obj, view);
+
obj->pin_display = is_pin_display(obj);
}
@@ -4167,12 +4097,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
return false;
}
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags,
- const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view,
+ uint32_t alignment,
+ uint64_t flags)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
@@ -4188,17 +4118,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return -EINVAL;
+
+ vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+ i915_gem_obj_to_vma(obj, vm);
+
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (i915_vma_misplaced(vma, alignment, flags)) {
+ unsigned long offset;
+ offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+ i915_gem_obj_offset(obj, vm);
WARN(vma->pin_count,
- "bo is already pinned with incorrect alignment:"
+ "bo is already pinned in %s with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset_view(obj, vm, view->type),
+ ggtt_view ? "ggtt" : "ppgtt",
+ offset,
alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
@@ -4212,8 +4154,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
- flags, view);
+ /* In true PPGTT, bind has possibly changed PDEs, which
+ * means we must do a context switch before the GPU can
+ * accurately read some of the VMAs.
+ */
+ vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+ flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
@@ -4254,16 +4200,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
return 0;
}
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ return i915_gem_object_do_pin(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+ alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ uint32_t alignment,
+ uint64_t flags)
+{
+ if (WARN_ONCE(!view, "no view specified"))
+ return -EINVAL;
+
+ return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ alignment, flags | PIN_GLOBAL);
+}
+
void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
BUG_ON(!vma);
- BUG_ON(vma->pin_count == 0);
- BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+ WARN_ON(vma->pin_count == 0);
+ WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
- if (--vma->pin_count == 0)
+ if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
obj->pin_mappable = false;
}
@@ -4384,7 +4355,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+ if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED;
@@ -4559,15 +4530,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == vm && vma->ggtt_view.type == view->type)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
return vma;
+ }
+ return NULL;
+}
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct i915_vma *vma;
+
+ if (WARN_ONCE(!view, "no view specified"))
+ return ERR_PTR(-EINVAL);
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma;
return NULL;
}
@@ -5006,13 +4995,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
- dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.shrinker);
-
- dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- register_oom_notifier(&dev_priv->mm.oom_notifier);
+ i915_gem_shrinker_init(dev_priv);
i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
@@ -5104,106 +5087,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
- if (!mutex_is_locked(mutex))
- return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+/* All the new VM stuff */
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
- if (to_i915(dev)->mm.shrinker_no_lock_stealing)
- return false;
+ WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
- *unlock = false;
- } else
- *unlock = true;
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm)
+ return vma->node.start;
+ }
- return true;
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
+ return -1;
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- int count = 0;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- count++;
- return count;
-}
-
-static unsigned long
-i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
-{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long count;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return 0;
-
- count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
- if (obj->pages_pin_count == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!i915_gem_obj_is_pinned(obj) &&
- obj->pages_pin_count == num_vma_bound(obj))
- count += obj->base.size >> PAGE_SHIFT;
- }
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
+ return vma->node.start;
- return count;
+ WARN(1, "global vma for this object not found.\n");
+ return -1;
}
-/* All the new VM stuff */
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (vma->vm == vm && vma->ggtt_view.type == view)
- return vma->node.start;
-
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
}
- WARN(1, "%s vma for this object not found.\n",
- i915_is_ggtt(vm) ? "global" : "ppgtt");
- return -1;
+
+ return false;
}
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
- struct i915_address_space *vm,
- enum i915_ggtt_view_type view)
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, vma_link)
- if (vma->vm == vm &&
- vma->ggtt_view.type == view &&
+ if (vma->vm == ggtt &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5231,118 +5178,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
BUG_ON(list_empty(&o->vma_list));
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+ continue;
if (vma->vm == vm)
return vma->node.size;
-
+ }
return 0;
}
-static unsigned long
-i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
- unsigned long freed;
- bool unlock;
-
- if (!i915_gem_shrinker_lock(dev, &unlock))
- return SHRINK_STOP;
-
- freed = i915_gem_shrink(dev_priv,
- sc->nr_to_scan,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- return freed;
-}
-
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
-{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed_pages;
- bool was_interruptible;
- bool unlock;
-
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
- schedule_timeout_killable(1);
- if (fatal_signal_pending(current))
- return NOTIFY_DONE;
- }
- if (timeout == 0) {
- pr_err("Unable to purge GPU memory due lock contention.\n");
- return NOTIFY_DONE;
- }
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- freed_pages = i915_gem_shrink_all(dev_priv);
-
- dev_priv->mm.interruptible = was_interruptible;
-
- /* Because we may be allocating inside our own driver, we cannot
- * assert that there are no objects with pinned pages that are not
- * being pointed to by hardware.
- */
- unbound = bound = pinned = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- if (!obj->base.filp) /* not backed by a freeable object */
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- unbound += obj->base.size;
- }
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->base.filp)
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
- else
- bound += obj->base.size;
+ if (vma->pin_count > 0)
+ return true;
}
-
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
-
- if (freed_pages || unbound || bound)
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed_pages << PAGE_SHIFT, pinned);
- if (unbound || bound)
- pr_err("%lu and %lu bytes still available in the "
- "bound and unbound GPU page lists.\n",
- bound, unbound);
-
- *(unsigned long *)ptr += freed_pages;
- return NOTIFY_DONE;
+ return false;
}
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (vma->vm == ggtt &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- return vma;
-
- return NULL;
-}
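The bulk of the removals above relocate the shrinker, its OOM notifier, and i915_gem_shrink()/i915_gem_shrink_all() into the new i915_gem_shrinker.c added in the Makefile hunk. The removed scan path documents the reclaim policy: first try to free purgeable objects together with the bound and unbound lists, and only shrink the remainder if the target was missed. A userspace toy model of that two-pass ordering — toy_shrink and toy_scan are stand-ins, not the driver functions:

#include <assert.h>

#define SHRINK_PURGEABLE 0x1
#define SHRINK_UNBOUND   0x2
#define SHRINK_BOUND     0x4

/* Stand-in for i915_gem_shrink(): pretend each flag bit frees one
 * page, capped at the target. */
static unsigned long toy_shrink(long target, unsigned flags)
{
	unsigned long freed = 0;

	if (flags & SHRINK_PURGEABLE) freed++;
	if (flags & SHRINK_UNBOUND)   freed++;
	if (flags & SHRINK_BOUND)     freed++;
	return freed > (unsigned long)target ? (unsigned long)target : freed;
}

/* Two-pass policy from the removed i915_gem_shrinker_scan(): purgeable
 * objects first, then everything else only if the target was missed. */
static unsigned long toy_scan(long nr_to_scan)
{
	unsigned long freed;

	freed = toy_shrink(nr_to_scan,
			   SHRINK_BOUND | SHRINK_UNBOUND | SHRINK_PURGEABLE);
	if (freed < (unsigned long)nr_to_scan)
		freed += toy_shrink(nr_to_scan - freed,
				    SHRINK_BOUND | SHRINK_UNBOUND);
	return freed;
}

int main(void)
{
	assert(toy_scan(5) == 5);
	return 0;
}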
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 70346b0028f9..f3e84c44d009 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -569,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring,
return ret;
}
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+ struct intel_context *from,
+ struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (to->remap_slice)
+ return false;
+
+ if (to->ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &to->ppgtt->pd_dirty_rings))
+ return true;
+ } else if (dev_priv->mm.aliasing_ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (INTEL_INFO(ring->dev)->gen < 8)
+ return true;
+
+ if (ring != &dev_priv->ring[RCS])
+ return true;
+
+ return false;
+}
+
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+ u32 hw_flags)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!to->ppgtt)
+ return false;
+
+ if (!IS_GEN8(ring->dev))
+ return false;
+
+ if (ring != &dev_priv->ring[RCS])
+ return false;
+
+ if (hw_flags & MI_RESTORE_INHIBIT)
+ return true;
+
+ return false;
+}
+
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
{
@@ -584,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring,
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && !to->remap_slice)
+ if (should_skip_switch(ring, from, to))
return 0;
/* Trying to pin first makes error handling easier. */
@@ -602,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring,
*/
from = ring->last_context;
- if (to->ppgtt) {
+ if (needs_pd_load_pre(ring, to)) {
+ /* Older GENs and non render rings still want the load first,
+ * "PP_DCLV followed by PP_DIR_BASE register through Load
+ * Register Immediate commands in Ring Buffer before submitting
+ * a context." */
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
+
+ /* Doing a PD load always reloads the page dirs */
+ clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
}
if (ring != &dev_priv->ring[RCS]) {
@@ -637,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
}
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
+ /* NB: If we inhibit the restore, the context is not allowed to
+ * die because future work may end up depending on valid address
+ * space. This means we must enforce that a page table load
+ * occurs whenever we inhibit the restore. */
+ } else if (to->ppgtt &&
+ test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+ hw_flags |= MI_FORCE_RESTORE;
+
+ /* We should never emit switch_mm more than once */
+ WARN_ON(needs_pd_load_pre(ring, to) &&
+ needs_pd_load_post(ring, to, hw_flags));
ret = mi_set_context(ring, to, hw_flags);
if (ret)
goto unpin_out;
+ /* GEN8 does *not* require an explicit reload if the PDPs have been
+ * setup, and we do not wish to move them.
+ */
+ if (needs_pd_load_post(ring, to, hw_flags)) {
+ trace_switch_mm(ring, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+ /* The hardware context switch is emitted, but we haven't
+ * actually changed the state - so it's probably safe to bail
+ * here. Still, let the user know something dangerous has
+ * happened.
+ */
+ if (ret) {
+ DRM_ERROR("Failed to change address space on context switch\n");
+ goto unpin_out;
+ }
+ }
+
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
@@ -681,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
i915_gem_context_unreference(from);
}
- uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ uninitialized = !to->legacy_hw_ctx.initialized;
to->legacy_hw_ctx.initialized = true;
done:
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e3a49d94da3a..d09e35ed9c9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
*
* This function is used by the object/vma binding code.
*
+ * Since this function is only used to free up virtual address space it only
+ * ignores pinned vmas, and not objects whose backing storage itself is
+ * pinned. Hence obj->pages_pin_count does not protect against eviction.
+ *
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dc10bc43864e..a3190e793ed4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -251,7 +251,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
- !obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -337,6 +336,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
return 0;
}
+static void
+clflush_write32(void *addr, uint32_t value)
+{
+ /* This is not a fast path, so KISS. */
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+ *(uint32_t *)addr = value;
+ drm_clflush_virt_range(addr, sizeof(uint32_t));
+}
+
+static int
+relocate_entry_clflush(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint64_t target_offset)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t page_offset = offset_in_page(reloc->offset);
+ uint64_t delta = (int)reloc->delta + target_offset;
+ char *vaddr;
+ int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
+ clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+ if (page_offset == 0) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ }
+
+ clflush_write32(vaddr + page_offset, upper_32_bits(delta));
+ }
+
+ kunmap_atomic(vaddr);
+
+ return 0;
+}
+
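To make the page-crossing logic above concrete, here is a minimal userspace sketch of just the offset arithmetic: the 64-bit value is written as two 32-bit dwords, and the mapping must be redone when the second dword starts a fresh page. It assumes a 4KiB page size; split_write() is a hypothetical helper, not driver API.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define offset_in_page(off) ((uint32_t)((off) & (PAGE_SIZE - 1)))

/* Decide where each half of a 64-bit relocation write lands, mirroring
 * relocate_entry_clflush(): write the low dword at the reloc offset,
 * advance by four bytes, and remap if that crossed into a new page. */
static void split_write(uint64_t reloc_offset)
{
	uint64_t page = reloc_offset / PAGE_SIZE;
	uint32_t lo_off = offset_in_page(reloc_offset);
	uint32_t hi_off = offset_in_page(lo_off + sizeof(uint32_t));

	printf("low  dword: page %llu, offset %u\n",
	       (unsigned long long)page, lo_off);
	if (hi_off == 0) /* second dword starts a fresh page: remap */
		page = (reloc_offset + sizeof(uint32_t)) / PAGE_SIZE;
	printf("high dword: page %llu, offset %u\n",
	       (unsigned long long)page, hi_off);
}

int main(void)
{
	split_write(0x1000);	/* both dwords in the same page */
	split_write(0x1ffc);	/* high dword spills into the next page */
	return 0;
}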
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
@@ -426,8 +470,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(obj))
ret = relocate_entry_cpu(obj, reloc, target_offset);
- else
+ else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
+ else if (cpu_has_clflush)
+ ret = relocate_entry_clflush(obj, reloc, target_offset);
+ else {
+ WARN_ONCE(1, "Impossible case in relocation handling\n");
+ ret = -ENODEV;
+ }
if (ret)
return ret;
@@ -525,6 +575,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
return ret;
}
+static bool only_mappable_for_reloc(unsigned int flags)
+{
+ return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
+ __EXEC_OBJECT_NEEDS_MAP;
+}
+
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
@@ -536,14 +592,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
int ret;
flags = 0;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_GLOBAL | PIN_MAPPABLE;
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
- flags |= PIN_GLOBAL;
- if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
- flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ if (!drm_mm_node_allocated(&vma->node)) {
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+ flags |= PIN_GLOBAL | PIN_MAPPABLE;
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+ flags |= PIN_GLOBAL;
+ if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+ flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ }
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+ if ((ret == -ENOSPC || ret == -E2BIG) &&
+ only_mappable_for_reloc(entry->flags))
+ ret = i915_gem_object_pin(obj, vma->vm,
+ entry->alignment,
+ flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
if (ret)
return ret;
@@ -605,13 +668,14 @@ eb_vma_misplaced(struct i915_vma *vma)
vma->node.start & (entry->alignment - 1))
return true;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
- return true;
-
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
+ /* avoid costly ping-pong once a batch bo ended up non-mappable */
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+ return !only_mappable_for_reloc(entry->flags);
+
return false;
}
@@ -1187,6 +1251,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ret)
goto error;
+ if (ctx->ppgtt)
+ WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+ "%s didn't clear reload\n", ring->name);
+ else if (dev_priv->mm.aliasing_ppgtt)
+ WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
+ (1<<ring->id), "%s didn't clear reload\n", ring->name);
+
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
@@ -1476,7 +1547,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- if (i915_needs_cmd_parser(ring)) {
+ if (i915_needs_cmd_parser(ring) && args->batch_len) {
batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2034f7cf238b..0239fbff7bf7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -67,8 +67,9 @@
* i915_ggtt_view_type and struct i915_ggtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _view suffix. They take the struct i915_ggtt_view parameter
- * encapsulating all metadata required to implement a view.
+ * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
* globally const i915_ggtt_view_normal singleton instance exists. All old core
@@ -92,6 +93,9 @@
*/
const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_rotated = {
+ .type = I915_GGTT_VIEW_ROTATED
+};
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -147,11 +151,11 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
-static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
{
- gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
switch (level) {
@@ -169,11 +173,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
- gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
@@ -182,11 +186,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
return pde;
}
-static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -204,11 +208,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -228,11 +232,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags)
+static gen6_pte_t byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -244,11 +248,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -257,11 +261,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 unused)
+static gen6_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 unused)
{
- gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -278,29 +282,91 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-static void unmap_and_free_pt(struct i915_page_table_entry *pt, struct drm_device *dev)
+#define i915_dma_unmap_single(px, dev) \
+ __i915_dma_unmap_single((px)->daddr, dev)
+
+static inline void __i915_dma_unmap_single(dma_addr_t daddr,
+ struct drm_device *dev)
+{
+ struct device *device = &dev->pdev->dev;
+
+ dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+}
+
+/**
+ * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
+ * @px: Page table/dir/etc to get a DMA map for
+ * @dev: drm device
+ *
+ * Page table allocations are unified across all gens. They always require a
+ * single 4k allocation, as well as a DMA mapping. If we keep the structs
+ * symmetric here, the simple macro covers us for every page table type.
+ *
+ * Return: 0 on success.
+ */
+#define i915_dma_map_single(px, dev) \
+ i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+
+static inline int i915_dma_map_page_single(struct page *page,
+ struct drm_device *dev,
+ dma_addr_t *daddr)
+{
+ struct device *device = &dev->pdev->dev;
+
+ *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device, *daddr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+ struct drm_device *dev)
{
if (WARN_ON(!pt->page))
return;
+
+ i915_dma_unmap_single(pt, dev);
__free_page(pt->page);
+ kfree(pt->used_ptes);
kfree(pt);
}
static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
{
struct i915_page_table_entry *pt;
+ const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+ GEN8_PTES : GEN6_PTES;
+ int ret = -ENOMEM;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
if (!pt)
return ERR_PTR(-ENOMEM);
- pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pt->page) {
- kfree(pt);
- return ERR_PTR(-ENOMEM);
- }
+ pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+ GFP_KERNEL);
+
+ if (!pt->used_ptes)
+ goto fail_bitmap;
+
+ pt->page = alloc_page(GFP_KERNEL);
+ if (!pt->page)
+ goto fail_page;
+
+ ret = i915_dma_map_single(pt, dev);
+ if (ret)
+ goto fail_dma;
return pt;
+
+fail_dma:
+ __free_page(pt->page);
+fail_page:
+ kfree(pt->used_ptes);
+fail_bitmap:
+ kfree(pt);
+
+ return ERR_PTR(ret);
}
/**
@@ -318,12 +384,12 @@ static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
* Return: 0 if allocation succeeded.
*/
static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
- struct drm_device *dev)
+ struct drm_device *dev)
{
int i, ret;
/* 512 is the max page tables per page_directory on any platform. */
- if (WARN_ON(pde + count > GEN6_PPGTT_PD_ENTRIES))
+ if (WARN_ON(pde + count > I915_PDES))
return -EINVAL;
for (i = pde; i < pde + count; i++) {
@@ -401,7 +467,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int i, ret;
/* bit of a hack to find the actual last used pd */
- int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+ int used_pd = ppgtt->num_pd_entries / I915_PDES;
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
@@ -420,7 +486,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen8_pte_t *pt_vaddr, scratch_pte;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -451,8 +517,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
page_table = pt->page;
last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES_PER_PAGE)
- last_pte = GEN8_PTES_PER_PAGE;
+ if (last_pte > GEN8_PTES)
+ last_pte = GEN8_PTES;
pt_vaddr = kmap_atomic(page_table);
@@ -466,7 +532,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
pte = 0;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
@@ -480,7 +546,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen8_gtt_pte_t *pt_vaddr;
+ gen8_pte_t *pt_vaddr;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -503,12 +569,12 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
- if (++pte == GEN8_PTES_PER_PAGE) {
+ if (++pte == GEN8_PTES) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
- if (++pde == GEN8_PDES_PER_PAGE) {
+ if (++pde == I915_PDES) {
pdpe++;
pde = 0;
}
@@ -529,7 +595,7 @@ static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct d
if (!pd->page)
return;
- for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
+ for (i = 0; i < I915_PDES; i++) {
if (WARN_ON(!pd->page_table[i]))
continue;
@@ -565,7 +631,7 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ for (j = 0; j < I915_PDES; j++) {
struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
struct i915_page_table_entry *pt;
dma_addr_t addr;
@@ -598,7 +664,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
- 0, GEN8_PDES_PER_PAGE, ppgtt->base.dev);
+ 0, I915_PDES, ppgtt->base.dev);
if (ret)
goto unwind_out;
}
@@ -648,7 +714,7 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
if (ret)
goto err_out;
- ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+ ppgtt->num_pd_entries = max_pdp * I915_PDES;
return 0;
@@ -697,7 +763,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-/**
+/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
* PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
@@ -710,7 +776,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+ const int min_pt_pages = I915_PDES * max_pdp;
int i, j, ret;
if (size % (1<<30))
@@ -733,7 +799,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
if (ret)
goto bail;
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ for (j = 0; j < I915_PDES; j++) {
ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
if (ret)
goto bail;
@@ -750,9 +816,9 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
*/
for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
- gen8_ppgtt_pde_t *pd_vaddr;
+ gen8_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
- for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+ for (j = 0; j < I915_PDES; j++) {
struct i915_page_table_entry *pt = pd->page_table[j];
dma_addr_t addr = pt->daddr;
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
@@ -770,11 +836,11 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.start = 0;
/* This is the area that we advertise as usable for the caller */
- ppgtt->base.total = max_pdp * GEN8_PDES_PER_PAGE * GEN8_PTES_PER_PAGE * PAGE_SIZE;
+ ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
/* Set all ptes to a valid scratch page. Also above requested space */
ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE,
+ ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
true);
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
@@ -794,22 +860,22 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
- gen6_gtt_pte_t __iomem *pd_addr;
- gen6_gtt_pte_t scratch_pte;
+ gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
uint32_t pd_entry;
int pte, pde;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
- pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
+ pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
ppgtt->pd.pd_offset,
ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
u32 expected;
- gen6_gtt_pte_t *pt_vaddr;
+ gen6_pte_t *pt_vaddr;
dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
pd_entry = readl(pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
@@ -822,9 +888,9 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
seq_printf(m, "\tPDE: %x\n", pd_entry);
pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
- for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+ for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
- (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+ (pde * PAGE_SIZE * GEN6_PTES) +
(pte * PAGE_SIZE);
int i;
bool found = false;
@@ -847,26 +913,36 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
}
-static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
+/* Write pde (index) from the page directory @pd to the page table @pt */
+static void gen6_write_pde(struct i915_page_directory_entry *pd,
+ const int pde, struct i915_page_table_entry *pt)
{
- struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
- gen6_gtt_pte_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
+ /* Caller needs to make sure the write completes if necessary */
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(pd, struct i915_hw_ppgtt, pd);
+ u32 pd_entry;
- WARN_ON(ppgtt->pd.pd_offset & 0x3f);
- pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
- ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+ pd_entry |= GEN6_PDE_VALID;
- pt_addr = ppgtt->pd.page_table[i]->daddr;
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
+ writel(pd_entry, ppgtt->pd_addr + pde);
+}
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
+/* Write the page directory entry for each page table found in the given
+ * range of the page directory. */
+static void gen6_write_page_range(struct drm_i915_private *dev_priv,
+ struct i915_page_directory_entry *pd,
+ uint32_t start, uint32_t length)
+{
+ struct i915_page_table_entry *pt;
+ uint32_t pde, temp;
+
+ gen6_for_each_pde(pt, pd, start, length, temp, pde)
+ gen6_write_pde(pd, pde, pt);
+
+ /* Make sure the write is complete before other code can use this page
+ * table. Also required for WC mapped PTEs. */
+ readl(dev_priv->gtt.gsm);
}
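For reference, the pd_entry value written by gen6_write_pde() is just the page table's DMA address passed through GEN6_PDE_ADDR_ENCODE() with the valid bit set. A self-contained sketch of that encoding, reusing the macro bodies from i915_gem_gtt.h; the sample address is made up:

#include <stdint.h>
#include <stdio.h>

/* gen6-hsw pack physical addr bits 39:32 into PDE bits 11:4
 * (macro bodies copied from i915_gem_gtt.h) */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_VALID (1 << 0)

int main(void)
{
	uint64_t daddr = 0x123456000ull;	/* hypothetical pt daddr */
	/* PDEs are 32 bits wide; the truncation keeps the page address
	 * bits 31:12 plus the re-encoded high bits at 11:4. */
	uint32_t pd_entry =
		(uint32_t)(GEN6_PDE_ADDR_ENCODE(daddr) | GEN6_PDE_VALID);

	printf("pd_entry = 0x%08x\n", pd_entry);
	return 0;
}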
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1022,19 +1098,19 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ gen6_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
- if (last_pte > I915_PPGTT_PT_ENTRIES)
- last_pte = I915_PPGTT_PT_ENTRIES;
+ if (last_pte > GEN6_PTES)
+ last_pte = GEN6_PTES;
pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
@@ -1056,10 +1132,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- gen6_gtt_pte_t *pt_vaddr;
+ gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
- unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned act_pt = first_entry / GEN6_PTES;
+ unsigned act_pte = first_entry % GEN6_PTES;
struct sg_page_iter sg_iter;
pt_vaddr = NULL;
@@ -1071,7 +1147,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
- if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+ if (++act_pte == GEN6_PTES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
act_pt++;
@@ -1082,23 +1158,133 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+/* PDE TLBs are a pain to invalidate pre GEN8: it requires a context reload.
+ * If we are switching between contexts with the same LRCA, we also must do
+ * a force restore.
+ */
+static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
+ /* Mark every ring as needing a page directory reload before its
+ * next context switch. */
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table_entry *pt)
+{
+ gen6_pte_t *pt_vaddr, scratch_pte;
int i;
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->base.dev->pdev,
- ppgtt->pd.page_table[i]->daddr,
- 4096, PCI_DMA_BIDIRECTIONAL);
+ WARN_ON(vm->scratch.addr == 0);
+
+ scratch_pte = vm->pte_encode(vm->scratch.addr,
+ I915_CACHE_LLC, true, 0);
+
+ pt_vaddr = kmap_atomic(pt->page);
+
+ for (i = 0; i < GEN6_PTES; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+}
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ DECLARE_BITMAP(new_page_tables, I915_PDES);
+ struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_page_table_entry *pt;
+ const uint32_t start_save = start, length_save = length;
+ uint32_t pde, temp;
+ int ret;
+
+ WARN_ON(upper_32_bits(start));
+
+ bitmap_zero(new_page_tables, I915_PDES);
+
+ /* The allocation is done in two stages so that we can bail out with
+ * a minimal amount of pain. The first stage finds the new page tables
+ * that need allocation. The second stage marks the PTEs in use within
+ * those page tables.
+ */
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ if (pt != ppgtt->scratch_pt) {
+ WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
+ continue;
+ }
+
+ /* The scratch page table stands in for not-yet-allocated entries,
+ * so it must never have any PTEs in use. */
+ WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
+
+ pt = alloc_pt_single(dev);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto unwind_out;
+ }
+
+ gen6_initialize_pt(vm, pt);
+
+ ppgtt->pd.page_table[pde] = pt;
+ set_bit(pde, new_page_tables);
+ trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
+ }
+
+ start = start_save;
+ length = length_save;
+
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
+
+ bitmap_zero(tmp_bitmap, GEN6_PTES);
+ bitmap_set(tmp_bitmap, gen6_pte_index(start),
+ gen6_pte_count(start, length));
+
+ if (test_and_clear_bit(pde, new_page_tables))
+ gen6_write_pde(&ppgtt->pd, pde, pt);
+
+ trace_i915_page_table_entry_map(vm, pde, pt,
+ gen6_pte_index(start),
+ gen6_pte_count(start, length),
+ GEN6_PTES);
+ bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
+ GEN6_PTES);
+ }
+
+ WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
+
+ /* Make sure the write is complete before other code can use this page
+ * table. Also required for WC mapped PTEs. */
+ readl(dev_priv->gtt.gsm);
+
+ mark_tlbs_dirty(ppgtt);
+ return 0;
+
+unwind_out:
+ for_each_set_bit(pde, new_page_tables, I915_PDES) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
+
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+ unmap_and_free_pt(pt, vm->dev);
+ }
+
+ mark_tlbs_dirty(ppgtt);
+ return ret;
}
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
- for (i = 0; i < ppgtt->num_pd_entries; i++)
- unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
+ if (pt != ppgtt->scratch_pt)
+ unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+ }
+
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
unmap_and_free_pd(&ppgtt->pd);
}
@@ -1109,7 +1295,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_unmap_pages(ppgtt);
gen6_ppgtt_free(ppgtt);
}
@@ -1125,6 +1310,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->scratch_pt))
+ return PTR_ERR(ppgtt->scratch_pt);
+
+ gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
@@ -1138,66 +1329,43 @@ alloc:
0, dev_priv->gtt.base.total,
0);
if (ret)
- return ret;
+ goto err_out;
retried = true;
goto alloc;
}
if (ret)
- return ret;
+ goto err_out;
+
if (ppgtt->node.start < dev_priv->gtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
- ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
+ ppgtt->num_pd_entries = I915_PDES;
return 0;
+
+err_out:
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ return ret;
}
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
- int ret;
-
- ret = gen6_ppgtt_allocate_page_directories(ppgtt);
- if (ret)
- return ret;
-
- ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
- ppgtt->base.dev);
-
- if (ret) {
- drm_mm_remove_node(&ppgtt->node);
- return ret;
- }
-
- return 0;
+ return gen6_ppgtt_allocate_page_directories(ppgtt);
}
-static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
+static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
+ uint64_t start, uint64_t length)
{
- struct drm_device *dev = ppgtt->base.dev;
- int i;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- struct page *page;
- dma_addr_t pt_addr;
-
- page = ppgtt->pd.page_table[i]->page;
- pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
- gen6_ppgtt_unmap_pages(ppgtt);
- return -EIO;
- }
+ struct i915_page_table_entry *unused;
+ uint32_t pde, temp;
- ppgtt->pd.page_table[i]->daddr = pt_addr;
- }
-
- return 0;
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+ ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1220,36 +1388,50 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ret = gen6_ppgtt_setup_page_tables(ppgtt);
- if (ret) {
- gen6_ppgtt_free(ppgtt);
- return ret;
+ if (aliasing) {
+ /* preallocate all pts */
+ ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+ ppgtt->base.dev);
+
+ if (ret) {
+ gen6_ppgtt_cleanup(&ppgtt->base);
+ return ret;
+ }
}
+ ppgtt->base.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
ppgtt->pd.pd_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+
+ if (aliasing)
+ ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ else
+ gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
+ gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
- gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
ppgtt->pd.pd_offset << 10);
return 0;
}
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
+ bool aliasing)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1257,7 +1439,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
- return gen6_ppgtt_init(ppgtt);
+ return gen6_ppgtt_init(ppgtt, aliasing);
else
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
@@ -1266,7 +1448,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- ret = __hw_ppgtt_init(dev, ppgtt);
+ ret = __hw_ppgtt_init(dev, ppgtt, false);
if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
@@ -1513,15 +1695,20 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- /* TODO: Perhaps it shouldn't be gen6 specific */
- if (i915_is_ggtt(vm)) {
- if (dev_priv->mm.aliasing_ppgtt)
- gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
- continue;
- }
+ if (USES_PPGTT(dev)) {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
- gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt,
+ base);
+
+ if (i915_is_ggtt(vm))
+ ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ gen6_write_page_range(dev_priv, &ppgtt->pd,
+ 0, ppgtt->base.total);
+ }
}
i915_ggtt_flush(dev_priv);
@@ -1540,7 +1727,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
-static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
@@ -1557,8 +1744,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen8_gtt_pte_t __iomem *gtt_entries =
- (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t __iomem *gtt_entries =
+ (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -1603,8 +1790,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
- gen6_gtt_pte_t __iomem *gtt_entries =
- (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t __iomem *gtt_entries =
+ (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -1642,8 +1829,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen8_pte_t scratch_pte, __iomem *gtt_base =
+ (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1668,8 +1855,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
@@ -1726,11 +1913,15 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ struct sg_table *pages = obj->pages;
/* Currently applicable only to VLV */
if (obj->gt_ro)
flags |= PTE_READ_ONLY;
+ if (i915_is_ggtt(vma->vm))
+ pages = vma->ggtt_view.pages;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1745,7 +1936,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
+ vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@@ -1756,8 +1947,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base,
- vma->ggtt_view.pages,
+ appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@@ -1893,9 +2083,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
if (!ppgtt)
return -ENOMEM;
- ret = __hw_ppgtt_init(dev, ppgtt);
- if (ret != 0)
+ ret = __hw_ppgtt_init(dev, ppgtt, true);
+ if (ret) {
+ kfree(ppgtt);
return ret;
+ }
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
@@ -2181,7 +2373,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
@@ -2226,7 +2418,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+ *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
ret = ggtt_probe_common(dev, gtt_size);
@@ -2331,11 +2523,16 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+static struct i915_vma *
+__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *ggtt_view)
{
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct i915_vma *vma;
+
+ if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+ return ERR_PTR(-EINVAL);
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -2344,10 +2541,11 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
- vma->ggtt_view = *view;
if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
+ vma->ggtt_view = *ggtt_view;
+
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
} else {
@@ -2356,6 +2554,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
} else {
BUG_ON(!i915_is_ggtt(vm));
+ vma->ggtt_view = *ggtt_view;
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
}
@@ -2368,38 +2567,170 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = __i915_gem_vma_create(obj, vm,
+ i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
+ struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma_view(obj, vm, view);
+ if (WARN_ON(!view))
+ return ERR_PTR(-EINVAL);
+
+ vma = i915_gem_obj_to_ggtt_view(obj, view);
+
+ if (IS_ERR(vma))
+ return vma;
+
if (!vma)
- vma = __i915_gem_vma_create(obj, vm, view);
+ vma = __i915_gem_vma_create(obj, ggtt, view);
return vma;
+
+}
+
+static void
+rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
+ struct sg_table *st)
+{
+ unsigned int column, row;
+ unsigned int src_idx;
+ struct scatterlist *sg = st->sgl;
+
+ st->nents = 0;
+
+ for (column = 0; column < width; column++) {
+ src_idx = width * (height - 1) + column;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /* We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only things we need are the DMA addresses.
+ */
+ sg_set_page(sg, NULL, PAGE_SIZE, 0);
+ sg_dma_address(sg) = in[src_idx];
+ sg_dma_len(sg) = PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= width;
+ }
+ }
}
-static inline
-int i915_get_vma_pages(struct i915_vma *vma)
+static struct sg_table *
+intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+ struct drm_i915_gem_object *obj)
{
+ struct drm_device *dev = obj->base.dev;
+ struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+ unsigned long size, pages, rot_pages;
+ struct sg_page_iter sg_iter;
+ unsigned long i;
+ dma_addr_t *page_addr_list;
+ struct sg_table *st;
+ unsigned int tile_pitch, tile_height;
+ unsigned int width_pages, height_pages;
+ int ret = -ENOMEM;
+
+ pages = obj->base.size / PAGE_SIZE;
+
+ /* Calculate tiling geometry. */
+ tile_height = intel_tile_height(dev, rot_info->pixel_format,
+ rot_info->fb_modifier);
+ tile_pitch = PAGE_SIZE / tile_height;
+ width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
+ height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
+ rot_pages = width_pages * height_pages;
+ size = rot_pages * PAGE_SIZE;
+
+ /* Allocate a temporary list of source pages for random access. */
+ page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+ if (!page_addr_list)
+ return ERR_PTR(ret);
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ /* Populate source page list from the object. */
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
+ i++;
+ }
+
+ /* Rotate the pages. */
+ rotate_pages(page_addr_list, width_pages, height_pages, st);
+
+ DRM_DEBUG_KMS(
+ "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
+ size, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+
+ drm_free_large(page_addr_list);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ drm_free_large(page_addr_list);
+
+ DRM_DEBUG_KMS(
+ "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
+ size, ret, rot_info->pitch, rot_info->height,
+ rot_info->pixel_format, width_pages, height_pages,
+ rot_pages);
+ return ERR_PTR(ret);
+}
+
+static inline int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
+{
+ int ret = 0;
+
if (vma->ggtt_view.pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->ggtt_view.pages = vma->obj->pages;
+ else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ vma->ggtt_view.pages =
+ intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
if (!vma->ggtt_view.pages) {
- DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+ DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
vma->ggtt_view.type);
- return -EINVAL;
+ ret = -EINVAL;
+ } else if (IS_ERR(vma->ggtt_view.pages)) {
+ ret = PTR_ERR(vma->ggtt_view.pages);
+ vma->ggtt_view.pages = NULL;
+ DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
}
- return 0;
+ return ret;
}
/**
@@ -2415,10 +2746,12 @@ int i915_get_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
- int ret = i915_get_vma_pages(vma);
+ if (i915_is_ggtt(vma->vm)) {
+ int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
vma->bind_vma(vma, cache_level, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c9e93f5070bc..fc03c99317c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,13 +36,13 @@
struct drm_i915_file_private;
-typedef uint32_t gen6_gtt_pte_t;
-typedef uint64_t gen8_gtt_pte_t;
-typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+typedef uint32_t gen6_pte_t;
+typedef uint64_t gen8_pte_t;
+typedef uint64_t gen8_pde_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
@@ -51,9 +51,16 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PPGTT_PD_ENTRIES 512
-#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
+#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID (1 << 0)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
@@ -89,8 +96,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPES 4
-#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
-#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -111,15 +117,28 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED
+};
+
+struct intel_rotation_info {
+ unsigned int height;
+ unsigned int pitch;
+ uint32_t pixel_format;
+ uint64_t fb_modifier;
};
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
struct sg_table *pages;
+
+ union {
+ struct intel_rotation_info rotation_info;
+ };
};
extern const struct i915_ggtt_view i915_ggtt_view_normal;
+extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
@@ -190,6 +209,8 @@ struct i915_vma {
struct i915_page_table_entry {
struct page *page;
dma_addr_t daddr;
+
+ unsigned long *used_ptes;
};
struct i915_page_directory_entry {
@@ -199,7 +220,7 @@ struct i915_page_directory_entry {
dma_addr_t daddr;
};
- struct i915_page_table_entry *page_table[GEN6_PPGTT_PD_ENTRIES]; /* PDEs */
+ struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
};
struct i915_page_directory_pointer_entry {
@@ -243,9 +264,12 @@ struct i915_address_space {
struct list_head inactive_list;
/* FIXME: Need a more generic return type */
- gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ gen6_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid, u32 flags); /* Create a valid PTE */
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -289,6 +313,7 @@ struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
+ unsigned long pd_dirty_rings;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
@@ -296,14 +321,82 @@ struct i915_hw_ppgtt {
struct i915_page_directory_entry pd;
};
+ struct i915_page_table_entry *scratch_pt;
+
struct drm_i915_file_private *file_priv;
+ gen6_pte_t __iomem *pd_addr;
+
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
+/* Iterates over every pde from start until start + length. If start and
+ * start + length are not perfectly divisible by the PDE size, the macro will
+ * round down and up as needed. The macro modifies pde, start, and length.
+ * On gen6/7, start = 0 and length = 2G effectively iterates over every PDE
+ * in the system.
+ *
+ * XXX: temp is not strictly needed, but it saves redoing the ALIGN operation.
+ */
+#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
+ for (iter = gen6_pde_index(start); \
+ pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
+ temp = min_t(unsigned, temp, length), \
+ start += temp, length -= temp)
+
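A worked trace of the macro's rounding behaviour, as a standalone sketch (ALIGN and MIN are re-derived for userspace; the start/length values are made up): a range that begins one page before a 4MB PDE boundary is split into a 0x1000 chunk in PDE 0 and a 0x2000 chunk in PDE 1.

#include <stdint.h>
#include <stdio.h>

#define GEN6_PDE_SHIFT 22
#define I915_PDES 512
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Trace the same start/length bookkeeping as gen6_for_each_pde():
 * each iteration covers at most the remainder of one 4MB PDE. */
int main(void)
{
	uint64_t start = 0x3ff000, length = 0x3000; /* straddles PDE 0/1 */
	uint32_t pde = start >> GEN6_PDE_SHIFT;

	while (length > 0 && pde < I915_PDES) {
		uint64_t temp =
			ALIGN(start + 1, 1ull << GEN6_PDE_SHIFT) - start;

		temp = MIN(temp, length);
		printf("pde %u: start 0x%llx, length 0x%llx\n", pde,
		       (unsigned long long)start, (unsigned long long)temp);
		start += temp;
		length -= temp;
		pde++;
	}
	return 0;
}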
+static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
+{
+ const uint32_t mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/* Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
+static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
+ uint32_t pde_shift)
+{
+ const uint64_t mask = ~((1 << pde_shift) - 1);
+ uint64_t end;
+
+ WARN_ON(length == 0);
+ WARN_ON(offset_in_page(addr|length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
+
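A standalone sketch of the same clamping arithmetic (pte_index() and pte_count() here are userspace re-derivations, not the driver functions): the second call below starts one page before a page table boundary, so the count is clamped to 1 instead of 3.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define GEN6_PDE_SHIFT 22
#define NUM_PTE(pde_shift) (1u << ((pde_shift) - PAGE_SHIFT))

static uint32_t pte_index(uint64_t address, uint32_t pde_shift)
{
	return (address >> PAGE_SHIFT) & (NUM_PTE(pde_shift) - 1);
}

/* Same clamping as i915_pte_count(): a range that crosses a page table
 * boundary is cut at the boundary, so the count never exceeds one pt. */
static uint32_t pte_count(uint64_t addr, uint64_t length, uint32_t pde_shift)
{
	const uint64_t mask = ~((1ull << pde_shift) - 1);
	uint64_t end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - pte_index(addr, pde_shift);
	return pte_index(end, pde_shift) - pte_index(addr, pde_shift);
}

int main(void)
{
	/* 8 pages wholly inside one 4MB page table */
	printf("%u\n", pte_count(0x100000, 0x8000, GEN6_PDE_SHIFT)); /* 8 */
	/* 3 pages starting one page before a pt boundary: clamped */
	printf("%u\n", pte_count(0x3ff000, 0x3000, GEN6_PDE_SHIFT)); /* 1 */
	return 0;
}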
+static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline uint32_t gen6_pte_index(uint32_t addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline uint32_t gen6_pde_index(uint32_t addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -332,4 +425,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+static inline bool
+i915_ggtt_view_equal(const struct i915_ggtt_view *a,
+ const struct i915_ggtt_view *b)
+{
+ if (WARN_ON(!a || !b))
+ return false;
+
+ return a->type == b->type;
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
new file mode 100644
index 000000000000..f7929e769250
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/oom.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
+/**
+ * i915_gem_shrink - Shrink buffer object caches
+ * @dev_priv: i915 device
+ * @target: amount of memory to make available, in pages
+ * @flags: control flags for selecting cache types
+ *
+ * This function is the main interface to the shrinker. It will try to release
+ * up to @target pages of main memory backing storage from buffer objects.
+ * Selection of the specific caches can be done with @flags. This is e.g. useful
+ * when purgeable objects should be removed from caches preferentially.
+ *
+ * Note that it's not guaranteed that the released amount is actually
+ * available as free system memory - the pages might still be in use due to
+ * other reasons (like cpu mmaps) or the mm core has reused them before we
+ * could grab them. Therefore code that needs to explicitly shrink buffer
+ * object caches (e.g. to avoid deadlocks in memory reclaim) must fall back
+ * to i915_gem_shrink_all().
+ *
+ * Also note that any kind of pinning (both per-vma address space pins and
+ * backing storage pins at the buffer object level) results in the shrinker code
+ * having to skip the object.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target, unsigned flags)
+{
+ const struct {
+ struct list_head *list;
+ unsigned int bit;
+ } phases[] = {
+ { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { NULL, 0 },
+ }, *phase;
+ unsigned long count = 0;
+
+ /*
+ * As we may completely rewrite the (un)bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+	 * one element of the list at a time, and recheck the list
+ * on every iteration.
+ *
+ * In particular, we must hold a reference whilst removing the
+ * object as we may end up waiting for and/or retiring the objects.
+ * This might release the final reference (held by the active list)
+ * and result in the object being freed from under us. This is
+ * similar to the precautions the eviction code must take whilst
+ * removing objects.
+ *
+ * Also note that although these lists do not hold a reference to
+ * the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+	 * object on the bound_list with a reference count equal to 0.
+ */
+ for (phase = phases; phase->list; phase++) {
+ struct list_head still_in_list;
+
+ if ((flags & phase->bit) == 0)
+ continue;
+
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(phase->list)) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma, *v;
+
+ obj = list_first_entry(phase->list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
+
+ if (flags & I915_SHRINK_PURGEABLE &&
+ obj->madv != I915_MADV_DONTNEED)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
+
+ /* For the unbound phase, this should be a no-op! */
+ list_for_each_entry_safe(vma, v,
+ &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, phase->list);
+ }
+
+ return count;
+}
+
+/**
+ * i915_gem_shrink_all - Shrink buffer object caches completely
+ * @dev_priv: i915 device
+ *
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink
+ * all caches completely. It first waits for and retires all outstanding
+ * requests so that backing storage for active objects can be released too.
+ *
+ * This should only be used in code to intentionally quiesce the GPU or as a
+ * last-ditch effort when memory seems to have run out.
+ *
+ * Returns:
+ * The number of pages of backing storage actually released.
+ */
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+ i915_gem_evict_everything(dev_priv->dev);
+ return i915_gem_shrink(dev_priv, LONG_MAX,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
+}
+
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ if (to_i915(dev)->mm.shrinker_no_lock_stealing)
+ return false;
+
+ *unlock = false;
+ } else
+ *unlock = true;
+
+ return true;
+}
+
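+/* Count how many of the object's VMAs actually have GTT space allocated. */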
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+
+ return count;
+}
+
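+/*
+ * Report how many pages could potentially be freed: unbound objects whose
+ * pages are not pinned, and bound objects that are not otherwise pinned and
+ * whose pages are pinned only by their GTT bindings.
+ */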
+static unsigned long
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long count;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return 0;
+
+ count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ if (obj->pages_pin_count == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_is_pinned(obj) &&
+ obj->pages_pin_count == num_vma_bound(obj))
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
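+/*
+ * Scan callback: first try to satisfy the request from purgeable
+ * (MADV_DONTNEED) objects only, and fall back to unpinning any remaining
+ * shrinkable objects if that was not enough.
+ */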
+static unsigned long
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long freed;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ freed = i915_gem_shrink(dev_priv,
+ sc->nr_to_scan,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
+ if (freed < sc->nr_to_scan)
+ freed += i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return freed;
+}
+
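+/*
+ * OOM notifier: spend up to ~5 seconds trying to acquire struct_mutex, then
+ * purge everything we can and report how much memory is still pinned.
+ */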
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj;
+ unsigned long timeout = msecs_to_jiffies(5000) + 1;
+ unsigned long pinned, bound, unbound, freed_pages;
+ bool was_interruptible;
+ bool unlock;
+
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
+ schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
+ if (timeout == 0) {
+ pr_err("Unable to purge GPU memory due to lock contention.\n");
+ return NOTIFY_DONE;
+ }
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ freed_pages = i915_gem_shrink_all(dev_priv);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ /* Because we may be allocating inside our own driver, we cannot
+ * assert that there are no objects with pinned pages that are not
+ * being pointed to by hardware.
+ */
+ unbound = bound = pinned = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (!obj->base.filp) /* not backed by a freeable object */
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ unbound += obj->base.size;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!obj->base.filp)
+ continue;
+
+ if (obj->pages_pin_count)
+ pinned += obj->base.size;
+ else
+ bound += obj->base.size;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ if (freed_pages || unbound || bound)
+ pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+ freed_pages << PAGE_SHIFT, pinned);
+ if (unbound || bound)
+ pr_err("%lu and %lu bytes still available in the "
+ "bound and unbound GPU page lists.\n",
+ bound, unbound);
+
+ *(unsigned long *)ptr += freed_pages;
+ return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_shrinker_init - Initialize i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function registers and sets up the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
+{
+ dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.shrinker);
+
+ dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ register_oom_notifier(&dev_priv->mm.oom_notifier);
+}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a982849a5edd..1d4e60df8883 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -386,6 +386,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
+
+ if (INTEL_INFO(dev)->gen >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ error->fault_data1, error->fault_data0);
+
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
@@ -555,7 +560,14 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore_obj);
+
+ for (i = 0; i < error->vm_count; i++)
+ kfree(error->active_bo[i]);
+
kfree(error->active_bo);
+ kfree(error->active_bo_count);
+ kfree(error->pinned_bo);
+ kfree(error->pinned_bo_count);
kfree(error->overlay);
kfree(error->display);
kfree(error);
@@ -1164,6 +1176,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_GEN7(dev))
error->err_int = I915_READ(GEN7_ERR_INT);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ }
+
if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 49ad5fb82ace..14ecb4d13a1a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,6 +277,7 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
+ dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -330,12 +331,10 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
-
- dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
+
+ synchronize_irq(dev->irq);
}
/**
@@ -997,129 +996,73 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
}
-static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *rps_ei)
+static void vlv_c0_read(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *ei)
{
- u32 cz_ts, cz_freq_khz;
- u32 render_count, media_count;
- u32 elapsed_render, elapsed_media, elapsed_time;
- u32 residency = 0;
-
- cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
- cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
-
- render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
- media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
-
- if (rps_ei->cz_clock == 0) {
- rps_ei->cz_clock = cz_ts;
- rps_ei->render_c0 = render_count;
- rps_ei->media_c0 = media_count;
-
- return dev_priv->rps.cur_freq;
- }
-
- elapsed_time = cz_ts - rps_ei->cz_clock;
- rps_ei->cz_clock = cz_ts;
+ ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
+ ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+}
- elapsed_render = render_count - rps_ei->render_c0;
- rps_ei->render_c0 = render_count;
+static bool vlv_c0_above(struct drm_i915_private *dev_priv,
+ const struct intel_rps_ei *old,
+ const struct intel_rps_ei *now,
+ int threshold)
+{
+ u64 time, c0;
- elapsed_media = media_count - rps_ei->media_c0;
- rps_ei->media_c0 = media_count;
+ if (old->cz_clock == 0)
+ return false;
- /* Convert all the counters into common unit of milli sec */
- elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
- elapsed_render /= cz_freq_khz;
- elapsed_media /= cz_freq_khz;
+ time = now->cz_clock - old->cz_clock;
+ time *= threshold * dev_priv->mem_freq;
- /*
- * Calculate overall C0 residency percentage
- * only if elapsed time is non zero
+ /* Workload can be split between render + media, e.g. SwapBuffers
+ * being blitted in X after being rendered in mesa. To account for
+ * this we need to combine both engines into our activity counter.
*/
- if (elapsed_time) {
- residency =
- ((max(elapsed_render, elapsed_media) * 100)
- / elapsed_time);
- }
+ c0 = now->render_c0 - old->render_c0;
+ c0 += now->media_c0 - old->media_c0;
+ c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
- return residency;
+ return c0 >= time;
}
-/**
- * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
- * busy-ness calculated from C0 counters of render & media power wells
- * @dev_priv: DRM device private
- *
- */
-static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- u32 residency_C0_up = 0, residency_C0_down = 0;
- int new_delay, adj;
-
- dev_priv->rps.ei_interrupt_count++;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-
- if (dev_priv->rps.up_ei.cz_clock == 0) {
- vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
- vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
- return dev_priv->rps.cur_freq;
- }
+ vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
+ dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+}
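+
+/*
+ * WaGsvRC0ResidencyMethod: on VLV the RPS up/down threshold events are
+ * derived in software from the render/media C0 residency counters, sampled
+ * when an EI-expired interrupt fires.
+ */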
+static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+ struct intel_rps_ei now;
+ u32 events = 0;
- /*
- * To down throttle, C0 residency should be less than down threshold
- * for continous EI intervals. So calculate down EI counters
- * once in VLV_INT_COUNT_FOR_DOWN_EI
- */
- if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+ if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ return 0;
- dev_priv->rps.ei_interrupt_count = 0;
+ vlv_c0_read(dev_priv, &now);
+ if (now.cz_clock == 0)
+ return 0;
- residency_C0_down = vlv_c0_residency(dev_priv,
- &dev_priv->rps.down_ei);
- } else {
- residency_C0_up = vlv_c0_residency(dev_priv,
- &dev_priv->rps.up_ei);
+ if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
+ if (!vlv_c0_above(dev_priv,
+ &dev_priv->rps.down_ei, &now,
+ VLV_RP_DOWN_EI_THRESHOLD))
+ events |= GEN6_PM_RP_DOWN_THRESHOLD;
+ dev_priv->rps.down_ei = now;
}
- new_delay = dev_priv->rps.cur_freq;
-
- adj = dev_priv->rps.last_adj;
- /* C0 residency is greater than UP threshold. Increase Frequency */
- if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else
- adj = 1;
-
- if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
-
- /*
- * For better performance, jump directly
- * to RPe if we're below it.
- */
- if (new_delay < dev_priv->rps.efficient_freq)
- new_delay = dev_priv->rps.efficient_freq;
-
- } else if (!dev_priv->rps.ei_interrupt_count &&
- (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
- if (adj < 0)
- adj *= 2;
- else
- adj = -1;
- /*
- * This means, C0 residency is less than down threshold over
- * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
- */
- if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
- new_delay = dev_priv->rps.cur_freq + adj;
+ if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ if (vlv_c0_above(dev_priv,
+ &dev_priv->rps.up_ei, &now,
+ VLV_RP_UP_EI_THRESHOLD))
+ events |= GEN6_PM_RP_UP_THRESHOLD;
+ dev_priv->rps.up_ei = now;
}
- return new_delay;
+ return events;
}
static void gen6_pm_rps_work(struct work_struct *work)
@@ -1149,6 +1092,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
+ pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
+
adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
@@ -1171,8 +1116,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
@@ -4299,7 +4242,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index e2d20ffe6586..bb64415a1c3e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -27,7 +27,6 @@
struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
- .powersave = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
@@ -44,6 +43,7 @@ struct i915_params i915 __read_mostly = {
.enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0,
+ .load_detect_test = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
@@ -65,10 +65,6 @@ MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-module_param_named(powersave, i915.powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
- "Enable powersavings, fbc, downclocking, etc. (default: true)");
-
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
@@ -144,11 +140,16 @@ module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
-module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
+module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
+module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
+MODULE_PARM_DESC(load_detect_test,
+ "Force-enable the VGA load detect code for testing (default:false). "
+ "For developers only.");
+
module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cc8ebabc488d..b522eb6e59a4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -673,7 +673,6 @@ enum skl_disp_power_wells {
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
-#define VLV_INT_COUNT_FOR_DOWN_EI 5
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1307,6 +1306,9 @@ enum skl_disp_power_wells {
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
+#define GEN8_FAULT_TLB_DATA0 0x04b10
+#define GEN8_FAULT_TLB_DATA1 0x04b14
+
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -6220,8 +6222,8 @@ enum skl_disp_power_wells {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
-#define VLV_RENDER_C0_COUNT_REG 0x138118
-#define VLV_MEDIA_C0_COUNT_REG 0x13811C
+#define VLV_RENDER_C0_COUNT 0x138118
+#define VLV_MEDIA_C0_COUNT 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f004d3d89b87..b3070a4501ab 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -156,6 +156,105 @@ TRACE_EVENT(i915_vma_unbind,
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
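+/* Tag for the trace name: "G" for the global GTT, "P" for a PPGTT. */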
+#define VM_TO_TRACE_NAME(vm) \
+ (i915_is_ggtt(vm) ? "G" : \
+ "P")
+
+DECLARE_EVENT_CLASS(i915_va,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u64, start)
+ __field(u64, end)
+ __string(name, name)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->start = start;
+ __entry->end = start + length - 1;
+ __assign_str(name, name);
+ ),
+
+ TP_printk("vm=%p (%s), 0x%llx-0x%llx",
+ __entry->vm, __get_str(name), __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_va, i915_va_alloc,
+ TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
+ TP_ARGS(vm, start, length, name)
+);
+
+DECLARE_EVENT_CLASS(i915_page_table_entry,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u64, start)
+ __field(u64, end)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->start = start;
+ __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
+ ),
+
+ TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
+ TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
+ TP_ARGS(vm, pde, start, pde_shift)
+);
+
+/* Avoid extra math because we only support two sizes. The format is defined by
+ * bitmap_scnprintf. Each 32 bits is 8 hex digits followed by a comma. */
+#define TRACE_PT_SIZE(bits) \
+ ((((bits) == 1024) ? 288 : 144) + 1)
+
+DECLARE_EVENT_CLASS(i915_page_table_entry_update,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits),
+
+ TP_STRUCT__entry(
+ __field(struct i915_address_space *, vm)
+ __field(u32, pde)
+ __field(u32, first)
+ __field(u32, last)
+ __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->pde = pde;
+ __entry->first = first;
+ __entry->last = first + count - 1;
+ scnprintf(__get_str(cur_ptes),
+ TRACE_PT_SIZE(bits),
+ "%*pb",
+ bits,
+ pt->used_ptes);
+ ),
+
+ TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
+ __entry->vm, __entry->pde, __entry->last, __entry->first,
+ __get_str(cur_ptes))
+);
+
+DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
+ TP_PROTO(struct i915_address_space *vm, u32 pde,
+ struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ TP_ARGS(vm, pde, pt, first, count, bits)
+);
+
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e66e17af0a56..6095a998bdac 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -690,7 +690,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -706,9 +706,11 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else
+ else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(connector, &tmp);
+ else
+ status = connector_status_unknown;
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
} else
status = connector_status_unknown;
@@ -794,6 +796,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_get_property = intel_connector_atomic_get_property,
};
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8aee7d77ce9d..47b9307da24b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -492,17 +492,23 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
}
static struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *intel_encoder, *ret = NULL;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_encoder *ret = NULL;
+ struct drm_atomic_state *state;
int num_encoders = 0;
+ int i;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc == crtc) {
- ret = intel_encoder;
- num_encoders++;
- }
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i] ||
+ state->connector_states[i]->crtc != crtc_state->base.crtc)
+ continue;
+
+ ret = to_intel_encoder(state->connector_states[i]->best_encoder);
+ num_encoders++;
}
WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
@@ -1216,7 +1222,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
- intel_ddi_get_crtc_new_encoder(intel_crtc);
+ intel_ddi_get_crtc_new_encoder(crtc_state);
int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 90b460cf2b57..75955fee6d24 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -83,7 +83,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
+ int x, int y, struct drm_framebuffer *old_fb,
+ struct drm_atomic_state *state);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -430,25 +431,41 @@ bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
* intel_pipe_has_type() but looking at encoder->new_crtc instead of
* encoder->crtc.
*/
-static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
+static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
+ int type)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
+ int i, num_connectors = 0;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
- for_each_intel_encoder(dev, encoder)
- if (encoder->new_crtc == crtc && encoder->type == type)
+ num_connectors++;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ if (encoder->type == type)
return true;
+ }
+
+ WARN_ON(num_connectors == 0);
return false;
}
-static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
- int refclk)
+static const intel_limit_t *
+intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -466,20 +483,21 @@ static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
return limit;
}
-static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
+static const intel_limit_t *
+intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
@@ -487,17 +505,18 @@ static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
+static const intel_limit_t *
+intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc, refclk);
+ limit = intel_ironlake_limit(crtc_state, refclk);
else if (IS_G4X(dev)) {
- limit = intel_g4x_limit(crtc);
+ limit = intel_g4x_limit(crtc_state);
} else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
@@ -506,14 +525,14 @@ static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
@@ -600,15 +619,17 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+i9xx_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -661,15 +682,17 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+pnv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -720,10 +743,12 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+g4x_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
@@ -732,7 +757,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int err_most = (target >> 8) + (target >> 9);
found = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
@@ -776,11 +801,53 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
return found;
}
+/*
+ * Check whether the calculated PLL configuration is better than the best
+ * configuration and error found so far. The calculated error is returned
+ * in @error_ppm.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const intel_clock_t *calculated_clock,
+ const intel_clock_t *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(dev)) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (WARN_ON_ONCE(!target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
+
static bool
-vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+vlv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
@@ -800,7 +867,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm, diff;
+ unsigned int ppm;
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
@@ -811,20 +878,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
&clock))
continue;
- diff = abs(clock.dot - target);
- ppm = div_u64(1000000ULL * diff, target);
-
- if (ppm < 100 && clock.p > best_clock->p) {
- bestppm = 0;
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
- if (bestppm >= 10 && ppm < bestppm - 10) {
- bestppm = ppm;
- *best_clock = clock;
- found = true;
- }
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
}
}
}
@@ -834,16 +896,20 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
}
static bool
-chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
+chv_find_best_dpll(const intel_limit_t *limit,
+ struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
intel_clock_t clock;
uint64_t m2;
int found = false;
memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
/*
* Based on hardware doc, the n always set to 1, and m1 always
@@ -857,6 +923,7 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
for (clock.p2 = limit->p2.p2_fast;
clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
clock.p = clock.p1 * clock.p2;
@@ -873,12 +940,13 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
- /* based on hardware requirement, prefer bigger p
- */
- if (clock.p > best_clock->p) {
- *best_clock = clock;
- found = true;
- }
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
}
}
@@ -2194,13 +2262,12 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-int
-intel_fb_align_height(struct drm_device *dev, int height,
- uint32_t pixel_format,
- uint64_t fb_format_modifier)
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier)
{
- int tile_height;
- uint32_t bits_per_pixel;
+ unsigned int tile_height;
+ uint32_t pixel_bytes;
switch (fb_format_modifier) {
case DRM_FORMAT_MOD_NONE:
@@ -2213,20 +2280,20 @@ intel_fb_align_height(struct drm_device *dev, int height,
tile_height = 32;
break;
case I915_FORMAT_MOD_Yf_TILED:
- bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
- switch (bits_per_pixel) {
+ pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
+ switch (pixel_bytes) {
default:
- case 8:
+ case 1:
tile_height = 64;
break;
- case 16:
- case 32:
+ case 2:
+ case 4:
tile_height = 32;
break;
- case 64:
+ case 8:
tile_height = 16;
break;
- case 128:
+ case 16:
WARN_ONCE(1,
"128-bit pixels are not supported for display!");
tile_height = 16;
@@ -2239,17 +2306,58 @@ intel_fb_align_height(struct drm_device *dev, int height,
break;
}
- return ALIGN(height, tile_height);
+ return tile_height;
+}
+
+unsigned int
+intel_fb_align_height(struct drm_device *dev, unsigned int height,
+ uint32_t pixel_format, uint64_t fb_format_modifier)
+{
+ return ALIGN(height, intel_tile_height(dev, pixel_format,
+ fb_format_modifier));
+}
+
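+/*
+ * Select the GGTT view for scanning out @fb: the rotated view for 90/270
+ * degree plane rotation (which requires Y or Yf tiling), the normal view
+ * otherwise.
+ */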
+static int
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
+{
+ struct intel_rotation_info *info = &view->rotation_info;
+
+ *view = i915_ggtt_view_normal;
+
+ if (!plane_state)
+ return 0;
+
+ if (!intel_rotation_90_or_270(plane_state->rotation))
+ return 0;
+
+ *view = i915_ggtt_view_rotated;
+
+ info->height = fb->height;
+ info->pixel_format = fb->pixel_format;
+ info->pitch = fb->pitches[0];
+ info->fb_modifier = fb->modifier[0];
+
+ if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
+ info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
+ DRM_DEBUG_KMS(
+ "Y or Yf tiling is needed for 90/270 rotation!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
u32 alignment;
int ret;
@@ -2286,6 +2394,10 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
return -EINVAL;
}
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ if (ret)
+ return ret;
+
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
@@ -2304,7 +2416,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
intel_runtime_pm_get(dev_priv);
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
+ &view);
if (ret)
goto err_interruptible;
@@ -2324,19 +2437,27 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
return 0;
err_unpin:
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return ret;
}
-static void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
+ int ret;
+
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ WARN_ONCE(ret, "Couldn't get view from plane state!");
+
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin_from_display_plane(obj);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
}
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2414,8 +2535,8 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
static bool
-intel_alloc_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
@@ -2449,17 +2570,14 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
mutex_lock(&dev->struct_mutex);
-
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
-
- obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG_KMS("plane fb obj %p\n", obj);
+ DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
return true;
out_unref_obj:
@@ -2483,26 +2601,23 @@ update_state_fb(struct drm_plane *plane)
}
static void
-intel_find_plane_obj(struct intel_crtc *intel_crtc,
- struct intel_initial_plane_config *plane_config)
+intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
+ struct drm_plane *primary = intel_crtc->base.primary;
+ struct drm_framebuffer *fb;
if (!plane_config->fb)
return;
- if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- primary->fb = &plane_config->fb->base;
- primary->state->crtc = &intel_crtc->base;
- update_state_fb(primary);
-
- return;
+ if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ goto valid_fb;
}
kfree(plane_config->fb);
@@ -2520,24 +2635,29 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (!i->active)
continue;
- obj = intel_fb_obj(c->primary->fb);
- if (obj == NULL)
+ fb = c->primary->fb;
+ if (!fb)
continue;
+ obj = intel_fb_obj(fb);
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
- struct drm_plane *primary = intel_crtc->base.primary;
-
- if (obj->tiling_mode != I915_TILING_NONE)
- dev_priv->preserve_bios_swizzle = true;
-
- drm_framebuffer_reference(c->primary->fb);
- primary->fb = c->primary->fb;
- primary->state->crtc = &intel_crtc->base;
- update_state_fb(intel_crtc->base.primary);
- obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
- break;
+ drm_framebuffer_reference(fb);
+ goto valid_fb;
}
}
+
+ return;
+
+valid_fb:
+ obj = intel_fb_obj(fb);
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dev_priv->preserve_bios_swizzle = true;
+
+ primary->fb = fb;
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
+ update_state_fb(primary);
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
}
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2805,6 +2925,17 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
}
}
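+/*
+ * Return the GGTT offset at which @obj is mapped for scanout, using the
+ * rotated view when the plane is rotated by 90/270 degrees.
+ */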
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj)
+{
+ const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
+
+ if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
+ view = &i915_ggtt_view_rotated;
+
+ return i915_gem_obj_ggtt_offset_view(obj, view);
+}
+
static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
@@ -2815,6 +2946,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj;
int pipe = intel_crtc->pipe;
u32 plane_ctl, stride_div;
+ unsigned long surf_addr;
if (!intel_crtc->primary_enabled) {
I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -2881,16 +3013,16 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
obj = intel_fb_obj(fb);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
+ surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
-
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
(intel_crtc->config->pipe_src_h - 1) << 16 |
(intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
- I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -4824,8 +4956,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
return mask;
}
-static void modeset_update_crtc_power_domains(struct drm_device *dev)
+static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
@@ -4847,7 +4980,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
}
if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
+ dev_priv->display.modeset_global_resources(state);
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
@@ -5095,8 +5228,9 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
-static void valleyview_modeset_global_resources(struct drm_device *dev)
+static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5687,7 +5821,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -5866,15 +6000,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+ int num_connectors)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
+ WARN_ON(!crtc_state->base.state);
+
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
- } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -5917,8 +6054,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
- reduced_clock && i915.powersave) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
@@ -6275,6 +6412,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc_state pipe_config = {
+ .base.crtc = &crtc->base,
.pixel_multiplier = 1,
.dpll = *dpll,
};
@@ -6319,12 +6457,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
- is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
- intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -6367,7 +6505,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6397,7 +6535,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -6408,10 +6546,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -6625,11 +6763,20 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
continue;
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -6648,7 +6795,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
if (!crtc_state->clock_set) {
- refclk = i9xx_get_refclk(crtc, num_connectors);
+ refclk = i9xx_get_refclk(crtc_state, num_connectors);
/*
* Returns a set of divisors for the desired target clock with
@@ -6656,8 +6803,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
* the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
* 2) / p1 / p2.
*/
- limit = intel_limit(crtc, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
@@ -6673,7 +6820,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
* we will disable the LVDS downclock feature.
*/
has_reduced_clock =
- dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, &clock,
&reduced_clock);
@@ -6772,7 +6919,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7303,18 +7450,26 @@ void intel_init_pch_refclk(struct drm_device *dev)
lpt_init_pch_refclk(dev);
}
-static int ironlake_get_refclk(struct drm_crtc *crtc)
+static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- int num_connectors = 0;
+ int num_connectors = 0, i;
bool is_lvds = false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -7501,22 +7656,21 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
- is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
+ is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
- refclk = ironlake_get_refclk(crtc);
+ refclk = ironlake_get_refclk(crtc_state);
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(intel_crtc, refclk);
- ret = dev_priv->display.find_dpll(limit, intel_crtc,
+ limit = intel_limit(crtc_state, refclk);
+ ret = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
@@ -7530,7 +7684,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
* downclock feature.
*/
*has_reduced_clock =
- dev_priv->display.find_dpll(limit, intel_crtc,
+ dev_priv->display.find_dpll(limit, crtc_state,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
@@ -7563,16 +7717,24 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ struct intel_encoder *encoder;
uint32_t dpll;
- int factor, num_connectors = 0;
+ int factor, num_connectors = 0, i;
bool is_lvds = false, is_sdvo = false;
- for_each_intel_encoder(dev, intel_encoder) {
- if (intel_encoder->new_crtc != to_intel_crtc(crtc))
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
continue;
- switch (intel_encoder->type) {
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -7701,7 +7863,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
}
}
- if (is_lvds && has_reduced_clock && i915.powersave)
+ if (is_lvds && has_reduced_clock)
crtc->lowfreq_avail = true;
else
crtc->lowfreq_avail = false;
@@ -7810,7 +7972,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset, stride_mult, tiling;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -7918,7 +8080,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
u32 val, base, offset;
int pipe = crtc->pipe;
int fourcc, pixel_format;
- int aligned_height;
+ unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
@@ -8802,6 +8964,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state = NULL;
+ struct drm_connector_state *connector_state;
int ret, i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -8883,6 +9047,21 @@ retry:
old->load_detect_temp = true;
old->release_fb = NULL;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return false;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &intel_encoder->base;
+
if (!mode)
mode = &load_detect_mode;
@@ -8905,7 +9084,7 @@ retry:
goto fail;
}
- if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+ if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -8924,6 +9103,11 @@ retry:
else
intel_crtc->new_config = NULL;
fail_unlock:
+ if (state) {
+ drm_atomic_state_free(state);
+ state = NULL;
+ }
+
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
@@ -8933,24 +9117,44 @@ fail_unlock:
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old)
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_atomic_state *state;
+ struct drm_connector_state *connector_state;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
if (old->load_detect_temp) {
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ goto fail;
+
+ state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state))
+ goto fail;
+
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
intel_crtc->new_config = NULL;
- intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+ connector_state->best_encoder = NULL;
+ connector_state->crtc = NULL;
+
+ intel_set_mode(crtc, NULL, 0, 0, NULL, state);
+
+ drm_atomic_state_free(state);
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
@@ -8963,6 +9167,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
+
+ return;
+fail:
+ DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
+ drm_atomic_state_free(state);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -9201,6 +9410,8 @@ void intel_mark_busy(struct drm_device *dev)
intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
+ if (INTEL_INFO(dev)->gen >= 6)
+ gen6_rps_busy(dev_priv);
dev_priv->mm.busy = true;
}
@@ -9214,9 +9425,6 @@ void intel_mark_idle(struct drm_device *dev)
dev_priv->mm.busy = false;
- if (!i915.powersave)
- goto out;
-
for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
@@ -9227,7 +9435,6 @@ void intel_mark_idle(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
-out:
intel_runtime_pm_put(dev_priv);
}
@@ -9269,7 +9476,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(intel_fb_obj(work->old_fb));
+ intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
drm_gem_object_unreference(&work->pending_flip_obj->base);
intel_fbc_update(dev);
@@ -9977,12 +10184,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ring = &dev_priv->ring[RCS];
}
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
+ ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
+ crtc->primary->state, ring);
if (ret)
goto cleanup_pending;
- work->gtt_offset =
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
+ work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
+ + intel_crtc->dspaddr_offset;
if (use_mmio_flip(ring, obj)) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@@ -10017,7 +10225,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_unpin:
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
@@ -10087,6 +10295,27 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
}
}
+/* Transitional helper to copy current connector/encoder state to
+ * connector->state. This is needed so that code that is partially
+ * converted to atomic does the right thing.
+ */
+static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->base.encoder) {
+ connector->base.state->best_encoder =
+ connector->base.encoder;
+ connector->base.state->crtc =
+ connector->base.encoder->crtc;
+ } else {
+ connector->base.state->best_encoder = NULL;
+ connector->base.state->crtc = NULL;
+ }
+ }
+}
+
/**
* intel_modeset_commit_output_state
*
@@ -10110,6 +10339,8 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
crtc->base.state->enable = crtc->new_enabled;
crtc->base.enabled = crtc->new_enabled;
}
+
+ intel_modeset_update_connector_atomic_state(dev);
}
static void
@@ -10144,8 +10375,9 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_atomic_state *state;
struct intel_connector *connector;
- int bpp;
+ int bpp, i;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
@@ -10185,10 +10417,15 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
pipe_config->pipe_bpp = bpp;
+ state = pipe_config->base.state;
+
/* Clamp display bpp to EDID value */
- for_each_intel_connector(dev, connector) {
- if (!connector->new_encoder ||
- connector->new_encoder->new_crtc != crtc)
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ connector = to_intel_connector(state->connectors[i]);
+ if (state->connector_states[i]->crtc != &crtc->base)
continue;
connected_sink_compute_bpp(connector, pipe_config);
@@ -10344,15 +10581,30 @@ static bool check_digital_port_conflicts(struct drm_device *dev)
return true;
}
+static void
+clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+{
+ struct drm_crtc_state tmp_state;
+
+ /* Clear only the intel specific part of the crtc state */
+ tmp_state = crtc_state->base;
+ memset(crtc_state, 0, sizeof *crtc_state);
+ crtc_state->base = tmp_state;
+}
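The save/zero/restore dance works because struct intel_crtc_state embeds the
core-owned struct drm_crtc_state as its 'base' member: saving base, zeroing
the whole struct and copying base back resets only the driver-private fields.
The same pattern on a toy struct (hypothetical names, for illustration only):

        #include <string.h>

        struct parent_state { int core_field; };  /* stands in for drm_crtc_state */

        struct child_state {
                struct parent_state base;       /* core-owned, must survive */
                int private_a;                  /* driver-owned, reset to 0 */
        };

        static void clear_child_state(struct child_state *s)
        {
                struct parent_state tmp = s->base;      /* save the core part */

                memset(s, 0, sizeof(*s));               /* wipe everything */
                s->base = tmp;                          /* restore the core part */
        }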
+
static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode,
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_crtc_state *pipe_config;
int plane_bpp, ret = -EINVAL;
+ int i;
bool retry = true;
if (!check_encoder_cloning(to_intel_crtc(crtc))) {
@@ -10365,9 +10617,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return ERR_PTR(-EINVAL);
}
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
- if (!pipe_config)
- return ERR_PTR(-ENOMEM);
+ pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ clear_intel_crtc_state(pipe_config);
pipe_config->base.crtc = crtc;
drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
@@ -10424,11 +10678,17 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- for_each_intel_encoder(dev, encoder) {
+ for (i = 0; i < state->num_connector; i++) {
+ connector = to_intel_connector(state->connectors[i]);
+ if (!connector)
+ continue;
- if (&encoder->new_crtc->base != crtc)
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc)
continue;
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
@@ -10464,7 +10724,6 @@ encoder_retry:
return pipe_config;
fail:
- kfree(pipe_config);
return ERR_PTR(ret);
}
@@ -11143,17 +11402,30 @@ static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
+ struct drm_atomic_state *state,
unsigned *modeset_pipes,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
+ struct drm_device *dev = crtc->dev;
struct intel_crtc_state *pipe_config = NULL;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ERR_PTR(ret);
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
- if ((*modeset_pipes) == 0)
- goto out;
+ for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
+ pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ pipe_config->base.enable = false;
+ }
/*
* Note this needs changes when we start tracking multiple modes
@@ -11161,15 +11433,21 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
- if (IS_ERR(pipe_config)) {
- goto out;
+ for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
+ /* FIXME: For now we still expect modeset_pipes to have at most
+ * one bit set. */
+ if (WARN_ON(&intel_crtc->base != crtc))
+ continue;
+
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
}
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
-out:
- return pipe_config;
+ return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
}
static int __intel_set_mode_setup_plls(struct drm_device *dev,
@@ -11213,6 +11491,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
+ struct intel_crtc_state *crtc_state_copy = NULL;
struct intel_crtc *intel_crtc;
int ret = 0;
@@ -11220,6 +11499,12 @@ static int __intel_set_mode(struct drm_crtc *crtc,
if (!saved_mode)
return -ENOMEM;
+ crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
+ if (!crtc_state_copy) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
*saved_mode = crtc->mode;
if (modeset_pipes)
@@ -11277,7 +11562,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- modeset_update_crtc_power_domains(dev);
+ modeset_update_crtc_power_domains(pipe_config->base.state);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
@@ -11306,6 +11591,22 @@ done:
if (ret && crtc->state->enable)
crtc->mode = *saved_mode;
+ if (ret == 0 && pipe_config) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* The pipe_config will be freed with the atomic state, so
+ * make a copy. */
+ memcpy(crtc_state_copy, intel_crtc->config,
+ sizeof *crtc_state_copy);
+ intel_crtc->config = crtc_state_copy;
+ intel_crtc->base.state = &crtc_state_copy->base;
+
+ if (modeset_pipes)
+ intel_crtc->new_config = intel_crtc->config;
+ } else {
+ kfree(crtc_state_copy);
+ }
+
kfree(saved_mode);
return ret;
}
@@ -11331,27 +11632,81 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc,
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+ int x, int y, struct drm_framebuffer *fb,
+ struct drm_atomic_state *state)
{
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
+ int ret = 0;
- pipe_config = intel_modeset_compute_config(crtc, mode, fb,
+ pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
- if (IS_ERR(pipe_config))
- return PTR_ERR(pipe_config);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto out;
+ }
+
+ ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
+ modeset_pipes, prepare_pipes,
+ disable_pipes);
- return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
+out:
+ return ret;
}
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
+ crtc->base.id);
+ return;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ /* The force restore path in the HW readout code relies on the staged
+ * config still holding the user-requested config while the actual
+ * state has been overwritten by the configuration read from HW. We
+ * need to copy the staged config to the atomic state, otherwise the
+ * mode set will just reapply the state the HW is already in. */
+ for_each_intel_encoder(dev, encoder) {
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+
+ for_each_intel_connector(dev, connector) {
+ if (connector->new_encoder != encoder)
+ continue;
+
+ connector_state = drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state)) {
+ DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
+ connector->base.base.id,
+ connector->base.name,
+ PTR_ERR(connector_state));
+ continue;
+ }
+
+ connector_state->crtc = crtc;
+ connector_state->best_encoder = &encoder->base;
+ }
+ }
+
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
+ state);
+
+ drm_atomic_state_free(state);
}
#undef for_each_intel_crtc_masked
@@ -11520,9 +11875,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
- struct intel_set_config *config)
+ struct intel_set_config *config,
+ struct drm_atomic_state *state)
{
struct intel_connector *connector;
+ struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
int ro;
@@ -11586,6 +11943,14 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ connector_state->crtc = new_crtc;
+ connector_state->best_encoder = &connector->new_encoder->base;
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
connector->base.name,
@@ -11618,9 +11983,17 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
/* Now we've also updated encoder->new_crtc for all encoders. */
for_each_intel_connector(dev, connector) {
- if (connector->new_encoder)
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ if (connector->new_encoder) {
if (connector->new_encoder != connector->encoder)
connector->encoder = connector->new_encoder;
+ } else {
+ connector_state->crtc = NULL;
+ }
}
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -11676,6 +12049,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_mode_set save_set;
+ struct drm_atomic_state *state = NULL;
struct intel_set_config *config;
struct intel_crtc_state *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
@@ -11720,12 +12094,20 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
* such cases. */
intel_set_config_compute_mode_changes(set, config);
- ret = intel_modeset_stage_output_state(dev, set, config);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_config;
+ }
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ ret = intel_modeset_stage_output_state(dev, set, config, state);
if (ret)
goto fail;
pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
- set->fb,
+ set->fb, state,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
@@ -11745,10 +12127,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
*/
}
- /* set_mode will free it in the mode_changed case */
- if (!config->mode_changed)
- kfree(pipe_config);
-
intel_update_pipe_size(to_intel_crtc(set->crtc));
if (config->mode_changed) {
@@ -11794,6 +12172,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
fail:
intel_set_config_restore_state(dev, config);
+ drm_atomic_state_clear(state);
+
/*
* HACK: if the pipe was on, but we didn't have a framebuffer,
* force the pipe off to avoid oopsing in the modeset code
@@ -11806,11 +12186,15 @@ fail:
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb))
+ save_set.x, save_set.y, save_set.fb,
+ state))
DRM_ERROR("failed to restore config after modeset failure\n");
}
out_config:
+ if (state)
+ drm_atomic_state_free(state);
+
intel_set_config_free(config);
return ret;
}
@@ -11925,6 +12309,28 @@ static void intel_shared_dpll_init(struct drm_device *dev)
}
/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true if the watermarks need to be recalculated, false otherwise.
+ */
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* Update watermarks on tiling changes. */
+ if (!plane->state->fb || !state->fb ||
+ plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+ plane->state->rotation != state->rotation)
+ return true;
+
+ return false;
+}
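The helper is consumed by the plane check hooks converted later in this
patch; the usage pattern is simply (mirroring the primary-plane hunk below):

        if (intel_wm_need_update(plane, &state->base))
                intel_crtc->atomic.update_wm = true;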
+
+/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
* @fb: framebuffer to prepare for presentation
@@ -11973,7 +12379,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
}
if (ret == 0)
@@ -12005,7 +12411,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(obj);
+ intel_unpin_fb_obj(fb, old_state);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -12070,10 +12476,7 @@ intel_check_primary_plane(struct drm_plane *plane,
intel_crtc->atomic.update_fbc = true;
- /* Update watermarks on tiling changes. */
- if (!plane->state->fb || !state->base.fb ||
- plane->state->fb->modifier[0] !=
- state->base.fb->modifier[0])
+ if (intel_wm_need_update(plane, &state->base))
intel_crtc->atomic.update_wm = true;
}
@@ -12089,8 +12492,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
crtc = crtc ? crtc : plane->crtc;
@@ -12100,8 +12501,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
- intel_plane->obj = obj;
-
if (intel_crtc->active) {
if (state->visible) {
/* FIXME: kill this fastboot hack */
@@ -12365,7 +12764,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc = state->base.crtc;
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
@@ -12376,8 +12774,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc->cursor_x = state->base.crtc_x;
crtc->cursor_y = state->base.crtc_y;
- intel_plane->obj = obj;
-
if (intel_crtc->cursor_bo == obj)
goto update;
@@ -12758,19 +13154,21 @@ static void intel_setup_outputs(struct drm_device *dev)
* testing/debug of the plane operations (and only when a specific
* kernel module option is given), that shouldn't really matter.
*
+ * We are also relying on these states to convert the legacy mode set
+ * to use a drm_atomic_state struct. The states are kept consistent
+ * with the actual hardware state, so it is safe to rely on them
+ * instead of the staged config.
+ *
* Once atomic support for crtc's + connectors lands, this loop should
* be removed since we'll be setting up real connector state, which
* will contain Intel-specific properties.
*/
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (!WARN_ON(connector->state)) {
- connector->state =
- kzalloc(sizeof(*connector->state),
- GFP_KERNEL);
- }
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ if (!WARN_ON(connector->state)) {
+ connector->state = kzalloc(sizeof(*connector->state),
+ GFP_KERNEL);
}
}
@@ -12849,7 +13247,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
- int aligned_height;
+ unsigned int aligned_height;
int ret;
u32 pitch_limit, stride_alignment;
@@ -12885,8 +13283,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
case I915_FORMAT_MOD_X_TILED:
break;
default:
- DRM_ERROR("Unsupported fb modifier 0x%llx!\n",
- mode_cmd->modifier[0]);
+ DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
+ mode_cmd->modifier[0]);
return -EINVAL;
}
@@ -13453,7 +13851,7 @@ void intel_modeset_init(struct drm_device *dev)
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
- intel_find_plane_obj(crtc, &crtc->plane_config);
+ intel_find_initial_plane_obj(crtc, &crtc->plane_config);
}
}
}
@@ -13479,7 +13877,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
return;
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
- intel_release_load_detect_pipe(crt, &load_detect_temp);
+ intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
static bool
@@ -13823,6 +14221,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
"[setup_hw_state]");
}
+ intel_modeset_update_connector_atomic_state(dev);
+
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -13851,8 +14251,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
- crtc->primary->fb);
+ intel_crtc_restore_mode(crtc);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13898,6 +14297,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
if (intel_pin_and_fence_fb_obj(c->primary,
c->primary->fb,
+ c->primary->state,
NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ca60060710d2..b70e635ccaf4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -85,10 +85,12 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/* Skylake supports the following rates */
-static const uint32_t gen9_rates[] = { 162000, 216000, 270000, 324000,
- 432000, 540000 };
-
-static const uint32_t default_rates[] = { 162000, 270000, 540000 };
+static const int gen9_rates[] = { 162000, 216000, 270000,
+ 324000, 432000, 540000 };
+static const int chv_rates[] = { 162000, 202500, 210000, 216000,
+ 243000, 270000, 324000, 405000,
+ 420000, 432000, 540000 };
+static const int default_rates[] = { 162000, 270000, 540000 };
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -123,26 +125,15 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
-int
-intel_dp_max_link_bw(struct intel_dp *intel_dp)
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
- struct drm_device *dev = intel_dp->attached_connector->base.dev;
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
- break;
- case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
- /* WaDisableHBR2:skl */
- max_link_bw = DP_LINK_BW_2_7;
- else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
- INTEL_INFO(dev)->gen >= 8) &&
- intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
- max_link_bw = DP_LINK_BW_5_4;
- else
- max_link_bw = DP_LINK_BW_2_7;
+ case DP_LINK_BW_5_4:
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -218,7 +209,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
target_clock = fixed_mode->clock;
}
- max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
@@ -951,8 +942,9 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
size_t txsize, rxsize;
int ret;
- txbuf[0] = msg->request << 4;
- txbuf[1] = msg->address >> 8;
+ txbuf[0] = (msg->request << 4) |
+ ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
txbuf[2] = msg->address & 0xff;
txbuf[3] = msg->size - 1;
@@ -960,7 +952,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 1;
+ rxsize = 2; /* 0 or 1 data bytes */
if (WARN_ON(txsize > 20))
return -E2BIG;
@@ -971,8 +963,13 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
- /* Return payload size. */
- ret = msg->size;
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
}
break;
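The native/i2c AUX header is four bytes, and the fix above routes bits 19:16
of the 20-bit AUX address into the low nibble of byte 0, next to the 4-bit
request. The packing in isolation (stand-alone sketch of the same layout):

        #include <stdint.h>

        /* Pack a DP AUX header: 4-bit request, 20-bit address, size - 1. */
        static void aux_pack_header(uint8_t txbuf[4], uint8_t request,
                                    uint32_t address, uint8_t size)
        {
                txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
                txbuf[1] = (address >> 8) & 0xff;
                txbuf[2] = address & 0xff;
                txbuf[3] = size - 1;
        }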
@@ -1142,49 +1139,39 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
}
static int
-intel_read_sink_rates(struct intel_dp *intel_dp, uint32_t *sink_rates)
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- int i = 0;
- uint16_t val;
-
- if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
- /*
- * Receiver supports only main-link rate selection by
- * link rate table method, so read link rates from
- * supported_link_rates
- */
- for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) {
- val = le16_to_cpu(intel_dp->supported_rates[i]);
- if (val == 0)
- break;
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
- sink_rates[i] = val * 200;
- }
+ *sink_rates = default_rates;
- if (i <= 0)
- DRM_ERROR("No rates in SUPPORTED_LINK_RATES");
- }
- return i;
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
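The fallback return value maps the DPCD bandwidth code onto a prefix length
of default_rates[]: the legal codes are 0x06 (1.62 GHz), 0x0a (2.7 GHz) and
0x14 (5.4 GHz), so (code >> 3) + 1 yields 1, 2 or 3 entries. Spelled out:

        /* DP_LINK_BW_1_62 == 0x06: (0x06 >> 3) + 1 == 1 -> { 162000 }
         * DP_LINK_BW_2_7  == 0x0a: (0x0a >> 3) + 1 == 2 -> { 162000, 270000 }
         * DP_LINK_BW_5_4  == 0x14: (0x14 >> 3) + 1 == 3 -> { 162000, 270000, 540000 }
         */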
static int
-intel_read_source_rates(struct intel_dp *intel_dp, uint32_t *source_rates)
+intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- int i;
- int max_default_rate;
-
- if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
- for (i = 0; i < ARRAY_SIZE(gen9_rates); ++i)
- source_rates[i] = gen9_rates[i];
- } else {
- /* Index of the max_link_bw supported + 1 */
- max_default_rate = (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
- for (i = 0; i < max_default_rate; ++i)
- source_rates[i] = default_rates[i];
+ if (INTEL_INFO(dev)->gen >= 9) {
+ *source_rates = gen9_rates;
+ return ARRAY_SIZE(gen9_rates);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *source_rates = chv_rates;
+ return ARRAY_SIZE(chv_rates);
}
- return i;
+
+ *source_rates = default_rates;
+
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ /* WaDisableHBR2:skl */
+ return (DP_LINK_BW_2_7 >> 3) + 1;
+ else if (INTEL_INFO(dev)->gen >= 8 ||
+ (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ return (DP_LINK_BW_5_4 >> 3) + 1;
+ else
+ return (DP_LINK_BW_2_7 >> 3) + 1;
}
static void
@@ -1220,22 +1207,17 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
-static int intel_supported_rates(const uint32_t *source_rates, int source_len,
-const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates)
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
{
int i = 0, j = 0, k = 0;
- /* For panels with edp version less than 1.4 */
- if (sink_len == 0) {
- for (i = 0; i < source_len; ++i)
- supported_rates[i] = source_rates[i];
- return source_len;
- }
-
- /* For edp1.4 panels, find the common rates between source and sink */
while (i < source_len && j < sink_len) {
if (source_rates[i] == sink_rates[j]) {
- supported_rates[k] = source_rates[i];
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
++k;
++i;
++j;
@@ -1248,7 +1230,62 @@ const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates)
return k;
}
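intersect_rates() is a standard two-pointer merge over two ascending arrays.
It can be exercised outside the driver; a small stand-alone harness
(hypothetical test code, same walk as the function above):

        #include <stdio.h>

        #define DP_MAX_SUPPORTED_RATES 8

        static int intersect(const int *a, int alen,
                             const int *b, int blen, int *out)
        {
                int i = 0, j = 0, k = 0;

                while (i < alen && j < blen) {
                        if (a[i] == b[j]) {
                                if (k >= DP_MAX_SUPPORTED_RATES)
                                        return k;
                                out[k++] = a[i];
                                i++;
                                j++;
                        } else if (a[i] < b[j]) {
                                i++;    /* source rate unsupported by sink */
                        } else {
                                j++;    /* sink rate unsupported by source */
                        }
                }
                return k;
        }

        int main(void)
        {
                const int src[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
                const int snk[] = { 162000, 270000, 540000 };
                int common[DP_MAX_SUPPORTED_RATES], n, i;

                n = intersect(src, 6, snk, 3, common);
                for (i = 0; i < n; i++)
                        printf("%d\n", common[i]);      /* 162000 270000 540000 */
                return 0;
        }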
-static int rate_to_index(uint32_t find, const uint32_t *rates)
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(dev, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
+
+static void snprintf_int_array(char *str, size_t len,
+ const int *array, int nelem)
+{
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < nelem; i++) {
+ int r = snprintf(str, len, "%d,", array[i]);
+ if (r >= len)
+ return;
+ str += r;
+ len -= r;
+ }
+}
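snprintf_int_array() appends "%d," per element and bails out as soon as
snprintf() reports the buffer is full, so an undersized buffer truncates the
list rather than overflowing it. A usage sketch (assumed output shown):

        char str[32];
        const int rates[] = { 162000, 270000, 540000 };

        snprintf_int_array(str, sizeof(str), rates, 3);
        /* str == "162000,270000,540000," (note the trailing comma) */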
+
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len, common_len;
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ char str[128]; /* FIXME: too big for stack? */
+
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ source_len = intel_dp_source_rates(dev, &source_rates);
+ snprintf_int_array(str, sizeof(str), source_rates, source_len);
+ DRM_DEBUG_KMS("source rates: %s\n", str);
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
+ DRM_DEBUG_KMS("sink rates: %s\n", str);
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+ snprintf_int_array(str, sizeof(str), common_rates, common_len);
+ DRM_DEBUG_KMS("common rates: %s\n", str);
+}
+
+static int rate_to_index(int find, const int *rates)
{
int i = 0;
@@ -1259,6 +1296,24 @@ static int rate_to_index(uint32_t find, const uint32_t *rates)
return i;
}
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+ int rates[DP_MAX_SUPPORTED_RATES] = {};
+ int len;
+
+ len = intel_dp_common_rates(intel_dp, rates);
+ if (WARN_ON(len <= 0))
+ return 162000;
+
+ return rates[rate_to_index(0, rates) - 1];
+}
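The rates[] array is zero-initialized and filled in ascending order, so
looking up the value 0 with rate_to_index() returns the index of the first
empty slot, i.e. the number of valid entries, and subtracting one indexes
the highest common rate. Worked through (assuming rate_to_index() walks the
array until it finds its argument):

        int rates[DP_MAX_SUPPORTED_RATES] = {};

        /* after intel_dp_common_rates():
         * rates == { 162000, 270000, 540000, 0, 0, 0, 0, 0 }
         */
        rate_to_index(0, rates);                /* == 3, first zero entry */
        rates[rate_to_index(0, rates) - 1];     /* == rates[2] == 540000 */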
+
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+ return rate_to_index(rate, intel_dp->sink_rates);
+}
+
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1268,7 +1323,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *intel_crtc = encoder->new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int min_lane_count = 1;
@@ -1278,22 +1333,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
int max_clock;
int bpp, mode_rate;
int link_avail, link_clock;
- uint32_t sink_rates[8];
- uint32_t supported_rates[8] = {0};
- uint32_t source_rates[8];
- int source_len, sink_len, supported_len;
-
- sink_len = intel_read_sink_rates(intel_dp, sink_rates);
+ int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+ int common_len;
- source_len = intel_read_source_rates(intel_dp, source_rates);
-
- supported_len = intel_supported_rates(source_rates, source_len,
- sink_rates, sink_len, supported_rates);
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
/* No common link rates between source and sink */
- WARN_ON(supported_len <= 0);
+ WARN_ON(common_len <= 0);
- max_clock = supported_len - 1;
+ max_clock = common_len - 1;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -1318,7 +1366,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %d pixel clock %iKHz\n",
- max_lane_count, supported_rates[max_clock],
+ max_lane_count, common_rates[max_clock],
adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
@@ -1351,7 +1399,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
lane_count <= max_lane_count;
lane_count <<= 1) {
- link_clock = supported_rates[clock];
+ link_clock = common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1382,17 +1430,18 @@ found:
intel_dp->lane_count = lane_count;
- intel_dp->link_bw =
- drm_dp_link_rate_to_bw_code(supported_rates[clock]);
-
- if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
- intel_dp->rate_select =
- rate_to_index(supported_rates[clock], sink_rates);
+ if (intel_dp->num_sink_rates) {
intel_dp->link_bw = 0;
+ intel_dp->rate_select =
+ intel_dp_rate_select(intel_dp, common_rates[clock]);
+ } else {
+ intel_dp->link_bw =
+ drm_dp_link_rate_to_bw_code(common_rates[clock]);
+ intel_dp->rate_select = 0;
}
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = supported_rates[clock];
+ pipe_config->port_clock = common_rates[clock];
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
@@ -1415,7 +1464,7 @@ found:
}
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
+ skl_edp_set_pll_config(pipe_config, common_rates[clock]);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
@@ -3502,7 +3551,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
- if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
+ if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&intel_dp->rate_select, 1);
@@ -3754,11 +3803,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
(intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
(intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
(rev >= 0x03)) { /* eDp v1.4 or higher */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+ int i;
+
intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_SUPPORTED_LINK_RATES,
- intel_dp->supported_rates,
- sizeof(intel_dp->supported_rates));
+ sink_rates,
+ sizeof(sink_rates));
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ int val = le16_to_cpu(sink_rates[i]);
+
+ if (val == 0)
+ break;
+
+ intel_dp->sink_rates[i] = val * 200;
+ }
+ intel_dp->num_sink_rates = i;
}
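The DP_SUPPORTED_LINK_RATES table holds little-endian 16-bit entries in
units of 200 kHz, in ascending order and zero-terminated. For an assumed
eDP 1.4 panel the parse above would go:

        /* raw DPCD bytes: 2a 03 1a 04 46 05 00 00 ...
         * le16 values:    810, 1050, 1350, 0
         * sink_rates[]:   810 * 200  == 162000 kHz
         *                 1050 * 200 == 210000 kHz
         *                 1350 * 200 == 270000 kHz
         * num_sink_rates == 3
         */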
+
+ intel_dp_print_rates(intel_dp);
+
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -4548,6 +4613,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4934,7 +5000,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
- intel_crtc = encoder->new_crtc;
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
@@ -5272,8 +5338,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
-
if (!is_edp(intel_dp))
return true;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index be124928ca14..5329c855acce 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -36,11 +36,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = encoder->base.dev;
- int bpp;
- int lane_count, slots;
+ struct drm_atomic_state *state;
+ int bpp, i;
+ int lane_count, slots, rate;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_connector *found = NULL, *intel_connector;
+ struct intel_connector *found = NULL;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
@@ -52,15 +52,30 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
* seem to suggest we should do otherwise.
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
- intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+
+ rate = intel_dp_max_link_rate(intel_dp);
+
+ if (intel_dp->num_sink_rates) {
+ intel_dp->link_bw = 0;
+ intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
+ } else {
+ intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
+ intel_dp->rate_select = 0;
+ }
+
intel_dp->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
- pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ pipe_config->port_clock = rate;
- for_each_intel_connector(dev, intel_connector) {
- if (intel_connector->new_encoder == encoder) {
- found = intel_connector;
+ state = pipe_config->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
+ continue;
+
+ if (state->connector_states[i]->best_encoder == &encoder->base) {
+ found = to_intel_connector(state->connectors[i]);
break;
}
}
@@ -317,6 +332,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c77128c67cf8..6036e3b73b7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,6 +35,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_atomic.h>
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -56,8 +57,8 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
+ if ((W) && drm_can_sleep()) { \
+ usleep_range((W)*1000, (W)*2000); \
} else { \
cpu_relax(); \
} \
@@ -501,16 +502,19 @@ struct intel_plane_wm_parameters {
bool enabled;
bool scaled;
u64 tiling;
+ unsigned int rotation;
};
struct intel_plane {
struct drm_plane base;
int plane;
enum pipe pipe;
- struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
+ /* FIXME convert to properties */
+ struct drm_intel_sprite_colorkey ckey;
+
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
* as the other pieces of the struct may not reflect the values we want
@@ -527,7 +531,6 @@ struct intel_plane {
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -538,10 +541,6 @@ struct intel_plane {
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
- int (*update_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
- void (*get_colorkey)(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key);
};
struct intel_watermark_params {
@@ -564,6 +563,7 @@ struct cxsr_latency {
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
@@ -627,7 +627,9 @@ struct intel_dp {
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
- __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
+ /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
+ uint8_t num_sink_rates;
+ int sink_rates[DP_MAX_SUPPORTED_RATES];
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -902,9 +904,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
-int intel_fb_align_height(struct drm_device *dev, int height,
- uint32_t pixel_format,
- uint64_t fb_format_modifier);
+unsigned int intel_fb_align_height(struct drm_device *dev,
+ unsigned int height,
+ uint32_t pixel_format,
+ uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
@@ -956,9 +959,11 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old);
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
@@ -983,6 +988,19 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val);
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+ uint64_t fb_format_modifier);
+
+static inline bool
+intel_rotation_90_or_270(unsigned int rotation)
+{
+ return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+}
+
+bool intel_wm_need_update(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1037,6 +1055,9 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+ struct drm_i915_gem_object *obj);
+
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1060,7 +1081,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
-int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
@@ -1239,6 +1261,8 @@ void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1258,8 +1282,6 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1282,6 +1304,17 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+static inline struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+
+ return to_intel_crtc_state(crtc_state);
+}
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index c8c8b24e300c..572251e9810b 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -975,6 +975,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
void intel_dsi_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index d8579510beb0..4ccd6c3f133d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,6 +393,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 9fcf446e95f5..4165ce0644f7 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -521,7 +521,7 @@ void intel_fbc_update(struct drm_device *dev)
goto out_disable;
}
- if (!i915.enable_fbc || !i915.powersave) {
+ if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 757c0d216f80..4e7e7da2e03b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -151,7 +151,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
}
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
+ ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 0a1bac8ac72b..a20cffb78c0f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -110,9 +110,6 @@ static void intel_mark_fb_busy(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- if (!i915.powersave)
- return;
-
for_each_pipe(dev_priv, pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 995c5b261f4f..cacbafdad3ab 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -951,19 +951,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
+static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_atomic_state *state;
struct intel_encoder *encoder;
+ struct drm_connector_state *connector_state;
int count = 0, count_hdmi = 0;
+ int i;
if (HAS_GMCH_DISPLAY(dev))
return false;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ state = crtc_state->base.state;
+
+ for (i = 0; i < state->num_connector; i++) {
+ if (!state->connectors[i])
continue;
+ connector_state = state->connector_states[i];
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
count++;
}
@@ -1020,7 +1031,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
- hdmi_12bpc_possible(encoder->new_crtc)) {
+ hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1618,6 +1629,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 24e8730dc189..06d2da336f7c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -286,7 +286,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
/* Should never happen!! */
@@ -535,6 +535,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 823d1d97a000..dd92122ed95c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -720,7 +720,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+ &i915_ggtt_view_normal);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 288c9d24098e..fa4ccb346389 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2840,6 +2840,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
}
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
+ p->plane[0].rotation = crtc->primary->state->rotation;
fb = crtc->cursor->state->fb;
if (fb) {
@@ -2897,7 +2898,21 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
- uint32_t y_tile_minimum = plane_blocks_per_line * 4;
+ uint32_t min_scanlines = 4;
+ uint32_t y_tile_minimum;
+ if (intel_rotation_90_or_270(p_params->rotation)) {
+ switch (p_params->bytes_per_pixel) {
+ case 1:
+ min_scanlines = 16;
+ break;
+ case 2:
+ min_scanlines = 8;
+ break;
+ case 8:
+ WARN(1, "Unsupported pixel depth for rotation");
+ }
+ }
+ y_tile_minimum = plane_blocks_per_line * min_scanlines;
selected_result = max(method2, y_tile_minimum);
} else {
if ((ddb_allocation / plane_blocks_per_line) >= 1)
@@ -3357,6 +3372,7 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
*/
if (fb)
intel_plane->wm.tiling = fb->modifier[0];
+ intel_plane->wm.rotation = plane->state->rotation;
skl_update_wm(crtc);
}
@@ -3855,9 +3871,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
/* Max/min bins are special */
- if (val == dev_priv->rps.min_freq_softlimit)
+ if (val <= dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
- if (val == dev_priv->rps.max_freq_softlimit)
+ if (val >= dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
@@ -3922,11 +3938,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
u32 mask = 0;
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
@@ -3940,8 +3955,8 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
/* min/max delay may still have been modified so be sure to
* write the limits value.
@@ -3979,8 +3994,8 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq_softlimit);
- WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+ WARN_ON(val > dev_priv->rps.max_freq);
+ WARN_ON(val < dev_priv->rps.min_freq);
if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
"Odd GPU freq value\n"))
@@ -4007,10 +4022,11 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ u32 val = dev_priv->rps.idle_freq;
/* CHV and latest VLV don't need to force the gfx clock */
if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ valleyview_set_rps(dev_priv->dev, val);
return;
}
@@ -4018,7 +4034,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* When we are idle. Drop to min voltage state.
*/
- if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
+ if (dev_priv->rps.cur_freq <= val)
return;
/* Mask turbo interrupt so that they will not come in between */
@@ -4027,10 +4043,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, true);
- dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
+ dev_priv->rps.cur_freq = val;
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
- dev_priv->rps.min_freq_softlimit);
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 100))
@@ -4038,8 +4053,19 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
vlv_force_gfx_clock(dev_priv, false);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+}
+
+void gen6_rps_busy(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+ if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ gen6_rps_reset_ei(dev_priv);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -4051,17 +4077,23 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
+ u32 val;
+
mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
- intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+ val = dev_priv->rps.max_freq_softlimit;
+ if (dev_priv->rps.enabled &&
+ dev_priv->mm.busy &&
+ dev_priv->rps.cur_freq < val) {
+ intel_set_rps(dev_priv->dev, val);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4209,6 +4241,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4375,7 +4409,7 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4469,7 +4503,7 @@ static void gen6_enable_rps(struct drm_device *dev)
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -4834,6 +4868,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -4909,6 +4945,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -5686,6 +5724,13 @@ static void intel_gen6_powersave_work(struct work_struct *work)
gen6_enable_rps(dev);
__gen6_update_ring_freq(dev);
}
+
+ WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+
+ WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
+ WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+
dev_priv->rps.enabled = true;
gen6_enable_rps_interrupts(dev);
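Taken together, the intel_pm.c hunks split RPS frequency policy into explicit busy/idle/boost transitions around the new rps.idle_freq. A minimal sketch of the intended transitions, assuming the i915 declarations are in scope and the GEM busy/idle paths as callers (illustrative only, not part of the patch):

static void example_rps_transitions(struct drm_i915_private *dev_priv)
{
	/* Work arrives: gen6_rps_busy() re-arms the RPS up/down interrupts
	 * around rps.cur_freq (and resets the EI counters where used). */
	gen6_rps_busy(dev_priv);

	/* A waiter is stalled on the GPU: boost straight to the max
	 * softlimit, but only while busy and below that limit. */
	gen6_rps_boost(dev_priv);

	/* All requests retired: park at rps.idle_freq (== rps.min_freq)
	 * and mask GEN6_PMINTRMSK so no RPS interrupts fire while idle. */
	gen6_rps_idle(dev_priv);
}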
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b9f40c2e0af7..a8f9348259ae 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -532,8 +532,6 @@ static void intel_psr_exit(struct drm_device *dev)
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
-
- dev_priv->psr.active = false;
} else {
val = I915_READ(VLV_PSRCTL(pipe));
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9e554c2cfbb4..f5b7e1e7c5e0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2194,6 +2194,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
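The same one-line wiring recurs for the TV connector further down. For reference, a minimal connector-funcs table using only the stock atomic helpers might look like this (a sketch, not code from this patch):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static const struct drm_connector_funcs example_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	/* Atomic state life-cycle handled entirely by the helpers: */
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};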
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a82873631851..a4c0a04b5044 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -179,7 +179,7 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -187,23 +187,16 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+ unsigned long surf_addr;
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
- plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
- plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
- plane_ctl &= ~PLANE_CTL_TILED_MASK;
- plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
- plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
-
- /* Trickle feed has to be enabled */
- plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
@@ -264,9 +257,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
- plane_ctl |= PLANE_CTL_ENABLE;
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
-
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
@@ -280,12 +270,25 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
crtc_w--;
crtc_h--;
+ if (key->flags) {
+ I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ surf_addr = intel_plane_obj_offset(intel_plane, obj);
+
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
@@ -298,73 +301,15 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- I915_WRITE(PLANE_CTL(pipe, plane),
- I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+ I915_WRITE(PLANE_CTL(pipe, plane), 0);
/* Activate double buffered register update */
- I915_WRITE(PLANE_CTL(pipe, plane), 0);
- POSTING_READ(PLANE_CTL(pipe, plane));
+ I915_WRITE(PLANE_SURF(pipe, plane), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane));
intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
}
-static int
-skl_update_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
- I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
- I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
- plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
- I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
-
- POSTING_READ(PLANE_CTL(pipe, plane));
-
- return 0;
-}
-
-static void
-skl_get_colorkey(struct drm_plane *drm_plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane;
- u32 plane_ctl;
-
- key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
- key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
- key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
-
- plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
-
- switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
- case PLANE_CTL_KEY_ENABLE_DESTINATION:
- key->flags = I915_SET_COLORKEY_DESTINATION;
- break;
- case PLANE_CTL_KEY_ENABLE_SOURCE:
- key->flags = I915_SET_COLORKEY_SOURCE;
- break;
- default:
- key->flags = I915_SET_COLORKEY_NONE;
- }
-}
-
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
@@ -407,7 +352,7 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -416,19 +361,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPCNTR(pipe, plane));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SP_PIXFORMAT_MASK;
- sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SP_TILED;
- sprctl &= ~SP_ROTATE_180;
+ sprctl = SP_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_YUYV:
@@ -482,8 +423,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
- sprctl |= SP_ENABLE;
-
intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
@@ -511,6 +450,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
@@ -544,8 +492,8 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
- ~SP_ENABLE);
+ I915_WRITE(SPCNTR(pipe, plane), 0);
+
/* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
@@ -554,61 +502,11 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
-static int
-vlv_update_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
- I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
- I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- sprctl &= ~SP_SOURCE_KEY;
- if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SP_SOURCE_KEY;
- I915_WRITE(SPCNTR(pipe, plane), sprctl);
-
- POSTING_READ(SPKEYMSK(pipe, plane));
-
- return 0;
-}
-
-static void
-vlv_get_colorkey(struct drm_plane *dplane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
- u32 sprctl;
-
- key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
- key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
- key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
-
- sprctl = I915_READ(SPCNTR(pipe, plane));
- if (sprctl & SP_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -617,19 +515,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_plane->pipe;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- sprctl = I915_READ(SPRCTL(pipe));
-
- /* Mask out pixel format bits in case we change it */
- sprctl &= ~SPRITE_PIXFORMAT_MASK;
- sprctl &= ~SPRITE_RGB_ORDER_RGBX;
- sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
- sprctl &= ~SPRITE_TILED;
- sprctl &= ~SPRITE_ROTATE_180;
+ sprctl = SPRITE_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -668,8 +561,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl |= SPRITE_ENABLE;
-
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
@@ -706,6 +597,17 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(SPRKEYVAL(pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -747,73 +649,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
I915_WRITE(SPRSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
-}
-
-static int
-ivb_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
- sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- sprctl |= SPRITE_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SPRITE_SOURCE_KEY;
- I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
-
- POSTING_READ(SPRKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 sprctl;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- sprctl = I915_READ(SPRCTL(intel_plane->pipe));
-
- if (sprctl & SPRITE_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (sprctl & SPRITE_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
@@ -822,19 +663,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
- dvscntr = I915_READ(DVSCNTR(pipe));
-
- /* Mask out pixel format bits in case we change it */
- dvscntr &= ~DVS_PIXFORMAT_MASK;
- dvscntr &= ~DVS_RGB_ORDER_XBGR;
- dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
- dvscntr &= ~DVS_TILED;
- dvscntr &= ~DVS_ROTATE_180;
+ dvscntr = DVS_ENABLE;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -870,7 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
- dvscntr |= DVS_ENABLE;
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
@@ -902,6 +737,17 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_update_primary_plane(intel_crtc);
+ if (key->flags) {
+ I915_WRITE(DVSKEYVAL(pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
+ }
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -930,20 +776,14 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_update_primary_plane(intel_crtc);
- I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
+
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-
- /*
- * Avoid underruns when disabling the sprite.
- * FIXME remove once watermark updates are done properly.
- */
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane));
}
/**
@@ -1014,67 +854,9 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
hsw_disable_ips(intel_crtc);
}
-static int
-ilk_update_colorkey(struct drm_plane *plane,
- struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
- int ret = 0;
-
- intel_plane = to_intel_plane(plane);
-
- I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
- I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
- I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
- dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- dvscntr |= DVS_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- dvscntr |= DVS_SOURCE_KEY;
- I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
-
- POSTING_READ(DVSKEYMSK(intel_plane->pipe));
-
- return ret;
-}
-
-static void
-ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane;
- u32 dvscntr;
-
- intel_plane = to_intel_plane(plane);
-
- key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
- key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
- key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
- key->flags = 0;
-
- dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
-
- if (dvscntr & DVS_DEST_KEY)
- key->flags = I915_SET_COLORKEY_DESTINATION;
- else if (dvscntr & DVS_SOURCE_KEY)
- key->flags = I915_SET_COLORKEY_SOURCE;
- else
- key->flags = I915_SET_COLORKEY_NONE;
-}
-
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
- struct drm_intel_sprite_colorkey key;
-
- intel_plane->get_colorkey(&intel_plane->base, &key);
-
- return key.flags != I915_SET_COLORKEY_NONE;
+ return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
}
static int
@@ -1257,11 +1039,18 @@ finish:
if (!intel_crtc->primary_enabled && !state->hides_primary)
intel_crtc->atomic.post_enable_primary = true;
- /* Update watermarks on tiling changes. */
- if (!plane->state->fb || !state->base.fb ||
- plane->state->fb->modifier[0] !=
- state->base.fb->modifier[0])
+ if (intel_wm_need_update(plane, &state->base))
intel_crtc->atomic.update_wm = true;
+
+ if (!state->visible) {
+ /*
+ * Avoid underruns when disabling the sprite.
+ * FIXME remove once watermark updates are done properly.
+ */
+ intel_crtc->atomic.wait_vblank = true;
+ intel_crtc->atomic.update_sprite_watermarks |=
+ (1 << drm_plane_index(plane));
+ }
}
return 0;
@@ -1275,7 +1064,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@@ -1283,8 +1071,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
- plane->fb = state->base.fb;
- intel_plane->obj = obj;
+ plane->fb = fb;
if (intel_crtc->active) {
intel_crtc->primary_enabled = !state->hides_primary;
@@ -1298,7 +1085,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
src_y = state->src.y1;
src_w = drm_rect_width(&state->src);
src_h = drm_rect_height(&state->src);
- intel_plane->update_plane(plane, crtc, fb, obj,
+ intel_plane->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
} else {
@@ -1319,40 +1106,28 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
+ if (IS_VALLEYVIEW(dev) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, set->plane_id);
- if (!plane) {
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
ret = -ENOENT;
goto out_unlock;
}
intel_plane = to_intel_plane(plane);
- ret = intel_plane->update_colorkey(plane, set);
-
-out_unlock:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_intel_sprite_colorkey *get = data;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- int ret = 0;
+ intel_plane->ckey = *set;
- drm_modeset_lock_all(dev);
-
- plane = drm_plane_find(dev, get->plane_id);
- if (!plane) {
- ret = -ENOENT;
- goto out_unlock;
- }
-
- intel_plane = to_intel_plane(plane);
- intel_plane->get_colorkey(plane, get);
+ /*
+ * The only way this could fail would be due to
+ * the current plane state being unsupportable already,
+ * and we don't consider that an error for the
+ * colorkey ioctl. So just ignore any error.
+ */
+ intel_plane_restore(plane);
out_unlock:
drm_modeset_unlock_all(dev);
@@ -1445,8 +1220,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
- intel_plane->update_colorkey = ilk_update_colorkey;
- intel_plane->get_colorkey = ilk_get_colorkey;
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
@@ -1470,16 +1243,12 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (IS_VALLEYVIEW(dev)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
- intel_plane->update_colorkey = vlv_update_colorkey;
- intel_plane->get_colorkey = vlv_get_colorkey;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->update_colorkey = ivb_update_colorkey;
- intel_plane->get_colorkey = ivb_get_colorkey;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1494,8 +1263,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
- intel_plane->update_colorkey = skl_update_colorkey;
- intel_plane->get_colorkey = skl_get_colorkey;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
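With the per-platform update_colorkey/get_colorkey hooks gone, the SET ioctl simply caches the key in intel_plane->ckey and re-programs it on the next plane update, while the GET path is removed outright. From userspace the call looks roughly like this, assuming libdrm's drmIoctl() wrapper (illustrative sketch; the key values are placeholders):

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_set_source_colorkey(int fd, uint32_t plane_id)
{
	struct drm_intel_sprite_colorkey set;

	memset(&set, 0, sizeof(set));
	set.plane_id = plane_id;		/* must be an overlay plane */
	set.min_value = 0x000000;		/* key match range ... */
	set.max_value = 0x0000ff;
	set.channel_mask = 0xffffff;		/* ... compared on these bits */
	set.flags = I915_SET_COLORKEY_SOURCE;	/* destination keying is
						 * rejected on VLV, per the
						 * ioctl hunk above */

	return drmIoctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &set);
}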
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 892d23c8479d..bc1d9d740904 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1332,7 +1332,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp);
+ intel_release_load_detect_pipe(connector, &tmp, &ctx);
status = type < 0 ?
connector_status_disconnected :
connector_status_connected;
@@ -1516,6 +1516,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.atomic_get_property = intel_connector_atomic_get_property,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index c6f2c4723b1b..74f505b0dd02 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -431,15 +431,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
-static struct device_node *imx_drm_of_get_next_endpoint(
- const struct device_node *parent, struct device_node *prev)
-{
- struct device_node *node = of_graph_get_next_endpoint(parent, prev);
-
- of_node_put(prev);
- return node;
-}
-
/*
* @node: device tree node containing encoder input ports
* @encoder: drm_encoder
@@ -448,7 +439,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder)
{
struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc);
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct of_endpoint endpoint;
struct device_node *port;
int ret;
@@ -456,18 +447,15 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
if (!node || !imx_crtc)
return -EINVAL;
- do {
- ep = imx_drm_of_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
port = of_graph_get_remote_port(ep);
of_node_put(port);
if (port == imx_crtc->crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
+ of_node_put(ep);
return ret ? ret : endpoint.port;
}
- } while (ep);
+ }
return -EINVAL;
}
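The open-coded endpoint walk is replaced by for_each_endpoint_of_node(), which takes a reference on each endpoint and drops the previous one as it iterates, so only an early exit needs an explicit of_node_put(). The generic shape of the pattern, with a hypothetical match() predicate (sketch):

#include <linux/of.h>
#include <linux/of_graph.h>

static int example_find_endpoint_port(struct device_node *parent,
				      bool (*match)(struct device_node *ep))
{
	struct of_endpoint endpoint;
	struct device_node *ep;
	int ret;

	for_each_endpoint_of_node(parent, ep) {
		if (!match(ep))
			continue;
		ret = of_graph_parse_endpoint(ep, &endpoint);
		of_node_put(ep);	/* breaking out early: drop our ref */
		return ret ? ret : endpoint.port;
	}

	return -EINVAL;
}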
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index bacbbb70f679..0a6f6764a37c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -35,3 +35,14 @@ config DRM_MSM_REGISTER_LOGGING
Compile in support for logging register reads/writes in a format
that can be parsed by the envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+
+config DRM_MSM_DSI
+ bool "Enable DSI support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ default y
+ help
+ Choose this option if you need MIPI DSI connector support.
+
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 674a132fd76e..ab2086783fee 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,5 +50,10 @@ msm-y := \
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_host.o \
+ dsi/dsi_manager.o \
+ dsi/dsi_phy.o \
+ mdp/mdp5/mdp5_cmd_encoder.o
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
new file mode 100644
index 000000000000..28d1f95a90cc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
+{
+ if (!msm_dsi || !msm_dsi->panel)
+ return NULL;
+
+ return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+ msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
+ msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+}
+
+static void dsi_destroy(struct msm_dsi *msm_dsi)
+{
+ if (!msm_dsi)
+ return;
+
+ msm_dsi_manager_unregister(msm_dsi);
+ if (msm_dsi->host) {
+ msm_dsi_host_destroy(msm_dsi->host);
+ msm_dsi->host = NULL;
+ }
+
+ platform_set_drvdata(msm_dsi->pdev, NULL);
+}
+
+static struct msm_dsi *dsi_init(struct platform_device *pdev)
+{
+ struct msm_dsi *msm_dsi = NULL;
+ int ret;
+
+ if (!pdev) {
+ pr_err("no dsi device\n"); /* pdev is NULL here; dev_err() would dereference it */
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
+ if (!msm_dsi) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ DBG("dsi probed=%p", msm_dsi);
+
+ msm_dsi->pdev = pdev;
+ platform_set_drvdata(pdev, msm_dsi);
+
+ /* Init dsi host */
+ ret = msm_dsi_host_init(msm_dsi);
+ if (ret)
+ goto fail;
+
+ /* Register with the dsi manager */
+ ret = msm_dsi_manager_register(msm_dsi);
+ if (ret)
+ goto fail;
+
+ return msm_dsi;
+
+fail:
+ if (msm_dsi)
+ dsi_destroy(msm_dsi);
+
+ return ERR_PTR(ret);
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi;
+
+ DBG("");
+ msm_dsi = dsi_init(pdev);
+ if (IS_ERR(msm_dsi))
+ return PTR_ERR(msm_dsi);
+
+ priv->dsi[msm_dsi->id] = msm_dsi;
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+ int id = msm_dsi->id;
+
+ if (priv->dsi[id]) {
+ dsi_destroy(msm_dsi);
+ priv->dsi[id] = NULL;
+ }
+}
+
+static const struct component_ops dsi_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_del(&pdev->dev, &dsi_ops);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss-dsi-ctrl" },
+ {}
+};
+
+static struct platform_driver dsi_driver = {
+ .probe = dsi_dev_probe,
+ .remove = dsi_dev_remove,
+ .driver = {
+ .name = "msm_dsi",
+ .of_match_table = dt_match,
+ },
+};
+
+void __init msm_dsi_register(void)
+{
+ DBG("");
+ platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret, i;
+
+ if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
+ !encoders[MSM_DSI_CMD_ENCODER_ID]))
+ return -EINVAL;
+
+ msm_dsi->dev = dev;
+
+ ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+ if (ret) {
+ dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+ goto fail;
+ }
+
+ msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
+ if (IS_ERR(msm_dsi->bridge)) {
+ ret = PTR_ERR(msm_dsi->bridge);
+ dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ msm_dsi->bridge = NULL;
+ goto fail;
+ }
+
+ msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+ if (IS_ERR(msm_dsi->connector)) {
+ ret = PTR_ERR(msm_dsi->connector);
+ dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+ msm_dsi->connector = NULL;
+ goto fail;
+ }
+
+ for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+ encoders[i]->bridge = msm_dsi->bridge;
+ msm_dsi->encoders[i] = encoders[i];
+ }
+
+ priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
+ priv->connectors[priv->num_connectors++] = msm_dsi->connector;
+
+ return 0;
+fail:
+ if (msm_dsi) {
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
+ if (msm_dsi->connector) {
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ msm_dsi->connector = NULL;
+ }
+ }
+
+ return ret;
+}
+
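msm_dsi_modeset_init() is the hook the KMS side is expected to call once it has created the two encoders. A hedged sketch of such a caller (encoder creation and the encoder-ID macros from msm_drv.h are assumptions, as they are not shown in this patch):

static int example_hookup_dsi(struct drm_device *dev, int id,
			      struct drm_encoder *video_enc,
			      struct drm_encoder *cmd_enc)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM] = {
		[MSM_DSI_VIDEO_ENCODER_ID] = video_enc,
		[MSM_DSI_CMD_ENCODER_ID]   = cmd_enc,
	};

	if (!priv->dsi[id])	/* no DSI device bound at this index */
		return 0;

	/* Registers bridge + connector with priv and points both
	 * encoders at the DSI bridge. */
	return msm_dsi_modeset_init(priv->dsi[id], dev, encoders);
}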
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 000000000000..10f54d4e379a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_CONNECTOR_H__
+#define __DSI_CONNECTOR_H__
+
+#include <linux/platform_device.h>
+
+#include "drm_crtc.h"
+#include "drm_mipi_dsi.h"
+#include "drm_panel.h"
+
+#include "msm_drv.h"
+
+#define DSI_0 0
+#define DSI_1 1
+#define DSI_MAX 2
+
+#define DSI_CLOCK_MASTER DSI_0
+#define DSI_CLOCK_SLAVE DSI_1
+
+#define DSI_LEFT DSI_0
+#define DSI_RIGHT DSI_1
+
+/* According to the current DRM framework sequence, take the DSI_1
+ * encoder as the master encoder.
+ */
+#define DSI_ENCODER_MASTER DSI_1
+#define DSI_ENCODER_SLAVE DSI_0
+
+struct msm_dsi {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ struct mipi_dsi_host *host;
+ struct msm_dsi_phy *phy;
+ struct drm_panel *panel;
+ unsigned long panel_flags;
+ bool phy_enabled;
+
+ /* the encoders we are hooked to (outside of dsi block) */
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+
+ int id;
+};
+
+/* dsi manager */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+int msm_dsi_manager_phy_enable(int id,
+ const unsigned long bit_rate, const unsigned long esc_rate,
+ u32 *clk_pre, u32 *clk_post);
+void msm_dsi_manager_phy_disable(int id);
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+
+/* msm dsi */
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
+
+/* dsi host */
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
+ u32 iova, u32 len);
+int msm_dsi_host_enable(struct mipi_dsi_host *host);
+int msm_dsi_host_disable(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_off(struct mipi_dsi_host *host);
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ struct drm_display_mode *mode);
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+ unsigned long *panel_flags);
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
+void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_destroy(struct mipi_dsi_host *host);
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev);
+int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+
+/* dsi phy */
+struct msm_dsi_phy;
+enum msm_dsi_phy_type {
+ MSM_DSI_PHY_UNKNOWN,
+ MSM_DSI_PHY_28NM,
+ MSM_DSI_PHY_MAX
+};
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id);
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+ u32 *clk_pre, u32 *clk_post);
+#endif /* __DSI_CONNECTOR_H__ */
+
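The PHY half of the interface above is symmetric: init once, enable with the link rates, read back the computed T_CLK_PRE/T_CLK_POST values. Roughly like this (rates are placeholders; the error convention of msm_dsi_phy_init() is an assumption):

static int example_phy_bringup(struct platform_device *pdev, int id)
{
	struct msm_dsi_phy *phy;
	u32 clk_pre, clk_post;
	int ret;

	phy = msm_dsi_phy_init(pdev, MSM_DSI_PHY_28NM, id);
	if (IS_ERR_OR_NULL(phy))	/* assumed error convention */
		return -ENODEV;

	ret = msm_dsi_phy_enable(phy, false /* single panel */,
				 500000000UL /* bit rate, placeholder */,
				 19200000UL /* esc rate, placeholder */);
	if (ret)
		return ret;

	/* Values the host feeds into REG_DSI_CLKOUT_TIMING_CTRL. */
	msm_dsi_phy_get_clk_pre_post(phy, &clk_pre, &clk_post);

	return 0;
}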
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index abf1bba520bf..1dcfae265e98 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
-
-Copyright (C) 2013 by the following authors:
+- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml ( 18681 bytes, from 2015-03-04 23:08:31)
+- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-01-28 21:43:22)
+
+Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -51,11 +42,11 @@ enum dsi_traffic_mode {
BURST_MODE = 2,
};
-enum dsi_dst_format {
- DST_FORMAT_RGB565 = 0,
- DST_FORMAT_RGB666 = 1,
- DST_FORMAT_RGB666_LOOSE = 2,
- DST_FORMAT_RGB888 = 3,
+enum dsi_vid_dst_format {
+ VID_DST_FORMAT_RGB565 = 0,
+ VID_DST_FORMAT_RGB666 = 1,
+ VID_DST_FORMAT_RGB666_LOOSE = 2,
+ VID_DST_FORMAT_RGB888 = 3,
};
enum dsi_rgb_swap {
@@ -69,20 +60,63 @@ enum dsi_rgb_swap {
enum dsi_cmd_trigger {
TRIGGER_NONE = 0,
+ TRIGGER_SEOF = 1,
TRIGGER_TE = 2,
TRIGGER_SW = 4,
TRIGGER_SW_SEOF = 5,
TRIGGER_SW_TE = 6,
};
+enum dsi_cmd_dst_format {
+ CMD_DST_FORMAT_RGB111 = 0,
+ CMD_DST_FORMAT_RGB332 = 3,
+ CMD_DST_FORMAT_RGB444 = 4,
+ CMD_DST_FORMAT_RGB565 = 6,
+ CMD_DST_FORMAT_RGB666 = 7,
+ CMD_DST_FORMAT_RGB888 = 8,
+};
+
+enum dsi_lane_swap {
+ LANE_SWAP_0123 = 0,
+ LANE_SWAP_3012 = 1,
+ LANE_SWAP_2301 = 2,
+ LANE_SWAP_1230 = 3,
+ LANE_SWAP_0321 = 4,
+ LANE_SWAP_1032 = 5,
+ LANE_SWAP_2103 = 6,
+ LANE_SWAP_3210 = 7,
+};
+
#define DSI_IRQ_CMD_DMA_DONE 0x00000001
#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
#define DSI_IRQ_CMD_MDP_DONE 0x00000100
#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
#define DSI_IRQ_VIDEO_DONE 0x00010000
#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
+#define DSI_IRQ_BTA_DONE 0x00100000
+#define DSI_IRQ_MASK_BTA_DONE 0x00200000
#define DSI_IRQ_ERROR 0x01000000
#define DSI_IRQ_MASK_ERROR 0x02000000
+#define REG_DSI_6G_HW_VERSION 0x00000000
+#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000
+#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
+}
+#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000
+#define DSI_6G_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
+}
+#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff
+#define DSI_6G_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
+}
+
#define REG_DSI_CTRL 0x00000000
#define DSI_CTRL_ENABLE 0x00000001
#define DSI_CTRL_VID_MODE_EN 0x00000002
@@ -96,11 +130,15 @@ enum dsi_cmd_trigger {
#define DSI_CTRL_CRC_CHECK 0x01000000
#define REG_DSI_STATUS0 0x00000004
+#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001
#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
+#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004
#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
#define DSI_STATUS0_DSI_BUSY 0x00000010
+#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000
#define REG_DSI_FIFO_STATUS 0x00000008
+#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080
#define REG_DSI_VID_CFG0 0x0000000c
#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
@@ -111,7 +149,7 @@ static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
}
#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
-static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
{
return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
}
@@ -129,21 +167,15 @@ static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
#define REG_DSI_VID_CFG1 0x0000001c
-#define DSI_VID_CFG1_R_SEL 0x00000010
-#define DSI_VID_CFG1_G_SEL 0x00000100
-#define DSI_VID_CFG1_B_SEL 0x00001000
-#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
-#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
+#define DSI_VID_CFG1_R_SEL 0x00000001
+#define DSI_VID_CFG1_G_SEL 0x00000010
+#define DSI_VID_CFG1_B_SEL 0x00000100
+#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12
static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
{
return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
}
-#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
-#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
-static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
-{
- return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
-}
#define REG_DSI_ACTIVE_H 0x00000020
#define DSI_ACTIVE_H_START__MASK 0x00000fff
@@ -201,32 +233,115 @@ static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
}
-#define REG_DSI_ACTIVE_VSYNC 0x00000034
-#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
-#define DSI_ACTIVE_VSYNC_START__SHIFT 0
-static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030
+#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
{
- return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
}
-#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
-#define DSI_ACTIVE_VSYNC_END__SHIFT 16
-static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
{
- return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034
+#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
}
#define REG_DSI_CMD_DMA_CTRL 0x00000038
+#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000
#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
#define REG_DSI_CMD_CFG0 0x0000003c
+#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f
+#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0
+static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
+{
+ return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_CMD_CFG0_R_SEL 0x00000010
+#define DSI_CMD_CFG0_G_SEL 0x00000100
+#define DSI_CMD_CFG0_B_SEL 0x00001000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20
+static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
+}
+#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000
+#define DSI_CMD_CFG0_RGB_SWAP__SHIFT 16
+static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
+}
#define REG_DSI_CMD_CFG1 0x00000040
+#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff
+#define DSI_CMD_CFG1_WR_MEM_START__SHIFT 0
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
+}
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
+}
+#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000
#define REG_DSI_DMA_BASE 0x00000044
#define REG_DSI_DMA_LEN 0x00000048
+#define REG_DSI_CMD_MDP_STREAM_CTRL 0x00000054
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM_TOTAL 0x00000058
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+}
+
#define REG_DSI_ACK_ERR_STATUS 0x00000064
static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
@@ -234,19 +349,25 @@ static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
#define REG_DSI_TRIG_CTRL 0x00000080
-#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007
#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
{
return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
}
-#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070
#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
{
return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
}
-#define DSI_TRIG_CTRL_STREAM 0x00000100
+#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300
+#define DSI_TRIG_CTRL_STREAM__SHIFT 8
+static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
+{
+ return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
+}
+#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000
#define DSI_TRIG_CTRL_TE 0x80000000
#define REG_DSI_TRIG_DMA 0x0000008c
@@ -274,6 +395,12 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0
+static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
+{
+ return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
+}
#define REG_DSI_ERR_INT_MASK0 0x00000108
@@ -282,8 +409,36 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define REG_DSI_RESET 0x00000114
#define REG_DSI_CLK_CTRL 0x00000118
+#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001
+#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002
+#define DSI_CLK_CTRL_PCLK_ON 0x00000004
+#define DSI_CLK_CTRL_DSICLK_ON 0x00000008
+#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010
+#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020
+#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200
+
+#define REG_DSI_CLK_STATUS 0x0000011c
+#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000
#define REG_DSI_PHY_RESET 0x00000128
+#define DSI_PHY_RESET_RESET 0x00000001
+
+#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
+#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
+#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
+static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
+{
+ return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
+}
+#define DSI_RDBK_DATA_CTRL_CLR 0x00000001
+
+#define REG_DSI_VERSION 0x000001f0
+#define DSI_VERSION_MAJOR__MASK 0xff000000
+#define DSI_VERSION_MAJOR__SHIFT 24
+static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
+}
#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
@@ -501,5 +656,184 @@ static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x000003
#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 000000000000..fdc54e3eff55
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,1993 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <video/mipi_display.h>
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define MSM_DSI_VER_MAJOR_V2 0x02
+#define MSM_DSI_VER_MAJOR_6G 0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+
+#define DSI_6G_REG_SHIFT 4
+
+#define DSI_REGULATOR_MAX 8
+struct dsi_reg_entry {
+ char name[32];
+ int min_voltage;
+ int max_voltage;
+ int enable_load;
+ int disable_load;
+};
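+/* Note (illustrative): a negative voltage or load value in an entry means
+ * "leave that setting alone" - dsi_regulator_init() and the enable/disable
+ * paths below only apply values that are >= 0, which is how the "gdsc"
+ * entry (all -1) skips voltage and load configuration.
+ */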
+
+struct dsi_reg_config {
+ int num;
+ struct dsi_reg_entry regs[DSI_REGULATOR_MAX];
+};
+
+struct dsi_config {
+ u32 major;
+ u32 minor;
+ u32 io_offset;
+ enum msm_dsi_phy_type phy_type;
+ struct dsi_reg_config reg_cfg;
+};
+
+static const struct dsi_config dsi_cfgs[] = {
+ {MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN},
+ { /* 8974 v1 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_0,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8974 v2 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8974 v3 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8084 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_2,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 3000000, 3000000, 150000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+ { /* 8916 */
+ .major = MSM_DSI_VER_MAJOR_6G,
+ .minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
+ .io_offset = DSI_6G_REG_SHIFT,
+ .phy_type = MSM_DSI_PHY_28NM,
+ .reg_cfg = {
+ .num = 4,
+ .regs = {
+ {"gdsc", -1, -1, -1, -1},
+ {"vdd", 2850000, 2850000, 100000, 100},
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ },
+};
+
+static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+{
+ u32 ver;
+ u32 ver_6g;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+	/* From DSI6G(v3), the addition of a 6G_HW_VERSION register at
+	 * offset 0 shifts all other registers down by 4 bytes.
+	 */
+ ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
+ if (ver_6g == 0) {
+ ver = msm_readl(base + REG_DSI_VERSION);
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver <= MSM_DSI_VER_MAJOR_V2) {
+ /* old versions */
+ *major = ver;
+ *minor = 0;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver == MSM_DSI_VER_MAJOR_6G) {
+ /* 6G version */
+ *major = ver;
+ *minor = ver_6g;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+}
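+/* Example (illustrative): on a DSI6G controller such as 8916, the register
+ * at offset 0 reads back a non-zero value like 0x10030001
+ * (MSM_DSI_6G_VER_MINOR_V1_3_1), so *major is taken from the shifted
+ * REG_DSI_VERSION and *minor carries the full 6G minor code; older V2
+ * blocks read 0 at offset 0 and are identified via REG_DSI_VERSION alone.
+ */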
+
+#define DSI_ERR_STATE_ACK 0x0000
+#define DSI_ERR_STATE_TIMEOUT 0x0001
+#define DSI_ERR_STATE_DLN0_PHY 0x0002
+#define DSI_ERR_STATE_FIFO 0x0004
+#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
+#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
+#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
+
+#define DSI_CLK_CTRL_ENABLE_CLKS \
+ (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
+ DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
+ DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
+ DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
+
+struct msm_dsi_host {
+ struct mipi_dsi_host base;
+
+ struct platform_device *pdev;
+ struct drm_device *dev;
+
+ int id;
+
+ void __iomem *ctrl_base;
+ struct regulator_bulk_data supplies[DSI_REGULATOR_MAX];
+ struct clk *mdp_core_clk;
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *mmss_misc_ahb_clk;
+ struct clk *byte_clk;
+ struct clk *esc_clk;
+ struct clk *pixel_clk;
+ u32 byte_clk_rate;
+
+ struct gpio_desc *disp_en_gpio;
+ struct gpio_desc *te_gpio;
+
+ const struct dsi_config *cfg;
+
+ struct completion dma_comp;
+ struct completion video_comp;
+ struct mutex dev_mutex;
+ struct mutex cmd_mutex;
+ struct mutex clk_mutex;
+ spinlock_t intr_lock; /* Protect interrupt ctrl register */
+
+ u32 err_work_state;
+ struct work_struct err_work;
+ struct workqueue_struct *workqueue;
+
+ struct drm_gem_object *tx_gem_obj;
+ u8 *rx_buf;
+
+ struct drm_display_mode *mode;
+
+ /* Panel info */
+ struct device_node *panel_node;
+ unsigned int channel;
+ unsigned int lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+
+ u32 dma_cmd_ctrl_restore;
+
+ bool registered;
+ bool power_on;
+ int irq;
+};
+
+static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB565: return 16;
+ case MIPI_DSI_FMT_RGB666_PACKED: return 18;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default: return 24;
+ }
+}
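+/* Note (illustrative): MIPI_DSI_FMT_RGB666 is the loosely packed variant,
+ * which occupies 24 bits per pixel on the wire (each 6-bit component padded
+ * to a byte), hence it shares the 24bpp case with RGB888; only the packed
+ * RGB666 variant is a true 18bpp format.
+ */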
+
+static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
+{
+ return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
+static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
+{
+ msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
+
+static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+{
+ const struct dsi_config *cfg;
+ struct regulator *gdsc_reg;
+ int i, ret;
+ u32 major = 0, minor = 0;
+
+ gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
+ if (IS_ERR_OR_NULL(gdsc_reg)) {
+ pr_err("%s: cannot get gdsc\n", __func__);
+ goto fail;
+ }
+ ret = regulator_enable(gdsc_reg);
+ if (ret) {
+ pr_err("%s: unable to enable gdsc\n", __func__);
+ regulator_put(gdsc_reg);
+ goto fail;
+ }
+ ret = clk_prepare_enable(msm_host->ahb_clk);
+ if (ret) {
+ pr_err("%s: unable to enable ahb_clk\n", __func__);
+ regulator_disable(gdsc_reg);
+ regulator_put(gdsc_reg);
+ goto fail;
+ }
+
+ ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
+
+ clk_disable_unprepare(msm_host->ahb_clk);
+ regulator_disable(gdsc_reg);
+ regulator_put(gdsc_reg);
+ if (ret) {
+ pr_err("%s: Invalid version\n", __func__);
+ goto fail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
+ cfg = dsi_cfgs + i;
+ if ((cfg->major == major) && (cfg->minor == minor))
+ return cfg;
+ }
+ pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+
+fail:
+ return NULL;
+}
+
+static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct msm_dsi_host, base);
+}
+
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int i;
+
+ DBG("");
+ for (i = num - 1; i >= 0; i--)
+ if (regs[i].disable_load >= 0)
+ regulator_set_optimum_mode(s[i].consumer,
+ regs[i].disable_load);
+
+ regulator_bulk_disable(num, s);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int ret, i;
+
+ DBG("");
+ for (i = 0; i < num; i++) {
+ if (regs[i].enable_load >= 0) {
+ ret = regulator_set_optimum_mode(s[i].consumer,
+ regs[i].enable_load);
+ if (ret < 0) {
+ pr_err("regulator %d set op mode failed, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+ }
+
+ ret = regulator_bulk_enable(num, s);
+ if (ret < 0) {
+ pr_err("regulator enable failed, %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ regulator_set_optimum_mode(s[i].consumer, regs[i].disable_load);
+ return ret;
+}
+
+static int dsi_regulator_init(struct msm_dsi_host *msm_host)
+{
+ struct regulator_bulk_data *s = msm_host->supplies;
+ const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+ int num = msm_host->cfg->reg_cfg.num;
+ int i, ret;
+
+ for (i = 0; i < num; i++)
+ s[i].supply = regs[i].name;
+
+ ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
+ if (ret < 0) {
+ pr_err("%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ for (i = 0; i < num; i++) {
+ if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+ ret = regulator_set_voltage(s[i].consumer,
+ regs[i].min_voltage, regs[i].max_voltage);
+ if (ret < 0) {
+ pr_err("regulator %d set voltage failed, %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int dsi_clk_init(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ int ret = 0;
+
+ msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+ if (IS_ERR(msm_host->mdp_core_clk)) {
+ ret = PTR_ERR(msm_host->mdp_core_clk);
+ pr_err("%s: Unable to get mdp core clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(msm_host->ahb_clk)) {
+ ret = PTR_ERR(msm_host->ahb_clk);
+ pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
+ if (IS_ERR(msm_host->axi_clk)) {
+ ret = PTR_ERR(msm_host->axi_clk);
+ pr_err("%s: Unable to get axi bus clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
+ if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
+ ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
+ pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
+ if (IS_ERR(msm_host->byte_clk)) {
+ ret = PTR_ERR(msm_host->byte_clk);
+ pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->byte_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ if (IS_ERR(msm_host->pixel_clk)) {
+ ret = PTR_ERR(msm_host->pixel_clk);
+ pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->pixel_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->esc_clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(msm_host->esc_clk)) {
+ ret = PTR_ERR(msm_host->esc_clk);
+ pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->esc_clk = NULL;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ DBG("id=%d", msm_host->id);
+
+ ret = clk_prepare_enable(msm_host->mdp_core_clk);
+ if (ret) {
+ pr_err("%s: failed to enable mdp_core_clock, %d\n",
+ __func__, ret);
+ goto core_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->ahb_clk);
+ if (ret) {
+ pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+ goto ahb_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->axi_clk);
+ if (ret) {
+ pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+ goto axi_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
+ if (ret) {
+ pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
+ __func__, ret);
+ goto misc_ahb_clk_err;
+ }
+
+ return 0;
+
+misc_ahb_clk_err:
+ clk_disable_unprepare(msm_host->axi_clk);
+axi_clk_err:
+ clk_disable_unprepare(msm_host->ahb_clk);
+ahb_clk_err:
+ clk_disable_unprepare(msm_host->mdp_core_clk);
+core_clk_err:
+ return ret;
+}
+
+static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
+{
+ DBG("");
+ clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
+ clk_disable_unprepare(msm_host->axi_clk);
+ clk_disable_unprepare(msm_host->ahb_clk);
+ clk_disable_unprepare(msm_host->mdp_core_clk);
+}
+
+static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ DBG("Set clk rates: pclk=%d, byteclk=%d",
+ msm_host->mode->clock, msm_host->byte_clk_rate);
+
+ ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ if (ret) {
+ pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->esc_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->byte_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return 0;
+
+pixel_clk_err:
+ clk_disable_unprepare(msm_host->byte_clk);
+byte_clk_err:
+ clk_disable_unprepare(msm_host->esc_clk);
+error:
+ return ret;
+}
+
+static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
+
+static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
+{
+ int ret = 0;
+
+ mutex_lock(&msm_host->clk_mutex);
+ if (enable) {
+ ret = dsi_bus_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: Can not enable bus clk, %d\n",
+ __func__, ret);
+ goto unlock_ret;
+ }
+ ret = dsi_link_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: Can not enable link clk, %d\n",
+ __func__, ret);
+ dsi_bus_clk_disable(msm_host);
+ goto unlock_ret;
+ }
+ } else {
+ dsi_link_clk_disable(msm_host);
+ dsi_bus_clk_disable(msm_host);
+ }
+
+unlock_ret:
+ mutex_unlock(&msm_host->clk_mutex);
+ return ret;
+}
+
+static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ u8 lanes = msm_host->lanes;
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u32 pclk_rate;
+
+ if (!mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ pclk_rate = mode->clock * 1000;
+ if (lanes > 0) {
+ msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
+ } else {
+ pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+ msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ }
+
+ DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+
+ return 0;
+}
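+/* Worked example (illustrative): a 148.5 MHz pixel clock at 24bpp on
+ * 4 lanes gives byte_clk_rate = 148500000 * 24 / (8 * 4) = 111375000,
+ * i.e. each lane carries pclk * bpp / lanes bits per second and the byte
+ * clock is one eighth of that per-lane bit rate.
+ */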
+
+static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
+{
+ DBG("");
+ dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+ /* Make sure fully reset */
+ wmb();
+ udelay(1000);
+ dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+ udelay(100);
+}
+
+static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
+{
+ u32 intr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+
+ if (enable)
+ intr |= mask;
+ else
+ intr &= ~mask;
+
+ DBG("intr=%x enable=%d", intr, enable);
+
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+}
+
+static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
+{
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ return BURST_MODE;
+ else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ return NON_BURST_SYNCH_PULSE;
+
+ return NON_BURST_SYNCH_EVENT;
+}
+
+static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
+ case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
+ default: return VID_DST_FORMAT_RGB888;
+ }
+}
+
+static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+	case MIPI_DSI_FMT_RGB666:	return CMD_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
+ default: return CMD_DST_FORMAT_RGB888;
+ }
+}
+
+static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+ u32 clk_pre, u32 clk_post)
+{
+ u32 flags = msm_host->mode_flags;
+ enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+ u32 data = 0;
+
+ if (!enable) {
+ dsi_write(msm_host, REG_DSI_CTRL, 0);
+ return;
+ }
+
+ if (flags & MIPI_DSI_MODE_VIDEO) {
+ if (flags & MIPI_DSI_MODE_VIDEO_HSE)
+ data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
+ if (flags & MIPI_DSI_MODE_VIDEO_HFP)
+ data |= DSI_VID_CFG0_HFP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_HBP)
+ data |= DSI_VID_CFG0_HBP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_HSA)
+ data |= DSI_VID_CFG0_HSA_POWER_STOP;
+ /* Always set low power stop mode for BLLP
+ * to let command engine send packets
+ */
+ data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
+ DSI_VID_CFG0_BLLP_POWER_STOP;
+ data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
+ data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
+ data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
+ dsi_write(msm_host, REG_DSI_VID_CFG0, data);
+
+ /* Do not swap RGB colors */
+ data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
+		dsi_write(msm_host, REG_DSI_VID_CFG1, data);
+ } else {
+ /* Do not swap RGB colors */
+ data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
+ data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
+ dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
+
+ data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
+ DSI_CMD_CFG1_WR_MEM_CONTINUE(
+ MIPI_DCS_WRITE_MEMORY_CONTINUE);
+ /* Always insert DCS command */
+ data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
+ dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
+ }
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
+ DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
+ DSI_CMD_DMA_CTRL_LOW_POWER);
+
+ data = 0;
+ /* Always assume dedicated TE pin */
+ data |= DSI_TRIG_CTRL_TE;
+ data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
+ data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
+ data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
+ if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+ (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+ data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
+ dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
+
+ data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
+ DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
+ dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
+
+ data = 0;
+ if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
+ data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
+ dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
+
+ /* allow only ack-err-status to generate interrupt */
+ dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+
+ data = DSI_CTRL_CLK_EN;
+
+ DBG("lane number=%d", msm_host->lanes);
+ if (msm_host->lanes == 2) {
+ data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
+ /* swap lanes for 2-lane panel for better performance */
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
+ } else {
+ /* Take 4 lanes as default */
+ data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
+ DSI_CTRL_LANE3;
+ /* Do not swap lanes for 4-lane panel */
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
+ }
+ data |= DSI_CTRL_ENABLE;
+
+ dsi_write(msm_host, REG_DSI_CTRL, data);
+}
+
+static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
+ u32 h_total = mode->htotal;
+ u32 v_total = mode->vtotal;
+ u32 hs_end = mode->hsync_end - mode->hsync_start;
+ u32 vs_end = mode->vsync_end - mode->vsync_start;
+ u32 ha_start = h_total - mode->hsync_start;
+ u32 ha_end = ha_start + mode->hdisplay;
+ u32 va_start = v_total - mode->vsync_start;
+ u32 va_end = va_start + mode->vdisplay;
+ u32 wc;
+
+ DBG("");
+
+ if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi_write(msm_host, REG_DSI_ACTIVE_H,
+ DSI_ACTIVE_H_START(ha_start) |
+ DSI_ACTIVE_H_END(ha_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_V,
+ DSI_ACTIVE_V_START(va_start) |
+ DSI_ACTIVE_V_END(va_end));
+ dsi_write(msm_host, REG_DSI_TOTAL,
+ DSI_TOTAL_H_TOTAL(h_total - 1) |
+ DSI_TOTAL_V_TOTAL(v_total - 1));
+
+ dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
+ DSI_ACTIVE_HSYNC_START(hs_start) |
+ DSI_ACTIVE_HSYNC_END(hs_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
+ DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
+ DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+ } else { /* command mode */
+ /* image data and 1 byte write_memory_start cmd */
+ wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
+ DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
+ DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
+ msm_host->channel) |
+ DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
+ MIPI_DSI_DCS_LONG_WRITE));
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
+ }
+}
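+/* Example (illustrative, video mode): for standard 1080p60 timings
+ * (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200) the
+ * hardware timeline begins at sync start, so ha_start = 2200 - 2008 = 192
+ * (sync width + back porch), ha_end = 192 + 1920 = 2112, and
+ * hs_end = 2052 - 2008 = 44.
+ */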
+
+static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+{
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+ wmb(); /* clocks need to be enabled before reset */
+
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+ wmb(); /* make sure reset happen */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+}
+
+static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
+ bool video_mode, bool enable)
+{
+ u32 dsi_ctrl;
+
+ dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+ if (!enable) {
+ dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
+ DSI_CTRL_CMD_MODE_EN);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
+ DSI_IRQ_MASK_VIDEO_DONE, 0);
+ } else {
+ if (video_mode) {
+ dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
+ } else { /* command mode */
+ dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
+ }
+ dsi_ctrl |= DSI_CTRL_ENABLE;
+ }
+
+ dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
+}
+
+static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
+{
+ u32 data;
+
+ data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
+
+ if (mode == 0)
+ data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
+ else
+ data |= DSI_CMD_DMA_CTRL_LOW_POWER;
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
+}
+
+static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+{
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
+
+ reinit_completion(&msm_host->video_comp);
+
+ wait_for_completion_timeout(&msm_host->video_comp,
+ msecs_to_jiffies(70));
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
+}
+
+static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+{
+ if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+ return;
+
+ if (msm_host->power_on) {
+ dsi_wait4video_done(msm_host);
+		/* wait 2~4 ms to skip BLLP */
+ usleep_range(2000, 4000);
+ }
+}
+
+/* dsi_cmd */
+static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+ int ret;
+ u32 iova;
+
+ mutex_lock(&dev->struct_mutex);
+ msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
+ if (IS_ERR(msm_host->tx_gem_obj)) {
+ ret = PTR_ERR(msm_host->tx_gem_obj);
+ pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
+ msm_host->tx_gem_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+	if (ret) {
+		pr_err("%s: failed to get iova, %d\n", __func__, ret);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+ mutex_unlock(&dev->struct_mutex);
+
+ if (iova & 0x07) {
+ pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ if (msm_host->tx_gem_obj) {
+ msm_gem_put_iova(msm_host->tx_gem_obj, 0);
+ mutex_lock(&dev->struct_mutex);
+ msm_gem_free_object(msm_host->tx_gem_obj);
+ msm_host->tx_gem_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mipi_dsi_packet packet;
+ int len;
+ int ret;
+ u8 *data;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ pr_err("%s: create packet failed, %d\n", __func__, ret);
+ return ret;
+ }
+ len = (packet.size + 3) & (~0x3);
+
+ if (len > tx_gem->size) {
+ pr_err("%s: packet size is too big\n", __func__);
+ return -EINVAL;
+ }
+
+ data = msm_gem_vaddr(tx_gem);
+
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* MSM specific command format in memory */
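+	/* Resulting layout, as built below (for illustration):
+	 *   data[0..2] - DSI packet header bytes, reordered as
+	 *                header[1], header[2], header[0]
+	 *   data[3]    - flags: BIT(7) last packet, BIT(6) long packet,
+	 *                BIT(5) response expected
+	 *   data[4..]  - payload for long packets, padded with 0xff up to
+	 *                the next 4-byte boundary
+	 */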
+ data[0] = packet.header[1];
+ data[1] = packet.header[2];
+ data[2] = packet.header[0];
+ data[3] = BIT(7); /* Last packet */
+ if (mipi_dsi_packet_format_is_long(msg->type))
+ data[3] |= BIT(6);
+ if (msg->rx_buf && msg->rx_len)
+ data[3] |= BIT(5);
+
+ /* Long packet */
+ if (packet.payload && packet.payload_length)
+ memcpy(data + 4, packet.payload, packet.payload_length);
+
+ /* Append 0xff to the end */
+ if (packet.size < len)
+ memset(data + packet.size, 0xff, len - packet.size);
+
+ return len;
+}
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 1)) {
+ *data = buf[1]; /* strip out dcs type */
+ return 1;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %d\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameter
+ */
+static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 2)) {
+ data[0] = buf[1]; /* strip out dcs type */
+ data[1] = buf[2];
+ return 2;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %d\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ /* strip out 4 byte dcs header */
+ if (msg->rx_buf && msg->rx_len)
+ memcpy(msg->rx_buf, buf + 4, msg->rx_len);
+
+ return msg->rx_len;
+}
+
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ int ret;
+ u32 iova;
+ bool triggered;
+
+ ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
+ }
+
+ reinit_completion(&msm_host->dma_comp);
+
+ dsi_wait4video_eng_busy(msm_host);
+
+ triggered = msm_dsi_manager_cmd_xfer_trigger(
+ msm_host->id, iova, len);
+ if (triggered) {
+ ret = wait_for_completion_timeout(&msm_host->dma_comp,
+ msecs_to_jiffies(200));
+ DBG("ret=%d", ret);
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else
+ ret = len;
+	} else {
+		ret = len;
+	}
+
+ return ret;
+}
+
+static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
+ u8 *buf, int rx_byte, int pkt_size)
+{
+ u32 *lp, *temp, data;
+ int i, j = 0, cnt;
+ bool ack_error = false;
+ u32 read_cnt;
+ u8 reg[16];
+ int repeated_bytes = 0;
+ int buf_offset = buf - msm_host->rx_buf;
+
+ lp = (u32 *)buf;
+ temp = (u32 *)reg;
+ cnt = (rx_byte + 3) >> 2;
+ if (cnt > 4)
+		cnt = 4; /* 4 x 32-bit registers only */
+
+ /* Calculate real read data count */
+ read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
+
+ ack_error = (rx_byte == 4) ?
+ (read_cnt == 8) : /* short pkt + 4-byte error pkt */
+ (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
+
+ if (ack_error)
+ read_cnt -= 4; /* Remove 4 byte error pkt */
+
+	/*
+	 * In case of multiple reads from the panel, after the first read there
+	 * is a possibility that some payload bytes repeat in the RDBK_DATA
+	 * registers, since every pass reads all the parameters from the panel
+	 * starting at the first byte. We need to skip the repeated bytes and
+	 * then append only the new parameters to the rx buffer.
+	 */
+ if (read_cnt > 16) {
+ int bytes_shifted;
+ /* Any data more than 16 bytes will be shifted out.
+ * The temp read buffer should already contain these bytes.
+ * The remaining bytes in read buffer are the repeated bytes.
+ */
+ bytes_shifted = read_cnt - 16;
+ repeated_bytes = buf_offset - bytes_shifted;
+ }
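+	/* Numeric sketch (hypothetical values): with read_cnt = 22,
+	 * bytes_shifted = 22 - 16 = 6; if this pass's buf sits 8 bytes into
+	 * rx_buf (buf_offset = 8), then repeated_bytes = 8 - 6 = 2 and the
+	 * copy loop below starts at reg[2].
+	 */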
+
+ for (i = cnt - 1; i >= 0; i--) {
+ data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+ *temp++ = ntohl(data); /* to host byte order */
+ DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+ }
+
+ for (i = repeated_bytes; i < 16; i++)
+ buf[j++] = reg[i];
+
+ return j;
+}
+
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+ const struct mipi_dsi_msg *msg)
+{
+ int len, ret;
+ int bllp_len = msm_host->mode->hdisplay *
+ dsi_get_bpp(msm_host->format) / 8;
+
+ len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
+	if (len <= 0) {
+ pr_err("%s: failed to add cmd type = 0x%x\n",
+ __func__, msg->type);
+ return -EINVAL;
+ }
+
+	/* for video mode, do not send cmds longer than
+	 * one pixel line, since they are only transmitted
+	 * during BLLP.
+	 */
+	/* TODO: if the command is sent in LP mode, the bit rate is only
+	 * half of the esc clk rate. In that case, if video is already
+	 * actively streaming, we need to check more carefully whether the
+	 * command can fit into one BLLP.
+	 */
+ if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
+ pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
+ __func__, len);
+ return -EINVAL;
+ }
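+	/* For scale (illustrative): at 1080p RGB888 one pixel line is
+	 * bllp_len = 1920 * 24 / 8 = 5760 bytes, so commands up to that
+	 * size may be sent while video is streaming.
+	 */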
+
+ ret = dsi_cmd_dma_tx(msm_host, len);
+ if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
+ return -ECOMM;
+ }
+
+ return len;
+}
+
+static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
+{
+ u32 data0, data1;
+
+ data0 = dsi_read(msm_host, REG_DSI_CTRL);
+ data1 = data0;
+ data1 &= ~DSI_CTRL_ENABLE;
+ dsi_write(msm_host, REG_DSI_CTRL, data1);
+	/*
+	 * the dsi controller needs to be disabled before
+	 * the clocks are turned on
+	 */
+ wmb();
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+ wmb(); /* make sure clocks enabled */
+
+ /* dsi controller can only be reset while clocks are running */
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+ wmb(); /* make sure reset happen */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb(); /* controller out of reset */
+ dsi_write(msm_host, REG_DSI_CTRL, data0);
+ wmb(); /* make sure dsi controller enabled again */
+}
+
+static void dsi_err_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, err_work);
+ u32 status = msm_host->err_work_state;
+
+ pr_err("%s: status=%x\n", __func__, status);
+ if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
+ dsi_sw_reset_restore(msm_host);
+
+ /* It is safe to clear here because error irq is disabled. */
+ msm_host->err_work_state = 0;
+
+ /* enable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+}
+
+static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
+ /* Writing of an extra 0 needed to clear error bits */
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
+ msm_host->err_work_state |= DSI_ERR_STATE_ACK;
+ }
+}
+
+static void dsi_timeout_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
+ }
+}
+
+static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
+ }
+}
+
+static void dsi_fifo_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
+
+ /* fifo underflow, overflow */
+ if (status) {
+ dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
+ if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
+ }
+}
+
+static void dsi_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
+ dsi_write(msm_host, REG_DSI_STATUS0, status);
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
+ }
+}
+
+static void dsi_clk_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
+
+ if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
+ dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
+ }
+}
+
+static void dsi_error(struct msm_dsi_host *msm_host)
+{
+ /* disable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
+
+ dsi_clk_status(msm_host);
+ dsi_fifo_status(msm_host);
+ dsi_ack_err_status(msm_host);
+ dsi_timeout_status(msm_host);
+ dsi_status(msm_host);
+ dsi_dln0_phy_err(msm_host);
+
+ queue_work(msm_host->workqueue, &msm_host->err_work);
+}
+
+static irqreturn_t dsi_host_irq(int irq, void *ptr)
+{
+ struct msm_dsi_host *msm_host = ptr;
+ u32 isr;
+ unsigned long flags;
+
+ if (!msm_host->ctrl_base)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+
+ DBG("isr=0x%x, id=%d", isr, msm_host->id);
+
+ if (isr & DSI_IRQ_ERROR)
+ dsi_error(msm_host);
+
+ if (isr & DSI_IRQ_VIDEO_DONE)
+ complete(&msm_host->video_comp);
+
+ if (isr & DSI_IRQ_CMD_DMA_DONE)
+ complete(&msm_host->dma_comp);
+
+ return IRQ_HANDLED;
+}
+
+static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
+ struct device *panel_device)
+{
+ int ret;
+
+ msm_host->disp_en_gpio = devm_gpiod_get(panel_device,
+ "disp-enable");
+ if (IS_ERR(msm_host->disp_en_gpio)) {
+ DBG("cannot get disp-enable-gpios %ld",
+ PTR_ERR(msm_host->disp_en_gpio));
+ msm_host->disp_en_gpio = NULL;
+ }
+ if (msm_host->disp_en_gpio) {
+ ret = gpiod_direction_output(msm_host->disp_en_gpio, 0);
+ if (ret) {
+ pr_err("cannot set dir to disp-en-gpios %d\n", ret);
+ return ret;
+ }
+ }
+
+ msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te");
+ if (IS_ERR(msm_host->te_gpio)) {
+ DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
+ msm_host->te_gpio = NULL;
+ }
+
+ if (msm_host->te_gpio) {
+ ret = gpiod_direction_input(msm_host->te_gpio);
+ if (ret) {
+ pr_err("%s: cannot set dir to disp-te-gpios, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ msm_host->channel = dsi->channel;
+ msm_host->lanes = dsi->lanes;
+ msm_host->format = dsi->format;
+ msm_host->mode_flags = dsi->mode_flags;
+
+ msm_host->panel_node = dsi->dev.of_node;
+
+ /* Some gpios defined in panel DT need to be controlled by host */
+ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
+ if (ret)
+ return ret;
+
+ DBG("id=%d", msm_host->id);
+ if (msm_host->dev)
+ drm_helper_hpd_irq_event(msm_host->dev);
+
+ return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ msm_host->panel_node = NULL;
+
+ DBG("id=%d", msm_host->id);
+ if (msm_host->dev)
+ drm_helper_hpd_irq_event(msm_host->dev);
+
+ return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ if (!msg || !msm_host->power_on)
+ return -EINVAL;
+
+ mutex_lock(&msm_host->cmd_mutex);
+ ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
+ mutex_unlock(&msm_host->cmd_mutex);
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+ .transfer = dsi_host_transfer,
+};
+
+int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_host *msm_host = NULL;
+ struct platform_device *pdev = msm_dsi->pdev;
+ int ret;
+
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+ if (!msm_host) {
+ pr_err("%s: FAILED: cannot alloc dsi host\n",
+ __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,dsi-host-index", &msm_host->id);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: host index not specified, ret=%d\n",
+ __func__, ret);
+ goto fail;
+ }
+ msm_host->pdev = pdev;
+
+ ret = dsi_clk_init(msm_host);
+ if (ret) {
+ pr_err("%s: unable to initialize dsi clks\n", __func__);
+ goto fail;
+ }
+
+ msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
+ if (IS_ERR(msm_host->ctrl_base)) {
+ pr_err("%s: unable to map Dsi ctrl base\n", __func__);
+ ret = PTR_ERR(msm_host->ctrl_base);
+ goto fail;
+ }
+
+ msm_host->cfg = dsi_get_config(msm_host);
+ if (!msm_host->cfg) {
+ ret = -EINVAL;
+ pr_err("%s: get config failed\n", __func__);
+ goto fail;
+ }
+
+ ret = dsi_regulator_init(msm_host);
+ if (ret) {
+ pr_err("%s: regulator init failed\n", __func__);
+ goto fail;
+ }
+
+ msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
+	if (!msm_host->rx_buf) {
+		ret = -ENOMEM;
+		pr_err("%s: alloc rx temp buf failed\n", __func__);
+		goto fail;
+	}
+
+ init_completion(&msm_host->dma_comp);
+ init_completion(&msm_host->video_comp);
+ mutex_init(&msm_host->dev_mutex);
+ mutex_init(&msm_host->cmd_mutex);
+ mutex_init(&msm_host->clk_mutex);
+ spin_lock_init(&msm_host->intr_lock);
+
+ /* setup workqueue */
+ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+ INIT_WORK(&msm_host->err_work, dsi_err_worker);
+
+ msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
+ msm_host->id);
+ if (!msm_dsi->phy) {
+ ret = -EINVAL;
+ pr_err("%s: phy init failed\n", __func__);
+ goto fail;
+ }
+ msm_dsi->host = &msm_host->base;
+ msm_dsi->id = msm_host->id;
+
+ DBG("Dsi Host %d initialized", msm_host->id);
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+ dsi_tx_buf_free(msm_host);
+ if (msm_host->workqueue) {
+ flush_workqueue(msm_host->workqueue);
+ destroy_workqueue(msm_host->workqueue);
+ msm_host->workqueue = NULL;
+ }
+
+ mutex_destroy(&msm_host->clk_mutex);
+ mutex_destroy(&msm_host->cmd_mutex);
+ mutex_destroy(&msm_host->dev_mutex);
+}
+
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct platform_device *pdev = msm_host->pdev;
+ int ret;
+
+ msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!msm_host->irq) {
+		dev_err(dev->dev, "failed to get irq\n");
+		return -EINVAL;
+	}
+
+ ret = devm_request_irq(&pdev->dev, msm_host->irq,
+ dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "dsi_isr", msm_host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ msm_host->irq, ret);
+ return ret;
+ }
+
+ msm_host->dev = dev;
+ ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ if (ret) {
+ pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct device_node *node;
+ int ret;
+
+ /* Register mipi dsi host */
+ if (!msm_host->registered) {
+ host->dev = &msm_host->pdev->dev;
+ host->ops = &dsi_host_ops;
+ ret = mipi_dsi_host_register(host);
+ if (ret)
+ return ret;
+
+ msm_host->registered = true;
+
+		/* If the panel driver has not been probed after host
+		 * register, we should defer the host's probe.
+		 * This makes sure the panel is connected when fbcon detects
+		 * the connector status and gets the proper display mode to
+		 * create the framebuffer.
+		 */
+ if (check_defer) {
+ node = of_get_child_by_name(msm_host->pdev->dev.of_node,
+ "panel");
+ if (node) {
+ if (!of_drm_find_panel(node))
+ return -EPROBE_DEFER;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void msm_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->registered) {
+ mipi_dsi_host_unregister(host);
+ host->dev = NULL;
+ host->ops = NULL;
+ msm_host->registered = false;
+ }
+}
+
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ /* TODO: make sure dsi_cmd_mdp is idle.
+ * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
+ * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
+ * How to handle the old versions? Wait for mdp cmd done?
+ */
+
+ /*
+ * mdss interrupt is generated in mdp core clock domain
+ * mdp clock need to be enabled to receive dsi interrupt
+ */
+ dsi_clk_ctrl(msm_host, 1);
+
+ /* TODO: vote for bus bandwidth */
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(0, msm_host);
+
+ msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
+ dsi_write(msm_host, REG_DSI_CTRL,
+ msm_host->dma_cmd_ctrl_restore |
+ DSI_CTRL_CMD_MODE_EN |
+ DSI_CTRL_ENABLE);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
+
+ return 0;
+}
+
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
+ dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(1, msm_host);
+
+ /* TODO: unvote for bus bandwidth */
+
+ dsi_clk_ctrl(msm_host, 0);
+}
+
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return dsi_cmds2buf_tx(msm_host, msg);
+}
+
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int data_byte, rx_byte, dlen, end;
+ int short_response, diff, pkt_size, ret = 0;
+ char cmd;
+ int rlen = msg->rx_len;
+ u8 *buf;
+
+ if (rlen <= 2) {
+ short_response = 1;
+ pkt_size = rlen;
+ rx_byte = 4;
+ } else {
+ short_response = 0;
+ data_byte = 10; /* first read */
+ if (rlen < data_byte)
+ pkt_size = rlen;
+ else
+ pkt_size = data_byte;
+ rx_byte = data_byte + 6; /* 4 header + 2 crc */
+ }
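+	/* Example flow (illustrative): for rlen = 20 the loop below takes
+	 * two passes. The first sets a max return size of 10 and reads
+	 * rx_byte = 16 bytes (10 payload + 4 header + 2 crc, the fifo
+	 * depth); the second grows pkt_size to 20 and re-reads. The stale
+	 * tail of the second read (diff = 14 - 10 = 4 bytes) is never
+	 * copied out, since dsi_long_read_resp() copies only msg->rx_len
+	 * bytes.
+	 */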
+
+ buf = msm_host->rx_buf;
+ end = 0;
+ while (!end) {
+ u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
+ struct mipi_dsi_msg max_pkt_size_msg = {
+ .channel = msg->channel,
+ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+ .tx_len = 2,
+ .tx_buf = tx,
+ };
+
+ DBG("rlen=%d pkt_size=%d rx_byte=%d",
+ rlen, pkt_size, rx_byte);
+
+ ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
+ if (ret < 2) {
+ pr_err("%s: Set max pkt size failed, %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+ (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+ /* Clear the RDBK_DATA registers */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
+ DSI_RDBK_DATA_CTRL_CLR);
+ wmb(); /* make sure the RDBK registers are cleared */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+ wmb(); /* release cleared status before transfer */
+ }
+
+ ret = dsi_cmds2buf_tx(msm_host, msg);
+ if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+		/*
+		 * Once the cmd_dma_done interrupt is received, the return
+		 * data from the client is ready and already stored in the
+		 * RDBK_DATA registers.
+		 * Since the rx fifo is 16 bytes, the dcs header is kept
+		 * only on the first loop; after that the header is lost
+		 * as data shifts through the registers.
+		 */
+ dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+ if (dlen <= 0)
+ return 0;
+
+ if (short_response)
+ break;
+
+ if (rlen <= data_byte) {
+ diff = data_byte - rlen;
+ end = 1;
+ } else {
+ diff = 0;
+ rlen -= data_byte;
+ }
+
+ if (!end) {
+ dlen -= 2; /* 2 crc */
+ dlen -= diff;
+ buf += dlen; /* next start position */
+ data_byte = 14; /* NOT first read */
+ if (rlen < data_byte)
+ pkt_size += rlen;
+ else
+ pkt_size += data_byte;
+ DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+ }
+ }
+
+	/*
+	 * For a single long read, if the requested rlen < 10,
+	 * we need to shift the start position of the rx
+	 * data buffer to skip the bytes which are not
+	 * updated.
+	 */
+ if (pkt_size < 10 && !short_response)
+ buf = msm_host->rx_buf + (10 - rlen);
+ else
+ buf = msm_host->rx_buf;
+
+ cmd = buf[0];
+ switch (cmd) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
+		ret = 0;
+		break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ ret = dsi_short_read1_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ ret = dsi_short_read2_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ ret = dsi_long_read_resp(buf, msg);
+ break;
+ default:
+		pr_warn("%s: Invalid response cmd\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+ dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+ /* Make sure trigger happens */
+ wmb();
+}
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+ /* TODO: clock should be turned off for command mode,
+ * and only turned on before MDP START.
+ * This part of code should be enabled once mdp driver support it.
+ */
+ /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
+ dsi_clk_ctrl(msm_host, 0); */
+
+ return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+	/* Since INTF is disabled, the video engine won't stop on its own,
+	 * which leaves the cmd engine blocked.
+	 * Reset the controller to disable the video engine so that we can
+	 * still send off cmds.
+	 */
+ dsi_sw_reset(msm_host);
+
+ return 0;
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ u32 clk_pre = 0, clk_post = 0;
+ int ret = 0;
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (msm_host->power_on) {
+ DBG("dsi host already on");
+ goto unlock_ret;
+ }
+
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ goto unlock_ret;
+ }
+
+ ret = dsi_host_regulator_enable(msm_host);
+ if (ret) {
+ pr_err("%s:Failed to enable vregs.ret=%d\n",
+ __func__, ret);
+ goto unlock_ret;
+ }
+
+ ret = dsi_bus_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ dsi_phy_sw_reset(msm_host);
+ ret = msm_dsi_manager_phy_enable(msm_host->id,
+ msm_host->byte_clk_rate * 8,
+ clk_get_rate(msm_host->esc_clk),
+ &clk_pre, &clk_post);
+ dsi_bus_clk_disable(msm_host);
+ if (ret) {
+ pr_err("%s: failed to enable phy, %d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ ret = dsi_clk_ctrl(msm_host, 1);
+ if (ret) {
+ pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ dsi_timing_setup(msm_host);
+ dsi_sw_reset(msm_host);
+ dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 1);
+
+ msm_host->power_on = true;
+ mutex_unlock(&msm_host->dev_mutex);
+
+ return 0;
+
+fail_disable_reg:
+ dsi_host_regulator_disable(msm_host);
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (!msm_host->power_on) {
+ DBG("dsi host already off");
+ goto unlock_ret;
+ }
+
+ dsi_ctrl_config(msm_host, false, 0, 0);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 0);
+
+ msm_dsi_manager_phy_disable(msm_host->id);
+
+ dsi_clk_ctrl(msm_host, 0);
+
+ dsi_host_regulator_disable(msm_host);
+
+ DBG("-");
+
+ msm_host->power_on = false;
+
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ struct drm_display_mode *mode)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->mode) {
+ drm_mode_destroy(msm_host->dev, msm_host->mode);
+ msm_host->mode = NULL;
+ }
+
+ msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+	if (!msm_host->mode) {
+		pr_err("%s: cannot duplicate mode\n", __func__);
+		return -ENOMEM;
+	}
+
+ return 0;
+}
+
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+ unsigned long *panel_flags)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_panel *panel;
+
+ panel = of_drm_find_panel(msm_host->panel_node);
+ if (panel_flags)
+ *panel_flags = msm_host->mode_flags;
+
+ return panel;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 000000000000..ee3ebcaa33f5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_kms.h"
+#include "dsi.h"
+
+struct msm_dsi_manager {
+ struct msm_dsi *dsi[DSI_MAX];
+
+ bool is_dual_panel;
+ bool is_sync_needed;
+ int master_panel_id;
+};
+
+static struct msm_dsi_manager msm_dsim_glb;
+
+#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel)
+#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
+#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id)
+
+static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
+{
+ return msm_dsim_glb.dsi[id];
+}
+
+static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
+{
+ return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
+}
+
+static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+	/* We assume that both dsi nodes carry the same dual-panel and
+	 * sync-mode information, and that only one node specifies the
+	 * master in dual-panel mode.
+	 */
+ if (!msm_dsim->is_dual_panel)
+ msm_dsim->is_dual_panel = of_property_read_bool(
+ np, "qcom,dual-panel-mode");
+
+ if (msm_dsim->is_dual_panel) {
+ if (of_property_read_bool(np, "qcom,master-panel"))
+ msm_dsim->master_panel_id = id;
+ if (!msm_dsim->is_sync_needed)
+ msm_dsim->is_sync_needed = of_property_read_bool(
+ np, "qcom,sync-dual-panel");
+ }
+
+ return 0;
+}
+
+struct dsi_connector {
+ struct drm_connector base;
+ int id;
+};
+
+struct dsi_bridge {
+ struct drm_bridge base;
+ int id;
+};
+
+#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
+#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
+
+static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
+{
+ struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+ return dsi_connector->id;
+}
+
+static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
+{
+ struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
+ return dsi_bridge->id;
+}
+
+static enum drm_connector_status dsi_mgr_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ DBG("id=%d", id);
+ if (!msm_dsi->panel) {
+ msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
+ &msm_dsi->panel_flags);
+
+		/* In dual panel mode there is only one panel in the
+		 * global panel list. The slave DSI should therefore get
+		 * the drm_panel instance from the master DSI, while
+		 * keeping the panel flags obtained from its own DSI link.
+		 */
+ if (!msm_dsi->panel && IS_DUAL_PANEL() &&
+ !IS_MASTER_PANEL(id) && other_dsi)
+ msm_dsi->panel = msm_dsi_host_get_panel(
+ other_dsi->host, NULL);
+
+ if (msm_dsi->panel && IS_DUAL_PANEL())
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.tile_property, 0);
+
+		/* Pass the split display info to kms once the dual panel
+		 * is connected to both hosts.
+		 */
+ if (msm_dsi->panel && IS_DUAL_PANEL() &&
+ other_dsi && other_dsi->panel) {
+ bool cmd_mode = !(msm_dsi->panel_flags &
+ MIPI_DSI_MODE_VIDEO);
+ struct drm_encoder *encoder = msm_dsi_get_encoder(
+ dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
+ struct drm_encoder *slave_enc = msm_dsi_get_encoder(
+ dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));
+
+ if (kms->funcs->set_split_display)
+ kms->funcs->set_split_display(kms, encoder,
+ slave_enc, cmd_mode);
+ else
+ pr_err("mdp does not support dual panel\n");
+ }
+ }
+
+ return msm_dsi->panel ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void dsi_mgr_connector_destroy(struct drm_connector *connector)
+{
+ DBG("");
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode, *m;
+
+ /* Only support left-right mode */
+ list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
+ mode->clock >>= 1;
+ mode->hdisplay >>= 1;
+ mode->hsync_start >>= 1;
+ mode->hsync_end >>= 1;
+ mode->htotal >>= 1;
+ drm_mode_set_name(mode);
+ }
+}
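As a worked illustration with hypothetical numbers: a dual-panel source
mode of 2560x1600@60 with clock = 268500 kHz and htotal = 2720 is
reported by each connector as clock = 134250, hdisplay = 1280 and
htotal = 1360, with all vertical timings left untouched; userspace then
reassembles the full width from the two tiles.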
+
+static int dsi_dual_connector_tile_init(
+ struct drm_connector *connector, int id)
+{
+ struct drm_display_mode *mode;
+ /* Fake topology id */
+ char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
+
+ if (connector->tile_group) {
+ DBG("Tile property has been initialized");
+ return 0;
+ }
+
+ /* Use the first mode only for now */
+	mode = list_first_entry_or_null(&connector->probed_modes,
+					struct drm_display_mode,
+					head);
+ if (!mode)
+ return -EINVAL;
+
+ connector->tile_group = drm_mode_get_tile_group(
+ connector->dev, topo_id);
+ if (!connector->tile_group)
+ connector->tile_group = drm_mode_create_tile_group(
+ connector->dev, topo_id);
+ if (!connector->tile_group) {
+ pr_err("%s: failed to create tile group\n", __func__);
+ return -ENOMEM;
+ }
+
+ connector->has_tile = true;
+ connector->tile_is_single_monitor = true;
+
+ /* mode has been fixed */
+ connector->tile_h_size = mode->hdisplay;
+ connector->tile_v_size = mode->vdisplay;
+
+ /* Only support left-right mode */
+ connector->num_h_tile = 2;
+ connector->num_v_tile = 1;
+
+ connector->tile_v_loc = 0;
+ connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
+
+ return 0;
+}
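Continuing the hypothetical 2560x1600 example: both connectors report
num_h_tile = 2, num_v_tile = 1 and tile_h_size = 1280 (the already
halved hdisplay); only tile_h_loc differs, 0 for the left connector and
1 for the DSI_RIGHT one, which is what lets userspace place the two
tiles side by side as a single monitor.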
+
+static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ int ret, num;
+
+ if (!panel)
+ return 0;
+
+	/* In dual DSI mode there are two connectors but only one drm_panel,
+	 * so the panel must not stay attached to any connector.
+	 * Attach the panel to the current connector only temporarily here,
+	 * so that the panel can report its modes to this connector.
+	 */
+ drm_panel_attach(panel, connector);
+ num = drm_panel_get_modes(panel);
+ drm_panel_detach(panel);
+ if (!num)
+ return 0;
+
+ if (IS_DUAL_PANEL()) {
+ /* report half resolution to user */
+ dsi_dual_connector_fix_modes(connector);
+ ret = dsi_dual_connector_tile_init(connector, id);
+ if (ret)
+ return ret;
+ ret = drm_mode_connector_set_tile_property(connector);
+ if (ret) {
+ pr_err("%s: set tile property failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return num;
+}
+
+static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ DBG("");
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms, requested, encoder);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+dsi_mgr_connector_best_encoder(struct drm_connector *connector)
+{
+ int id = dsi_mgr_connector_get_id(connector);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+ DBG("");
+ return msm_dsi_get_encoder(msm_dsi);
+}
+
+static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_panel = IS_DUAL_PANEL();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!panel || (is_dual_panel && (DSI_1 == id)))
+ return;
+
+ ret = msm_dsi_host_power_on(host);
+ if (ret) {
+ pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
+ goto host_on_fail;
+ }
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_power_on(msm_dsi1->host);
+ if (ret) {
+ pr_err("%s: power on host1 failed, %d\n",
+ __func__, ret);
+ goto host1_on_fail;
+ }
+ }
+
+	/* Call the panel functions only once, because even for dual panels
+	 * there is only one drm_panel instance.
+	 */
+ ret = drm_panel_prepare(panel);
+ if (ret) {
+ pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
+ goto panel_prep_fail;
+ }
+
+ ret = msm_dsi_host_enable(host);
+ if (ret) {
+ pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
+ goto host_en_fail;
+ }
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_enable(msm_dsi1->host);
+ if (ret) {
+ pr_err("%s: enable host1 failed, %d\n", __func__, ret);
+ goto host1_en_fail;
+ }
+ }
+
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
+ goto panel_en_fail;
+ }
+
+ return;
+
+panel_en_fail:
+ if (is_dual_panel && msm_dsi1)
+ msm_dsi_host_disable(msm_dsi1->host);
+host1_en_fail:
+ msm_dsi_host_disable(host);
+host_en_fail:
+ drm_panel_unprepare(panel);
+panel_prep_fail:
+ if (is_dual_panel && msm_dsi1)
+ msm_dsi_host_power_off(msm_dsi1->host);
+host1_on_fail:
+ msm_dsi_host_power_off(host);
+host_on_fail:
+ return;
+}
+
+static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_panel = IS_DUAL_PANEL();
+ int ret;
+
+ DBG("id=%d", id);
+
+ if (!panel || (is_dual_panel && (DSI_1 == id)))
+ return;
+
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+
+ ret = msm_dsi_host_disable(host);
+ if (ret)
+ pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_disable(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 disable failed, %d\n", __func__, ret);
+ }
+
+ ret = drm_panel_unprepare(panel);
+ if (ret)
+ pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+
+ ret = msm_dsi_host_power_off(host);
+ if (ret)
+ pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+
+ if (is_dual_panel && msm_dsi1) {
+ ret = msm_dsi_host_power_off(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 power off failed, %d\n",
+ __func__, ret);
+ }
+}
+
+static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_dual_panel = IS_DUAL_PANEL();
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ if (is_dual_panel && (DSI_1 == id))
+ return;
+
+ msm_dsi_host_set_display_mode(host, adjusted_mode);
+ if (is_dual_panel && other_dsi)
+ msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
+}
+
+static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = dsi_mgr_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = dsi_mgr_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
+ .get_modes = dsi_mgr_connector_get_modes,
+ .mode_valid = dsi_mgr_connector_mode_valid,
+ .best_encoder = dsi_mgr_connector_best_encoder,
+};
+
+static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+ .pre_enable = dsi_mgr_bridge_pre_enable,
+ .enable = dsi_mgr_bridge_enable,
+ .disable = dsi_mgr_bridge_disable,
+ .post_disable = dsi_mgr_bridge_post_disable,
+ .mode_set = dsi_mgr_bridge_mode_set,
+};
+
+/* initialize connector */
+struct drm_connector *msm_dsi_manager_connector_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_connector *connector = NULL;
+ struct dsi_connector *dsi_connector;
+ int ret;
+
+ dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
+ sizeof(*dsi_connector), GFP_KERNEL);
+ if (!dsi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dsi_connector->id = id;
+
+ connector = &dsi_connector->base;
+
+ ret = drm_connector_init(msm_dsi->dev, connector,
+ &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (ret)
+ goto fail;
+
+ drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
+
+	/* Enable HPD so that the hpd event can be handled
+	 * when the panel is attached to the host.
+	 */
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	/* The display driver does not support interlaced modes yet. */
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ ret = drm_connector_register(connector);
+ if (ret)
+ goto fail;
+
+ return connector;
+
+fail:
+ if (connector)
+ dsi_mgr_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
+
+/* initialize bridge */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_bridge *bridge = NULL;
+ struct dsi_bridge *dsi_bridge;
+ int ret;
+
+ dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
+ sizeof(*dsi_bridge), GFP_KERNEL);
+ if (!dsi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dsi_bridge->id = id;
+
+ bridge = &dsi_bridge->base;
+ bridge->funcs = &dsi_mgr_bridge_funcs;
+
+ ret = drm_bridge_attach(msm_dsi->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ msm_dsi_manager_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
+
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+int msm_dsi_manager_phy_enable(int id,
+ const unsigned long bit_rate, const unsigned long esc_rate,
+ u32 *clk_pre, u32 *clk_post)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi_phy *phy = msm_dsi->phy;
+ int ret;
+
+ ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+ if (ret)
+ return ret;
+
+ msm_dsi->phy_enabled = true;
+ msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
+
+ return 0;
+}
+
+void msm_dsi_manager_phy_disable(int id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+ struct msm_dsi_phy *phy = msm_dsi->phy;
+
+	/* Disable the DSI PHY.
+	 * In a dual-DSI configuration, the PHY of the first controller
+	 * may only be disabled once the second controller is disabled too.
+	 */
+ msm_dsi->phy_enabled = false;
+ if (IS_DUAL_PANEL() && mdsi && sdsi) {
+ if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+ msm_dsi_phy_disable(sdsi->phy);
+ msm_dsi_phy_disable(mdsi->phy);
+ }
+ } else {
+ msm_dsi_phy_disable(phy);
+ }
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_read = (msg->rx_buf && msg->rx_len);
+ bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+ int ret;
+
+ if (!msg->tx_buf || !msg->tx_len)
+ return 0;
+
+	/* In the dual master case, the panel requires the same commands to
+	 * be sent to both DSI links. The host issues the command trigger to
+	 * both links when DSI_1 calls the cmd transfer function, regardless
+	 * of whether that happens before or after the DSI_0 cmd transfer.
+	 */
+ if (need_sync && (id == DSI_0))
+ return is_read ? msg->rx_len : msg->tx_len;
+
+ if (need_sync && msm_dsi0) {
+ ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare non-trigger host, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ ret = msm_dsi_host_xfer_prepare(host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+ goto restore_host0;
+ }
+
+ ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
+ msm_dsi_host_cmd_tx(host, msg);
+
+ msm_dsi_host_xfer_restore(host, msg);
+
+restore_host0:
+ if (need_sync && msm_dsi0)
+ msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
+
+ return ret;
+}
+
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+
+ if (IS_SYNC_NEEDED() && (id == DSI_0))
+ return false;
+
+ if (IS_SYNC_NEEDED() && msm_dsi0)
+ msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
+
+ msm_dsi_host_cmd_xfer_commit(host, iova, len);
+
+ return true;
+}
+
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+ int id = msm_dsi->id;
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ int ret;
+
+	if (id >= DSI_MAX) {
+ pr_err("%s: invalid id %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (msm_dsim->dsi[id]) {
+ pr_err("%s: dsi%d already registered\n", __func__, id);
+ return -EBUSY;
+ }
+
+ msm_dsim->dsi[id] = msm_dsi;
+
+ ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+ if (ret) {
+ pr_err("%s: failed to parse dual panel info\n", __func__);
+ return ret;
+ }
+
+ if (!IS_DUAL_PANEL()) {
+ ret = msm_dsi_host_register(msm_dsi->host, true);
+ } else if (!other_dsi) {
+ return 0;
+ } else {
+ struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+ msm_dsi : other_dsi;
+ struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+ other_dsi : msm_dsi;
+		/* Register the slave host first, so that the slave DSI
+		 * device has a chance to probe without blocking the
+		 * master DSI device's probe.
+		 * Also, skip the probe-deferral check for the slave host,
+		 * because only the master DSI device adds the panel to the
+		 * global panel list; the panel's device is the master DSI device.
+		 */
+ ret = msm_dsi_host_register(sdsi->host, false);
+ if (ret)
+ return ret;
+ ret = msm_dsi_host_register(mdsi->host, true);
+ }
+
+ return ret;
+}
+
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+ if (msm_dsi->host)
+ msm_dsi_host_unregister(msm_dsi->host);
+ msm_dsim->dsi[msm_dsi->id] = NULL;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
new file mode 100644
index 000000000000..f0cea8927388
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct dsi_dphy_timing {
+ u32 clk_pre;
+ u32 clk_post;
+ u32 clk_zero;
+ u32 clk_trail;
+ u32 clk_prepare;
+ u32 hs_exit;
+ u32 hs_zero;
+ u32 hs_prepare;
+ u32 hs_trail;
+ u32 hs_rqst;
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+};
+
+struct msm_dsi_phy {
+ void __iomem *base;
+ void __iomem *reg_base;
+ int id;
+ struct dsi_dphy_timing timing;
+ int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate);
+ int (*disable)(struct msm_dsi_phy *phy);
+};
+
+#define S_DIV_ROUND_UP(n, d) \
+ (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
+
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+ s32 min_result, bool even)
+{
+ s32 v;
+ v = (tmax - tmin) * percent;
+ v = S_DIV_ROUND_UP(v, 100) + tmin;
+ if (even && (v & 0x1))
+ return max_t(s32, min_result, v - 1);
+ else
+ return max_t(s32, min_result, v);
+}
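A quick worked example of the two helpers above: S_DIV_ROUND_UP rounds
away from zero, so S_DIV_ROUND_UP(7, 2) = 4 and S_DIV_ROUND_UP(-7, 2) = -4.
For linear_inter(tmax = 255, tmin = 60, percent = 10, min_result = 0,
even = true): v = (255 - 60) * 10 = 1950, then S_DIV_ROUND_UP(1950, 100)
+ 60 = 20 + 60 = 80, and 80 is already even, so 80 is returned. In other
words, linear_inter picks the value 10% of the way from tmin to tmax,
rounded up, clamped below by min_result and optionally forced even.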
+
+static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
+ s32 ui, s32 coeff, s32 pcnt)
+{
+ s32 tmax, tmin, clk_z;
+ s32 temp;
+
+	/* initial value: the spec requires TCLK-PREPARE + TCLK-ZERO >= 300 ns */
+ temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ if (tmin > 255) {
+ tmax = 511;
+ clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+ } else {
+ tmax = 255;
+ clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+ }
+
+	/* adjust: round hs_rqst + clk_prepare + clk_zero up to the next multiple of 8 */
+ temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+ timing->clk_zero = clk_z + 8 - temp;
+}
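For instance, if hs_rqst + clk_prepare + the interpolated clk_z comes
out congruent to 5 modulo 8, then temp = 5 and clk_zero = clk_z + 3,
bringing the total to the next multiple of 8.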
+
+static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ s32 ui, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 10;
+ s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+	s32 coeff = 1000; /* fixed-point scale factor; small enough to avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+ tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+ tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+ temp = lpx / ui;
+ if (temp & 0x1)
+ timing->hs_rqst = temp;
+ else
+ timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+ /* Calculate clk_zero after clk_prepare and hs_rqst */
+ dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ temp = 85 * coeff + 6 * ui;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 40 * coeff + 4 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+ tmax = 255;
+ temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+ temp = 145 * coeff + 10 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 60 * coeff + 4 * ui;
+ tmin = DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ tmax = 255;
+ tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+ tmax = 63;
+ temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+ temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ tmax = 63;
+ temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+ temp += 8 * ui + lpx;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ if (tmin > tmax) {
+		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
+		timing->clk_pre = temp >> 1;
+ } else {
+ timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->clk_pre, timing->clk_post, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst);
+
+ return 0;
+}
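To make the fixed-point units concrete, with hypothetical rates of
bit_rate = 800 MHz and esc_rate = 19.2 MHz: ui = mult_frac(1000000,
1000, 800000) = 1250, i.e. a 1.25 ns unit interval scaled by coeff =
1000, and lpx = 52083, i.e. roughly 52.08 ns. The clk_prepare window is
then tmax = S_DIV_ROUND_UP(95 * 1000, 1250) - 2 = 74 and tmin =
S_DIV_ROUND_UP(38 * 1000, 1250) - 2 = 29, and linear_inter(74, 29, 10,
0, true) yields clk_prepare = 34.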
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ pr_err("%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+ dsi_28nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ }
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
+ else
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
+
+ return 0;
+}
+
+static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+ dsi_28nm_phy_regulator_ctrl(phy, false);
+
+ /*
+ * Wait for the registers writes to complete in order to
+ * ensure that the phy is completely disabled
+ */
+ wmb();
+
+ return 0;
+}
+
+#define dsi_phy_func_init(name) \
+ do { \
+ phy->enable = dsi_##name##_phy_enable; \
+ phy->disable = dsi_##name##_phy_disable; \
+ } while (0)
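For reference, dsi_phy_func_init(28nm) simply pastes the argument into
the handler names, expanding to:

	phy->enable = dsi_28nm_phy_enable;
	phy->disable = dsi_28nm_phy_disable;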
+
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
+{
+ struct msm_dsi_phy *phy;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(phy->base)) {
+ pr_err("%s: failed to map phy base\n", __func__);
+ return NULL;
+ }
+ phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+ if (IS_ERR_OR_NULL(phy->reg_base)) {
+ pr_err("%s: failed to map phy regulator base\n", __func__);
+ return NULL;
+ }
+
+ switch (type) {
+ case MSM_DSI_PHY_28NM:
+ dsi_phy_func_init(28nm);
+ break;
+ default:
+ pr_err("%s: unsupported type, %d\n", __func__, type);
+ return NULL;
+ }
+
+ phy->id = id;
+
+ return phy;
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ if (!phy || !phy->enable)
+ return -EINVAL;
+ return phy->enable(phy, is_dual_panel, bit_rate, esc_rate);
+}
+
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+ if (!phy || !phy->disable)
+ return -EINVAL;
+ return phy->disable(phy);
+}
+
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+ u32 *clk_pre, u32 *clk_post)
+{
+ if (!phy)
+ return;
+ if (clk_pre)
+ *clk_pre = phy->timing.clk_pre;
+ if (clk_post)
+ *clk_post = phy->timing.clk_post;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index eeed006eed13..6997ec636c6d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -53,6 +53,23 @@ struct pll_rate {
/* NOTE: keep sorted highest freq to lowest: */
static const struct pll_rate freqtbl[] = {
+ { 154000000, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
/* 1080p60/1080p50 case */
{ 148500000, {
{ 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
@@ -112,6 +129,23 @@ static const struct pll_rate freqtbl[] = {
{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
{ 0, 0 } }
},
+ { 74176000, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
{ 65000000, {
{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c276624290af..b9a4ded6e400 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,9 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 29312 bytes, from 2015-03-23 21:18:48)
- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-03-23 20:38:49)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -37,11 +37,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum mdp5_intf {
+enum mdp5_intf_type {
+ INTF_DISABLED = 0,
INTF_DSI = 1,
INTF_HDMI = 3,
INTF_LCDC = 5,
INTF_eDP = 9,
+ INTF_VIRTUAL = 100,
+ INTF_WB = 101,
};
enum mdp5_intfnum {
@@ -67,11 +70,11 @@ enum mdp5_pipe {
enum mdp5_ctl_mode {
MODE_NONE = 0,
- MODE_ROT0 = 1,
- MODE_ROT1 = 2,
- MODE_WB0 = 3,
- MODE_WB1 = 4,
- MODE_WFD = 5,
+ MODE_WB_0_BLOCK = 1,
+ MODE_WB_1_BLOCK = 2,
+ MODE_WB_0_LINE = 3,
+ MODE_WB_1_LINE = 4,
+ MODE_WB_2_LINE = 5,
};
enum mdp5_pack_3d {
@@ -94,33 +97,6 @@ enum mdp5_pipe_bwc {
BWC_Q_MED = 2,
};
-enum mdp5_client_id {
- CID_UNUSED = 0,
- CID_VIG0_Y = 1,
- CID_VIG0_CR = 2,
- CID_VIG0_CB = 3,
- CID_VIG1_Y = 4,
- CID_VIG1_CR = 5,
- CID_VIG1_CB = 6,
- CID_VIG2_Y = 7,
- CID_VIG2_CR = 8,
- CID_VIG2_CB = 9,
- CID_DMA0_Y = 10,
- CID_DMA0_CR = 11,
- CID_DMA0_CB = 12,
- CID_DMA1_Y = 13,
- CID_DMA1_CR = 14,
- CID_DMA1_CB = 15,
- CID_RGB0 = 16,
- CID_RGB1 = 17,
- CID_RGB2 = 18,
- CID_VIG3_Y = 19,
- CID_VIG3_CR = 20,
- CID_VIG3_CB = 21,
- CID_RGB3 = 22,
- CID_MAX = 23,
-};
-
enum mdp5_cursor_format {
CURSOR_FMT_ARGB8888 = 0,
CURSOR_FMT_ARGB1555 = 2,
@@ -144,30 +120,25 @@ enum mdp5_data_format {
DATA_FORMAT_YUV = 1,
};
-#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001
-#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002
-#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004
-#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008
-#define MDP5_IRQ_INTF0_WB_WFD 0x00000010
-#define MDP5_IRQ_INTF1_WB_WFD 0x00000020
-#define MDP5_IRQ_INTF2_WB_WFD 0x00000040
-#define MDP5_IRQ_INTF3_WB_WFD 0x00000080
-#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100
-#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200
-#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400
-#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800
-#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000
-#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000
-#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000
-#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000
-#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000
-#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000
-#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000
-#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000
-#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000
-#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000
-#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000
-#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000
+#define MDP5_IRQ_WB_0_DONE 0x00000001
+#define MDP5_IRQ_WB_1_DONE 0x00000002
+#define MDP5_IRQ_WB_2_DONE 0x00000010
+#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100
+#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200
+#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400
+#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800
+#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000
+#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000
+#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000
+#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000
+#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000
+#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000
+#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000
+#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000
+#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000
+#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000
+#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000
+#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000
#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
#define MDP5_IRQ_INTF0_VSYNC 0x02000000
#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
@@ -176,136 +147,186 @@ enum mdp5_data_format {
#define MDP5_IRQ_INTF2_VSYNC 0x20000000
#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
#define MDP5_IRQ_INTF3_VSYNC 0x80000000
-#define REG_MDP5_HW_VERSION 0x00000000
+#define REG_MDSS_HW_VERSION 0x00000000
+#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDSS_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
+}
+#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDSS_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
+}
+#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDSS_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDSS_HW_INTR_STATUS 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
-#define REG_MDP5_HW_INTR_STATUS 0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001
-#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020
-#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100
-#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000
+static inline uint32_t __offset_MDP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->mdp.base[0]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
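A minimal sketch of how these generated helpers compose, assuming the
mdp5_read() accessor from mdp5_kms.h and MDP instance 0:

	/* read and decode the MDP core revision of instance 0 */
	u32 v = mdp5_read(mdp5_kms, REG_MDP5_MDP_HW_VERSION(0));
	u32 major = (v & MDP5_MDP_HW_VERSION_MAJOR__MASK) >>
			MDP5_MDP_HW_VERSION_MAJOR__SHIFT;
	u32 minor = (v & MDP5_MDP_HW_VERSION_MINOR__MASK) >>
			MDP5_MDP_HW_VERSION_MINOR__SHIFT;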
-#define REG_MDP5_MDP_VERSION 0x00000100
-#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000
-#define MDP5_MDP_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
+#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
{
- return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+ return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
}
-#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_MDP_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
{
- return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+ return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
}
-#define REG_MDP5_DISP_INTF_SEL 0x00000104
-#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
}
-#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
{
- return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+ return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
}
-#define REG_MDP5_INTR_EN 0x00000110
+static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
-#define REG_MDP5_INTR_STATUS 0x00000114
+static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
-#define REG_MDP5_INTR_CLEAR 0x00000118
+static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_EN 0x0000011c
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_STATUS 0x00000120
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
-#define REG_MDP5_HIST_INTR_CLEAR 0x00000124
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
-static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
+#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
-static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
}
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
}
-static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
}
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+ return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
}
static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
{
switch (idx) {
- case IGC_VIG: return 0x00000300;
- case IGC_RGB: return 0x00000310;
- case IGC_DMA: return 0x00000320;
- case IGC_DSPP: return 0x00000400;
+ case IGC_VIG: return 0x00000200;
+ case IGC_RGB: return 0x00000210;
+ case IGC_DMA: return 0x00000220;
+ case IGC_DSPP: return 0x00000300;
default: return INVALID_IDX(idx);
}
}
-static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
-static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
-static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
-#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
-static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
{
- return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+ return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
}
-#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+#define REG_MDP5_SPLIT_DPL_EN 0x000003f4
+
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000003f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000004f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
static inline uint32_t __offset_CTL(uint32_t idx)
{
@@ -437,11 +458,19 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __o
#define MDP5_CTL_FLUSH_DSPP0 0x00002000
#define MDP5_CTL_FLUSH_DSPP1 0x00004000
#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_WB 0x00010000
#define MDP5_CTL_FLUSH_CTL 0x00020000
#define MDP5_CTL_FLUSH_VIG3 0x00040000
#define MDP5_CTL_FLUSH_RGB3 0x00080000
#define MDP5_CTL_FLUSH_LM5 0x00100000
#define MDP5_CTL_FLUSH_DSPP3 0x00200000
+#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
+#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
+#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
+#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
+#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
+#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
@@ -1117,6 +1146,94 @@ static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc
static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
+static inline uint32_t __offset_PP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->pp.base[0]);
+ case 1: return (mdp5_cfg->pp.base[1]);
+ case 2: return (mdp5_cfg->pp.base[2]);
+ case 3: return (mdp5_cfg->pp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
+#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
+}
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
+#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
+}
+#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
+#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
+
static inline uint32_t __offset_INTF(uint32_t idx)
{
switch (idx) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index b0a44310cf2a..e001e6b2296a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,23 @@ const struct mdp5_cfg_hw *mdp5_cfg = NULL;
const struct mdp5_cfg_hw msm8x74_config = {
.name = "msm8x74",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
.smp = {
.mmb_count = 22,
.mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
@@ -57,27 +67,49 @@ const struct mdp5_cfg_hw msm8x74_config = {
.count = 2,
.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
},
+ .pp = {
+ .count = 3,
+ .base = { 0x12d00, 0x12e00, 0x12f00 },
+ },
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
+ .intfs = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
.max_clk = 200000000,
};
const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
+ .mdp = {
+ .count = 1,
+ .base = { 0x00100 },
+ },
.smp = {
.mmb_count = 44,
.mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
.reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
- .reserved[CID_RGB0] = 2,
- .reserved[CID_RGB1] = 2,
- .reserved[CID_RGB2] = 2,
- .reserved[CID_RGB3] = 2,
+ .reserved = {
+ /* Two SMP blocks are statically tied to RGB pipes: */
+ [16] = 2, [17] = 2, [18] = 2, [22] = 2,
+ },
},
.ctl = {
.count = 5,
.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .flush_hw_mask = 0x003fffff,
},
.pipe_vig = {
.count = 4,
@@ -105,10 +137,69 @@ const struct mdp5_cfg_hw apq8084_config = {
.count = 3,
.base = { 0x13500, 0x13700, 0x13900 },
},
+ .pp = {
+ .count = 4,
+ .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+ },
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
+ .intfs = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ .max_clk = 320000000,
+};
+
+const struct mdp5_cfg_hw msm8x16_config = {
+ .name = "msm8x16",
+ .mdp = {
+ .count = 1,
+ .base = { 0x01000 },
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x05000 },
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x15000, 0x17000 },
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x25000 },
+ },
+ .lm = {
+ .count = 2, /* LM0 and LM3 */
+ .base = { 0x45000, 0x48000 },
+ .nb_stages = 5,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x55000 },
+	},
+ .intf = {
+ .count = 1, /* INTF_1 */
+		.base = { 0x6b800 },
+ },
+ /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
.max_clk = 320000000,
};
@@ -116,6 +207,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74_config } },
{ .revision = 2, .config = { .hw = &msm8x74_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
+ { .revision = 6, .config = { .hw = &msm8x16_config } },
};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index dba4d52cceeb..3a551b0892d8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -44,26 +44,38 @@ struct mdp5_lm_block {
uint32_t nb_stages; /* number of stages per blender */
};
+struct mdp5_ctl_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t flush_hw_mask; /* FLUSH register's hardware mask */
+};
+
struct mdp5_smp_block {
int mmb_count; /* number of SMP MMBs */
int mmb_size; /* MMB: size in bytes */
+	uint32_t clients[MAX_CLIENTS];	/* SMP port allocation, per pipe */
mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
};
+#define MDP5_INTF_NUM_MAX 5
+
struct mdp5_cfg_hw {
char *name;
+ struct mdp5_sub_block mdp;
struct mdp5_smp_block smp;
- struct mdp5_sub_block ctl;
+ struct mdp5_ctl_block ctl;
struct mdp5_sub_block pipe_vig;
struct mdp5_sub_block pipe_rgb;
struct mdp5_sub_block pipe_dma;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
+ struct mdp5_sub_block pp;
struct mdp5_sub_block intf;
+ u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+
uint32_t max_clk;
};
@@ -84,6 +96,10 @@ const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hn
struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+#define mdp5_cfg_intf_is_virtual(intf_type) ({ \
+	typeof(intf_type) __val = (intf_type); \
+	(__val) >= INTF_VIRTUAL; })
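Editorial note: the statement-expression form evaluates intf_type only once, so the macro is safe for arguments with side effects. It is used later in this patch exactly as a guard for skipping physical-interface programming, e.g. in mdp5_ctl_set_intf():

	/* Virtual interfaces (e.g. writeback) have no physical INTF block: */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);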
+
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
new file mode 100644
index 000000000000..e4e89567f51d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_cmd_encoder {
+ struct drm_encoder base;
+ struct mdp5_interface intf;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[0],
+}, {
+ .num_paths = 1,
+ .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
+{
+ mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
+ &mdp_bus_scale_table);
+ DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
+}
+
+static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
+{
+ if (mdp5_cmd_enc->bsc) {
+ msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
+ mdp5_cmd_enc->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
+{
+ if (mdp5_cmd_enc->bsc) {
+ DBG("set bus scaling: %d", idx);
+ /* HACK: scaling down, and then immediately back up
+ * seems to leave things broken (underflow).. so
+ * never disable:
+ */
+ idx = 1;
+ msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
+static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
+static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {}
+#endif
+
+#define VSYNC_CLK_RATE 19200000
+static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct device *dev = encoder->dev->dev;
+ u32 total_lines_x100, vclks_line, cfg;
+ long vsync_clk_speed;
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+
+ if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
+ dev_err(dev, "vsync_clk is not initialized\n");
+ return -EINVAL;
+ }
+
+ total_lines_x100 = mode->vtotal * mode->vrefresh;
+ if (!total_lines_x100) {
+ dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ __func__, mode->vtotal, mode->vrefresh);
+ return -EINVAL;
+ }
+
+ vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
+ if (vsync_clk_speed <= 0) {
+ dev_err(dev, "vsync_clk round rate failed %ld\n",
+ vsync_clk_speed);
+ return -EINVAL;
+ }
+ vclks_line = vsync_clk_speed * 100 / total_lines_x100;
+
+ cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
+ | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
+ cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
+ MDP5_PP_SYNC_THRESH_START(4) |
+ MDP5_PP_SYNC_THRESH_CONTINUE(4));
+
+ return 0;
+}
+
+static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ int ret;
+
+ ret = clk_set_rate(mdp5_kms->vsync_clk,
+ clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
+ if (ret) {
+ dev_err(encoder->dev->dev,
+ "vsync_clk clk_set_rate failed, %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(mdp5_kms->vsync_clk);
+ if (ret) {
+ dev_err(encoder->dev->dev,
+ "vsync_clk clk_prepare_enable failed, %d\n", ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1);
+
+ return 0;
+}
+
+static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
+ clk_disable_unprepare(mdp5_kms->vsync_clk);
+}
+
+static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ bs_fini(mdp5_cmd_enc);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_cmd_enc);
+}
+
+static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
+ .destroy = mdp5_cmd_encoder_destroy,
+};
+
+static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+ pingpong_tearcheck_setup(encoder, mode);
+ mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+}
+
+static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+ int lm = mdp5_crtc_get_lm(encoder->crtc);
+
+ if (WARN_ON(!mdp5_cmd_enc->enabled))
+ return;
+
+ /* Wait for the last frame done */
+ mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm));
+ pingpong_tearcheck_disable(encoder);
+
+ mdp5_ctl_set_encoder_state(ctl, false);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ bs_set(mdp5_cmd_enc, 0);
+
+ mdp5_cmd_enc->enabled = false;
+}
+
+static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+
+ if (WARN_ON(mdp5_cmd_enc->enabled))
+ return;
+
+ bs_set(mdp5_cmd_enc, 1);
+ if (pingpong_tearcheck_enable(encoder))
+ return;
+
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ mdp5_ctl_set_encoder_state(ctl, true);
+
+ mdp5_cmd_enc->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
+ .mode_fixup = mdp5_cmd_encoder_mode_fixup,
+ .mode_set = mdp5_cmd_encoder_mode_set,
+ .disable = mdp5_cmd_encoder_disable,
+ .enable = mdp5_cmd_encoder_enable,
+};
+
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_cmd_enc->intf.num;
+
+	/* Switch the slave encoder's trigger MUX to use the master's
+	 * start signal for the slave encoder.
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ else
+ return -EINVAL;
+
+ /* Smart Panel, Sync mode */
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+
+	/* Make sure the clocks are on when connectors call this function. */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
+
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ mdp5_disable(mdp5_kms);
+
+ return 0;
+}
+
+/* initialize command mode encoder */
+struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_cmd_encoder *mdp5_cmd_enc;
+ int ret;
+
+	if (WARN_ON((intf->type != INTF_DSI) ||
+		(intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL);
+ if (!mdp5_cmd_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
+ encoder = &mdp5_cmd_enc->base;
+
+ drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
+ DRM_MODE_ENCODER_DSI);
+
+ drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
+
+ bs_init(mdp5_cmd_enc);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_cmd_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 2f2863cf8b45..c1530772187d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -82,8 +82,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
-#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
-
static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -110,8 +108,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
drm_atomic_crtc_for_each_plane(plane, crtc) {
flush_mask |= mdp5_plane_get_flush(plane);
}
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
- flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
+
+ flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
crtc_flush(crtc, flush_mask);
}
@@ -298,8 +296,6 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
- crtc_flush_all(crtc);
-
mdp5_crtc->enabled = true;
}
@@ -444,13 +440,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- struct drm_gem_object *cursor_bo, *old_bo;
+ struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h;
+ bool cursor_enable = true;
unsigned long flags;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -463,7 +460,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
- return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
+ cursor_enable = false;
+ goto set_cursor;
}
cursor_bo = drm_gem_object_lookup(dev, file, handle);
@@ -504,11 +502,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
- ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
- if (ret)
+set_cursor:
+ ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+ if (ret) {
+ dev_err(dev->dev, "failed to %sable cursor: %d\n",
+ cursor_enable ? "en" : "dis", ret);
goto end;
+ }
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
crtc_flush(crtc, flush_mask);
end:
@@ -613,64 +614,39 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
}
/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
- enum mdp5_intf intf_id)
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- uint32_t flush_mask = 0;
- uint32_t intf_sel;
- unsigned long flags;
+ int lm = mdp5_crtc_get_lm(crtc);
/* now that we know what irq's we want: */
- mdp5_crtc->err.irqmask = intf2err(intf);
- mdp5_crtc->vblank.irqmask = intf2vblank(intf);
- mdp_irq_update(&mdp5_kms->base);
-
- spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
-
- switch (intf) {
- case 0:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
- break;
- case 1:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
- break;
- case 2:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
- break;
- case 3:
- intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
- intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
- break;
- default:
- BUG();
- break;
- }
+ mdp5_crtc->err.irqmask = intf2err(intf->num);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
- spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+	/* Register command mode PingPong done as vblank for now,
+	 * so that atomic commit waits for it to finish.
+	 * Ideally, in the future, we should take RD_PTR done as vblank,
+	 * and let atomic commit wait for PingPong done for command mode.
+ */
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
+ else
+ mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
+ mdp_irq_update(&mdp5_kms->base);
- DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
- flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
- flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
-
- crtc_flush(crtc, flush_mask);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+}
- if (WARN_ON(!crtc))
- return -EINVAL;
-
- return mdp5_crtc->lm;
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
}
/* initialize crtc */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 151129032d16..5488b687c8d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,23 +33,31 @@
* requested by the client (in mdp5_crtc_mode_set()).
*/
+struct op_mode {
+ struct mdp5_interface intf;
+
+ bool encoder_enabled;
+ uint32_t start_mask;
+};
+
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
+ int lm;
/* whether this CTL has been allocated or not: */
bool busy;
- /* memory output connection (@see mdp5_ctl_mode): */
- u32 mode;
+ /* Operation Mode Configuration for the Pipeline */
+ struct op_mode pipeline;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
u32 reg_offset;
- /* flush mask used to commit CTL registers */
- u32 flush_mask;
+ /* when do CTL registers need to be flushed? (mask of trigger bits) */
+ u32 pending_ctl_trigger;
bool cursor_on;
@@ -63,6 +71,9 @@ struct mdp5_ctl_manager {
u32 nlm;
u32 nctl;
+ /* to filter out non-present bits in the current hardware config */
+ u32 flush_hw_mask;
+
/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
spinlock_t pool_lock;
struct mdp5_ctl ctls[MAX_CTL];
@@ -94,31 +105,172 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
return mdp5_read(mdp5_kms, reg);
}
+static void set_display_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
+{
+ unsigned long flags;
+ u32 intf_sel;
+
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
+
+ switch (intf->num) {
+ case 0:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+}
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf)
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
unsigned long flags;
- static const enum mdp5_intfnum intfnum[] = {
- INTF0, INTF1, INTF2, INTF3,
- };
+ u32 ctl_op = 0;
+
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
+
+ switch (intf->type) {
+ case INTF_DSI:
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ ctl_op |= MDP5_CTL_OP_CMD_MODE;
+ break;
+
+ case INTF_WB:
+ if (intf->mode == MDP5_INTF_WB_MODE_LINE)
+ ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
+ break;
+
+ default:
+ break;
+ }
spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
- MDP5_CTL_OP_MODE(ctl->mode) |
- MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+ memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
+
+ ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
+ mdp_ctl_flush_mask_encoder(intf);
+
+	/* Virtual interfaces need not set a display intf (e.g. writeback) */
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ set_display_intf(mdp5_kms, intf);
+
+ set_ctl_op(ctl, intf);
return 0;
}
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+static bool start_signal_needed(struct mdp5_ctl *ctl)
+{
+ struct op_mode *pipeline = &ctl->pipeline;
+
+ if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+ return false;
+
+ switch (pipeline->intf.type) {
+ case INTF_WB:
+ return true;
+ case INTF_DSI:
+ return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+ default:
+ return false;
+ }
+}
+
+/*
+ * send_start_signal() - Overlay Processor Start Signal
+ *
+ * For a given control operation (display pipeline), a START signal needs to
+ * be issued to kick off operation and activate all layers,
+ * e.g. for DSI command mode and writeback.
+ */
+static void send_start_signal(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+static void refill_start_mask(struct mdp5_ctl *ctl)
+{
+ struct op_mode *pipeline = &ctl->pipeline;
+ struct mdp5_interface *intf = &ctl->pipeline.intf;
+
+ pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+
+ /*
+ * Writeback encoder needs to program & flush
+ * address registers for each page flip..
+ */
+ if (intf->type == INTF_WB)
+ pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+}
+
+/**
+ * mdp5_ctl_set_encoder_state() - set the encoder state
+ *
+ * @enabled: true when the encoder is ready for data streaming; false otherwise.
+ *
+ * Note:
+ * This encoder state is needed to trigger START signal (data path kickoff).
+ */
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+{
+ if (WARN_ON(!ctl))
+ return -EINVAL;
+
+ ctl->pipeline.encoder_enabled = enabled;
+ DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+
+ if (start_signal_needed(ctl)) {
+ send_start_signal(ctl);
+ refill_start_mask(ctl);
+ }
+
+ return 0;
+}
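An editorial sketch, not part of the patch, of how the START trigger falls out of these helpers for a DSI command-mode enable path (ctl, intf and flush_mask assumed from the surrounding code):

	mdp5_ctl_set_intf(ctl, intf);		/* start_mask = LM bit | encoder bit */
	mdp5_ctl_commit(ctl, flush_mask);	/* flushed bits are cleared from start_mask */
	mdp5_ctl_set_encoder_state(ctl, true);	/* start_mask == 0 -> send_start_signal() */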
+
+/*
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
- int lm;
+ int lm = ctl->lm;
- lm = mdp5_crtc_get_lm(ctl->crtc);
if (unlikely(WARN_ON(lm < 0))) {
dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
ctl->id, lm);
@@ -138,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
ctl->cursor_on = enable;
return 0;
}
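Per the note above, a cursor change only takes effect after a flush; a minimal usage sketch for cursor 0 (cf. mdp5_crtc_cursor_set() earlier in this patch):

	mdp5_ctl_set_cursor(ctl, 0, true);
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_cursor(0));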
-
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
unsigned long flags;
@@ -157,37 +309,122 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+
return 0;
}
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
+{
+ if (intf->type == INTF_WB)
+ return MDP5_CTL_FLUSH_WB;
+
+ switch (intf->num) {
+ case 0: return MDP5_CTL_FLUSH_TIMING_0;
+ case 1: return MDP5_CTL_FLUSH_TIMING_1;
+ case 2: return MDP5_CTL_FLUSH_TIMING_2;
+ case 3: return MDP5_CTL_FLUSH_TIMING_3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+ switch (cursor_id) {
+ case 0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case 1: return MDP5_CTL_FLUSH_CURSOR_1;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_lm(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ case 5: return MDP5_CTL_FLUSH_LM5;
+ default: return 0;
+ }
+}
+
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ u32 sw_mask = 0;
+#define BIT_NEEDS_SW_FIX(bit) \
+ (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
+
+ /* for some targets, cursor bit is the same as LM bit */
+ if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
+ sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+
+ return sw_mask;
+}
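A worked example, editorial only: on msm8x74-class targets the cursor has no dedicated FLUSH bit (the removed MDP5_CTL_FLUSH_CURSOR_DUMMY path below made the same assumption), so a cursor flush is rewritten as an LM flush:

	/* flush_mask contains MDP5_CTL_FLUSH_CURSOR_0, which is absent from
	 * flush_hw_mask (0x0003ffff) => sw_mask |= the LM bit; the cursor
	 * bit itself is later dropped by "flush_mask &= flush_hw_mask".
	 */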
+
+/**
+ * mdp5_ctl_commit() - Register Flush
+ *
+ * The flush register is used to indicate several registers are all
+ * programmed, and are safe to update to the back copy of the double
+ * buffered registers.
+ *
+ * Some registers' FLUSH bits are shared when the hardware does not have
+ * dedicated bits for them; handling these is the job of fix_sw_flush().
+ *
+ * CTL registers need to be flushed in some circumstances; if that is the
+ * case, some trigger bits will be present in both flush mask and
+ * ctl->pending_ctl_trigger.
+ */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ struct op_mode *pipeline = &ctl->pipeline;
unsigned long flags;
- if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
- int lm = mdp5_crtc_get_lm(ctl->crtc);
+ pipeline->start_mask &= ~flush_mask;
- if (unlikely(WARN_ON(lm < 0))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
- ctl->id, lm);
- return -EINVAL;
- }
+ VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
+ pipeline->start_mask, ctl->pending_ctl_trigger);
- /* for current targets, cursor bit is the same as LM bit */
- flush_mask |= mdp_ctl_flush_mask_lm(lm);
+ if (ctl->pending_ctl_trigger & flush_mask) {
+ flush_mask |= MDP5_CTL_FLUSH_CTL;
+ ctl->pending_ctl_trigger = 0;
}
- spin_lock_irqsave(&ctl->hw_lock, flags);
- ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
- spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ flush_mask |= fix_sw_flush(ctl, flush_mask);
- return 0;
-}
+ flush_mask &= ctl_mgr->flush_hw_mask;
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
-{
- return ctl->flush_mask;
+ if (flush_mask) {
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+
+ if (start_signal_needed(ctl)) {
+ send_start_signal(ctl);
+ refill_start_mask(ctl);
+ }
+
+ return 0;
}
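A caller's-eye sketch of a plane update flush with the reworked API, assuming a VIG0 plane blended on LM 0 (the mask helpers are the ones defined just above):

	u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
			 mdp_ctl_flush_mask_lm(0);

	mdp5_ctl_commit(ctl, flush_mask);	/* ORs in MDP5_CTL_FLUSH_CTL if a trigger is pending */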
void mdp5_ctl_release(struct mdp5_ctl *ctl)
@@ -208,6 +445,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl)
DBG("CTL %d released", ctl->id);
}
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
+{
+ return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
+
/*
* mdp5_ctl_request() - CTL dynamic allocation
*
@@ -235,8 +477,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
ctl = &ctl_mgr->ctls[c];
+ ctl->lm = mdp5_crtc_get_lm(crtc);
ctl->crtc = crtc;
ctl->busy = true;
+ ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
unlock:
@@ -267,7 +511,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
struct mdp5_ctl_manager *ctl_mgr;
- const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+ const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -289,6 +533,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
ctl_mgr->dev = dev;
ctl_mgr->nlm = hw_cfg->lm.count;
ctl_mgr->nctl = ctl_cfg->count;
+ ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
spin_lock_init(&ctl_mgr->pool_lock);
/* initialize each CTL of the pool: */
@@ -303,9 +548,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
- ctl->mode = MODE_NONE;
ctl->reg_offset = ctl_cfg->base[c];
- ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
ctl->busy = false;
spin_lock_init(&ctl->hw_lock);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index ad48788efeea..7a62000994a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -33,19 +33,13 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf);
+struct mdp5_interface;
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
-
-/* @blend_cfg: see LM blender config definition below */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
-
-/* @flush_mask: see CTL flush masks definitions below */
-int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
-
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
/*
* blend_cfg (LM blender config):
@@ -72,51 +66,32 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
}
/*
- * flush_mask (CTL flush masks):
+ * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
+ *
+ * @blend_cfg: see LM blender config definition above
*
- * The following functions allow each DRM entity to get and store
- * their own flush mask.
- * Once stored, these masks will then be accessed through each DRM's
- * interface and used by the caller of mdp5_ctl_commit() to specify
- * which block(s) need to be flushed through @flush_mask parameter.
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
-#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
+/**
+ * mdp_ctl_flush_mask...() - Register FLUSH masks
+ *
+ * These masks are used to specify which block(s) need to be flushed
+ * through the @flush_mask parameter in mdp5_ctl_commit(.., flush_mask).
+ */
+u32 mdp_ctl_flush_mask_lm(int lm);
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
+u32 mdp_ctl_flush_mask_cursor(int cursor_id);
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
-static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
-{
- /* TODO: use id once multiple cursor support is present */
- (void)cursor_id;
+/* @flush_mask: see CTL flush mask definitions above */
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
- return MDP5_CTL_FLUSH_CURSOR_DUMMY;
-}
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
-static inline u32 mdp_ctl_flush_mask_lm(int lm)
-{
- switch (lm) {
- case 0: return MDP5_CTL_FLUSH_LM0;
- case 1: return MDP5_CTL_FLUSH_LM1;
- case 2: return MDP5_CTL_FLUSH_LM2;
- case 5: return MDP5_CTL_FLUSH_LM5;
- default: return 0;
- }
-}
-static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
-{
- switch (pipe) {
- case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
- case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
- case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
- case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
- case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
- case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
- case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
- case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
- case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
- case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
- default: return 0;
- }
-}
#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index af0e02fa4f48..1188f4bf1e60 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -23,8 +23,7 @@
struct mdp5_encoder {
struct drm_encoder base;
- int intf;
- enum mdp5_intf intf_id;
+ struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
@@ -126,7 +125,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- int intf = mdp5_encoder->intf;
+ int intf = mdp5_encoder->intf.num;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
@@ -188,7 +187,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
- if (mdp5_encoder->intf_id == INTF_eDP) {
+ if (mdp5_encoder->intf.type == INTF_eDP) {
display_v_start += mode->htotal - mode->hsync_start;
display_v_end -= mode->hsync_start - mode->hdisplay;
}
@@ -218,21 +217,29 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
}
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ int lm = mdp5_crtc_get_lm(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf.num;
unsigned long flags;
if (WARN_ON(!mdp5_encoder->enabled))
return;
+ mdp5_ctl_set_encoder_state(ctl, false);
+
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -242,7 +249,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
bs_set(mdp5_encoder, 0);
@@ -253,19 +260,21 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf.num;
unsigned long flags;
if (WARN_ON(mdp5_encoder->enabled))
return;
- mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
- mdp5_encoder->intf_id);
-
bs_set(mdp5_encoder, 1);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+ mdp5_ctl_set_encoder_state(ctl, true);
mdp5_encoder->enabled = true;
}
@@ -277,12 +286,51 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.enable = mdp5_encoder_enable,
};
+int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_encoder->intf.num;
+
+	/* Switch the slave encoder's TimingGen Sync mode to use
+	 * the master's enable signal for the slave encoder.
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ else
+ return -EINVAL;
+
+	/* Make sure the clocks are on when connectors call this function. */
+ mdp5_enable(mdp5_kms);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+ MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+ /* Dumb Panel, Sync mode */
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ mdp5_disable(mdp5_kms);
+
+ return 0;
+}
+
/* initialize encoder */
-struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
- enum mdp5_intf intf_id)
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
+ int enc_type = (intf->type == INTF_DSI) ?
+ DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
int ret;
mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
@@ -291,14 +339,13 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
goto fail;
}
- mdp5_encoder->intf = intf;
- mdp5_encoder->intf_id = intf_id;
+ memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
spin_lock_init(&mdp5_encoder->intf_lock);
- drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type);
+
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
bs_init(mdp5_encoder);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index a9407105b9b7..33bd4c6160dd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -23,7 +23,7 @@
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -35,8 +35,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -61,7 +61,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -73,8 +73,8 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
unsigned int id;
uint32_t status;
- status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
- mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+ status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
VERB("status=%08x", status);
@@ -91,13 +91,13 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
uint32_t intr;
- intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+ intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
- if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
+ if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
mdp5_irq_mdp(mdp_kms);
- intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
+ intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
}
while (intr) {
@@ -128,10 +128,10 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
* can register to get their irq's delivered
*/
-#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
- MDP5_HW_INTR_STATUS_INTR_DSI1 | \
- MDP5_HW_INTR_STATUS_INTR_HDMI | \
- MDP5_HW_INTR_STATUS_INTR_EDP)
+#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
+ MDSS_HW_INTR_STATUS_INTR_DSI1 | \
+ MDSS_HW_INTR_STATUS_INTR_HDMI | \
+ MDSS_HW_INTR_STATUS_INTR_EDP)
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 92b61db5754c..dfa8beb9343a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -58,7 +58,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
@@ -86,6 +86,18 @@ static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
return rate;
}
+static int mdp5_set_split_display(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode)
+{
+ if (is_cmd_mode)
+ return mdp5_cmd_encoder_set_split_display(encoder,
+ slave_encoder);
+ else
+ return mdp5_encoder_set_split_display(encoder, slave_encoder);
+}
+
static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -131,6 +143,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.complete_commit = mdp5_complete_commit,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
+ .set_split_display = mdp5_set_split_display,
.preclose = mdp5_preclose,
.destroy = mdp5_destroy,
},
@@ -161,6 +174,134 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
return 0;
}
+static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
+ enum mdp5_intf_type intf_type, int intf_num,
+ enum mdp5_intf_mode intf_mode)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct mdp5_interface intf = {
+ .num = intf_num,
+ .type = intf_type,
+ .mode = intf_mode,
+ };
+
+ if ((intf_type == INTF_DSI) &&
+ (intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
+ encoder = mdp5_cmd_encoder_init(dev, &intf);
+ else
+ encoder = mdp5_encoder_init(dev, &intf);
+
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct encoder\n");
+ return encoder;
+ }
+
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ return encoder;
+}
+
+static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
+{
+ const int intf_cnt = hw_cfg->intf.count;
+ const u32 *intfs = hw_cfg->intfs;
+ int id = 0, i;
+
+ for (i = 0; i < intf_cnt; i++) {
+ if (intfs[i] == INTF_DSI) {
+ if (intf_num == i)
+ return id;
+
+ id++;
+ }
+ }
+
+ return -EINVAL;
+}
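Worked example, editorial: against msm8x74's .intfs[] table earlier in this patch ({ INTF_eDP, INTF_DSI, INTF_DSI, INTF_HDMI }), the helper maps interface numbers to DSI controller IDs as follows:

	get_dsi_id_from_intf(hw_cfg, 1);	/* -> 0: first DSI */
	get_dsi_id_from_intf(hw_cfg, 2);	/* -> 1: second DSI */
	get_dsi_id_from_intf(hw_cfg, 3);	/* -> -EINVAL: INTF_HDMI is not DSI */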
+
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ const struct mdp5_cfg_hw *hw_cfg =
+ mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num];
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ switch (intf_type) {
+ case INTF_DISABLED:
+ break;
+ case INTF_eDP:
+ if (!priv->edp)
+ break;
+
+ encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
+ MDP5_INTF_MODE_NONE);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_edp_modeset_init(priv->edp, dev, encoder);
+ break;
+ case INTF_HDMI:
+ if (!priv->hdmi)
+ break;
+
+ encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
+ MDP5_INTF_MODE_NONE);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ break;
+ case INTF_DSI:
+ {
+ int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
+ struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
+ enum mdp5_intf_mode mode;
+ int i;
+
+ if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ dev_err(dev->dev, "failed to find dsi from intf %d\n",
+ intf_num);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!priv->dsi[dsi_id])
+ break;
+
+ for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+ mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
+ MDP5_INTF_DSI_MODE_COMMAND :
+ MDP5_INTF_DSI_MODE_VIDEO;
+ dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
+ intf_num, mode);
+			if (IS_ERR(dsi_encs[i])) {
+				ret = PTR_ERR(dsi_encs[i]);
+ break;
+ }
+ }
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+ break;
+ }
+ default:
+ dev_err(dev->dev, "unknown intf: %d\n", intf_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe crtcs[] = {
@@ -171,7 +312,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- struct drm_encoder *encoder;
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
@@ -222,44 +362,13 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
}
}
- if (priv->hdmi) {
- /* Construct encoder for HDMI: */
- encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
- priv->encoders[priv->num_encoders++] = encoder;
-
- ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
- goto fail;
- }
- }
-
- if (priv->edp) {
- /* Construct encoder for eDP: */
- encoder = mdp5_encoder_init(dev, 0, INTF_eDP);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct eDP encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
-
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
- priv->encoders[priv->num_encoders++] = encoder;
-
- /* Construct bridge/connector for eDP: */
- ret = msm_edp_modeset_init(priv->edp, dev, encoder);
- if (ret) {
- dev_err(dev->dev, "failed to initialize eDP: %d\n",
- ret);
+	/* Construct encoders and perform modeset initialization of
+	 * connector devices for each external display interface.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) {
+ ret = modeset_init_intf(mdp5_kms, i);
+ if (ret)
goto fail;
- }
}
return 0;
@@ -274,11 +383,11 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms,
uint32_t version;
mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+ version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
mdp5_disable(mdp5_kms);
- *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
- *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+ *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
@@ -321,6 +430,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->dev = dev;
+ /* mdp5_kms->mmio actually represents the MDSS base address */
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
@@ -403,8 +513,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- for (i = 0; i < config->hw->intf.count; i++)
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (!config->hw->intf.base[i] ||
+ mdp5_cfg_intf_is_virtual(config->hw->intfs[i]))
+ continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+ }
mdp5_disable(mdp5_kms);
mdelay(16);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 49d011e8835b..2c0de174cc09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -54,7 +54,7 @@ struct mdp5_kms {
/*
* lock to protect access to global resources: ie., following register:
- * - REG_MDP5_DISP_INTF_SEL
+ * - REG_MDP5_MDP_DISP_INTF_SEL
*/
spinlock_t resource_lock;
@@ -94,6 +94,24 @@ struct mdp5_plane_state {
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
+enum mdp5_intf_mode {
+ MDP5_INTF_MODE_NONE = 0,
+
+ /* Modes used for DSI interface (INTF_DSI type): */
+ MDP5_INTF_DSI_MODE_VIDEO,
+ MDP5_INTF_DSI_MODE_COMMAND,
+
+ /* Modes used for WB interface (INTF_WB type): */
+ MDP5_INTF_WB_MODE_BLOCK,
+ MDP5_INTF_WB_MODE_LINE,
+};
+
+struct mdp5_interface {
+ int num; /* display interface number */
+ enum mdp5_intf_type type;
+ enum mdp5_intf_mode mode;
+};
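construct_encoder() in mdp5_kms.c (earlier in this patch) builds one of these on the stack; for example, a command-mode DSI panel on interface 1 would be described as:

	struct mdp5_interface intf = {
		.num = 1,
		.type = INTF_DSI,
		.mode = MDP5_INTF_DSI_MODE_COMMAND,
	};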
+
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
msm_writel(data, mdp5_kms->mmio + reg);
@@ -130,9 +148,9 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
}
}
-static inline uint32_t intf2err(int intf)
+static inline uint32_t intf2err(int intf_num)
{
- switch (intf) {
+ switch (intf_num) {
case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
@@ -141,9 +159,23 @@ static inline uint32_t intf2err(int intf)
}
}
-static inline uint32_t intf2vblank(int intf)
+#define GET_PING_PONG_ID(layer_mixer) (((layer_mixer) == 5) ? 3 : (layer_mixer))
+static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
{
- switch (intf) {
+ /*
+ * In case of DSI Command Mode, the Ping Pong's read pointer IRQ
+ * acts as a Vblank signal. The Ping Pong buffer used is bound to
+	 * the layer mixer.
+ */
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
+ return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
+
+ if (intf->type == INTF_WB)
+ return MDP5_IRQ_WB_2_DONE;
+
+ switch (intf->num) {
case 0: return MDP5_IRQ_INTF0_VSYNC;
case 1: return MDP5_IRQ_INTF1_VSYNC;
case 2: return MDP5_IRQ_INTF2_VSYNC;
@@ -152,6 +184,11 @@ static inline uint32_t intf2vblank(int intf)
}
}
+static inline uint32_t lm2ppdone(int lm)
+{
+ return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
+}
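Both helpers assume the PING_PONG IRQ bits are laid out consecutively per ping-pong block; an editorial example of the LM-to-PP mapping they encode:

	lm2ppdone(0);	/* MDP5_IRQ_PING_PONG_0_DONE */
	lm2ppdone(5);	/* MDP5_IRQ_PING_PONG_3_DONE: LM5 shares PP3 */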
+
int mdp5_disable(struct mdp5_kms *mdp5_kms);
int mdp5_enable(struct mdp5_kms *mdp5_kms);
@@ -197,13 +234,33 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
- enum mdp5_intf intf_id);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id);
-struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
- enum mdp5_intf intf_id);
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf);
+int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf);
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+#else
+static inline struct drm_encoder *mdp5_cmd_encoder_init(
+ struct drm_device *dev, struct mdp5_interface *intf)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int mdp5_cmd_encoder_set_split_display(
+ struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
+{
+ return -EINVAL;
+}
+#endif
#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 6bd48e246283..18a3d203b174 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -507,8 +507,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 1f795af89680..16702aecf0df 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -43,7 +43,7 @@
* set.
*
* 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
*
* 3) mdp5_smp_commit():
@@ -74,7 +74,7 @@ struct mdp5_smp {
spinlock_t state_lock;
mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
- struct mdp5_client_smp_state client_state[CID_MAX];
+ struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
static inline
@@ -85,27 +85,31 @@ struct mdp5_kms *get_kms(struct mdp5_smp *smp)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
- WARN_ON(plane >= pipe2nclients(pipe));
- switch (pipe) {
- case SSPP_VIG0: return CID_VIG0_Y + plane;
- case SSPP_VIG1: return CID_VIG1_Y + plane;
- case SSPP_VIG2: return CID_VIG2_Y + plane;
- case SSPP_RGB0: return CID_RGB0;
- case SSPP_RGB1: return CID_RGB1;
- case SSPP_RGB2: return CID_RGB2;
- case SSPP_DMA0: return CID_DMA0_Y + plane;
- case SSPP_DMA1: return CID_DMA1_Y + plane;
- case SSPP_VIG3: return CID_VIG3_Y + plane;
- case SSPP_RGB3: return CID_RGB3;
- default: return CID_UNUSED;
- }
+#define CID_UNUSED 0
+
+ if (WARN_ON(plane >= pipe2nclients(pipe)))
+ return CID_UNUSED;
+
+ /*
+ * Note on SMP clients:
+	 * For ViG pipes, the fetch clients for the Y/Cr/Cb components
+	 * are always consecutive, and in that order.
+ *
+ * e.g.:
+ * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
+ * Y plane's client ID is N
+ * Cr plane's client ID is N + 1
+ * Cb plane's client ID is N + 2
+ */
+
+ return mdp5_cfg->smp.clients[pipe] + plane;
}
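A worked example, editorial: with msm8x74's .clients[SSPP_VIG0] = 1 (see mdp5_cfg.c earlier in this patch), the ViG0 fetch clients resolve as:

	pipe2client(SSPP_VIG0, 0);	/* -> 1 (Y)  */
	pipe2client(SSPP_VIG0, 1);	/* -> 2 (Cr) */
	pipe2client(SSPP_VIG0, 2);	/* -> 3 (Cb) */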
/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
- enum mdp5_client_id cid, int nblks)
+ u32 cid, int nblks)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
const struct mdp5_cfg_hw *hw_cfg;
@@ -227,7 +231,7 @@ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
}
static void update_smp_state(struct mdp5_smp *smp,
- enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+ u32 cid, mdp5_smp_state_t *assigned)
{
struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
@@ -237,25 +241,25 @@ static void update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+ val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
switch (fld) {
case 0:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
- val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
- val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
}
}
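Each SMP_ALLOC register packs three client IDs; a short worked example of the blk-to-register arithmetic used above:

	/* blk = 7: idx = 7 / 3 = 2, fld = 7 % 3 = 1, so the client ID is
	 * written into the CLIENT1 field of REG_MDP5_MDP_SMP_ALLOC_W_REG(0, 2),
	 * then mirrored into the matching ALLOC_R register.
	 */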
@@ -267,7 +271,7 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
- enum mdp5_client_id cid = pipe2client(pipe, i);
+ u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
bitmap_or(assigned, ps->inuse, ps->pending, cnt);
@@ -283,7 +287,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
int i;
for (i = 0; i < pipe2nclients(pipe); i++) {
- enum mdp5_client_id cid = pipe2client(pipe, i);
+ u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
/*
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a4269119f9ea..47f4dd407671 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -182,41 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev)
return 4;
}
-static int msm_load(struct drm_device *dev, unsigned long flags)
-{
- struct platform_device *pdev = dev->platformdev;
- struct msm_drm_private *priv;
- struct msm_kms *kms;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
- return -ENOMEM;
- }
+#include <linux/of_address.h>
- dev->dev_private = priv;
-
- priv->wq = alloc_ordered_workqueue("msm", 0);
- init_waitqueue_head(&priv->fence_event);
- init_waitqueue_head(&priv->pending_crtcs_event);
-
- INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->fence_cbs);
+static int msm_init_vram(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned long size = 0;
+ int ret = 0;
- drm_mode_config_init(dev);
+#ifdef CONFIG_OF
+ /* In the device-tree world, we could have a 'memory-region'
+	 * phandle, which gives us a link to our "vram". Allocation
+	 * is all nicely abstracted behind the DMA API, but we need
+ * to know the entire size to allocate it all in one go. There
+ * are two cases:
+ * 1) device with no IOMMU, in which case we need exclusive
+ * access to a VRAM carveout big enough for all gpu
+ * buffers
+ * 2) device with IOMMU, but where the bootloader puts up
+ * a splash screen. In this case, the VRAM carveout
+ * need only be large enough for fbdev fb. But we need
+ * exclusive access to the buffer to avoid the kernel
+ * using those pages for other purposes (which appears
+ * as corruption on screen before we have a chance to
+ * load and do initial modeset)
+ */
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
+ if (node) {
+ struct resource r;
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+ size = r.end - r.start;
+ DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+ } else
+#endif
/* if we have no IOMMU, then we need to use carveout allocator.
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
if (!iommu_present(&platform_bus_type)) {
+ DRM_INFO("using %s VRAM carveout\n", vram);
+ size = memparse(vram, NULL);
+ }
+
+ if (size) {
DEFINE_DMA_ATTRS(attrs);
- unsigned long size;
void *p;
- DBG("using %s VRAM carveout", vram);
- size = memparse(vram, NULL);
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
@@ -232,8 +248,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (!p) {
dev_err(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
- ret = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
dev_info(dev->dev, "VRAM: %08x->%08x\n",
@@ -241,6 +256,37 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
(uint32_t)(priv->vram.paddr + size));
}
+ return ret;
+}
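When no memory-region phandle is found and no IOMMU is present, the carveout size comes from the `vram` module parameter via memparse(), which accepts an optional K/M/G suffix. A rough userspace re-implementation of that parsing, written from memory of the lib/cmdline.c semantics rather than copied from it:

#include <stdio.h>
#include <stdlib.h>

/* parse "8m", "64K", "1g", ... into a byte count (simplified memparse) */
static unsigned long long parse_size(const char *s)
{
        char *end;
        unsigned long long val = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': val <<= 10;  /* fall through */
        case 'M': case 'm': val <<= 10;  /* fall through */
        case 'K': case 'k': val <<= 10;  break;
        default: break;
        }
        return val;
}

int main(void)
{
        printf("%llu\n", parse_size("16m"));    /* 16777216 */
        return 0;
}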
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
+
+ INIT_LIST_HEAD(&priv->inactive_list);
+ INIT_LIST_HEAD(&priv->fence_cbs);
+
+ drm_mode_config_init(dev);
+
+ ret = msm_init_vram(dev);
+ if (ret)
+ goto fail;
+
platform_set_drvdata(pdev, dev);
/* Bind all our sub-components: */
@@ -1030,6 +1076,7 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_dsi_register();
msm_edp_register();
hdmi_register();
adreno_register();
@@ -1043,6 +1090,7 @@ static void __exit msm_drm_unregister(void)
hdmi_unregister();
adreno_unregister();
msm_edp_unregister();
+ msm_dsi_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9e8d441b61c3..04db4bd1b5b6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -82,6 +82,9 @@ struct msm_drm_private {
*/
struct msm_edp *edp;
+ /* DSI is shared by mdp4 and mdp5 */
+ struct msm_dsi *dsi[2];
+
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
@@ -236,6 +239,32 @@ void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
+struct msm_dsi;
+enum msm_dsi_encoder_id {
+ MSM_DSI_VIDEO_ENCODER_ID = 0,
+ MSM_DSI_CMD_ENCODER_ID = 1,
+ MSM_DSI_ENCODER_NUM = 2
+};
+#ifdef CONFIG_DRM_MSM_DSI
+void __init msm_dsi_register(void);
+void __exit msm_dsi_unregister(void);
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
+#else
+static inline void __init msm_dsi_register(void)
+{
+}
+static inline void __exit msm_dsi_unregister(void)
+{
+}
+static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
+ struct drm_device *dev,
+ struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+ return -EINVAL;
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index df60f65728ff..95f6532df02d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
mutex_lock(&dev->struct_mutex);
- fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
+ MSM_BO_WC | MSM_BO_STOLEN);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(fbdev->bo)) {
ret = PTR_ERR(fbdev->bo);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 49dea4fb55ac..479d8af72bcb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj)
priv->vram.paddr;
}
+static bool use_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ return !msm_obj->vram_node;
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
int npages)
@@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
- if (iommu_present(&platform_bus_type))
+ if (use_pages(obj))
p = drm_gem_get_pages(obj);
else
p = get_pages_vram(obj, npages);
@@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj)
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
- if (iommu_present(&platform_bus_type))
+ if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else {
drm_mm_remove_node(msm_obj->vram_node);
@@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
unsigned sz;
+ bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
return -EINVAL;
}
- sz = sizeof(*msm_obj);
if (!iommu_present(&platform_bus_type))
+ use_vram = true;
+ else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+ use_vram = true;
+
+ if (WARN_ON(use_vram && !priv->vram.size))
+ return -EINVAL;
+
+ sz = sizeof(*msm_obj);
+ if (use_vram)
sz += sizeof(struct drm_mm_node);
msm_obj = kzalloc(sz, GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
- if (!iommu_present(&platform_bus_type))
+ if (use_vram)
msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags;
@@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
if (ret)
goto fail;
- if (iommu_present(&platform_bus_type)) {
+ if (use_pages(obj)) {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8fbbd0594c46..85d481e29276 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -21,6 +21,9 @@
#include <linux/reservation.h>
#include "msm_drv.h"
+/* Additional internal-use only BO flags: */
+#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -59,7 +62,7 @@ struct msm_gem_object {
struct reservation_object _resv;
/* For physically contiguous buffers. Used when we don't have
- * an IOMMU.
+ * an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
};
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 3a78cb48662b..a9f17bdb4530 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -47,6 +47,10 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
+ int (*set_split_display)(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 29bd539af183..6efa8f38ff54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
/* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
- if (ioread32_native(map + 0x000004) != 0x00000000)
+ if (ioread32_native(map + 0x000004) != 0x00000000) {
#else
- if (ioread32_native(map + 0x000004) == 0x00000000)
+ if (ioread32_native(map + 0x000004) == 0x00000000) {
#endif
iowrite32_native(0x01000001, map + 0x000004);
+ ioread32_native(map);
+ }
/* read boot0 and strapping information */
boot0 = ioread32_native(map + 0x000000);
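The ioread32_native() added after the endianness-switch write is the usual posted-write flush: reading any register on the same BAR forces the write to reach the device before boot0 is sampled in the new byte order. A generic sketch of the pattern, with plain volatile pointers standing in for the iomap accessors:

#include <stdint.h>

/*
 * Write a device register, then read one back so the posted write is
 * guaranteed to have reached the device before execution continues.
 */
static inline void mmio_write_flushed(volatile uint32_t *reg, uint32_t val,
                                      volatile uint32_t *any_reg)
{
        *reg = val;
        (void)*any_reg;         /* the read-back drains the write buffer */
}

int main(void)
{
        volatile uint32_t fake_bar[2] = { 0, 0 };

        mmio_write_flushed(&fake_bar[0], 0x01000001, &fake_bar[1]);
        return fake_bar[0] == 0x01000001 ? 0 : 1;
}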
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 539561ed3281..108d048da764 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device)
device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
#endif
break;
+ case 0x126:
+ device->cname = "GM206";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
+#if 0
+ /* looks to be some non-trivial changes */
+ device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
+ /* priv ring says no to 0x10eb14 writes */
+ device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
+#endif
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
+#if 0
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
+#endif
+ device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
+#if 0
+ device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
+ device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
+ device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
+ device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
+ device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
+ device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
+#endif
+ break;
default:
nv_fatal(device, "unknown Maxwell chipset\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index b038b6eb51db..043e4296084c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
{
struct nvkm_device *device = nv_device(subdev);
struct nv04_fifo_priv *priv = (void *)subdev;
- uint32_t status, reassign;
- int cnt = 0;
+ u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
+ u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
+ u32 reassign, chid, get, sem;
reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
- while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
- uint32_t chid, get;
-
- nv_wr32(priv, NV03_PFIFO_CACHES, 0);
-
- chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
- get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0);
- if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- nv04_fifo_cache_error(device, priv, chid, get);
- status &= ~NV_PFIFO_INTR_CACHE_ERROR;
- }
+ chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+ get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
- if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- nv04_fifo_dma_pusher(device, priv, chid);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- }
+ if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
+ nv04_fifo_cache_error(device, priv, chid, get);
+ stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
- if (status & NV_PFIFO_INTR_SEMAPHORE) {
- uint32_t sem;
+ if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
+ nv04_fifo_dma_pusher(device, priv, chid);
+ stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
- status &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(priv, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_SEMAPHORE);
+ if (stat & NV_PFIFO_INTR_SEMAPHORE) {
+ stat &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
- sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+ sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
- nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
- }
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
- if (device->card_type == NV_50) {
- if (status & 0x00000010) {
- status &= ~0x00000010;
- nv_wr32(priv, 0x002100, 0x00000010);
- }
-
- if (status & 0x40000000) {
- nv_wr32(priv, 0x002100, 0x40000000);
- nvkm_fifo_uevent(&priv->base);
- status &= ~0x40000000;
- }
+ if (device->card_type == NV_50) {
+ if (stat & 0x00000010) {
+ stat &= ~0x00000010;
+ nv_wr32(priv, 0x002100, 0x00000010);
}
- if (status) {
- nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
- status, chid);
- nv_wr32(priv, NV03_PFIFO_INTR_0, status);
- status = 0;
+ if (stat & 0x40000000) {
+ nv_wr32(priv, 0x002100, 0x40000000);
+ nvkm_fifo_uevent(&priv->base);
+ stat &= ~0x40000000;
}
-
- nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
}
- if (status) {
- nv_error(priv, "still angry after %d spins, halt\n", cnt);
- nv_wr32(priv, 0x002140, 0);
- nv_wr32(priv, 0x000140, 0);
+ if (stat) {
+ nv_warn(priv, "unknown intr 0x%08x\n", stat);
+ nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
}
- nv_wr32(priv, 0x000100, 0x00000100);
+ nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
}
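The rewritten handler is a single pass instead of a bounded retry loop: it only considers status bits that are enabled in INTR_EN_0, clears each source it understands, and masks off whatever is left so an unknown source cannot wedge the machine in an interrupt storm. A toy model of that shape, with invented bit names and fake registers:

#include <stdint.h>
#include <stdio.h>

#define INTR_CACHE_ERROR 0x01u
#define INTR_DMA_PUSHER  0x10u

/* fake interrupt registers: one enabled source we don't understand */
static uint32_t intr_en   = 0x51;
static uint32_t intr_stat = 0xd1;

static void fifo_intr(void)
{
        uint32_t stat = intr_stat & intr_en;    /* ignore masked sources */

        if (stat & INTR_CACHE_ERROR) {
                /* ... handle and recover ... */
                stat &= ~INTR_CACHE_ERROR;
        }
        if (stat & INTR_DMA_PUSHER) {
                /* ... handle and recover ... */
                stat &= ~INTR_DMA_PUSHER;
        }
        if (stat) {
                printf("unknown intr 0x%08x\n", stat);
                intr_en   &= ~stat;     /* never take it again */
                intr_stat &= ~stat;     /* ack what we saw */
        }
}

int main(void)
{
        fifo_intr();    /* prints: unknown intr 0x00000040 */
        return 0;
}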
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 2e7ec389eea7..57e2c5b13123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info)
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
- mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
- mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b52300d8861a..5e9454ba158f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
- mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
- mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 956f4dce960c..b2fae6e389e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
- mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_refn(info, 0x418e24, 0x00000000, s, b);
- mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index d1a89b2bd5c1..c4e1f085ee10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
if (ent) {
if (ver >= 0x41) {
- if (!(nv_ro32(bios, ent) & 0x80000000))
+ u32 ent_value = nv_ro32(bios, ent);
+ u8 i2c_port = (ent_value >> 27) & 0x1f;
+ u8 dpaux_port = (ent_value >> 22) & 0x1f;
+ /* value 0x1f means unused according to DCB 4.x spec */
+ if (i2c_port == 0x1f && dpaux_port == 0x1f)
info->type = DCB_I2C_UNUSED;
else
info->type = DCB_I2C_PMGR;
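For reference, the two fields tested above sit in the top bits of the 32-bit DCB 4.1 entry: bits 27-31 hold the I2C port and bits 22-26 the DP AUX port, with 0x1f in both meaning the entry is unused. A standalone check of that decoding (the sample entry values are invented):

#include <stdint.h>
#include <stdio.h>

static int dcb41_entry_unused(uint32_t ent)
{
        uint8_t i2c_port   = (ent >> 27) & 0x1f;
        uint8_t dpaux_port = (ent >> 22) & 0x1f;

        return i2c_port == 0x1f && dpaux_port == 0x1f;
}

int main(void)
{
        uint32_t unused = (0x1fu << 27) | (0x1fu << 22);

        printf("%d %d\n", dcb41_entry_unused(unused),       /* 1 */
                          dcb41_entry_unused(0x00400000));  /* 0: aux port 1 */
        return 0;
}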
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index a94b11f7859d..b41965c2888d 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -271,18 +271,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.best_encoder = omap_connector_attached_encoder,
};
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-void omap_connector_flush(struct drm_connector *connector,
- int x, int y, int w, int h)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- /* TODO: enable when supported in dss */
- VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
-}
-
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
int connector_type, struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index b0566a1ca28f..f456544bf300 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -28,7 +28,6 @@
struct omap_crtc {
struct drm_crtc base;
- struct drm_plane *plane;
const char *name;
int pipe;
@@ -46,7 +45,6 @@ struct omap_crtc {
struct omap_video_timings timings;
bool enabled;
- bool full_update;
struct omap_drm_apply apply;
@@ -74,8 +72,14 @@ struct omap_crtc {
* XXX maybe fold into apply_work??
*/
struct work_struct page_flip_work;
+
+ bool ignore_digit_sync_lost;
};
+/* -----------------------------------------------------------------------------
+ * Helper Functions
+ */
+
uint32_t pipe2vbl(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -83,6 +87,22 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->channel;
+}
+
+/* -----------------------------------------------------------------------------
+ * DSS Manager Functions
+ */
+
/*
* Manager-ops, callbacks from output when they need to configure
* the upstream part of the video pipe.
@@ -122,7 +142,63 @@ static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
{
}
-static void set_enabled(struct drm_crtc *crtc, bool enable);
+/* Called only from CRTC pre_apply and suspend/resume handlers. */
+static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ enum omap_channel channel = omap_crtc->channel;
+ struct omap_irq_wait *wait;
+ u32 framedone_irq, vsync_irq;
+ int ret;
+
+ if (dispc_mgr_is_enabled(channel) == enable)
+ return;
+
+ if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
+ /*
+ * Digit output produces some sync lost interrupts during the
+ * first frame when enabling, so we need to ignore those.
+ */
+ omap_crtc->ignore_digit_sync_lost = true;
+ }
+
+ framedone_irq = dispc_mgr_get_framedone_irq(channel);
+ vsync_irq = dispc_mgr_get_vsync_irq(channel);
+
+ if (enable) {
+ wait = omap_irq_wait_init(dev, vsync_irq, 1);
+ } else {
+ /*
+ * When we disable the digit output, we need to wait for
+ * FRAMEDONE to know that DISPC has finished with the output.
+ *
+		 * OMAP2/3 does not have a FRAMEDONE irq for digit output, so
+		 * in that case we need to use the vsync interrupt and wait
+		 * for both even and odd frames.
+ */
+
+ if (framedone_irq)
+ wait = omap_irq_wait_init(dev, framedone_irq, 1);
+ else
+ wait = omap_irq_wait_init(dev, vsync_irq, 2);
+ }
+
+ dispc_mgr_enable(channel, enable);
+
+ ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+ if (ret) {
+ dev_err(dev->dev, "%s: timeout waiting for %s\n",
+ omap_crtc->name, enable ? "enable" : "disable");
+ }
+
+ if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
+ omap_crtc->ignore_digit_sync_lost = false;
+ /* make sure the irq handler sees the value above */
+ mb();
+ }
+}
+
static int omap_crtc_enable(struct omap_overlay_manager *mgr)
{
@@ -131,7 +207,7 @@ static int omap_crtc_enable(struct omap_overlay_manager *mgr)
dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
dispc_mgr_set_timings(omap_crtc->channel,
&omap_crtc->timings);
- set_enabled(&omap_crtc->base, true);
+ omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
@@ -140,7 +216,7 @@ static void omap_crtc_disable(struct omap_overlay_manager *mgr)
{
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
- set_enabled(&omap_crtc->base, false);
+ omap_crtc_set_enabled(&omap_crtc->base, false);
}
static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
@@ -149,7 +225,6 @@ static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
DBG("%s", omap_crtc->name);
omap_crtc->timings = *timings;
- omap_crtc->full_update = true;
}
static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
@@ -174,19 +249,201 @@ static void omap_crtc_unregister_framedone_handler(
}
static const struct dss_mgr_ops mgr_ops = {
- .connect = omap_crtc_connect,
- .disconnect = omap_crtc_disconnect,
- .start_update = omap_crtc_start_update,
- .enable = omap_crtc_enable,
- .disable = omap_crtc_disable,
- .set_timings = omap_crtc_set_timings,
- .set_lcd_config = omap_crtc_set_lcd_config,
- .register_framedone_handler = omap_crtc_register_framedone_handler,
- .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
+ .connect = omap_crtc_connect,
+ .disconnect = omap_crtc_disconnect,
+ .start_update = omap_crtc_start_update,
+ .enable = omap_crtc_enable,
+ .disable = omap_crtc_disable,
+ .set_timings = omap_crtc_set_timings,
+ .set_lcd_config = omap_crtc_set_lcd_config,
+ .register_framedone_handler = omap_crtc_register_framedone_handler,
+ .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
};
-/*
- * CRTC funcs:
+/* -----------------------------------------------------------------------------
+ * Apply Logic
+ */
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, error_irq);
+
+ if (omap_crtc->ignore_digit_sync_lost) {
+ irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+ if (!irqstatus)
+ return;
+ }
+
+ DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(irq, struct omap_crtc, apply_irq);
+ struct drm_crtc *crtc = &omap_crtc->base;
+
+ if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+ struct omap_drm_private *priv =
+ crtc->dev->dev_private;
+ DBG("%s: apply done", omap_crtc->name);
+ __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(work, struct omap_crtc, apply_work);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct omap_drm_apply *apply, *n;
+ bool need_apply;
+
+ /*
+ * Synchronize everything on mode_config.mutex, to keep
+ * the callbacks and list modification all serialized
+ * with respect to modesetting ioctls from userspace.
+ */
+ drm_modeset_lock(&crtc->mutex, NULL);
+ dispc_runtime_get();
+
+ /*
+ * If we are still pending a previous update, wait.. when the
+ * pending update completes, we get kicked again.
+ */
+ if (omap_crtc->apply_irq.registered)
+ goto out;
+
+	/* finish up previous applies: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->pending_applies, pending_node) {
+ apply->post_apply(apply);
+ list_del(&apply->pending_node);
+ }
+
+ need_apply = !list_empty(&omap_crtc->queued_applies);
+
+	/* then handle the next round of queued applies: */
+ list_for_each_entry_safe(apply, n,
+ &omap_crtc->queued_applies, queued_node) {
+ apply->pre_apply(apply);
+ list_del(&apply->queued_node);
+ apply->queued = false;
+ list_add_tail(&apply->pending_node,
+ &omap_crtc->pending_applies);
+ }
+
+ if (need_apply) {
+ enum omap_channel channel = omap_crtc->channel;
+
+ DBG("%s: GO", omap_crtc->name);
+
+ if (dispc_mgr_is_enabled(channel)) {
+ dispc_mgr_go(channel);
+ omap_irq_register(dev, &omap_crtc->apply_irq);
+ } else {
+ struct omap_drm_private *priv = dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+ }
+
+out:
+ dispc_runtime_put();
+ drm_modeset_unlock(&crtc->mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+ struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* no need to queue it again if it is already queued: */
+ if (apply->queued)
+ return 0;
+
+ apply->queued = true;
+ list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+ /*
+ * If there are no currently pending updates, then go ahead and
+ * kick the worker immediately, otherwise it will run again when
+ * the current update finishes.
+ */
+ if (list_empty(&omap_crtc->pending_applies)) {
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ queue_work(priv->wq, &omap_crtc->apply_work);
+ }
+
+ return 0;
+}
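The apply machinery moved above is a small two-list state machine: omap_crtc_apply() parks an update on the queued list (at most once), the worker promotes queued entries to pending and writes GO, and the next worker pass, triggered by the apply irq, retires the pending entries through post_apply(). A toy single-threaded model of those transitions, with locking and list-ordering details elided:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct apply {
        const char *name;
        bool queued;
        struct apply *next;
};

static struct apply *queued_head;
static struct apply *pending_head;

static void push(struct apply **head, struct apply *a)
{
        a->next = *head;
        *head = a;
}

/* models omap_crtc_apply(): queue at most once */
static void apply_queue(struct apply *a)
{
        if (a->queued)
                return;         /* already waiting for the worker */
        a->queued = true;
        push(&queued_head, a);
}

/* one worker pass: retire pending applies, then promote queued ones */
static void worker_pass(void)
{
        struct apply *a;

        for (a = pending_head; a; a = a->next)
                printf("post_apply(%s)\n", a->name);
        pending_head = NULL;

        while (queued_head) {
                a = queued_head;
                queued_head = a->next;
                a->queued = false;
                printf("pre_apply(%s)\n", a->name);
                push(&pending_head, a);
        }
        /*
         * In the driver, a non-empty pending list at this point means
         * "write GO and re-run the worker from the apply irq".
         */
}

int main(void)
{
        struct apply a = { .name = "winA", .queued = false, .next = NULL };

        apply_queue(&a);
        apply_queue(&a);        /* no-op: already queued */
        worker_pass();          /* pre_apply(winA), GO */
        worker_pass();          /* post_apply(winA) */
        return 0;
}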
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(apply, struct omap_crtc, apply);
+ struct drm_crtc *crtc = &omap_crtc->base;
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ unsigned int i;
+
+ DBG("%s: enabled=%d", omap_crtc->name, omap_crtc->enabled);
+
+ for (i = 0; i < priv->num_encoders; i++) {
+ if (priv->encoders[i]->crtc == crtc) {
+ encoder = priv->encoders[i];
+ break;
+ }
+ }
+
+ if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
+ omap_encoder_set_enabled(omap_crtc->current_encoder, false);
+
+ omap_crtc->current_encoder = encoder;
+
+ if (!omap_crtc->enabled) {
+ if (encoder)
+ omap_encoder_set_enabled(encoder, false);
+ } else {
+ if (encoder) {
+ omap_encoder_set_enabled(encoder, false);
+ omap_encoder_update(encoder, omap_crtc->mgr,
+ &omap_crtc->timings);
+ omap_encoder_set_enabled(encoder, true);
+ }
+ }
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+ /* nothing needed for post-apply */
+}
+
+void omap_crtc_flush(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ int loops = 0;
+
+ while (!list_empty(&omap_crtc->pending_applies) ||
+ !list_empty(&omap_crtc->queued_applies) ||
+ omap_crtc->event || omap_crtc->old_fb) {
+
+ if (++loops > 10) {
+ dev_err(crtc->dev->dev,
+ "omap_crtc_flush() timeout\n");
+ break;
+ }
+
+ schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC Functions
*/
static void omap_crtc_destroy(struct drm_crtc *crtc)
@@ -214,17 +471,13 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
if (enabled != omap_crtc->enabled) {
omap_crtc->enabled = enabled;
- omap_crtc->full_update = true;
omap_crtc_apply(crtc, &omap_crtc->apply);
- /* also enable our private plane: */
- WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
-
- /* and any attached overlay planes: */
+ /* Enable/disable all planes associated with the CRTC. */
for (i = 0; i < priv->num_planes; i++) {
struct drm_plane *plane = priv->planes[i];
if (plane->crtc == crtc)
- WARN_ON(omap_plane_dpms(plane, mode));
+ WARN_ON(omap_plane_set_enable(plane, enabled));
}
}
}
@@ -256,13 +509,17 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
mode->type, mode->flags);
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
- omap_crtc->full_update = true;
- return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- NULL, NULL);
+ /*
+	 * The primary plane's CRTC pointer can be reset if the plane is
+	 * disabled directly through the universal plane API, so set it
+	 * again here.
+ */
+ crtc->primary->crtc = crtc;
+
+ return omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x, y, mode->hdisplay, mode->vdisplay,
+ NULL, NULL);
}
static void omap_crtc_prepare(struct drm_crtc *crtc)
@@ -282,15 +539,13 @@ static void omap_crtc_commit(struct drm_crtc *crtc)
static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct drm_plane *plane = omap_crtc->plane;
+ struct drm_plane *plane = crtc->primary;
struct drm_display_mode *mode = &crtc->mode;
return omap_plane_mode_set(plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- NULL, NULL);
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x, y, mode->hdisplay, mode->vdisplay,
+ NULL, NULL);
}
static void vblank_cb(void *arg)
@@ -299,6 +554,7 @@ static void vblank_cb(void *arg)
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
unsigned long flags;
+ struct drm_framebuffer *fb;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -306,10 +562,15 @@ static void vblank_cb(void *arg)
if (omap_crtc->event)
drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
+ fb = omap_crtc->old_fb;
+
omap_crtc->event = NULL;
omap_crtc->old_fb = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (fb)
+ drm_framebuffer_unreference(fb);
}
static void page_flip_worker(struct work_struct *work)
@@ -321,11 +582,10 @@ static void page_flip_worker(struct work_struct *work)
struct drm_gem_object *bo;
drm_modeset_lock(&crtc->mutex, NULL);
- omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- crtc->x << 16, crtc->y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16,
- vblank_cb, crtc);
+ omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ crtc->x, crtc->y, mode->hdisplay, mode->vdisplay,
+ vblank_cb, crtc);
drm_modeset_unlock(&crtc->mutex);
bo = omap_framebuffer_bo(crtc->primary->fb, 0);
@@ -361,11 +621,12 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
if (omap_crtc->old_fb) {
spin_unlock_irqrestore(&dev->event_lock, flags);
dev_err(dev->dev, "already a pending flip\n");
- return -EINVAL;
+ return -EBUSY;
}
omap_crtc->event = event;
omap_crtc->old_fb = primary->fb = fb;
+ drm_framebuffer_reference(omap_crtc->old_fb);
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -385,7 +646,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
static int omap_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val)
{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_drm_private *priv = crtc->dev->dev_private;
if (property == priv->rotation_prop) {
@@ -393,7 +653,7 @@ static int omap_crtc_set_property(struct drm_crtc *crtc,
!!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
}
- return omap_plane_set_property(omap_crtc->plane, property, val);
+ return omap_plane_set_property(crtc->primary, property, val);
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
@@ -412,256 +672,15 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.mode_set_base = omap_crtc_mode_set_base,
};
-const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
-}
-
-enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return omap_crtc->channel;
-}
-
-static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, error_irq);
- struct drm_crtc *crtc = &omap_crtc->base;
- DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
- /* avoid getting in a flood, unregister the irq until next vblank */
- __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-}
-
-static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, apply_irq);
- struct drm_crtc *crtc = &omap_crtc->base;
-
- if (!omap_crtc->error_irq.registered)
- __omap_irq_register(crtc->dev, &omap_crtc->error_irq);
-
- if (!dispc_mgr_go_busy(omap_crtc->channel)) {
- struct omap_drm_private *priv =
- crtc->dev->dev_private;
- DBG("%s: apply done", omap_crtc->name);
- __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
-}
-
-static void apply_worker(struct work_struct *work)
-{
- struct omap_crtc *omap_crtc =
- container_of(work, struct omap_crtc, apply_work);
- struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct omap_drm_apply *apply, *n;
- bool need_apply;
-
- /*
- * Synchronize everything on mode_config.mutex, to keep
- * the callbacks and list modification all serialized
- * with respect to modesetting ioctls from userspace.
- */
- drm_modeset_lock(&crtc->mutex, NULL);
- dispc_runtime_get();
-
- /*
- * If we are still pending a previous update, wait.. when the
- * pending update completes, we get kicked again.
- */
- if (omap_crtc->apply_irq.registered)
- goto out;
-
- /* finish up previous apply's: */
- list_for_each_entry_safe(apply, n,
- &omap_crtc->pending_applies, pending_node) {
- apply->post_apply(apply);
- list_del(&apply->pending_node);
- }
-
- need_apply = !list_empty(&omap_crtc->queued_applies);
-
- /* then handle the next round of of queued apply's: */
- list_for_each_entry_safe(apply, n,
- &omap_crtc->queued_applies, queued_node) {
- apply->pre_apply(apply);
- list_del(&apply->queued_node);
- apply->queued = false;
- list_add_tail(&apply->pending_node,
- &omap_crtc->pending_applies);
- }
-
- if (need_apply) {
- enum omap_channel channel = omap_crtc->channel;
-
- DBG("%s: GO", omap_crtc->name);
-
- if (dispc_mgr_is_enabled(channel)) {
- omap_irq_register(dev, &omap_crtc->apply_irq);
- dispc_mgr_go(channel);
- } else {
- struct omap_drm_private *priv = dev->dev_private;
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
- }
-
-out:
- dispc_runtime_put();
- drm_modeset_unlock(&crtc->mutex);
-}
-
-int omap_crtc_apply(struct drm_crtc *crtc,
- struct omap_drm_apply *apply)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- /* no need to queue it again if it is already queued: */
- if (apply->queued)
- return 0;
-
- apply->queued = true;
- list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
-
- /*
- * If there are no currently pending updates, then go ahead and
- * kick the worker immediately, otherwise it will run again when
- * the current update finishes.
- */
- if (list_empty(&omap_crtc->pending_applies)) {
- struct omap_drm_private *priv = crtc->dev->dev_private;
- queue_work(priv->wq, &omap_crtc->apply_work);
- }
-
- return 0;
-}
-
-/* called only from apply */
-static void set_enabled(struct drm_crtc *crtc, bool enable)
-{
- struct drm_device *dev = crtc->dev;
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- enum omap_channel channel = omap_crtc->channel;
- struct omap_irq_wait *wait;
- u32 framedone_irq, vsync_irq;
- int ret;
-
- if (dispc_mgr_is_enabled(channel) == enable)
- return;
-
- /*
- * Digit output produces some sync lost interrupts during the first
- * frame when enabling, so we need to ignore those.
- */
- omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-
- framedone_irq = dispc_mgr_get_framedone_irq(channel);
- vsync_irq = dispc_mgr_get_vsync_irq(channel);
-
- if (enable) {
- wait = omap_irq_wait_init(dev, vsync_irq, 1);
- } else {
- /*
- * When we disable the digit output, we need to wait for
- * FRAMEDONE to know that DISPC has finished with the output.
- *
- * OMAP2/3 does not have FRAMEDONE irq for digit output, and in
- * that case we need to use vsync interrupt, and wait for both
- * even and odd frames.
- */
-
- if (framedone_irq)
- wait = omap_irq_wait_init(dev, framedone_irq, 1);
- else
- wait = omap_irq_wait_init(dev, vsync_irq, 2);
- }
-
- dispc_mgr_enable(channel, enable);
-
- ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
- if (ret) {
- dev_err(dev->dev, "%s: timeout waiting for %s\n",
- omap_crtc->name, enable ? "enable" : "disable");
- }
-
- omap_irq_register(crtc->dev, &omap_crtc->error_irq);
-}
-
-static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
-{
- struct omap_crtc *omap_crtc =
- container_of(apply, struct omap_crtc, apply);
- struct drm_crtc *crtc = &omap_crtc->base;
- struct drm_encoder *encoder = NULL;
-
- DBG("%s: enabled=%d, full=%d", omap_crtc->name,
- omap_crtc->enabled, omap_crtc->full_update);
-
- if (omap_crtc->full_update) {
- struct omap_drm_private *priv = crtc->dev->dev_private;
- int i;
- for (i = 0; i < priv->num_encoders; i++) {
- if (priv->encoders[i]->crtc == crtc) {
- encoder = priv->encoders[i];
- break;
- }
- }
- }
-
- if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
- omap_encoder_set_enabled(omap_crtc->current_encoder, false);
-
- omap_crtc->current_encoder = encoder;
-
- if (!omap_crtc->enabled) {
- if (encoder)
- omap_encoder_set_enabled(encoder, false);
- } else {
- if (encoder) {
- omap_encoder_set_enabled(encoder, false);
- omap_encoder_update(encoder, omap_crtc->mgr,
- &omap_crtc->timings);
- omap_encoder_set_enabled(encoder, true);
- }
- }
-
- omap_crtc->full_update = false;
-}
-
-static void omap_crtc_post_apply(struct omap_drm_apply *apply)
-{
- /* nothing needed for post-apply */
-}
-
-void omap_crtc_flush(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- int loops = 0;
-
- while (!list_empty(&omap_crtc->pending_applies) ||
- !list_empty(&omap_crtc->queued_applies) ||
- omap_crtc->event || omap_crtc->old_fb) {
-
- if (++loops > 10) {
- dev_err(crtc->dev->dev,
- "omap_crtc_flush() timeout\n");
- break;
- }
-
- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
- }
-}
+/* -----------------------------------------------------------------------------
+ * Init and Cleanup
+ */
static const char *channel_names[] = {
- [OMAP_DSS_CHANNEL_LCD] = "lcd",
- [OMAP_DSS_CHANNEL_DIGIT] = "tv",
- [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
- [OMAP_DSS_CHANNEL_LCD3] = "lcd3",
+ [OMAP_DSS_CHANNEL_LCD] = "lcd",
+ [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+ [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+ [OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
void omap_crtc_pre_init(void)
@@ -681,12 +700,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc = NULL;
struct omap_crtc *omap_crtc;
struct omap_overlay_manager_info *info;
+ int ret;
DBG("%s", channel_names[channel]);
omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
if (!omap_crtc)
- goto fail;
+ return NULL;
crtc = &omap_crtc->base;
@@ -700,8 +720,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->apply.post_apply = omap_crtc_post_apply;
omap_crtc->channel = channel;
- omap_crtc->plane = plane;
- omap_crtc->plane->crtc = crtc;
omap_crtc->name = channel_names[channel];
omap_crtc->pipe = id;
@@ -723,18 +741,18 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
info->trans_enabled = false;
- drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &omap_crtc_funcs);
+ if (ret < 0) {
+ kfree(omap_crtc);
+ return NULL;
+ }
+
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
- omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+ omap_plane_install_properties(crtc->primary, &crtc->base);
omap_crtcs[channel] = omap_crtc;
return crtc;
-
-fail:
- if (crtc)
- omap_crtc_destroy(crtc);
-
- return NULL;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58bcd6ae0255..9f32a83ca507 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -148,11 +148,15 @@ struct refill_engine {
bool async;
- wait_queue_head_t wait_for_refill;
+ struct completion compl;
struct list_head idle_node;
};
+struct dmm_platform_data {
+ uint32_t cpu_cache_flags;
+};
+
struct dmm {
struct device *dev;
void __iomem *base;
@@ -183,6 +187,8 @@ struct dmm {
/* allocation list and lock */
struct list_head alloc_head;
+
+ const struct dmm_platform_data *plat_data;
};
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 56c60552abba..042038e8a662 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/list.h>
+#include <linux/completion.h>
#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
@@ -39,6 +40,10 @@
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;
+#if defined(CONFIG_OF)
+static const struct of_device_id dmm_of_match[];
+#endif
+
/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);
@@ -58,19 +63,19 @@ static const struct {
uint32_t slot_w; /* width of each slot (in pixels) */
uint32_t slot_h; /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
- [TILFMT_8BIT] = GEOM(0, 0, 1),
- [TILFMT_16BIT] = GEOM(0, 1, 2),
- [TILFMT_32BIT] = GEOM(1, 1, 4),
- [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+ [TILFMT_8BIT] = GEOM(0, 0, 1),
+ [TILFMT_16BIT] = GEOM(0, 1, 2),
+ [TILFMT_32BIT] = GEOM(1, 1, 4),
+ [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
- [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
- DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
- [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
- DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+ [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+ DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+ [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+ DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
/* simple allocator to grab next 16 byte aligned memory from txn */
@@ -142,10 +147,10 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
for (i = 0; i < dmm->num_engines; i++) {
if (status & DMM_IRQSTAT_LST) {
- wake_up_interruptible(&dmm->engines[i].wait_for_refill);
-
if (dmm->engines[i].async)
release_engine(&dmm->engines[i]);
+
+ complete(&dmm->engines[i].compl);
}
status >>= 8;
@@ -269,15 +274,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
/* mark whether it is async to denote list management in IRQ handler */
engine->async = wait ? false : true;
+ reinit_completion(&engine->compl);
+ /* verify that the irq handler sees the 'async' and completion value */
+ smp_mb();
/* kick reload */
writel(engine->refill_pa,
dmm->base + reg[PAT_DESCR][engine->id]);
if (wait) {
- if (wait_event_interruptible_timeout(engine->wait_for_refill,
- wait_status(engine, DMM_PATSTATUS_READY) == 0,
- msecs_to_jiffies(1)) <= 0) {
+ if (!wait_for_completion_timeout(&engine->compl,
+ msecs_to_jiffies(1))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
}
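The waitqueue-to-completion conversion above pairs reinit_completion() with an explicit smp_mb() because the IRQ handler reads engine->async and completes the engine without taking any lock shared with dmm_txn_commit(). As a mental model, here is a minimal userspace analogue of the completion API built on POSIX threads; the mutex provides the ordering that the kernel code has to request by hand, and the 100 ms timeout is arbitrary (the driver waits 1 ms):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* a userspace stand-in for the kernel's struct completion */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            done;
};

static void reinit_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = false;
        pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

/* returns false on timeout, like wait_for_completion_timeout() == 0 */
static bool wait_for_completion_ms(struct completion *c, int ms)
{
        struct timespec ts;
        bool done;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec  += ms / 1000;
        ts.tv_nsec += (long)(ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
                ts.tv_sec++;
                ts.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&c->lock);
        while (!c->done && ret == 0)
                ret = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
        done = c->done;
        pthread_mutex_unlock(&c->lock);
        return done;
}

static struct completion engine_compl = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

static void *irq_thread(void *arg)
{
        (void)arg;
        complete(&engine_compl);        /* models the LST interrupt */
        return NULL;
}

int main(void)  /* build with -lpthread */
{
        pthread_t t;

        reinit_completion(&engine_compl);
        pthread_create(&t, NULL, irq_thread, NULL);
        puts(wait_for_completion_ms(&engine_compl, 100) ?
             "done" : "timed out");
        pthread_join(&t, NULL);
        return 0;
}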
@@ -529,6 +536,11 @@ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
+uint32_t tiler_get_cpu_cache_flags(void)
+{
+ return omap_dmm->plat_data->cpu_cache_flags;
+}
+
bool dmm_is_available(void)
{
return omap_dmm ? true : false;
@@ -592,6 +604,18 @@ static int omap_dmm_probe(struct platform_device *dev)
init_waitqueue_head(&omap_dmm->engine_queue);
+ if (dev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(dmm_of_match, dev->dev.of_node);
+ if (!match) {
+ dev_err(&dev->dev, "failed to find matching device node\n");
+ return -ENODEV;
+ }
+
+ omap_dmm->plat_data = match->data;
+ }
+
/* lookup hwmod data - base address and irq */
mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -696,7 +720,7 @@ static int omap_dmm_probe(struct platform_device *dev)
(REFILL_BUFFER_SIZE * i);
omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
(REFILL_BUFFER_SIZE * i);
- init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+ init_completion(&omap_dmm->engines[i].compl);
list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
}
@@ -941,7 +965,7 @@ error:
}
#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
struct tcm_area area;
@@ -965,16 +989,28 @@ static int omap_dmm_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops omap_dmm_pm_ops = {
- .resume = omap_dmm_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);
+
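SIMPLE_DEV_PM_OPS defines the ops structure unconditionally, which is why the #ifdef around the .pm assignment in the driver struct can go away; only the callbacks themselves still need CONFIG_PM_SLEEP fencing. From memory of include/linux/pm.h in this era, the macro expands to roughly:

/* approximate expansion -- see include/linux/pm.h for the real thing */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
        const struct dev_pm_ops name = { \
                SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
        }

/*
 * With CONFIG_PM_SLEEP, SET_SYSTEM_SLEEP_PM_OPS() fills in .suspend,
 * .resume, .freeze, .thaw, .poweroff and .restore from the two
 * callbacks; without it, the macro expands to nothing and the ops
 * structure is simply empty, so referencing it is always safe.
 */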
#if defined(CONFIG_OF)
+static const struct dmm_platform_data dmm_omap4_platform_data = {
+ .cpu_cache_flags = OMAP_BO_WC,
+};
+
+static const struct dmm_platform_data dmm_omap5_platform_data = {
+ .cpu_cache_flags = OMAP_BO_UNCACHED,
+};
+
static const struct of_device_id dmm_of_match[] = {
- { .compatible = "ti,omap4-dmm", },
- { .compatible = "ti,omap5-dmm", },
+ {
+ .compatible = "ti,omap4-dmm",
+ .data = &dmm_omap4_platform_data,
+ },
+ {
+ .compatible = "ti,omap5-dmm",
+ .data = &dmm_omap5_platform_data,
+ },
{},
};
#endif
@@ -986,9 +1022,7 @@ struct platform_driver omap_dmm_driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
.of_match_table = of_match_ptr(dmm_of_match),
-#ifdef CONFIG_PM
.pm = &omap_dmm_pm_ops,
-#endif
},
};
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 4fdd61e54bd2..e83c78372db8 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -106,6 +106,7 @@ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+uint32_t tiler_get_cpu_cache_flags(void);
bool dmm_is_available(void);
extern struct platform_driver omap_dmm_driver;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 8241ed9b353c..94920d47e3b6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -128,6 +128,29 @@ cleanup:
return r;
}
+static int omap_modeset_create_crtc(struct drm_device *dev, int id,
+ enum omap_channel channel)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
+
+ crtc = omap_crtc_init(dev, plane, channel, id);
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+ priv->crtcs[id] = crtc;
+ priv->num_crtcs++;
+
+ priv->planes[id] = plane;
+ priv->num_planes++;
+
+ return 0;
+}
+
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
@@ -136,6 +159,7 @@ static int omap_modeset_init(struct drm_device *dev)
int num_mgrs = dss_feat_get_num_mgrs();
int num_crtcs;
int i, id = 0;
+ int ret;
drm_mode_config_init(dev);
@@ -209,18 +233,13 @@ static int omap_modeset_init(struct drm_device *dev)
* allocated crtc, we create a new crtc for it
*/
if (!channel_used(dev, channel)) {
- struct drm_plane *plane;
- struct drm_crtc *crtc;
-
- plane = omap_plane_init(dev, id, true);
- crtc = omap_crtc_init(dev, plane, channel, id);
-
- BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
- priv->crtcs[id] = crtc;
- priv->num_crtcs++;
-
- priv->planes[id] = plane;
- priv->num_planes++;
+ ret = omap_modeset_create_crtc(dev, id, channel);
+ if (ret < 0) {
+ dev_err(dev->dev,
+ "could not create CRTC (channel %u)\n",
+ channel);
+ return ret;
+ }
id++;
}
@@ -234,26 +253,8 @@ static int omap_modeset_init(struct drm_device *dev)
/* find a free manager for this crtc */
for (i = 0; i < num_mgrs; i++) {
- if (!channel_used(dev, i)) {
- struct drm_plane *plane;
- struct drm_crtc *crtc;
-
- plane = omap_plane_init(dev, id, true);
- crtc = omap_crtc_init(dev, plane, i, id);
-
- BUG_ON(priv->num_crtcs >=
- ARRAY_SIZE(priv->crtcs));
-
- priv->crtcs[id] = crtc;
- priv->num_crtcs++;
-
- priv->planes[id] = plane;
- priv->num_planes++;
-
+ if (!channel_used(dev, i))
break;
- } else {
- continue;
- }
}
if (i == num_mgrs) {
@@ -261,13 +262,24 @@ static int omap_modeset_init(struct drm_device *dev)
dev_err(dev->dev, "no managers left for crtc\n");
return -ENOMEM;
}
+
+ ret = omap_modeset_create_crtc(dev, id, i);
+ if (ret < 0) {
+ dev_err(dev->dev,
+ "could not create CRTC (channel %u)\n", i);
+ return ret;
+ }
}
/*
* Create normal planes for the remaining overlays:
*/
for (; id < num_ovls; id++) {
- struct drm_plane *plane = omap_plane_init(dev, id, false);
+ struct drm_plane *plane;
+
+ plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
priv->planes[priv->num_planes++] = plane;
@@ -286,14 +298,13 @@ static int omap_modeset_init(struct drm_device *dev)
for (id = 0; id < priv->num_crtcs; id++) {
struct drm_crtc *crtc = priv->crtcs[id];
enum omap_channel crtc_channel;
- enum omap_dss_output_id supported_outputs;
crtc_channel = omap_crtc_channel(crtc);
- supported_outputs =
- dss_feat_get_supported_outputs(crtc_channel);
- if (supported_outputs & output->id)
+ if (output->dispc_channel == crtc_channel) {
encoder->possible_crtcs |= (1 << id);
+ break;
+ }
}
omap_dss_put_device(output);
@@ -480,6 +491,7 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+ spin_lock_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
omap_gem_init(dev);
@@ -519,7 +531,8 @@ static int dev_unload(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
- omap_fbdev_free(dev);
+ if (priv->fbdev)
+ omap_fbdev_free(dev);
/* flush crtcs so the fbs get released */
for (i = 0; i < priv->num_crtcs; i++)
@@ -588,9 +601,11 @@ static void dev_lastclose(struct drm_device *dev)
}
}
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
- if (ret)
- DBG("failed to restore crtc mode");
+ if (priv->fbdev) {
+ ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+ if (ret)
+ DBG("failed to restore crtc mode");
+ }
}
static void dev_preclose(struct drm_device *dev, struct drm_file *file)
@@ -610,74 +625,57 @@ static const struct vm_operations_struct omap_gem_vm_ops = {
};
static const struct file_operations omapdriver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .mmap = omap_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = noop_llseek,
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = omap_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
};
static struct drm_driver omap_drm_driver = {
- .driver_features =
- DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
- .load = dev_load,
- .unload = dev_unload,
- .open = dev_open,
- .lastclose = dev_lastclose,
- .preclose = dev_preclose,
- .postclose = dev_postclose,
- .set_busid = drm_platform_set_busid,
- .get_vblank_counter = drm_vblank_count,
- .enable_vblank = omap_irq_enable_vblank,
- .disable_vblank = omap_irq_disable_vblank,
- .irq_preinstall = omap_irq_preinstall,
- .irq_postinstall = omap_irq_postinstall,
- .irq_uninstall = omap_irq_uninstall,
- .irq_handler = omap_irq_handler,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM
+ | DRIVER_PRIME,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .set_busid = drm_platform_set_busid,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = omap_irq_enable_vblank,
+ .disable_vblank = omap_irq_disable_vblank,
+ .irq_preinstall = omap_irq_preinstall,
+ .irq_postinstall = omap_irq_postinstall,
+ .irq_uninstall = omap_irq_uninstall,
+ .irq_handler = omap_irq_handler,
#ifdef CONFIG_DEBUG_FS
- .debugfs_init = omap_debugfs_init,
- .debugfs_cleanup = omap_debugfs_cleanup,
+ .debugfs_init = omap_debugfs_init,
+ .debugfs_cleanup = omap_debugfs_cleanup,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = omap_gem_prime_export,
- .gem_prime_import = omap_gem_prime_import,
- .gem_free_object = omap_gem_free_object,
- .gem_vm_ops = &omap_gem_vm_ops,
- .dumb_create = omap_gem_dumb_create,
- .dumb_map_offset = omap_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
- .ioctls = ioctls,
- .num_ioctls = DRM_OMAP_NUM_IOCTLS,
- .fops = &omapdriver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = omap_gem_prime_export,
+ .gem_prime_import = omap_gem_prime_import,
+ .gem_free_object = omap_gem_free_object,
+ .gem_vm_ops = &omap_gem_vm_ops,
+ .dumb_create = omap_gem_dumb_create,
+ .dumb_map_offset = omap_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .ioctls = ioctls,
+ .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+ .fops = &omapdriver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
-static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
-{
- DBG("");
- return 0;
-}
-
-static int pdev_resume(struct platform_device *device)
-{
- DBG("");
- return 0;
-}
-
-static void pdev_shutdown(struct platform_device *device)
-{
- DBG("");
-}
-
static int pdev_probe(struct platform_device *device)
{
int r;
@@ -709,24 +707,35 @@ static int pdev_remove(struct platform_device *device)
return 0;
}
-#ifdef CONFIG_PM
-static const struct dev_pm_ops omapdrm_pm_ops = {
- .resume = omap_gem_resume,
-};
+#ifdef CONFIG_PM_SLEEP
+static int omap_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_disable(drm_dev);
+
+ return 0;
+}
+
+static int omap_drm_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_enable(drm_dev);
+
+ return omap_gem_resume(dev);
+}
#endif
+static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);
+
static struct platform_driver pdev = {
- .driver = {
- .name = DRIVER_NAME,
-#ifdef CONFIG_PM
- .pm = &omapdrm_pm_ops,
-#endif
- },
- .probe = pdev_probe,
- .remove = pdev_remove,
- .suspend = pdev_suspend,
- .resume = pdev_resume,
- .shutdown = pdev_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &omapdrm_pm_ops,
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
};
static int __init omap_drm_init(void)
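The hunks above drop the empty legacy platform_driver suspend/resume/shutdown callbacks and move the driver to dev_pm_ops. A minimal sketch of the resulting pattern, with hypothetical "foo" names; SIMPLE_DEV_PM_OPS() expands to empty ops when CONFIG_PM_SLEEP is off, so the use site needs no #ifdef:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware; runs on system suspend */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore hardware state on system resume */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = &foo_pm_ops,
	},
};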
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 60e47b33c801..b31c79f15aed 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -105,6 +105,9 @@ struct omap_drm_private {
struct workqueue_struct *wq;
+ /* lock for obj_list below */
+ spinlock_t list_lock;
+
/* list of GEM objects: */
struct list_head obj_list;
@@ -160,15 +163,15 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
void omap_crtc_flush(struct drm_crtc *crtc);
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int plane_id, bool private_plane);
-int omap_plane_dpms(struct drm_plane *plane, int mode);
+ int id, enum drm_plane_type type);
+int omap_plane_set_enable(struct drm_plane *plane, bool enable);
int omap_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- void (*fxn)(void *), void *arg);
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h,
+ void (*fxn)(void *), void *arg);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
int omap_plane_set_property(struct drm_plane *plane,
@@ -186,8 +189,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
-void omap_connector_flush(struct drm_connector *connector,
- int x, int y, int w, int h);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
@@ -208,8 +209,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from);
-void omap_framebuffer_flush(struct drm_framebuffer *fb,
- int x, int y, int w, int h);
void omap_gem_init(struct drm_device *dev);
void omap_gem_deinit(struct drm_device *dev);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 2a5cacdc344b..b2c1a29cc12b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -86,6 +86,7 @@ struct plane {
struct omap_framebuffer {
struct drm_framebuffer base;
+ int pin_count;
const struct format *format;
struct plane planes[4];
};
@@ -121,18 +122,6 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned flags, unsigned color,
struct drm_clip_rect *clips, unsigned num_clips)
{
- int i;
-
- drm_modeset_lock_all(fb->dev);
-
- for (i = 0; i < num_clips; i++) {
- omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
- clips[i].x2 - clips[i].x1,
- clips[i].y2 - clips[i].y1);
- }
-
- drm_modeset_unlock_all(fb->dev);
-
return 0;
}
@@ -261,6 +250,11 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ if (omap_fb->pin_count > 0) {
+ omap_fb->pin_count++;
+ return 0;
+ }
+
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
@@ -269,6 +263,8 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
}
+ omap_fb->pin_count++;
+
return 0;
fail:
@@ -287,6 +283,11 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ omap_fb->pin_count--;
+
+ if (omap_fb->pin_count > 0)
+ return 0;
+
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
ret = omap_gem_put_paddr(plane->bo);
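The pin_count added in these two hunks makes omap_framebuffer_pin()/unpin() behave as a refcounted pair: only the first pin maps the planes and only the last unpin releases them, so nested users no longer trigger redundant map/unmap cycles. The idiom in isolation, as a hedged sketch with a hypothetical resource type:

struct pinned_res {
	int pin_count;	/* callers serialize pin/unpin externally */
};

static int res_pin(struct pinned_res *r)
{
	if (r->pin_count > 0) {
		r->pin_count++;		/* already mapped, take a reference */
		return 0;
	}

	/* ... the expensive mapping work runs only for the first user ... */

	r->pin_count++;
	return 0;
}

static void res_unpin(struct pinned_res *r)
{
	if (--r->pin_count > 0)
		return;			/* other users still need the mapping */

	/* ... tear the mapping down on the last unpin ... */
}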
@@ -336,34 +337,6 @@ struct drm_connector *omap_framebuffer_get_next_connector(
return NULL;
}
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-void omap_framebuffer_flush(struct drm_framebuffer *fb,
- int x, int y, int w, int h)
-{
- struct drm_connector *connector = NULL;
-
- VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
-
- /* FIXME: This is racy - no protection against modeset config changes. */
- while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
- /* only consider connectors that are part of a chain */
- if (connector->encoder && connector->encoder->crtc) {
- /* TODO: maybe this should propagate thru the crtc who
- * could do the coordinate translation..
- */
- struct drm_crtc *crtc = connector->encoder->crtc;
- int cx = max(0, x - crtc->x);
- int cy = max(0, y - crtc->y);
- int cw = w + (x - crtc->x) - cx;
- int ch = h + (y - crtc->y) - cy;
-
- omap_connector_flush(connector, cx, cy, cw, ch);
- }
- }
-}
-
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
@@ -407,7 +380,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- struct omap_framebuffer *omap_fb;
+ struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
const struct format *format = NULL;
int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
@@ -450,6 +423,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
+ if (pitch % format->planes[i].stride_bpp != 0) {
+ dev_err(dev->dev,
+ "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n",
+ pitch, format->planes[i].stride_bpp);
+ ret = -EINVAL;
+ goto fail;
+ }
+
size = pitch * mode_cmd->height / format->planes[i].sub_y;
if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
@@ -478,8 +459,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
return fb;
fail:
- if (fb)
- omap_framebuffer_destroy(fb);
+ kfree(omap_fb);
return ERR_PTR(ret);
}
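Two fixes land in omap_framebuffer_init() above: the new pitch check rejects buffers whose byte pitch is not a whole multiple of the per-pixel stride (for a 4-byte pixel, a pitch of 4098 fails since 4098 % 4 == 2), and the failure path now frees the not-yet-initialized omap_fb directly instead of calling the destroy hook on a half-built framebuffer. The validation reduced to a sketch, names illustrative only:

static int check_pitch(unsigned int pitch, unsigned int bytes_per_pixel)
{
	if (pitch % bytes_per_pixel != 0)
		return -EINVAL;	/* e.g. pitch 4098 with 4-byte pixels */

	return 0;
}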
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index d292d24b3a6e..950cd3389092 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -42,42 +42,8 @@ struct omap_fbdev {
struct work_struct work;
};
-static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
static struct drm_fb_helper *get_fb(struct fb_info *fbi);
-static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t res;
-
- res = fb_sys_write(fbi, buf, count, ppos);
- omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
-
- return res;
-}
-
-static void omap_fbdev_fillrect(struct fb_info *fbi,
- const struct fb_fillrect *rect)
-{
- sys_fillrect(fbi, rect);
- omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void omap_fbdev_copyarea(struct fb_info *fbi,
- const struct fb_copyarea *area)
-{
- sys_copyarea(fbi, area);
- omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
-}
-
-static void omap_fbdev_imageblit(struct fb_info *fbi,
- const struct fb_image *image)
-{
- sys_imageblit(fbi, image);
- omap_fbdev_flush(fbi, image->dx, image->dy,
- image->width, image->height);
-}
-
static void pan_worker(struct work_struct *work)
{
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
@@ -121,10 +87,10 @@ static struct fb_ops omap_fb_ops = {
* basic fbdev ops which write to the framebuffer
*/
.fb_read = fb_sys_read,
- .fb_write = omap_fbdev_write,
- .fb_fillrect = omap_fbdev_fillrect,
- .fb_copyarea = omap_fbdev_copyarea,
- .fb_imageblit = omap_fbdev_imageblit,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -294,21 +260,6 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
return fbi->par;
}
-/* flush an area of the framebuffer (in case of manual update display that
- * is not automatically flushed)
- */
-static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
-{
- struct drm_fb_helper *helper = get_fb(fbi);
-
- if (!helper)
- return;
-
- VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
-
- omap_framebuffer_flush(helper->fb, x, y, w, h);
-}
-
/* initialize fbdev helper */
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index aeb91ed653c9..e9718b99a8a9 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -828,6 +828,7 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
dev_err(obj->dev->dev,
"could not release unmap: %d\n", ret);
}
+ omap_obj->paddr = 0;
omap_obj->block = NULL;
}
}
@@ -1272,13 +1273,16 @@ unlock:
void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
evict(obj);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ spin_lock(&priv->list_lock);
list_del(&omap_obj->mm_list);
+ spin_unlock(&priv->list_lock);
drm_gem_free_mmap_offset(obj);
@@ -1358,8 +1362,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
/* currently don't allow cached buffers.. there is some caching
* stuff that needs to be handled better
*/
- flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
- flags |= OMAP_BO_WC;
+ flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
+ flags |= tiler_get_cpu_cache_flags();
/* align dimensions to slot boundaries... */
tiler_align(gem2fmt(flags),
@@ -1376,7 +1380,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
if (!omap_obj)
goto fail;
+ spin_lock(&priv->list_lock);
list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
obj = &omap_obj->base;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index a2dbfb1737b4..b46dabd9faf7 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -156,16 +156,16 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
}
static struct dma_buf_ops omap_dmabuf_ops = {
- .map_dma_buf = omap_gem_map_dma_buf,
- .unmap_dma_buf = omap_gem_unmap_dma_buf,
- .release = omap_gem_dmabuf_release,
- .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
- .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
- .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
- .kmap = omap_gem_dmabuf_kmap,
- .kunmap = omap_gem_dmabuf_kunmap,
- .mmap = omap_gem_dmabuf_mmap,
+ .map_dma_buf = omap_gem_map_dma_buf,
+ .unmap_dma_buf = omap_gem_unmap_dma_buf,
+ .release = omap_gem_dmabuf_release,
+ .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
+ .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
+ .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
+ .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+ .kmap = omap_gem_dmabuf_kmap,
+ .kunmap = omap_gem_dmabuf_kunmap,
+ .mmap = omap_gem_dmabuf_mmap,
};
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index f035d2bceae7..3eb097efc488 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -34,7 +34,7 @@ static void omap_irq_update(struct drm_device *dev)
struct omap_drm_irq *irq;
uint32_t irqmask = priv->vblank_mask;
- BUG_ON(!spin_is_locked(&list_lock));
+ assert_spin_locked(&list_lock);
list_for_each_entry(irq, &priv->irq_list, node)
irqmask |= irq->irqmask;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index ee8e2b3a117e..1c6b63f39474 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -65,12 +65,16 @@ struct omap_plane {
struct callback apply_done_cb;
};
-static void unpin_worker(struct drm_flip_work *work, void *val)
+static void omap_plane_unpin_worker(struct drm_flip_work *work, void *val)
{
struct omap_plane *omap_plane =
container_of(work, struct omap_plane, unpin_work);
struct drm_device *dev = omap_plane->base.dev;
+ /*
+ * omap_framebuffer_pin/unpin are always called from priv->wq,
+ * so there's no need for locking here.
+ */
omap_framebuffer_unpin(val);
mutex_lock(&dev->mode_config.mutex);
drm_framebuffer_unreference(val);
@@ -78,7 +82,8 @@ static void unpin_worker(struct drm_flip_work *work, void *val)
}
/* update which fb (if any) is pinned for scanout */
-static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
+static int omap_plane_update_pin(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
@@ -121,13 +126,12 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
struct drm_crtc *crtc = plane->crtc;
enum omap_channel channel;
bool enabled = omap_plane->enabled && crtc;
- bool ilace, replication;
int ret;
DBG("%s, enabled=%d", omap_plane->name, enabled);
/* if fb has changed, pin new fb: */
- update_pin(plane, enabled ? plane->fb : NULL);
+ omap_plane_update_pin(plane, enabled ? plane->fb : NULL);
if (!enabled) {
dispc_ovl_enable(omap_plane->id, false);
@@ -145,20 +149,17 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
&info->paddr, &info->p_uv_addr);
- /* TODO: */
- ilace = false;
- replication = false;
+ dispc_ovl_set_channel_out(omap_plane->id, channel);
/* and finally, update omapdss: */
- ret = dispc_ovl_setup(omap_plane->id, info,
- replication, omap_crtc_timings(crtc), false);
+ ret = dispc_ovl_setup(omap_plane->id, info, false,
+ omap_crtc_timings(crtc), false);
if (ret) {
dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
return;
}
dispc_ovl_enable(omap_plane->id, true);
- dispc_ovl_set_channel_out(omap_plane->id, channel);
}
static void omap_plane_post_apply(struct omap_drm_apply *apply)
@@ -167,7 +168,6 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
container_of(apply, struct omap_plane, apply);
struct drm_plane *plane = &omap_plane->base;
struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_overlay_info *info = &omap_plane->info;
struct callback cb;
cb = omap_plane->apply_done_cb;
@@ -177,14 +177,9 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
if (cb.fxn)
cb.fxn(cb.arg);
-
- if (omap_plane->enabled) {
- omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
- info->out_width, info->out_height);
- }
}
-static int apply(struct drm_plane *plane)
+static int omap_plane_apply(struct drm_plane *plane)
{
if (plane->crtc) {
struct omap_plane *omap_plane = to_omap_plane(plane);
@@ -194,12 +189,12 @@ static int apply(struct drm_plane *plane)
}
int omap_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- void (*fxn)(void *), void *arg)
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h,
+ void (*fxn)(void *), void *arg)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_window *win = &omap_plane->win;
@@ -209,11 +204,10 @@ int omap_plane_mode_set(struct drm_plane *plane,
win->crtc_w = crtc_w;
win->crtc_h = crtc_h;
- /* src values are in Q16 fixed point, convert to integer: */
- win->src_x = src_x >> 16;
- win->src_y = src_y >> 16;
- win->src_w = src_w >> 16;
- win->src_h = src_h >> 16;
+ win->src_x = src_x;
+ win->src_y = src_y;
+ win->src_w = src_w;
+ win->src_h = src_h;
if (fxn) {
/* omap_crtc should ensure that a new page flip
@@ -225,15 +219,7 @@ int omap_plane_mode_set(struct drm_plane *plane,
omap_plane->apply_done_cb.arg = arg;
}
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
- plane->fb = fb;
- plane->crtc = crtc;
-
- return apply(plane);
+ return omap_plane_apply(plane);
}
static int omap_plane_update(struct drm_plane *plane,
@@ -254,17 +240,29 @@ static int omap_plane_update(struct drm_plane *plane,
break;
}
+ /*
+ * We don't need to take a reference to the framebuffer as the DRM core
+ * has already done so for the purpose of setting plane->fb.
+ */
+ plane->fb = fb;
+ plane->crtc = crtc;
+
+ /* src values are in Q16 fixed point, convert to integer: */
return omap_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h,
+ src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
NULL, NULL);
}
static int omap_plane_disable(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
+
omap_plane->win.rotation = BIT(DRM_ROTATE_0);
- return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ omap_plane->info.zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
+ ? 0 : omap_plane->id;
+
+ return omap_plane_set_enable(plane, false);
}
static void omap_plane_destroy(struct drm_plane *plane)
@@ -275,7 +273,6 @@ static void omap_plane_destroy(struct drm_plane *plane)
omap_irq_unregister(plane->dev, &omap_plane->error_irq);
- omap_plane_disable(plane);
drm_plane_cleanup(plane);
drm_flip_work_cleanup(&omap_plane->unpin_work);
@@ -283,18 +280,15 @@ static void omap_plane_destroy(struct drm_plane *plane)
kfree(omap_plane);
}
-int omap_plane_dpms(struct drm_plane *plane, int mode)
+int omap_plane_set_enable(struct drm_plane *plane, bool enable)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- bool enabled = (mode == DRM_MODE_DPMS_ON);
- int ret = 0;
- if (enabled != omap_plane->enabled) {
- omap_plane->enabled = enabled;
- ret = apply(plane);
- }
+ if (enable == omap_plane->enabled)
+ return 0;
- return ret;
+ omap_plane->enabled = enable;
+ return omap_plane_apply(plane);
}
/* helper to install properties which are common to planes and crtcs */
@@ -342,61 +336,63 @@ int omap_plane_set_property(struct drm_plane *plane,
if (property == priv->rotation_prop) {
DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
omap_plane->win.rotation = val;
- ret = apply(plane);
+ ret = omap_plane_apply(plane);
} else if (property == priv->zorder_prop) {
DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
omap_plane->info.zorder = val;
- ret = apply(plane);
+ ret = omap_plane_apply(plane);
}
return ret;
}
static const struct drm_plane_funcs omap_plane_funcs = {
- .update_plane = omap_plane_update,
- .disable_plane = omap_plane_disable,
- .destroy = omap_plane_destroy,
- .set_property = omap_plane_set_property,
+ .update_plane = omap_plane_update,
+ .disable_plane = omap_plane_disable,
+ .destroy = omap_plane_destroy,
+ .set_property = omap_plane_set_property,
};
static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
{
struct omap_plane *omap_plane =
container_of(irq, struct omap_plane, error_irq);
- DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+ DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name,
+ irqstatus);
}
static const char *plane_names[] = {
- [OMAP_DSS_GFX] = "gfx",
- [OMAP_DSS_VIDEO1] = "vid1",
- [OMAP_DSS_VIDEO2] = "vid2",
- [OMAP_DSS_VIDEO3] = "vid3",
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
};
static const uint32_t error_irqs[] = {
- [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
- int id, bool private_plane)
+ int id, enum drm_plane_type type)
{
struct omap_drm_private *priv = dev->dev_private;
- struct drm_plane *plane = NULL;
+ struct drm_plane *plane;
struct omap_plane *omap_plane;
struct omap_overlay_info *info;
+ int ret;
- DBG("%s: priv=%d", plane_names[id], private_plane);
+ DBG("%s: type=%d", plane_names[id], type);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
- return NULL;
+ return ERR_PTR(-ENOMEM);
drm_flip_work_init(&omap_plane->unpin_work,
- "unpin", unpin_worker);
+ "unpin", omap_plane_unpin_worker);
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
@@ -413,8 +409,11 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane->error_irq.irq = omap_plane_error_irq;
omap_irq_register(dev, &omap_plane->error_irq);
- drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
- omap_plane->formats, omap_plane->nformats, private_plane);
+ ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
+ &omap_plane_funcs, omap_plane->formats,
+ omap_plane->nformats, type);
+ if (ret < 0)
+ goto error;
omap_plane_install_properties(plane, &plane->base);
@@ -432,10 +431,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
* TODO add ioctl to give userspace an API to change this.. this
* will come in a subsequent patch.
*/
- if (private_plane)
+ if (type == DRM_PLANE_TYPE_PRIMARY)
omap_plane->info.zorder = 0;
else
omap_plane->info.zorder = id;
return plane;
+
+error:
+ omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+ kfree(omap_plane);
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d84583776d50..6d64c7bb908b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -11,6 +11,7 @@ config DRM_PANEL_SIMPLE
tristate "support for simple panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
help
DRM panel driver for dumb panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 39806c335339..30904a9b2a4c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -33,9 +33,14 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
+ const struct display_timing *timings;
+ unsigned int num_timings;
unsigned int bpc;
@@ -94,6 +99,25 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
if (!panel->desc)
return 0;
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(drm);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
@@ -226,12 +250,30 @@ static int panel_simple_get_modes(struct drm_panel *panel)
return num;
}
+static int panel_simple_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
.unprepare = panel_simple_unprepare,
.prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
+ .get_timings = panel_simple_get_timings,
};
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
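panel_simple_get_timings() follows the usual two-call query convention: call with timings == NULL to learn how many entries exist, then call again with a buffer of at least that size. A hedged caller sketch; the direct funcs dereference and allocation policy are illustrative, not taken from this patch:

struct display_timing *timings;
int num;

num = panel->funcs->get_timings(panel, 0, NULL);	/* count only */
timings = kcalloc(num, sizeof(*timings), GFP_KERNEL);
if (timings)
	panel->funcs->get_timings(panel, num, timings);	/* copy entries */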
@@ -327,6 +369,31 @@ static void panel_simple_shutdown(struct device *dev)
panel_simple_disable(&panel->base);
}
+static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 0,
+ .hsync_end = 800 + 0 + 255,
+ .htotal = 800 + 0 + 255 + 0,
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 45,
+ .vtotal = 480 + 2 + 45 + 0,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ampire_am800480r3tmqwa1h = {
+ .modes = &ampire_am800480r3tmqwa1h_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode auo_b101aw03_mode = {
.clock = 51450,
.hdisplay = 1024,
@@ -350,6 +417,29 @@ static const struct panel_desc auo_b101aw03 = {
},
};
+static const struct drm_display_mode auo_b101ean01_mode = {
+ .clock = 72500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 119,
+ .hsync_end = 1280 + 119 + 32,
+ .htotal = 1280 + 119 + 32 + 21,
+ .vdisplay = 800,
+ .vsync_start = 800 + 4,
+ .vsync_end = 800 + 4 + 20,
+ .vtotal = 800 + 4 + 20 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101ean01 = {
+ .modes = &auo_b101ean01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+};
+
static const struct drm_display_mode auo_b101xtn01_mode = {
.clock = 72000,
.hdisplay = 1366,
@@ -615,24 +705,25 @@ static const struct panel_desc giantplus_gpg482739qs5 = {
.width = 95,
.height = 54,
},
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
-static const struct drm_display_mode hannstar_hsd070pww1_mode = {
- .clock = 71100,
- .hdisplay = 1280,
- .hsync_start = 1280 + 1,
- .hsync_end = 1280 + 1 + 158,
- .htotal = 1280 + 1 + 158 + 1,
- .vdisplay = 800,
- .vsync_start = 800 + 1,
- .vsync_end = 800 + 1 + 21,
- .vtotal = 800 + 1 + 21 + 1,
- .vrefresh = 60,
+static const struct display_timing hannstar_hsd070pww1_timing = {
+ .pixelclock = { 64300000, 71100000, 82000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 1, 1, 10 },
+ .hback_porch = { 1, 1, 10 },
+ .hsync_len = { 52, 158, 661 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 1, 10 },
+ .vback_porch = { 1, 1, 10 },
+ .vsync_len = { 1, 21, 203 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc hannstar_hsd070pww1 = {
- .modes = &hannstar_hsd070pww1_mode,
- .num_modes = 1,
+ .timings = &hannstar_hsd070pww1_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 151,
@@ -663,6 +754,31 @@ static const struct panel_desc hitachi_tx23d38vm0caa = {
},
};
+static const struct drm_display_mode innolux_at043tn24_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 11,
+ .vtotal = 272 + 2 + 11 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_at043tn24 = {
+ .modes = &innolux_at043tn24_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode innolux_g121i1_l01_mode = {
.clock = 71000,
.hdisplay = 1280,
@@ -733,6 +849,29 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
+static const struct drm_display_mode innolux_zj070na_01p_mode = {
+ .clock = 51501,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 128,
+ .hsync_end = 1024 + 128 + 64,
+ .htotal = 1024 + 128 + 64 + 128,
+ .vdisplay = 600,
+ .vsync_start = 600 + 16,
+ .vsync_end = 600 + 16 + 4,
+ .vtotal = 600 + 16 + 4 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_zj070na_01p = {
+ .modes = &innolux_zj070na_01p_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 1024,
+ .height = 600,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -756,6 +895,30 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
+ .clock = 25000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 10,
+ .htotal = 480 + 10 + 10 + 15,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 3,
+ .vtotal = 800 + 3 + 3 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc ortustech_com43h4m85ulc = {
+ .modes = &ortustech_com43h4m85ulc_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 56,
+ .height = 93,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -779,11 +942,63 @@ static const struct panel_desc samsung_ltn101nt05 = {
},
};
+static const struct drm_display_mode samsung_ltn140at29_301_mode = {
+ .clock = 76300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 48,
+ .htotal = 1366 + 64 + 48 + 128,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 5,
+ .vtotal = 768 + 2 + 5 + 17,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_ltn140at29_301 = {
+ .modes = &samsung_ltn140at29_301_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 320,
+ .height = 187,
+ },
+};
+
+static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
+ .clock = 33300,
+ .hdisplay = 800,
+ .hsync_start = 800 + 1,
+ .hsync_end = 800 + 1 + 64,
+ .htotal = 800 + 1 + 64 + 64,
+ .vdisplay = 480,
+ .vsync_start = 480 + 1,
+ .vsync_end = 480 + 1 + 23,
+ .vtotal = 480 + 1 + 23 + 22,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc shelly_sca07010_bfn_lnn = {
+ .modes = &shelly_sca07010_bfn_lnn_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct of_device_id platform_of_match[] = {
{
+ .compatible = "ampire,am800480r3tmqwa1h",
+ .data = &ampire_am800480r3tmqwa1h,
+ }, {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b101ean01",
+ .data = &auo_b101ean01,
+ }, {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
@@ -826,6 +1041,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "hit,tx23d38vm0caa",
.data = &hitachi_tx23d38vm0caa
}, {
+ .compatible = "innolux,at043tn24",
+ .data = &innolux_at043tn24,
+ }, {
.compatible ="innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
@@ -835,12 +1053,24 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
+ .compatible = "innolux,zj070na-01p",
+ .data = &innolux_zj070na_01p,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "ortustech,com43h4m85ulc",
+ .data = &ortustech_com43h4m85ulc,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
+ .compatible = "samsung,ltn140at29-301",
+ .data = &samsung_ltn140at29_301,
+ }, {
+ .compatible = "shelly,sca07010-bfn-lnn",
+ .data = &shelly_sca07010_bfn_lnn,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 4870df898230..0089d837a8e3 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2131,6 +2131,7 @@
#define VCE_UENC_REG_CLOCK_GATING 0x207c0
#define VCE_SYS_INT_EN 0x21300
# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3)
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c
#define VCE_LMI_CTRL2 0x21474
#define VCE_LMI_CTRL 0x21498
#define VCE_LMI_VM_CTRL 0x214a0
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 35ab65d53cc1..73a6432da1a5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1567,6 +1567,7 @@ struct radeon_dpm {
int new_active_crtc_count;
u32 current_active_crtcs;
int current_active_crtc_count;
+ bool single_display;
struct radeon_dpm_dynamic_state dyn_state;
struct radeon_dpm_fan fan;
u32 tdp_limit;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 63ccb8fa799c..d27e4ccb848c 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
static bool radeon_read_bios(struct radeon_device *rdev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios, val1, val2;
size_t size;
rdev->bios = NULL;
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return false;
}
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ val1 = readb(&bios[0]);
+ val2 = readb(&bios[1]);
+
+ if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+ rdev->bios = kzalloc(size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
+ memcpy_fromio(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}
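The radeon_read_bios() change matters because pci_map_rom() returns an __iomem pointer: dereferencing it directly (bios[0]) or copying it with kmemdup() is undefined on architectures where I/O memory is not ordinary RAM. readb() and memcpy_fromio() are the portable accessors; the pattern condensed into a sketch with illustrative names:

#include <linux/io.h>
#include <linux/slab.h>

static void *copy_rom(void __iomem *rom, size_t size)
{
	void *buf;

	/* check the 0x55 0xaa ROM signature through readb(), not rom[i] */
	if (readb(rom) != 0x55 || readb(rom + 1) != 0xaa)
		return NULL;

	buf = kzalloc(size, GFP_KERNEL);
	if (buf)
		memcpy_fromio(buf, rom, size);	/* never memcpy()/kmemdup() */

	return buf;
}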
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 4cdcaf8361e1..3db23007cdf4 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
.compute_vmid_bitmap = 0xFF00,
.first_compute_pipe = 1,
- .compute_pipe_count = 8 - 1,
+ .compute_pipe_count = 4 - 1,
};
radeon_doorbell_get_kfd_info(rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index a69bd441dd2d..572b4dbec186 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct radeon_bo *bo;
- struct fence *fence;
int r;
bo = container_of(it, struct radeon_bo, mn_it);
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
continue;
}
- fence = reservation_object_get_excl(bo->tbo.resv);
- if (fence) {
- r = radeon_fence_wait((struct radeon_fence *)fence, false);
- if (r)
- DRM_ERROR("(%d) failed to wait for user bo\n", r);
- }
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%d) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
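Waiting on the reservation object with wait_all = true covers shared (read) fences as well as the exclusive one, which the removed radeon_fence_wait() path did not; it also handles fences installed by other drivers sharing the buffer. Note the helper returns remaining jiffies on success, zero on timeout and a negative error code, so callers conventionally treat r <= 0 as failure. The call in isolation, naming following the hunk above:

long r;

r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
					true,	/* wait_all: shared fences too */
					false,	/* uninterruptible */
					MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
	DRM_ERROR("(%ld) failed to wait for user bo\n", r);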
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 43e09942823e..318165d4855c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
else
rbo->placements[i].lpfn = 0;
}
-
- /*
- * Use two-ended allocation depending on the buffer size to
- * improve fragmentation quality.
- * 512kb was measured as the most optimal number.
- */
- if (rbo->tbo.mem.size > 512 * 1024) {
- for (i = 0; i < c; i++) {
- rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
- }
- }
}
int radeon_bo_create(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 33cf4108386d..c1ba83a8dd8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
radeon_pm_compute_clocks(rdev);
}
-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state)
+static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
- int i;
- struct radeon_ps *ps;
- u32 ui_class;
bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
true : false;
@@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
single_display = false;
+ return single_display;
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+ enum radeon_pm_state_type dpm_state)
+{
+ int i;
+ struct radeon_ps *ps;
+ u32 ui_class;
+ bool single_display = radeon_dpm_single_display(rdev);
+
/* certain older asics have a separate 3D performance state,
* so try that first if the user selected performance
*/
@@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
struct radeon_ps *ps;
enum radeon_pm_state_type dpm_state;
int ret;
+ bool single_display = radeon_dpm_single_display(rdev);
/* if dpm init failed */
if (!rdev->pm.dpm_enabled)
@@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
/* vce just modifies an existing state so force a change */
if (ps->vce_active != rdev->pm.dpm.vce_active)
goto force;
+ /* user has made a display change (such as timing) */
+ if (rdev->pm.dpm.single_display != single_display)
+ goto force;
if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
/* for pre-BTC and APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
@@ -1069,6 +1080,7 @@ force:
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ rdev->pm.dpm.single_display = single_display;
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
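Caching single_display in struct radeon_dpm gives radeon_dpm_change_power_state_locked() a cheap change-detection test: recompute the flag, compare against the cached copy, and force a state transition on mismatch (for example after a timing change pushes the panel past the 120 Hz vrefresh threshold). The pattern in miniature, with a hypothetical cache struct:

struct dpm_cache {
	bool single_display;
};

static bool display_config_changed(struct dpm_cache *c, bool single_now)
{
	if (c->single_display == single_now)
		return false;		/* nothing to do */

	c->single_display = single_now;	/* remember for the next check */
	return true;			/* force a power-state re-pick */
}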
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 2456f69efd23..8c7872339c2a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- if (!ring->ready)
+ if (!ring->ring)
return 0;
/* print 8 dw before current rptr as often it's the last executed
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d02aa1d0f588..b292aca0f342 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ /* double check that we don't free the table twice */
+ if (!ttm->sg->sgl)
+ return;
+
/* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index 1ac7bb825a1b..fbbe78fbd087 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
WREG32(VCE_LMI_SWAP_CNTL1, 0);
WREG32(VCE_LMI_VM_CTRL, 0);
+ WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
+
+ addr &= 0xff;
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
WREG32(VCE_VCPU_CACHE_SIZE0, size);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fb052bca574f..93117f159a3b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -509,7 +509,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
struct device_node *connector = NULL;
struct device_node *encoder = NULL;
- struct device_node *prev = NULL;
+ struct device_node *ep_node = NULL;
struct device_node *entity_ep_node;
struct device_node *entity;
int ret;
@@ -527,16 +527,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
- while (1) {
- struct device_node *ep_node;
-
- ep_node = of_graph_get_next_endpoint(entity, prev);
- of_node_put(prev);
- prev = ep_node;
-
- if (!ep_node)
- break;
-
+ for_each_endpoint_of_node(entity, ep_node) {
if (ep_node == entity_ep_node)
continue;
@@ -603,27 +594,19 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
struct device_node *np = rcdu->dev->of_node;
- struct device_node *prev = NULL;
+ struct device_node *ep_node;
unsigned int num_encoders = 0;
/*
* Iterate over the endpoints and create one encoder for each output
* pipeline.
*/
- while (1) {
- struct device_node *ep_node;
+ for_each_endpoint_of_node(np, ep_node) {
enum rcar_du_output output;
struct of_endpoint ep;
unsigned int i;
int ret;
- ep_node = of_graph_get_next_endpoint(np, prev);
- of_node_put(prev);
- prev = ep_node;
-
- if (ep_node == NULL)
- break;
-
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret < 0) {
of_node_put(ep_node);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 21a481b224eb..3962176ee713 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -129,6 +129,7 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
struct rockchip_drm_private *private;
struct dma_iommu_mapping *mapping;
struct device *dev = drm_dev->dev;
+ struct drm_connector *connector;
int ret;
private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
@@ -171,6 +172,23 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_detach_device;
+ /*
+ * All components are now added, so we can publish the connector sysfs
+ * entries to userspace. This will generate hotplug events, and userspace
+ * will expect to be able to access DRM at this point.
+ */
+ list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
+ head) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ dev_err(drm_dev->dev,
+ "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
+ connector->base.id,
+ connector->name, ret);
+ goto err_unbind;
+ }
+ }
+
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -200,6 +218,7 @@ err_vblank_cleanup:
drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
+err_unbind:
component_unbind_all(dev, drm_dev);
err_detach_device:
arm_iommu_detach_device(dev);
@@ -366,7 +385,7 @@ static const struct dev_pm_ops rockchip_drm_pm_ops = {
int rockchip_drm_encoder_get_mux_id(struct device_node *node,
struct drm_encoder *encoder)
{
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct drm_crtc *crtc = encoder->crtc;
struct of_endpoint endpoint;
struct device_node *port;
@@ -375,18 +394,15 @@ int rockchip_drm_encoder_get_mux_id(struct device_node *node,
if (!node || !crtc)
return -EINVAL;
- do {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
port = of_graph_get_remote_port(ep);
of_node_put(port);
if (port == crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
+ of_node_put(ep);
return ret ?: endpoint.id;
}
- } while (ep);
+ }
return -EINVAL;
}
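Both OF-graph conversions above replace open-coded of_graph_get_next_endpoint() loops with for_each_endpoint_of_node(), which drops the reference on the previous endpoint as it advances. The caller's one remaining duty is to put the reference when leaving the loop early, as the rockchip hunk does before returning. A hedged sketch; match() and endpoint_id() are hypothetical, the iteration idiom is the point:

#include <linux/of.h>
#include <linux/of_graph.h>

static int find_endpoint_id(struct device_node *parent)
{
	struct device_node *ep;
	int id;

	for_each_endpoint_of_node(parent, ep) {
		if (match(ep)) {
			id = endpoint_id(ep);
			of_node_put(ep);	/* early exit keeps a ref */
			return id;
		}
	}

	return -EINVAL;		/* exhausted loop holds no reference */
}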
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index d5c1248916b2..5b0dc0f6fd94 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -119,6 +119,9 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
fb->width, fb->height, fb->depth, rk_obj->kvaddr,
offset, size);
+
+ fbi->skip_vt_switch = true;
+
return 0;
err_drm_framebuffer_unref:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d041921b3bb9..ccb0ce073ef2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -421,6 +421,12 @@ static void vop_enable(struct drm_crtc *crtc)
if (vop->is_enabled)
return;
+ ret = pm_runtime_get_sync(vop->dev);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ return;
+ }
+
ret = clk_enable(vop->hclk);
if (ret < 0) {
dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
@@ -517,6 +523,7 @@ static void vop_disable(struct drm_crtc *crtc)
clk_disable(vop->dclk);
clk_disable(vop->aclk);
clk_disable(vop->hclk);
+ pm_runtime_put(vop->dev);
}
/*
@@ -893,7 +900,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
u16 vact_end = vact_st + vdisplay;
- int ret;
+ int ret, ret_clk;
uint32_t val;
/*
@@ -915,7 +922,8 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
default:
DRM_ERROR("unsupport connector_type[%d]\n",
vop->connector_type);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
};
VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
@@ -938,7 +946,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
ret = vop_crtc_mode_set_base(crtc, x, y, fb);
if (ret)
- return ret;
+ goto out;
/*
* reset dclk, take all mode config affect, so the clk would run in
@@ -949,13 +957,14 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc,
reset_control_deassert(vop->dclk_rst);
clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
- ret = clk_enable(vop->dclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
- return ret;
+out:
+ ret_clk = clk_enable(vop->dclk);
+ if (ret_clk < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret_clk);
+ return ret_clk;
}
- return 0;
+ return ret;
}
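The restructured vop_crtc_mode_set() funnels every failure through a single out label so dclk is re-enabled no matter how the function exits, keeping the clock enable count balanced with vop_enable()/vop_disable(). The skeleton of the pattern, with hypothetical helpers:

static int mode_set_skeleton(struct vop *vop)
{
	int ret, ret_clk;

	clk_disable(vop->dclk);		/* gate the clock around reconfig */

	ret = configure_output(vop);	/* hypothetical; may fail */
	if (ret)
		goto out;

	ret = set_scanout(vop);		/* hypothetical; may fail */

out:
	ret_clk = clk_enable(vop->dclk);	/* re-enable on every path */
	if (ret_clk < 0)
		return ret_clk;

	return ret;
}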
static void vop_crtc_commit(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b7f781573b15..a287e4fec865 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -425,8 +425,8 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane_state *state;
- if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane, plane->state);
kfree(plane->state);
plane->state = NULL;
@@ -443,12 +443,14 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla
struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
struct tegra_plane_state *copy;
- copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
- if (copy->base.fb)
- drm_framebuffer_reference(copy->base.fb);
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+ copy->tiling = state->tiling;
+ copy->format = state->format;
+ copy->swap = state->swap;
return &copy->base;
}
@@ -456,9 +458,7 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- if (state->fb)
- drm_framebuffer_unreference(state->fb);
-
+ __drm_atomic_helper_plane_destroy_state(plane, state);
kfree(state);
}
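The tegra state hooks now delegate base-state handling to the __drm_atomic_helper_* functions, which copy the core drm_plane_state fields and manage the framebuffer reference, leaving the driver to duplicate only its private members; the kmemdup() of the whole subclass is gone because it duplicated the fb pointer without taking a reference. The canonical shape of a subclassed duplicate hook, with a hypothetical foo_plane_state and to_foo_plane_state():

struct foo_plane_state {
	struct drm_plane_state base;
	u32 tiling;			/* driver-private example field */
};

static struct drm_plane_state *
foo_plane_atomic_duplicate_state(struct drm_plane *plane)
{
	struct foo_plane_state *state = to_foo_plane_state(plane->state);
	struct foo_plane_state *copy;

	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	/* helper copies base fields and grabs the fb reference */
	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
	copy->tiling = state->tiling;	/* driver fields copied by hand */

	return &copy->base;
}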
@@ -908,6 +908,15 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
return 0;
}
+u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc)
+{
+ if (dc->syncpt)
+ return host1x_syncpt_read(dc->syncpt);
+
+ /* fallback to software emulated VBLANK counter */
+ return drm_crtc_vblank_count(&dc->base);
+}
+
void tegra_dc_enable_vblank(struct tegra_dc *dc)
{
unsigned long value, flags;
@@ -995,6 +1004,9 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
{
struct tegra_dc_state *state;
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc, crtc->state);
+
kfree(crtc->state);
crtc->state = NULL;
@@ -1011,14 +1023,15 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc_state *copy;
- copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return NULL;
- copy->base.mode_changed = false;
- copy->base.active_changed = false;
- copy->base.planes_changed = false;
- copy->base.event = NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
+ copy->clk = state->clk;
+ copy->pclk = state->pclk;
+ copy->div = state->div;
+ copy->planes = state->planes;
return &copy->base;
}
@@ -1026,6 +1039,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ __drm_atomic_helper_crtc_destroy_state(crtc, state);
kfree(state);
}
@@ -1152,26 +1166,18 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
return 0;
}
-int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
- unsigned long pclk, unsigned int div)
-{
- u32 value;
- int err;
-
- err = clk_set_parent(dc->clk, parent);
- if (err < 0) {
- dev_err(dc->dev, "failed to set parent clock: %d\n", err);
- return err;
- }
-
- DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), div);
-
- value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
- tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
-
- return 0;
-}
-
+/**
+ * tegra_dc_state_setup_clock - check clock settings and store them in atomic
+ * state
+ * @dc: display controller
+ * @crtc_state: CRTC atomic state
+ * @clk: parent clock for display controller
+ * @pclk: pixel clock
+ * @div: shift clock divider
+ *
+ * Returns:
+ * 0 on success or a negative error-code on failure.
+ */
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
@@ -1179,6 +1185,9 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
{
struct tegra_dc_state *state = to_dc_state(crtc_state);
+ if (!clk_has_parent(dc->clk, clk))
+ return -EINVAL;
+
state->clk = clk;
state->pclk = pclk;
state->div = div;
@@ -1294,9 +1303,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
.disable = tegra_crtc_disable,
.mode_fixup = tegra_crtc_mode_fixup,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = tegra_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.prepare = tegra_crtc_prepare,
.commit = tegra_crtc_commit,
.atomic_check = tegra_crtc_atomic_check,
@@ -1631,7 +1638,6 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- unsigned int syncpt;
u32 value;
int err;
@@ -1700,13 +1706,15 @@ static int tegra_dc_init(struct host1x_client *client)
}
/* initialize display controller */
- if (dc->pipe)
- syncpt = SYNCPT_VBLANK1;
- else
- syncpt = SYNCPT_VBLANK0;
+ if (dc->syncpt) {
+ u32 syncpt = host1x_syncpt_id(dc->syncpt);
- tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
- tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+ value = SYNCPT_CNTRL_NO_STALL;
+ tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+ value = SYNCPT_VSYNC_ENABLE | syncpt;
+ tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+ }
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
@@ -1874,6 +1882,7 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
const struct of_device_id *id;
struct resource *regs;
struct tegra_dc *dc;
@@ -1965,6 +1974,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
+ dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+ if (!dc->syncpt)
+ dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
platform_set_drvdata(pdev, dc);
return 0;
@@ -1975,6 +1988,8 @@ static int tegra_dc_remove(struct platform_device *pdev)
struct tegra_dc *dc = platform_get_drvdata(pdev);
int err;
+ host1x_syncpt_free(dc->syncpt);
+
err = host1x_client_unregister(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 705c93b00794..55792daabbb5 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -12,6 +12,8 @@
#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define SYNCPT_CNTRL_NO_STALL (1 << 8)
+#define SYNCPT_CNTRL_SOFT_RESET (1 << 0)
#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
@@ -23,6 +25,7 @@
#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define SYNCPT_VSYNC_ENABLE (1 << 8)
#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
#define DC_CMD_DISPLAY_COMMAND 0x032
#define DISP_CTRL_MODE_STOP (0 << 5)
@@ -438,8 +441,4 @@
#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
-/* synchronization points */
-#define SYNCPT_VBLANK0 26
-#define SYNCPT_VBLANK1 27
-
#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 5f1880766110..1833abd7d3aa 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -172,6 +172,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
*/
drm->irq_enabled = true;
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->vblank_disable_immediate = true;
+ drm->max_vblank_count = 0xffffffff;
+
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto device;
@@ -813,12 +817,12 @@ static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
if (!crtc)
return 0;
- /* TODO: implement real hardware counter using syncpoints */
- return drm_crtc_vblank_count(crtc);
+ return tegra_dc_get_vblank_counter(dc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
@@ -879,8 +883,18 @@ static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
return 0;
}
+static int tegra_debugfs_iova(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct tegra_drm *tegra = drm->dev_private;
+
+ return drm_mm_dump_table(s, &tegra->mm);
+}
+
static struct drm_info_list tegra_debugfs_list[] = {
{ "framebuffers", tegra_debugfs_framebuffers, 0 },
+ { "iova", tegra_debugfs_iova, 0 },
};
static int tegra_debugfs_init(struct drm_minor *minor)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 8cb2dfeaa957..659b2fcc986d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -106,6 +106,7 @@ struct tegra_output;
struct tegra_dc {
struct host1x_client client;
+ struct host1x_syncpt *syncpt;
struct device *dev;
spinlock_t lock;
@@ -180,12 +181,11 @@ struct tegra_dc_window {
};
/* from dc.c */
+u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc);
void tegra_dc_enable_vblank(struct tegra_dc *dc);
void tegra_dc_disable_vblank(struct tegra_dc *dc);
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
void tegra_dc_commit(struct tegra_dc *dc);
-int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent,
- unsigned long pclk, unsigned int div);
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
struct clk *clk, unsigned long pclk,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7eaaee74a039..06ab1783bba1 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -952,7 +952,7 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
}
tegra_hdmi_writel(hdmi,
- SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC(0) |
SOR_SEQ_PU_PC_ALT(0) |
SOR_SEQ_PD_PC(8) |
SOR_SEQ_PD_PC_ALT(8),
@@ -1394,8 +1394,8 @@ static int tegra_hdmi_exit(struct host1x_client *client)
tegra_output_exit(&hdmi->output);
- clk_disable_unprepare(hdmi->clk);
reset_control_assert(hdmi->rst);
+ clk_disable_unprepare(hdmi->clk);
regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 919a19df4e1b..a882514389cd 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -201,7 +201,7 @@
#define HDMI_NV_PDISP_SOR_CRCB 0x5d
#define HDMI_NV_PDISP_SOR_BLANK 0x5e
#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
-#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC(x) (((x) & 0xf) << 0)
#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 2afe478ded3b..7591d8901f9a 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -41,6 +41,8 @@ struct tegra_sor {
struct mutex lock;
bool enabled;
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
struct dentry *debugfs;
};
@@ -68,13 +70,12 @@ static inline struct tegra_sor *to_sor(struct tegra_output *output)
return container_of(output, struct tegra_sor, output);
}
-static inline unsigned long tegra_sor_readl(struct tegra_sor *sor,
- unsigned long offset)
+static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned long offset)
{
return readl(sor->regs + (offset << 2));
}
-static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
+static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
unsigned long offset)
{
writel(value, sor->regs + (offset << 2));
@@ -83,9 +84,9 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value,
static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
struct drm_dp_link *link)
{
- unsigned long value;
unsigned int i;
u8 pattern;
+ u32 value;
int err;
/* setup lane parameters */
@@ -202,7 +203,7 @@ static void tegra_sor_update(struct tegra_sor *sor)
static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
value = tegra_sor_readl(sor, SOR_PWM_DIV);
value &= ~SOR_PWM_DIV_MASK;
@@ -281,7 +282,7 @@ static int tegra_sor_wakeup(struct tegra_sor *sor)
static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
value = tegra_sor_readl(sor, SOR_PWR);
value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU;
@@ -674,38 +675,195 @@ static const struct file_operations tegra_sor_crc_fops = {
.release = tegra_sor_crc_release,
};
+static int tegra_sor_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_sor *sor = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-38s %#05x %08x\n", #name, name, \
+ tegra_sor_readl(sor, name))
+
+ DUMP_REG(SOR_CTXSW);
+ DUMP_REG(SOR_SUPER_STATE_0);
+ DUMP_REG(SOR_SUPER_STATE_1);
+ DUMP_REG(SOR_STATE_0);
+ DUMP_REG(SOR_STATE_1);
+ DUMP_REG(SOR_HEAD_STATE_0(0));
+ DUMP_REG(SOR_HEAD_STATE_0(1));
+ DUMP_REG(SOR_HEAD_STATE_1(0));
+ DUMP_REG(SOR_HEAD_STATE_1(1));
+ DUMP_REG(SOR_HEAD_STATE_2(0));
+ DUMP_REG(SOR_HEAD_STATE_2(1));
+ DUMP_REG(SOR_HEAD_STATE_3(0));
+ DUMP_REG(SOR_HEAD_STATE_3(1));
+ DUMP_REG(SOR_HEAD_STATE_4(0));
+ DUMP_REG(SOR_HEAD_STATE_4(1));
+ DUMP_REG(SOR_HEAD_STATE_5(0));
+ DUMP_REG(SOR_HEAD_STATE_5(1));
+ DUMP_REG(SOR_CRC_CNTRL);
+ DUMP_REG(SOR_DP_DEBUG_MVID);
+ DUMP_REG(SOR_CLK_CNTRL);
+ DUMP_REG(SOR_CAP);
+ DUMP_REG(SOR_PWR);
+ DUMP_REG(SOR_TEST);
+ DUMP_REG(SOR_PLL_0);
+ DUMP_REG(SOR_PLL_1);
+ DUMP_REG(SOR_PLL_2);
+ DUMP_REG(SOR_PLL_3);
+ DUMP_REG(SOR_CSTM);
+ DUMP_REG(SOR_LVDS);
+ DUMP_REG(SOR_CRC_A);
+ DUMP_REG(SOR_CRC_B);
+ DUMP_REG(SOR_BLANK);
+ DUMP_REG(SOR_SEQ_CTL);
+ DUMP_REG(SOR_LANE_SEQ_CTL);
+ DUMP_REG(SOR_SEQ_INST(0));
+ DUMP_REG(SOR_SEQ_INST(1));
+ DUMP_REG(SOR_SEQ_INST(2));
+ DUMP_REG(SOR_SEQ_INST(3));
+ DUMP_REG(SOR_SEQ_INST(4));
+ DUMP_REG(SOR_SEQ_INST(5));
+ DUMP_REG(SOR_SEQ_INST(6));
+ DUMP_REG(SOR_SEQ_INST(7));
+ DUMP_REG(SOR_SEQ_INST(8));
+ DUMP_REG(SOR_SEQ_INST(9));
+ DUMP_REG(SOR_SEQ_INST(10));
+ DUMP_REG(SOR_SEQ_INST(11));
+ DUMP_REG(SOR_SEQ_INST(12));
+ DUMP_REG(SOR_SEQ_INST(13));
+ DUMP_REG(SOR_SEQ_INST(14));
+ DUMP_REG(SOR_SEQ_INST(15));
+ DUMP_REG(SOR_PWM_DIV);
+ DUMP_REG(SOR_PWM_CTL);
+ DUMP_REG(SOR_VCRC_A_0);
+ DUMP_REG(SOR_VCRC_A_1);
+ DUMP_REG(SOR_VCRC_B_0);
+ DUMP_REG(SOR_VCRC_B_1);
+ DUMP_REG(SOR_CCRC_A_0);
+ DUMP_REG(SOR_CCRC_A_1);
+ DUMP_REG(SOR_CCRC_B_0);
+ DUMP_REG(SOR_CCRC_B_1);
+ DUMP_REG(SOR_EDATA_A_0);
+ DUMP_REG(SOR_EDATA_A_1);
+ DUMP_REG(SOR_EDATA_B_0);
+ DUMP_REG(SOR_EDATA_B_1);
+ DUMP_REG(SOR_COUNT_A_0);
+ DUMP_REG(SOR_COUNT_A_1);
+ DUMP_REG(SOR_COUNT_B_0);
+ DUMP_REG(SOR_COUNT_B_1);
+ DUMP_REG(SOR_DEBUG_A_0);
+ DUMP_REG(SOR_DEBUG_A_1);
+ DUMP_REG(SOR_DEBUG_B_0);
+ DUMP_REG(SOR_DEBUG_B_1);
+ DUMP_REG(SOR_TRIG);
+ DUMP_REG(SOR_MSCHECK);
+ DUMP_REG(SOR_XBAR_CTRL);
+ DUMP_REG(SOR_XBAR_POL);
+ DUMP_REG(SOR_DP_LINKCTL_0);
+ DUMP_REG(SOR_DP_LINKCTL_1);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
+ DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
+ DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
+ DUMP_REG(SOR_LANE_PREEMPHASIS_0);
+ DUMP_REG(SOR_LANE_PREEMPHASIS_1);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
+ DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
+ DUMP_REG(SOR_LANE_POST_CURSOR_0);
+ DUMP_REG(SOR_LANE_POST_CURSOR_1);
+ DUMP_REG(SOR_DP_CONFIG_0);
+ DUMP_REG(SOR_DP_CONFIG_1);
+ DUMP_REG(SOR_DP_MN_0);
+ DUMP_REG(SOR_DP_MN_1);
+ DUMP_REG(SOR_DP_PADCTL_0);
+ DUMP_REG(SOR_DP_PADCTL_1);
+ DUMP_REG(SOR_DP_DEBUG_0);
+ DUMP_REG(SOR_DP_DEBUG_1);
+ DUMP_REG(SOR_DP_SPARE_0);
+ DUMP_REG(SOR_DP_SPARE_1);
+ DUMP_REG(SOR_DP_AUDIO_CTRL);
+ DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
+ DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+ DUMP_REG(SOR_DP_TPG);
+ DUMP_REG(SOR_DP_TPG_CONFIG);
+ DUMP_REG(SOR_DP_LQ_CSTM_0);
+ DUMP_REG(SOR_DP_LQ_CSTM_1);
+ DUMP_REG(SOR_DP_LQ_CSTM_2);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_sor_show_regs, 0, NULL },
+};
+
static int tegra_sor_debugfs_init(struct tegra_sor *sor,
struct drm_minor *minor)
{
struct dentry *entry;
+ unsigned int i;
int err = 0;
sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
if (!sor->debugfs)
return -ENOMEM;
+ sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!sor->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ sor->debugfs_files[i].data = sor;
+
+ err = drm_debugfs_create_files(sor->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ sor->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ sor->minor = minor;
+
entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
&tegra_sor_crc_fops);
if (!entry) {
- dev_err(sor->dev,
- "cannot create /sys/kernel/debug/dri/%s/sor/crc\n",
- minor->debugfs_root->d_name.name);
err = -ENOMEM;
- goto remove;
+ goto free;
}
return err;
+free:
+ kfree(sor->debugfs_files);
+ sor->debugfs_files = NULL;
remove:
- debugfs_remove(sor->debugfs);
+ debugfs_remove_recursive(sor->debugfs);
sor->debugfs = NULL;
return err;
}
static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
{
- debugfs_remove_recursive(sor->debugfs);
+ drm_debugfs_remove_files(sor->debugfs_files, ARRAY_SIZE(debugfs_files),
+ sor->minor);
+ sor->minor = NULL;
+
+ kfree(sor->debugfs_files);
+ sor->debugfs_files = NULL;
+
+ debugfs_remove_recursive(sor->debugfs);
sor->debugfs = NULL;
}
static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
@@ -791,8 +949,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
struct tegra_sor_config config;
struct drm_dp_link link;
struct drm_dp_aux *aux;
- unsigned long value;
int err = 0;
+ u32 value;
mutex_lock(&sor->lock);
@@ -1354,12 +1512,30 @@ static int tegra_sor_init(struct host1x_client *client)
}
}
+ /*
+ * XXX: Remove this reset once proper hand-over from firmware to
+ * kernel is possible.
+ */
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
+ return err;
+ }
+
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
return err;
}
+ usleep_range(1000, 3000);
+
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
+ return err;
+ }
+
err = clk_prepare_enable(sor->clk_safe);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
new file mode 100644
index 000000000000..1055cb79096c
--- /dev/null
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+vgem-y := vgem_drv.o vgem_dma_buf.o
+
+obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c
new file mode 100644
index 000000000000..0254438ad1a6
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_dma_buf.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ * Copyright © 2014 The Chromium OS Authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include "vgem_drv.h"
+
+struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ BUG_ON(obj->pages == NULL);
+
+ return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE);
+}
+
+int vgem_gem_prime_pin(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ return vgem_gem_get_pages(obj);
+}
+
+void vgem_gem_prime_unpin(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ vgem_gem_put_pages(obj);
+}
+
+void *vgem_gem_prime_vmap(struct drm_gem_object *gobj)
+{
+ struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
+ BUG_ON(obj->pages == NULL);
+
+ return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+}
+
+void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ vunmap(vaddr);
+}
+
+struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_vgem_gem_object *obj = NULL;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_gem_object_init(dev, &obj->base, dma_buf->size);
+ if (ret)
+ goto fail_free;
+
+ get_dma_buf(dma_buf);
+
+ obj->base.dma_buf = dma_buf;
+ obj->use_dma_buf = true;
+
+ return &obj->base;
+
+fail_free:
+ kfree(obj);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
new file mode 100644
index 000000000000..cb3b43525b2d
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ * Copyright © 2014 The Chromium OS Authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Adam Jackson <ajax@redhat.com>
+ * Ben Widawsky <ben@bwidawsk.net>
+ */
+
+/*
+ * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
+ * software renderer and the X server for efficient buffer sharing.
+ */
+
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include "vgem_drv.h"
+
+#define DRIVER_NAME "vgem"
+#define DRIVER_DESC "Virtual GEM provider"
+#define DRIVER_DATE "20120112"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
+{
+ drm_gem_put_pages(&obj->base, obj->pages, false, false);
+ obj->pages = NULL;
+}
+
+static void vgem_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
+
+ drm_gem_free_mmap_offset(obj);
+
+ if (vgem_obj->use_dma_buf && obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+
+ drm_gem_object_release(obj);
+
+ if (vgem_obj->pages)
+ vgem_gem_put_pages(vgem_obj);
+
+ kfree(vgem_obj);
+}
+
+int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
+{
+ struct page **pages;
+
+ if (obj->pages || obj->use_dma_buf)
+ return 0;
+
+ pages = drm_gem_get_pages(&obj->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ obj->pages = pages;
+
+ return 0;
+}
+
+static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_vgem_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->base.dev;
+ loff_t num_pages;
+ pgoff_t page_offset;
+ int ret;
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
+
+ if (page_offset >= num_pages)
+ return VM_FAULT_SIGBUS;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ obj->pages[page_offset]);
+
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EBUSY:
+ return VM_FAULT_RETRY;
+ case -EFAULT:
+ case -EINVAL:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ON(1);
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static const struct vm_operations_struct vgem_gem_vm_ops = {
+ .fault = vgem_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+/* ioctls */
+
+static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ unsigned int *handle,
+ unsigned long size)
+{
+ struct drm_vgem_gem_object *obj;
+ struct drm_gem_object *gem_object;
+ int err;
+
+ size = roundup(size, PAGE_SIZE);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_object = &obj->base;
+
+ err = drm_gem_object_init(dev, gem_object, size);
+ if (err)
+ goto out;
+
+ err = drm_gem_handle_create(file, gem_object, handle);
+ if (err)
+ goto handle_out;
+
+ drm_gem_object_unreference_unlocked(gem_object);
+
+ return gem_object;
+
+handle_out:
+ drm_gem_object_release(gem_object);
+out:
+ kfree(obj);
+ return ERR_PTR(err);
+}
+
+static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gem_object;
+ uint64_t size;
+ uint64_t pitch = (uint64_t)args->width * DIV_ROUND_UP(args->bpp, 8);
+
+ size = args->height * pitch;
+ if (size == 0)
+ return -EINVAL;
+
+ gem_object = vgem_gem_create(dev, file, &args->handle, size);
+
+ if (IS_ERR(gem_object)) {
+ DRM_DEBUG_DRIVER("object creation failed\n");
+ return PTR_ERR(gem_object);
+ }
+
+ args->size = gem_object->size;
+ args->pitch = pitch;
+
+ DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+ return 0;
+}
+
+int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ int ret = 0;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (!drm_vma_node_has_offset(&obj->vma_node)) {
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto unref;
+ }
+
+ BUG_ON(!obj->filp);
+
+ obj->filp->private_data = obj;
+
+ ret = vgem_gem_get_pages(to_vgem_bo(obj));
+ if (ret)
+ goto fail_get_pages;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ goto unref;
+
+fail_get_pages:
+ drm_gem_free_mmap_offset(obj);
+unref:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_vma_offset_node *node;
+ struct drm_gem_object *obj;
+ struct drm_vgem_gem_object *vgem_obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+
+ vgem_obj = to_vgem_bo(obj);
+
+ if (obj->dma_buf && vgem_obj->use_dma_buf) {
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+ goto out_unlock;
+ }
+
+ if (!obj->dev->driver->gem_vm_ops) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = obj->dev->driver->gem_vm_ops;
+ vma->vm_private_data = vgem_obj;
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_gem_vm_open(vma);
+ return ret;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+
+static struct drm_ioctl_desc vgem_ioctls[] = {
+};
+
+static const struct file_operations vgem_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = vgem_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+};
+
+static struct drm_driver vgem_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .gem_free_object = vgem_gem_free_object,
+ .gem_vm_ops = &vgem_gem_vm_ops,
+ .ioctls = vgem_ioctls,
+ .fops = &vgem_driver_fops,
+ .dumb_create = vgem_gem_dumb_create,
+ .dumb_map_offset = vgem_gem_dumb_map,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = vgem_gem_prime_import,
+ .gem_prime_pin = vgem_gem_prime_pin,
+ .gem_prime_unpin = vgem_gem_prime_unpin,
+ .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table,
+ .gem_prime_vmap = vgem_gem_prime_vmap,
+ .gem_prime_vunmap = vgem_gem_prime_vunmap,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static struct drm_device *vgem_device;
+
+static int __init vgem_init(void)
+{
+ int ret;
+
+ vgem_device = drm_dev_alloc(&vgem_driver, NULL);
+ if (!vgem_device) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_dev_register(vgem_device, 0);
+
+ if (ret)
+ goto out_unref;
+
+ return 0;
+
+out_unref:
+ drm_dev_unref(vgem_device);
+out:
+ return ret;
+}
+
+static void __exit vgem_exit(void)
+{
+ drm_dev_unregister(vgem_device);
+ drm_dev_unref(vgem_device);
+}
+
+module_init(vgem_init);
+module_exit(vgem_exit);
+
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
new file mode 100644
index 000000000000..57ab4d8f41f9
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ * Copyright © 2014 The Chromium OS Authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#ifndef _VGEM_DRV_H_
+#define _VGEM_DRV_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+
+#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
+struct drm_vgem_gem_object {
+ struct drm_gem_object base;
+ struct page **pages;
+ bool use_dma_buf;
+};
+
+/* vgem_drv.c */
+extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
+extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
+
+/* vgem_dma_buf.c */
+extern struct sg_table *vgem_gem_prime_get_sg_table(
+ struct drm_gem_object *gobj);
+extern int vgem_gem_prime_pin(struct drm_gem_object *gobj);
+extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj);
+extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj);
+extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b9cbc304e..620bb5cf617c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -134,7 +134,7 @@
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
/**
* Ioctl definitions.
@@ -1044,7 +1044,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd_drv != cmd)) {
+ if (unlikely(ioctl->cmd != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index b10550ee1d89..6b7fdc1e2ed0 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -425,6 +425,12 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
+u32 host1x_syncpt_read(struct host1x_syncpt *sp)
+{
+ return host1x_syncpt_load(sp);
+}
+EXPORT_SYMBOL(host1x_syncpt_read);
+
int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
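
host1x_syncpt_read() differs from host1x_syncpt_read_min() in that it loads the live hardware value rather than the cached shadow. A minimal client-side sketch of the lifecycle this export completes, mirroring the tegra_dc_probe()/tegra_dc_remove() calls above:

    static u32 sample_syncpt(struct device *dev)
    {
            struct host1x_syncpt *sp;
            u32 value = 0;

            sp = host1x_syncpt_request(dev, HOST1X_SYNCPT_CLIENT_MANAGED);
            if (sp) {
                    value = host1x_syncpt_read(sp); /* live hardware value */
                    host1x_syncpt_free(sp);
            }

            return value;
    }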
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 3ddfb3d0b64d..2970c6bb668c 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -441,8 +441,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
@@ -459,8 +458,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
clkrate = clk_get_rate(di->clk_ipu);
div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
rate = clkrate / div;
error = rate / (sig->mode.pixelclock / 1000);
@@ -483,8 +481,7 @@ static void ipu_di_config_clock(struct ipu_di *di,
in_rate = clk_get_rate(clk);
div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
- if (div == 0)
- div = 1;
+ div = clamp(div, 1U, 255U);
clkgen0 = div << 4;
}
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index ad75588e1629..1dcb96ccda66 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -297,8 +297,8 @@ static int calc_resize_coeffs(struct ipu_ic *ic,
return -EINVAL;
}
- /* Cannot downsize more than 8:1 */
- if ((out_size << 3) < in_size) {
+ /* Cannot downsize more than 4:1 */
+ if ((out_size << 2) < in_size) {
dev_err(ipu->dev, "Unsupported downsize\n");
return -EINVAL;
}
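
Reading the shift as a multiplication makes the new limit explicit: out_size << 2 < in_size rejects any scaling where in_size exceeds four times out_size. For example, with in_size = 1920:

    (480 << 2) < 1920   /* 1920 < 1920 is false: exact 4:1, accepted  */
    (479 << 2) < 1920   /* 1916 < 1920 is true:  beyond 4:1, rejected */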