From 667ce33e57d0de4074a8fb62d24daeefd03f6333 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Wed, 28 Sep 2016 19:58:32 -0400
Subject: drm/msm: support multiple address spaces

We can have various combinations of 64b and 32b address space, ie. 64b
CPU but 32b display and gpu, or 64b CPU and GPU but 32b display. So
best to decouple the device iova's from mmap offset.

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/msm_gpu.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

(limited to 'drivers/gpu/drm/msm/msm_gpu.c')

diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3249707e6834..895abfa51ec7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -656,12 +656,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	 */
 	iommu = iommu_domain_alloc(&platform_bus_type);
 	if (iommu) {
+		/* TODO 32b vs 64b address space.. */
+		iommu->geometry.aperture_start = 0x1000;
+		iommu->geometry.aperture_end = 0xffffffff;
+
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
-		if (IS_ERR(gpu->mmu)) {
-			ret = PTR_ERR(gpu->mmu);
+		gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+				iommu, "gpu");
+		if (IS_ERR(gpu->aspace)) {
+			ret = PTR_ERR(gpu->aspace);
 			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-			gpu->mmu = NULL;
+			gpu->aspace = NULL;
 			iommu_domain_free(iommu);
 			goto fail;
 		}
@@ -669,7 +674,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	} else {
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}
-	gpu->id = msm_register_mmu(drm, gpu->mmu);
+	gpu->id = msm_register_address_space(drm, gpu->aspace);
 
 	/* Create ringbuffer: */
 
@@ -705,8 +710,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
-	if (gpu->mmu)
-		gpu->mmu->funcs->destroy(gpu->mmu);
+	if (gpu->aspace)
+		msm_gem_address_space_destroy(gpu->aspace);
 
 	if (gpu->fctx)
 		msm_fence_context_free(gpu->fctx);
--
cgit v1.2.3

From 78babc1633c4b0664ea516500c2ace9bf1f17bc7 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Fri, 11 Nov 2016 12:06:46 -0500
Subject: drm/msm: convert iova to 64b

For a5xx the gpu is 64b so we need to change iova to 64b everywhere.
On the display side, iova is still 32b so it can ignore the upper bits.
(Although all the armv8 devices have an iommu that can map 64b pa to
32b iova.)

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/adreno/adreno_gpu.h  |  2 +-
 drivers/gpu/drm/msm/dsi/dsi_host.c       |  4 ++--
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c |  4 ++--
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h  |  2 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c |  3 ++-
 drivers/gpu/drm/msm/msm_drv.h            |  6 +++---
 drivers/gpu/drm/msm/msm_fb.c             |  4 ++--
 drivers/gpu/drm/msm/msm_fbdev.c          |  2 +-
 drivers/gpu/drm/msm/msm_gem.c            |  6 +++---
 drivers/gpu/drm/msm/msm_gem.h            |  4 ++--
 drivers/gpu/drm/msm/msm_gem_submit.c     |  9 +++++----
 drivers/gpu/drm/msm/msm_gpu.c            |  2 +-
 drivers/gpu/drm/msm/msm_gpu.h            |  2 +-
 drivers/gpu/drm/msm/msm_iommu.c          | 12 ++++++------
 drivers/gpu/drm/msm/msm_mmu.h            |  4 ++--
 15 files changed, 34 insertions(+), 32 deletions(-)

(limited to 'drivers/gpu/drm/msm/msm_gpu.c')

diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 07d99bdf7c99..a2974864d054 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -153,7 +153,7 @@ struct adreno_gpu {
 	// different for z180..
struct adreno_rbmemptrs *memptrs; struct drm_gem_object *memptrs_bo; - uint32_t memptrs_iova; + uint64_t memptrs_iova; /* * Register offsets are different between some GPUs. diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f05ed0e1f3d6..ed9f2065f3e9 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -981,7 +981,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) struct drm_device *dev = msm_host->dev; const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; int ret; - u32 iova; + uint64_t iova; if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { mutex_lock(&dev->struct_mutex); @@ -1146,7 +1146,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) { const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; int ret; - u32 dma_base; + uint64_t dma_base; bool triggered; if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 9527dafc3e69..1c29618f4ddb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -373,7 +373,7 @@ static void update_cursor(struct drm_crtc *crtc) if (mdp4_crtc->cursor.stale) { struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; - uint32_t iova = mdp4_crtc->cursor.next_iova; + uint64_t iova = mdp4_crtc->cursor.next_iova; if (next_bo) { /* take a obj ref + iova ref when we start scanning out: */ @@ -418,7 +418,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_gem_object *cursor_bo, *old_bo; unsigned long flags; - uint32_t iova; + uint64_t iova; int ret; if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 8e9d59ed860a..62712ca164ee 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -51,7 +51,7 @@ struct mdp4_kms { /* empty/blank cursor bo to use when cursor is "disabled" */ struct drm_gem_object *blank_cursor_bo; - uint32_t blank_cursor_iova; + uint64_t blank_cursor_iova; }; #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 1272f40417ab..1ce8a01a5a28 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -489,7 +489,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_gem_object *cursor_bo, *old_bo = NULL; - uint32_t blendcfg, cursor_addr, stride; + uint32_t blendcfg, stride; + uint64_t cursor_addr; int ret, lm; enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 6cee9cfaaa56..ed4dad3ca133 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -209,9 +209,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, - uint32_t *iova); -int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); 
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id); + uint64_t *iova); +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova); +uint64_t msm_gem_iova(struct drm_gem_object *obj, int id); struct page **msm_gem_get_pages(struct drm_gem_object *obj); void msm_gem_put_pages(struct drm_gem_object *obj); void msm_gem_put_iova(struct drm_gem_object *obj, int id); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 95cf8fe72ee5..9acf544e7a8f 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -88,11 +88,11 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); int ret, i, n = drm_format_num_planes(fb->pixel_format); - uint32_t iova; + uint64_t iova; for (i = 0; i < n; i++) { ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova); - DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret); + DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret); if (ret) return ret; } diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d29f5e82a410..bffe93498512 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -76,7 +76,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb = NULL; struct fb_info *fbi = NULL; struct drm_mode_fb_cmd2 mode_cmd = {0}; - uint32_t paddr; + uint64_t paddr; int ret, size; DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7065e548fab4..cd06cfd94687 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -309,7 +309,7 @@ put_iova(struct drm_gem_object *obj) * the refcnt counter needs to be atomic_t. */ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, - uint32_t *iova) + uint64_t *iova) { struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret = 0; @@ -336,7 +336,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, } /* get iova, taking a reference. Should have a matching put */ -int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova) { struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret; @@ -358,7 +358,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) /* get iova without taking a reference, used in places where you have * already done a 'msm_gem_get_iova()'. 
*/ -uint32_t msm_gem_iova(struct drm_gem_object *obj, int id) +uint64_t msm_gem_iova(struct drm_gem_object *obj, int id) { struct msm_gem_object *msm_obj = to_msm_bo(obj); WARN_ON(!msm_obj->domain[id].iova); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 58bc45fa2826..7d529516b332 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -123,13 +123,13 @@ struct msm_gem_submit { struct { uint32_t type; uint32_t size; /* in dwords */ - uint32_t iova; + uint64_t iova; uint32_t idx; /* cmdstream buffer idx in bos[] */ } *cmd; /* array of size nr_cmds */ struct { uint32_t flags; struct msm_gem_object *obj; - uint32_t iova; + uint64_t iova; } bos[0]; }; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 25e8786fa4ca..166e84e4f0d4 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -241,7 +241,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - uint32_t iova; + uint64_t iova; /* if locking succeeded, pin bo: */ ret = msm_gem_get_iova_locked(&msm_obj->base, @@ -266,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) } static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, - struct msm_gem_object **obj, uint32_t *iova, bool *valid) + struct msm_gem_object **obj, uint64_t *iova, bool *valid) { if (idx >= submit->nr_bos) { DRM_ERROR("invalid buffer index: %u (out of %u)\n", @@ -312,7 +312,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob struct drm_msm_gem_submit_reloc submit_reloc; void __user *userptr = u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); - uint32_t iova, off; + uint32_t off; + uint64_t iova; bool valid; ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); @@ -461,7 +462,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, void __user *userptr = u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; - uint32_t iova; + uint64_t iova; ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); if (ret) { diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 895abfa51ec7..1277088426a7 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -528,7 +528,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - uint32_t iova; + uint64_t iova; /* can't happen yet.. 
but when we add 2d support we'll have * to deal w/ cross-ring synchronization: diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index c6bf5d6ebc20..6a7e78b317f2 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -80,7 +80,7 @@ struct msm_gpu { /* ringbuffer: */ struct msm_ringbuffer *rb; - uint32_t rb_iova; + uint64_t rb_iova; /* list of GEM active objects: */ struct list_head active_list; diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 3a294d0da3a0..61aaaa1de6eb 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -45,13 +45,13 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, iommu_detach_device(iommu->domain, mmu->dev); } -static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, +static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len, int prot) { struct msm_iommu *iommu = to_msm_iommu(mmu); struct iommu_domain *domain = iommu->domain; struct scatterlist *sg; - unsigned int da = iova; + unsigned long da = iova; unsigned int i, j; int ret; @@ -62,7 +62,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, dma_addr_t pa = sg_phys(sg) - sg->offset; size_t bytes = sg->length + sg->offset; - VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes); + VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes); ret = iommu_map(domain, da, pa, bytes, prot); if (ret) @@ -84,13 +84,13 @@ fail: return ret; } -static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, +static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len) { struct msm_iommu *iommu = to_msm_iommu(mmu); struct iommu_domain *domain = iommu->domain; struct scatterlist *sg; - unsigned int da = iova; + unsigned long da = iova; int i; for_each_sg(sgt->sgl, sg, sgt->nents, i) { @@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, if (unmapped < bytes) return unmapped; - VERB("unmap[%d]: %08x(%zx)", i, da, bytes); + VERB("unmap[%d]: %08lx(%zx)", i, da, bytes); BUG_ON(!PAGE_ALIGNED(bytes)); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index b8ca9a0e9170..f85c879e68d2 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -23,9 +23,9 @@ struct msm_mmu_funcs { int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); - int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, + int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len, int prot); - int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, + int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len); void (*destroy)(struct msm_mmu *mmu); }; -- cgit v1.2.3 From 89d777a572459d6ea726b609838beaef0c1b94a7 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 28 Nov 2016 12:28:31 -0700 Subject: drm/msm: Remove 'src_clk' from adreno configuration The adreno code inherited a silly workaround from downstream from the bad old days before decent clock control. grp_clk[0] (named 'src_clk') doesn't actually exist - it was used as a proxy for whatever the core clock actually was (usually 'core_clk'). All targets should be able to correctly request 'core_clk' and get the right thing back so zap the anachronism and directly use grp_clk[0] to control the clock rate. 
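For reference, a condensed sketch of the enable path as it ends up after this change (taken directly from the diff below; the disable path mirrors it in reverse order, and clk_prepare()/clk_enable() return values are ignored just as in the original code):

static int enable_clk(struct msm_gpu *gpu)
{
	int i;

	/* grp_clks[0] is now the core clock itself, so program the rate
	 * directly on it rather than on a stand-in "src_clk":
	 */
	if (gpu->grp_clks[0] && gpu->fast_rate)
		clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);

	/* prepare and enable every clock in the array, index 0 included */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}
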
Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/msm_gpu.c | 36 +++++++++++++----------------------- drivers/gpu/drm/msm/msm_gpu.h | 2 +- 2 files changed, 14 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gpu.c') diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 1277088426a7..3d6e3b7a13e2 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -91,21 +91,16 @@ static int disable_pwrrail(struct msm_gpu *gpu) static int enable_clk(struct msm_gpu *gpu) { - struct clk *rate_clk = NULL; int i; - /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */ - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) { - if (gpu->grp_clks[i]) { - clk_prepare(gpu->grp_clks[i]); - rate_clk = gpu->grp_clks[i]; - } - } + if (gpu->grp_clks[0] && gpu->fast_rate) + clk_set_rate(gpu->grp_clks[0], gpu->fast_rate); - if (rate_clk && gpu->fast_rate) - clk_set_rate(rate_clk, gpu->fast_rate); + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + if (gpu->grp_clks[i]) + clk_prepare(gpu->grp_clks[i]); - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_enable(gpu->grp_clks[i]); @@ -114,24 +109,19 @@ static int enable_clk(struct msm_gpu *gpu) static int disable_clk(struct msm_gpu *gpu) { - struct clk *rate_clk = NULL; int i; - /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */ - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) { - if (gpu->grp_clks[i]) { + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + if (gpu->grp_clks[i]) clk_disable(gpu->grp_clks[i]); - rate_clk = gpu->grp_clks[i]; - } - } - if (rate_clk && gpu->slow_rate) - clk_set_rate(rate_clk, gpu->slow_rate); - - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_unprepare(gpu->grp_clks[i]); + if (gpu->grp_clks[0] && gpu->slow_rate) + clk_set_rate(gpu->grp_clks[0], gpu->slow_rate); + return 0; } @@ -563,7 +553,7 @@ static irqreturn_t irq_handler(int irq, void *data) } static const char *clk_names[] = { - "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk", + "core_clk", "iface_clk", "mem_clk", "mem_iface_clk", "alt_mem_iface_clk", }; diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index c4c39d3272c7..10252d07bb14 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -103,7 +103,7 @@ struct msm_gpu { /* Power Control: */ struct regulator *gpu_reg, *gpu_cx; - struct clk *ebi1_clk, *grp_clks[6]; + struct clk *ebi1_clk, *grp_clks[5]; uint32_t fast_rate, slow_rate, bus_freq; #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -- cgit v1.2.3 From b5f103ab98c77ca5998b39533c2b46959fbd37d9 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 28 Nov 2016 12:28:33 -0700 Subject: drm/msm: gpu: Add A5XX target support Add support for the A5XX family of Adreno GPUs. 
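The new target code leans on the 64b iova conversion from the earlier patch: GPU addresses are handed to the hardware as lo/hi 32-bit halves. A representative fragment of that recurring pattern, condensed from a5xx_submit() and a5xx_ucode_init() in the diff below (not a standalone function, just the idiom):

	/* emit a 64-bit IB address into the ring as two 32-bit dwords */
	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
	OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
	OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
	OUT_RING(ring, submit->cmd[i].size);

	/* 64-bit register values go through lo/hi register pairs */
	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
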
Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/Makefile | 1 + drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 830 +++++++++++++++++++++++++++++ drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 37 ++ drivers/gpu/drm/msm/adreno/adreno_device.c | 24 +- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 6 +- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 41 ++ drivers/gpu/drm/msm/msm_gpu.c | 13 +- drivers/gpu/drm/msm/msm_gpu.h | 2 +- 8 files changed, 945 insertions(+), 9 deletions(-) create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_gpu.h (limited to 'drivers/gpu/drm/msm/msm_gpu.c') diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 90f66c408120..3c9f0ccc8abb 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -6,6 +6,7 @@ msm-y := \ adreno/adreno_gpu.o \ adreno/a3xx_gpu.o \ adreno/a4xx_gpu.o \ + adreno/a5xx_gpu.o \ hdmi/hdmi.o \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c new file mode 100644 index 000000000000..bf0a93038554 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -0,0 +1,830 @@ +/* Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "msm_gem.h" +#include "a5xx_gpu.h" + +extern bool hang_debug; +static void a5xx_dump(struct msm_gpu *gpu); + +static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = gpu->rb; + unsigned int i, ibs = 0; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + if (priv->lastctx == ctx) + break; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + ibs++; + break; + } + } + + OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); + OUT_RING(ring, submit->fence->seqno); + + OUT_PKT7(ring, CP_EVENT_WRITE, 4); + OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); + OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence))); + OUT_RING(ring, submit->fence->seqno); + + gpu->funcs->flush(gpu); +} + +struct a5xx_hwcg { + u32 offset; + u32 value; +}; + +static const struct a5xx_hwcg a530_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF}, + 
{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + 
{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} +}; + +static const struct { + int (*test)(struct adreno_gpu *gpu); + const struct a5xx_hwcg *regs; + unsigned int count; +} a5xx_hwcg_regs[] = { + { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, +}; + +static void _a5xx_enable_hwcg(struct msm_gpu *gpu, + const struct a5xx_hwcg *regs, unsigned int count) +{ + unsigned int i; + + for (i = 0; i < count; i++) + gpu_write(gpu, regs[i].offset, regs[i].value); + + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); +} + +static void a5xx_enable_hwcg(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { + if (a5xx_hwcg_regs[i].test(adreno_gpu)) { + _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, + a5xx_hwcg_regs[i].count); + return; + } + } +} + +static int a5xx_me_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct msm_ringbuffer *ring = gpu->rb; + + OUT_PKT7(ring, CP_ME_INIT, 8); + + OUT_RING(ring, 0x0000002F); + + /* Enable multiple hardware contexts */ + OUT_RING(ring, 0x00000003); + + /* Enable error detection */ + OUT_RING(ring, 0x20000000); + + /* Don't enable header dump */ + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + /* Specify workarounds for various microcode issues */ + if (adreno_is_a530(adreno_gpu)) { + /* Workaround for token end syncs + * Force a WFI after every direct-render 3D mode draw and every + * 2D mode 3 draw + */ + OUT_RING(ring, 0x0000000B); + } else { + /* No workarounds enabled */ + OUT_RING(ring, 0x00000000); + } + + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + gpu->funcs->flush(gpu); + + return gpu->funcs->idle(gpu) ? 
0 : -EINVAL; +} + +static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova) +{ + struct drm_device *drm = gpu->dev; + struct drm_gem_object *bo; + void *ptr; + + mutex_lock(&drm->struct_mutex); + bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED); + mutex_unlock(&drm->struct_mutex); + + if (IS_ERR(bo)) + return bo; + + ptr = msm_gem_get_vaddr(bo); + if (!ptr) { + drm_gem_object_unreference_unlocked(bo); + return ERR_PTR(-ENOMEM); + } + + if (iova) { + int ret = msm_gem_get_iova(bo, gpu->id, iova); + + if (ret) { + drm_gem_object_unreference_unlocked(bo); + return ERR_PTR(ret); + } + } + + memcpy(ptr, &fw->data[4], fw->size - 4); + + msm_gem_put_vaddr(bo); + return bo; +} + +static int a5xx_ucode_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int ret; + + if (!a5xx_gpu->pm4_bo) { + a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4, + &a5xx_gpu->pm4_iova); + + if (IS_ERR(a5xx_gpu->pm4_bo)) { + ret = PTR_ERR(a5xx_gpu->pm4_bo); + a5xx_gpu->pm4_bo = NULL; + dev_err(gpu->dev->dev, "could not allocate PM4: %d\n", + ret); + return ret; + } + } + + if (!a5xx_gpu->pfp_bo) { + a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp, + &a5xx_gpu->pfp_iova); + + if (IS_ERR(a5xx_gpu->pfp_bo)) { + ret = PTR_ERR(a5xx_gpu->pfp_bo); + a5xx_gpu->pfp_bo = NULL; + dev_err(gpu->dev->dev, "could not allocate PFP: %d\n", + ret); + return ret; + } + } + + gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, + REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova); + + gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, + REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova); + + return 0; +} + +#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ + A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \ + A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ + A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \ + A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ + A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) + +static int a5xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + + /* Make all blocks contribute to the GPU BUSY perf counter */ + gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF); + + /* Enable RBBM error reporting bits */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); + + if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { + /* + * Mask out the activity signals from RB1-3 to avoid false + * positives + */ + + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11, + 0xF0000000); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18, + 0xFFFFFFFF); + } + + /* Enable fault detection */ + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL, + (1 << 30) | 0xFFFF); + + /* Turn on performance counters */ + gpu_write(gpu, 
REG_A5XX_RBBM_PERFCTR_CNTL, 0x01); + + /* Increase VFD cache access so LRZ and other data gets evicted less */ + gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02); + + /* Disable L2 bypass in the UCHE */ + gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000); + gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF); + gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000); + gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF); + + /* Set the GMEM VA range (0 to gpu->gmem) */ + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000); + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000); + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO, + 0x00100000 + adreno_gpu->gmem - 1); + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); + + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); + + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); + + if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) + gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); + + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); + + /* Enable USE_RETENTION_FLOPS */ + gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000); + + /* Enable ME/PFP split notification */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); + + /* Enable HWCG */ + a5xx_enable_hwcg(gpu); + + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); + + /* Set the highest bank bit */ + gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7); + gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1); + + /* Protect registers from the CP */ + gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007); + + /* RBBM */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64)); + + /* Content protect */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(6), + ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, + 16)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(7), + ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2)); + + /* CP */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1)); + + /* RB */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2)); + + /* VPC */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4)); + + /* UCHE */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); + + if (adreno_is_a530(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_PROTECT(17), + ADRENO_PROTECT_RW(0x10000, 0x8000)); + + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0); + /* + * Disable the trusted memory range - we don't actually supported secure + * memory rendering at this point in time and we don't want to block off + * part of the virtual memory space. 
+ */ + gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, + REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000); + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); + + ret = adreno_hw_init(gpu); + if (ret) + return ret; + + ret = a5xx_ucode_init(gpu); + if (ret) + return ret; + + /* Disable the interrupts through the initial bringup stage */ + gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK); + + /* Clear ME_HALT to start the micro engine */ + gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0); + ret = a5xx_me_init(gpu); + if (ret) + return ret; + + /* Put the GPU into insecure mode */ + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); + + /* + * Send a pipeline event stat to get misbehaving counters to start + * ticking correctly + */ + if (adreno_is_a530(adreno_gpu)) { + OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1); + OUT_RING(gpu->rb, 0x0F); + + gpu->funcs->flush(gpu); + if (!gpu->funcs->idle(gpu)) + return -EINVAL; + } + + return 0; +} + +static void a5xx_recover(struct msm_gpu *gpu) +{ + int i; + + adreno_dump_info(gpu); + + for (i = 0; i < 8; i++) { + printk("CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i))); + } + + if (hang_debug) + a5xx_dump(gpu); + + gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1); + gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD); + gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0); + adreno_recover(gpu); +} + +static void a5xx_destroy(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + DBG("%s", gpu->name); + + if (a5xx_gpu->pm4_bo) { + if (a5xx_gpu->pm4_iova) + msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id); + drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo); + } + + if (a5xx_gpu->pfp_bo) { + if (a5xx_gpu->pfp_iova) + msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id); + drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo); + } + + adreno_gpu_cleanup(adreno_gpu); + kfree(a5xx_gpu); +} + +static inline bool _a5xx_check_idle(struct msm_gpu *gpu) +{ + if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY) + return false; + + /* + * Nearly every abnormality ends up pausing the GPU and triggering a + * fault so we can safely just watch for this one interrupt to fire + */ + return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) & + A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT); +} + +static bool a5xx_idle(struct msm_gpu *gpu) +{ + /* wait for CP to drain ringbuffer: */ + if (!adreno_idle(gpu)) + return false; + + if (spin_until(_a5xx_check_idle(gpu))) { + DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n", + gpu->name, __builtin_return_address(0), + gpu_read(gpu, REG_A5XX_RBBM_STATUS), + gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS)); + + return false; + } + + return true; +} + +static void a5xx_cp_err_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS); + + if (status & A5XX_CP_INT_CP_OPCODE_ERROR) { + u32 val; + + gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0); + + /* + * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so + * read it twice + */ + + gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA); + val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA); + + dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n", + val); + } + + if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR) + dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n", + gpu_read(gpu, REG_A5XX_CP_HW_FAULT)); + + if (status & A5XX_CP_INT_CP_DMA_ERROR) + dev_err_ratelimited(gpu->dev->dev, "CP | 
DMA error\n"); + + if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) { + u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS); + + dev_err_ratelimited(gpu->dev->dev, + "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n", + val & (1 << 24) ? "WRITE" : "READ", + (val & 0xFFFFF) >> 2, val); + } + + if (status & A5XX_CP_INT_CP_AHB_ERROR) { + u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT); + const char *access[16] = { "reserved", "reserved", + "timestamp lo", "timestamp hi", "pfp read", "pfp write", + "", "", "me read", "me write", "", "", "crashdump read", + "crashdump write" }; + + dev_err_ratelimited(gpu->dev->dev, + "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n", + status & 0xFFFFF, access[(status >> 24) & 0xF], + (status & (1 << 31)), status); + } +} + +static void a5xx_rbbm_err_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) { + u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS); + + dev_err_ratelimited(gpu->dev->dev, + "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n", + val & (1 << 28) ? "WRITE" : "READ", + (val & 0xFFFFF) >> 2, (val >> 20) & 0x3, + (val >> 24) & 0xF); + + /* Clear the error */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4)); + } + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n"); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n"); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n"); +} + +static void a5xx_uche_err_irq(struct msm_gpu *gpu) +{ + uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI); + + addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO); + + dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n", + addr); +} + +static void a5xx_gpmu_err_irq(struct msm_gpu *gpu) +{ + dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n"); +} + +#define RBBM_ERROR_MASK \ + (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ + A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW) + +static irqreturn_t a5xx_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); + + gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status); + + if (status & RBBM_ERROR_MASK) + a5xx_rbbm_err_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR) + a5xx_cp_err_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS) + a5xx_uche_err_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) + a5xx_gpmu_err_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) + msm_gpu_retire(gpu); + + 
return IRQ_HANDLED; +} + +static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { + REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE), + REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI), + REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR), + REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI, + REG_A5XX_CP_RB_RPTR_ADDR_HI), + REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR), + REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR), + REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL), +}; + +static const u32 a5xx_registers[] = { + 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, + 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, + 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, + 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, + 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, + 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, + 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, + 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, + 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, + 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, + 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, + 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, + 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, + 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, + 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, + 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, + 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, + 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, + 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, + 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, + 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, + 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, + 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, + 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, + 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, + 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, + 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, + ~0 +}; + +static void a5xx_dump(struct msm_gpu *gpu) +{ + dev_info(gpu->dev->dev, "status: %08x\n", + gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + adreno_dump(gpu); +} + +static int a5xx_pm_resume(struct msm_gpu *gpu) +{ + return msm_gpu_pm_resume(gpu); +} + +static int a5xx_pm_suspend(struct msm_gpu *gpu) +{ + return msm_gpu_pm_suspend(gpu); +} + +static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +{ + *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO, + REG_A5XX_RBBM_PERFCTR_CP_0_HI); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) +{ + gpu->funcs->pm_resume(gpu); + + seq_printf(m, "status: %08x\n", + gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + gpu->funcs->pm_suspend(gpu); + + adreno_show(gpu, m); +} +#endif + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .hw_init = a5xx_hw_init, + .pm_suspend = a5xx_pm_suspend, + .pm_resume = a5xx_pm_resume, + .recover = a5xx_recover, + .last_fence = adreno_last_fence, + .submit = a5xx_submit, + .flush = adreno_flush, + .idle = a5xx_idle, + .irq = a5xx_irq, + .destroy = a5xx_destroy, + 
.show = a5xx_show, + }, + .get_timestamp = a5xx_get_timestamp, +}; + +struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct a5xx_gpu *a5xx_gpu = NULL; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + int ret; + + if (!pdev) { + dev_err(dev->dev, "No A5XX device is defined\n"); + return ERR_PTR(-ENXIO); + } + + a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL); + if (!a5xx_gpu) + return ERR_PTR(-ENOMEM); + + adreno_gpu = &a5xx_gpu->base; + gpu = &adreno_gpu->base; + + a5xx_gpu->pdev = pdev; + adreno_gpu->registers = a5xx_registers; + adreno_gpu->reg_offsets = a5xx_register_offsets; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); + if (ret) { + a5xx_destroy(&(a5xx_gpu->base.base)); + return ERR_PTR(ret); + } + + return gpu; +} diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h new file mode 100644 index 000000000000..39a07f400b35 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __A5XX_GPU_H__ +#define __A5XX_GPU_H__ + +#include "adreno_gpu.h" + +/* Bringing over the hack from the previous targets */ +#undef ROP_COPY +#undef ROP_XOR + +#include "a5xx.xml.h" + +struct a5xx_gpu { + struct adreno_gpu base; + struct platform_device *pdev; + + struct drm_gem_object *pm4_bo; + uint64_t pm4_iova; + + struct drm_gem_object *pfp_bo; + uint64_t pfp_iova; +}; + +#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) + +#endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 1fa1fdda9ee2..985d95fbb726 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -74,6 +74,14 @@ static const struct adreno_info gpulist[] = { .pfpfw = "a420_pfp.fw", .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, + }, { + .rev = ADRENO_REV(5, 3, 0, ANY_ID), + .revn = 530, + .name = "A530", + .pm4fw = "a530_pm4.fw", + .pfpfw = "a530_pfp.fw", + .gmem = SZ_1M, + .init = a5xx_gpu_init, }, }; @@ -83,6 +91,8 @@ MODULE_FIRMWARE("a330_pm4.fw"); MODULE_FIRMWARE("a330_pfp.fw"); MODULE_FIRMWARE("a420_pm4.fw"); MODULE_FIRMWARE("a420_pfp.fw"); +MODULE_FIRMWARE("a530_fm4.fw"); +MODULE_FIRMWARE("a530_pfp.fw"); static inline bool _rev_match(uint8_t entry, uint8_t id) { @@ -170,12 +180,20 @@ static void set_gpu_pdev(struct drm_device *dev, priv->gpu_pdev = pdev; } +static const struct { + const char *str; + uint32_t flag; +} quirks[] = { + { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI }, + { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK }, +}; + static int adreno_bind(struct device *dev, struct device *master, void *data) { static struct adreno_platform_config config = {}; struct device_node *child, *node = dev->of_node; u32 val; - int ret; + int ret, i; ret = of_property_read_u32(node, "qcom,chipid", &val); if (ret) { @@ -209,6 
+227,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) return -ENXIO; } + for (i = 0; i < ARRAY_SIZE(quirks); i++) + if (of_property_read_bool(node, quirks[i].str)) + config.quirks |= quirks[i].flag; + dev->platform_data = &config; set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); return 0; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index c3a4c53eb8b3..a18126150e11 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -22,7 +22,7 @@ #include "msm_mmu.h" #define RB_SIZE SZ_32K -#define RB_BLKSIZE 16 +#define RB_BLKSIZE 32 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) { @@ -54,9 +54,6 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) } } -#define rbmemptr(adreno_gpu, member) \ - ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member)) - int adreno_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -349,6 +346,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu->gmem = adreno_gpu->info->gmem; adreno_gpu->revn = adreno_gpu->info->revn; adreno_gpu->rev = config->rev; + adreno_gpu->quirks = config->quirks; gpu->fast_rate = config->fast_rate; gpu->slow_rate = config->slow_rate; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 9f21ca010525..0d1f4e757f59 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -48,6 +48,11 @@ enum adreno_regs { REG_ADRENO_REGISTER_MAX, }; +enum adreno_quirks { + ADRENO_QUIRK_TWO_PASS_USE_WFI = 1, + ADRENO_QUIRK_FAULT_DETECT_MASK = 2, +}; + struct adreno_rev { uint8_t core; uint8_t major; @@ -74,6 +79,9 @@ struct adreno_info { const struct adreno_info *adreno_info(struct adreno_rev rev); +#define rbmemptr(adreno_gpu, member) \ + ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member)) + struct adreno_rbmemptrs { volatile uint32_t rptr; volatile uint32_t wptr; @@ -107,6 +115,8 @@ struct adreno_gpu { * code (a3xx_gpu.c) and stored in this common location. 
*/ const unsigned int *reg_offsets; + + uint32_t quirks; }; #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) @@ -117,6 +127,7 @@ struct adreno_platform_config { #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; #endif + uint32_t quirks; }; #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000) @@ -180,6 +191,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu) return gpu->revn == 430; } +static inline int adreno_is_a530(struct adreno_gpu *gpu) +{ + return gpu->revn == 530; +} + int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_hw_init(struct msm_gpu *gpu); uint32_t adreno_last_fence(struct msm_gpu *gpu); @@ -299,6 +315,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu, struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); +struct msm_gpu *a5xx_gpu_init(struct drm_device *dev); static inline void adreno_gpu_write64(struct adreno_gpu *gpu, enum adreno_regs lo, enum adreno_regs hi, u64 data) @@ -307,4 +324,28 @@ static inline void adreno_gpu_write64(struct adreno_gpu *gpu, adreno_gpu_write(gpu, hi, upper_32_bits(data)); } +/* + * Given a register and a count, return a value to program into + * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len + * registers starting at _reg. + * + * The register base needs to be a multiple of the length. If it is not, the + * hardware will quietly mask off the bits for you and shift the size. For + * example, if you intend the protection to start at 0x07 for a length of 4 + * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might + * expose registers you intended to protect! + */ +#define ADRENO_PROTECT_RW(_reg, _len) \ + ((1 << 30) | (1 << 29) | \ + ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF)) + +/* + * Same as above, but allow reads over the range. For areas of mixed use (such + * as performance counters) this allows us to protect a much larger range with a + * single register + */ +#define ADRENO_PROTECT_RDONLY(_reg, _len) \ + ((1 << 29) \ + ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF)) + #endif /* __ADRENO_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 3d6e3b7a13e2..b28527a65d09 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -96,6 +96,10 @@ static int enable_clk(struct msm_gpu *gpu) if (gpu->grp_clks[0] && gpu->fast_rate) clk_set_rate(gpu->grp_clks[0], gpu->fast_rate); + /* Set the RBBM timer rate to 19.2Mhz */ + if (gpu->grp_clks[2]) + clk_set_rate(gpu->grp_clks[2], 19200000); + for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_prepare(gpu->grp_clks[i]); @@ -122,6 +126,9 @@ static int disable_clk(struct msm_gpu *gpu) if (gpu->grp_clks[0] && gpu->slow_rate) clk_set_rate(gpu->grp_clks[0], gpu->slow_rate); + if (gpu->grp_clks[2]) + clk_set_rate(gpu->grp_clks[2], 0); + return 0; } @@ -553,8 +560,8 @@ static irqreturn_t irq_handler(int irq, void *data) } static const char *clk_names[] = { - "core_clk", "iface_clk", "mem_clk", "mem_iface_clk", - "alt_mem_iface_clk", + "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk", + "mem_iface_clk", "alt_mem_iface_clk", }; int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, @@ -647,7 +654,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, iommu = iommu_domain_alloc(&platform_bus_type); if (iommu) { /* TODO 32b vs 64b address space.. 
*/ - iommu->geometry.aperture_start = 0x1000; + iommu->geometry.aperture_start = SZ_16M; iommu->geometry.aperture_end = 0xffffffff; dev_info(drm->dev, "%s: using IOMMU\n", name); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 10252d07bb14..c4c39d3272c7 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -103,7 +103,7 @@ struct msm_gpu { /* Power Control: */ struct regulator *gpu_reg, *gpu_cx; - struct clk *ebi1_clk, *grp_clks[5]; + struct clk *ebi1_clk, *grp_clks[6]; uint32_t fast_rate, slow_rate, bus_freq; #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -- cgit v1.2.3