-rw-r--r-- | drivers/gpu/drm/msm/msm_drv.c |  2
-rw-r--r-- | drivers/gpu/drm/msm/msm_drv.h | 13
-rw-r--r-- | drivers/gpu/drm/msm/msm_gem.c | 44
-rw-r--r-- | drivers/gpu/drm/msm/msm_gem.h | 47
4 files changed, 90 insertions, 16 deletions
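For orientation before the hunks: the patch adds a per-object pin_count and a driver-wide evictable_count, and renames inactive_purged to inactive_unpinned so that unpinned-but-not-purged objects share one list. The sketch below is a minimal userspace model of that bookkeeping, an illustrative assumption rather than driver code: the struct/field names and the simplified is_unevictable() condition merely stand in for the kernel's msm_obj fields, and the real helpers additionally warn on mm_lock misuse and exclude unpurgeable (e.g. imported) buffers.

/*
 * Simplified model (not from this patch): an object contributes to
 * evictable_count only while it is inactive, WILLNEED, backed by pages,
 * and neither pinned nor vmap'd.  Builds standalone with a C compiler.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

enum madv { WILLNEED, DONTNEED, PURGED };

struct model_obj {
	size_t pages;      /* backing size in pages; 0 models a purged/unbacked bo */
	enum madv madv;
	int pin_count;     /* stands in for msm_obj->pin_count */
	int active_count;  /* stands in for msm_obj->active_count */
	bool vmapped;      /* stands in for msm_obj->vaddr != NULL */
	bool evictable;    /* stands in for msm_obj->evictable */
};

static long evictable_count;   /* stands in for priv->evictable_count */

static bool is_unevictable(const struct model_obj *o)
{
	return o->pin_count || o->vmapped || !o->pages;
}

static void mark_evictable(struct model_obj *o)
{
	if (is_unevictable(o) || o->evictable)
		return;
	evictable_count += (long)o->pages;
	o->evictable = true;
}

static void mark_unevictable(struct model_obj *o)
{
	if (!o->evictable)
		return;
	evictable_count -= (long)o->pages;
	assert(evictable_count >= 0);
	o->evictable = false;
}

/* Analogue of update_inactive(): re-evaluate after pin/unpin/vmap/madvise. */
static void update_inactive(struct model_obj *o)
{
	if (o->active_count != 0)
		return;                    /* active objects are never on the LRUs */
	if (o->evictable)
		mark_unevictable(o);
	if (o->madv == WILLNEED && o->pages)
		mark_evictable(o);         /* only backed WILLNEED objects count */
}

int main(void)
{
	struct model_obj bo = { .pages = 4, .madv = WILLNEED };

	update_inactive(&bo);              /* idle, backed, unpinned -> evictable */
	assert(evictable_count == 4);

	bo.pin_count++;                    /* pinning (e.g. scanout) removes it   */
	update_inactive(&bo);
	assert(evictable_count == 0);

	bo.pin_count--;                    /* unpin returns it to the pool        */
	update_inactive(&bo);
	assert(evictable_count == 4);
	return 0;
}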
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 09a968c4d27e..eefe6ebfc985 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -452,7 +452,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
 	INIT_LIST_HEAD(&priv->inactive_willneed);
 	INIT_LIST_HEAD(&priv->inactive_dontneed);
-	INIT_LIST_HEAD(&priv->inactive_purged);
+	INIT_LIST_HEAD(&priv->inactive_unpinned);
 	mutex_init(&priv->mm_lock);
 
 	/* Teach lockdep about lock ordering wrt. shrinker: */
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 6a42cdf4cf7e..2668941df529 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -182,11 +182,15 @@ struct msm_drm_private {
 	struct mutex obj_lock;
 
 	/**
-	 * Lists of inactive GEM objects.  Every bo is either in one of the
+	 * LRUs of inactive GEM objects.  Every bo is either in one of the
 	 * inactive lists (depending on whether or not it is shrinkable) or
 	 * gpu->active_list (for the gpu it is active on[1]), or transiently
 	 * on a temporary list as the shrinker is running.
 	 *
+	 * Note that inactive_willneed also contains pinned and vmap'd bos,
+	 * but the number of pinned-but-not-active objects is small (scanout
+	 * buffers, ringbuffer, etc).
+	 *
 	 * These lists are protected by mm_lock (which should be acquired
 	 * before per GEM object lock).  One should *not* hold mm_lock in
 	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
@@ -194,10 +198,11 @@ struct msm_drm_private {
 	 * [1] if someone ever added support for the old 2d cores, there could be
 	 * more than one gpu object
 	 */
-	struct list_head inactive_willneed;  /* inactive + !shrinkable */
-	struct list_head inactive_dontneed;  /* inactive + shrinkable */
-	struct list_head inactive_purged;    /* inactive + purged */
+	struct list_head inactive_willneed;  /* inactive + potentially unpin/evictable */
+	struct list_head inactive_dontneed;  /* inactive + shrinkable */
+	struct list_head inactive_unpinned;  /* inactive + purged or unpinned */
 	long shrinkable_count;               /* write access under mm_lock */
+	long evictable_count;                /* write access under mm_lock */
 	struct mutex mm_lock;
 
 	struct workqueue_struct *wq;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 124306bec6f8..58051bc54a9c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -130,6 +130,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 			sync_for_device(msm_obj);
+
+		GEM_WARN_ON(msm_obj->active_count);
+		update_inactive(msm_obj);
 	}
 
 	return msm_obj->pages;
@@ -428,7 +431,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	struct page **pages;
-	int prot = IOMMU_READ;
+	int ret, prot = IOMMU_READ;
 
 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 		prot |= IOMMU_WRITE;
@@ -449,8 +452,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	return msm_gem_map_vma(aspace, vma, prot,
+	ret = msm_gem_map_vma(aspace, vma, prot,
 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
+
+	if (!ret)
+		msm_obj->pin_count++;
+
+	return ret;
 }
 
 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
@@ -542,14 +550,21 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
 	GEM_WARN_ON(!msm_gem_is_locked(obj));
 
 	vma = lookup_vma(obj, aspace);
 
-	if (!GEM_WARN_ON(!vma))
+	if (!GEM_WARN_ON(!vma)) {
 		msm_gem_unmap_vma(aspace, vma);
+
+		msm_obj->pin_count--;
+		GEM_WARN_ON(msm_obj->pin_count < 0);
+
+		update_inactive(msm_obj);
+	}
 }
 
 /*
@@ -800,9 +815,12 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 	GEM_WARN_ON(!msm_gem_is_locked(obj));
 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 	GEM_WARN_ON(msm_obj->dontneed);
+	GEM_WARN_ON(!msm_obj->sgt);
 
 	if (msm_obj->active_count++ == 0) {
 		mutex_lock(&priv->mm_lock);
+		if (msm_obj->evictable)
+			mark_unevictable(msm_obj);
 		list_del(&msm_obj->mm_list);
 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 		mutex_unlock(&priv->mm_lock);
@@ -825,21 +843,28 @@ static void update_inactive(struct msm_gem_object *msm_obj)
 {
 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
 
+	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+
+	if (msm_obj->active_count != 0)
+		return;
+
 	mutex_lock(&priv->mm_lock);
-	GEM_WARN_ON(msm_obj->active_count != 0);
 
 	if (msm_obj->dontneed)
 		mark_unpurgeable(msm_obj);
+	if (msm_obj->evictable)
+		mark_unevictable(msm_obj);
 
 	list_del(&msm_obj->mm_list);
-	if (msm_obj->madv == MSM_MADV_WILLNEED) {
+	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+		mark_evictable(msm_obj);
 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
 		mark_purgeable(msm_obj);
 	} else {
-		GEM_WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
+		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
 	}
 
 	mutex_unlock(&priv->mm_lock);
@@ -1201,8 +1226,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	}
 
 	mutex_lock(&priv->mm_lock);
-	/* Initially obj is idle, obj->madv == WILLNEED: */
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
 	mutex_unlock(&priv->mm_lock);
 
 	mutex_lock(&priv->obj_lock);
@@ -1276,7 +1300,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	msm_gem_unlock(obj);
 
 	mutex_lock(&priv->mm_lock);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
 	mutex_unlock(&priv->mm_lock);
 
 	mutex_lock(&priv->obj_lock);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 4ac0062312d4..a6480d2c81b2 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -61,6 +61,11 @@ struct msm_gem_object {
 	bool dontneed : 1;
 
 	/**
+	 * Is object evictable (ie. counted in priv->evictable_count)?
+	 */
+	bool evictable : 1;
+
+	/**
 	 * count of active vmap'ing
 	 */
 	uint8_t vmap_count;
@@ -74,7 +79,7 @@ struct msm_gem_object {
 	/**
 	 * An object is either:
 	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
-	 *     (depending on purgability status)
+	 *     (depending on purgeability status)
 	 *  active   - on one one of the gpu's active_list..  well, at
 	 *     least for now we don't have (I don't think) hw sync between
 	 *     2d and 3d one devices which have both, meaning we need to
@@ -103,6 +108,7 @@ struct msm_gem_object {
 	char name[32]; /* Identifier to print for the debugfs files */
 
 	int active_count;
+	int pin_count;
 };
 
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -263,7 +269,46 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
 	msm_obj->dontneed = false;
 }
 
+static inline bool is_unevictable(struct msm_gem_object *msm_obj)
+{
+	return is_unpurgeable(msm_obj) || msm_obj->pin_count || msm_obj->vaddr;
+}
+
+static inline void mark_evictable(struct msm_gem_object *msm_obj)
+{
+	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+	if (is_unevictable(msm_obj))
+		return;
+
+	if (WARN_ON(msm_obj->evictable))
+		return;
+
+	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
+	msm_obj->evictable = true;
+}
+
+static inline void mark_unevictable(struct msm_gem_object *msm_obj)
+{
+	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+	if (is_unevictable(msm_obj))
+		return;
+
+	if (WARN_ON(!msm_obj->evictable))
+		return;
+
+	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
+	WARN_ON(priv->evictable_count < 0);
+	msm_obj->evictable = false;
+}
+
 void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_evict(struct drm_gem_object *obj);
 void msm_gem_vunmap(struct drm_gem_object *obj);
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
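The counters added above are groundwork for shrinker-driven eviction; msm_gem_evict() is only declared here, and no shrinker changes are part of this diff. As a rough, hypothetical illustration of how a consumer might use priv->evictable_count, a count_objects() callback could report evictable pages alongside the already-tracked purgeable ones. The function below is an assumption for illustration: it presumes priv->shrinker exists and that evicted contents would be swapped out, hence the get_nr_swap_pages() gate.

/*
 * Hypothetical sketch, not from this patch: shrinker-side accounting that
 * adds evictable pages when swap space is available to receive them.
 */
static unsigned long
msm_gem_shrinker_count_sketch(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned long count = priv->shrinkable_count;

	/* Evicting anonymous GEM pages only helps if they can go to swap. */
	if (get_nr_swap_pages() > 0)
		count += priv->evictable_count;

	return count;
}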