author	Chris Wilson <chris@chris-wilson.co.uk>	2019-08-02 22:21:37 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-08-02 23:39:48 +0100
commit	515b8b7e935ef3f59c4efda04a3b05353ed6fbb7 (patch)
tree	5e857d58e9b11b258f7f7079ef7f4273468be7eb /drivers
parent	1aff1903d0ff53f055088a77948ac8d8224d42db (diff)
drm/i915: Flush the freed object list on file close
As we increase the number of RCU objects, it becomes easier for us to
have several hundred thousand objects in the deferred RCU free queues.
An example is gem_ctx_create/files which continually creates active
contexts, which are not immediately freed upon close as they are kept
alive by outstanding requests. This lack of backpressure allows the
context objects to persist until they overwhelm and starve the system.
We can increase our backpressure by flushing the freed object queue
upon closing the device fd which should then not impact other clients.

Testcase: igt/gem_ctx_create/*files
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190802212137.22207-2-chris@chris-wilson.co.uk
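The change below leans on the concurrency rules of the kernel's lock-less
llist API: llist_del_first() permits only a single consumer at a time (which
is why mm.free_lock existed), whereas llist_del_all() atomically detaches the
entire list and is safe with any number of concurrent consumers, so the
spinlock can be dropped. A minimal sketch of that drain pattern, outside the
i915 code and with hypothetical names (deferred_item, queue_deferred,
drain_deferred), purely for illustration:

#include <linux/llist.h>
#include <linux/slab.h>

struct deferred_item {
	struct llist_node node;
	/* payload ... */
};

/* Producer: queue an item for deferred freeing (lock-free). */
static void queue_deferred(struct llist_head *list, struct deferred_item *item)
{
	llist_add(&item->node, list);
}

/*
 * Consumer: atomically detach the whole list and free every entry.
 * llist_del_all() is safe with multiple concurrent consumers, so no
 * spinlock is required around the detach.
 */
static void drain_deferred(struct llist_head *list)
{
	struct llist_node *first = llist_del_all(list);
	struct deferred_item *item, *next;

	llist_for_each_entry_safe(item, next, first, node)
		kfree(item);
}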
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_object.c	38
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	3
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	1
4 files changed, 7 insertions, 36 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 4ea97fca9c35..19d55115747c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -211,48 +211,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
-	struct llist_node *freed;
-
-	/* Free the oldest, most stale object to keep the free_list short */
-	freed = NULL;
-	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
-		/* Only one consumer of llist_del_first() allowed */
-		spin_lock(&i915->mm.free_lock);
-		freed = llist_del_first(&i915->mm.free_list);
-		spin_unlock(&i915->mm.free_lock);
-	}
-	if (unlikely(freed)) {
-		freed->next = NULL;
+	struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+	if (unlikely(freed))
 		__i915_gem_free_objects(i915, freed);
-	}
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
 		container_of(work, struct drm_i915_private, mm.free_work);
-	struct llist_node *freed;
-
-	/*
-	 * All file-owned VMA should have been released by this point through
-	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
-	 * However, the object may also be bound into the global GTT (e.g.
-	 * older GPUs without per-process support, or for direct access through
-	 * the GTT either for the user or for scanout). Those VMA still need to
-	 * unbound now.
-	 */
-
-	spin_lock(&i915->mm.free_lock);
-	while ((freed = llist_del_all(&i915->mm.free_list))) {
-		spin_unlock(&i915->mm.free_lock);
-		__i915_gem_free_objects(i915, freed);
-		if (need_resched())
-			return;
-
-		spin_lock(&i915->mm.free_lock);
-	}
-	spin_unlock(&i915->mm.free_lock);
+	i915_gem_flush_free_objects(i915);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 21f1b29d06a2..b9c6ae09d61f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2052,6 +2052,9 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 	mutex_unlock(&dev->struct_mutex);
 
 	kfree(file_priv);
+
+	/* Catch up with all the deferred frees from "this" client */
+	i915_gem_flush_free_objects(to_i915(dev));
 }
 
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 98045875ebba..5f3e5c13fbaa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -773,7 +773,6 @@ struct i915_gem_mm {
 	 */
 	struct llist_head free_list;
 	struct work_struct free_work;
-	spinlock_t free_lock;
 	/**
 	 * Count of objects pending destructions. Used to skip needlessly
 	 * waiting on an RCU barrier if no objects are waiting to be freed.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index deaca3c2416d..eb34f3e5a74d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1648,7 +1648,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
 static void i915_gem_init__mm(struct drm_i915_private *i915)
 {
 	spin_lock_init(&i915->mm.obj_lock);
-	spin_lock_init(&i915->mm.free_lock);
 	init_llist_head(&i915->mm.free_list);