From 6efa4b465cfdaa9be251ba043253e302ddb9976f Mon Sep 17 00:00:00 2001
From: Luben Tuikov
Date: Thu, 3 Dec 2020 22:17:19 -0500
Subject: gpu/drm: ring_mirror_list --> pending_list
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rename "ring_mirror_list" to "pending_list", to describe what something
is, not what it does, how it's used, or how the hardware implements it.

This also abstracts the actual hardware implementation, i.e. how the
low-level driver communicates with the device it drives, ring, CAM,
etc., shouldn't be exposed to DRM.

The pending_list keeps jobs submitted, which are out of our control.
Usually this means they are pending execution status in hardware, but
the latter definition is a more general (inclusive) definition.

Signed-off-by: Luben Tuikov
Acked-by: Christian König
Link: https://patchwork.freedesktop.org/patch/405573/
Cc: Alexander Deucher
Cc: Andrey Grodzovsky
Cc: Christian König
Cc: Daniel Vetter
Signed-off-by: Christian König
---
 include/drm/gpu_scheduler.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'include/drm')

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 3add0072bd37..2e0c368e19f6 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -174,7 +174,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  * @sched: the scheduler instance on which this job is scheduled.
  * @s_fence: contains the fences for the scheduling of job.
  * @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the @drm_gpu_scheduler.pending_list.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
  *         limit of the scheduler then the job is marked guilty and will not
@@ -203,7 +203,7 @@ struct drm_sched_job {
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
 					    int threshold)
 {
-	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+	return s_job && atomic_inc_return(&s_job->karma) > threshold;
 }
 
 /**
@@ -260,8 +260,8 @@ struct drm_sched_backend_ops {
  * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
  *            timeout interval is over.
  * @thread: the kthread on which the scheduler which run.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
  * @score: score to help loadbalancer pick a idle sched
@@ -282,7 +282,7 @@ struct drm_gpu_scheduler {
 	atomic64_t job_id_count;
 	struct delayed_work work_tdr;
 	struct task_struct *thread;
-	struct list_head ring_mirror_list;
+	struct list_head pending_list;
 	spinlock_t job_list_lock;
 	int hang_limit;
 	atomic_t score;
--
cgit v1.2.3
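
For context on how the two renamed members are used together: pending_list
is only ever walked while holding job_list_lock, with jobs linked in via
their @node member. Below is a minimal sketch of that access pattern; the
walk_pending() and handle_job() helpers are illustrative only and are not
part of the DRM scheduler API:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <drm/gpu_scheduler.h>

	/* Hypothetical per-job hook; stands in for whatever per-job work
	 * a real caller would do (e.g. inspecting job->id). */
	static void handle_job(struct drm_sched_job *job)
	{
		(void)job;
	}

	/* Walk the pending_list under job_list_lock. Each drm_sched_job
	 * is linked into sched->pending_list through its @node member. */
	static void walk_pending(struct drm_gpu_scheduler *sched)
	{
		struct drm_sched_job *job, *tmp;

		spin_lock(&sched->job_list_lock);
		list_for_each_entry_safe(job, tmp, &sched->pending_list, node) {
			handle_job(job);
		}
		spin_unlock(&sched->job_list_lock);
	}

The _safe iterator variant is used here because entries may be unlinked
mid-walk, mirroring how the scheduler core itself traverses this list; a
plain list_for_each_entry() would suffice for a purely read-only pass.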