path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
author    Dave Airlie <airlied@redhat.com>    2020-03-13 09:09:11 +1000
committer Dave Airlie <airlied@redhat.com>    2020-03-13 09:09:11 +1000
commit    69ddce0970d9d1de63bed9c24eefa0814db29a5a
tree      2e64e14ab5ad2448cb60dcc77a34966dfaa157ee /drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
parent    9e12da086e5e38f7b4f8d6e02a82447f4165fbee
parent    5d11e37c021f925496a3a3c019cadf69435f65ed
Merge tag 'amd-drm-next-5.7-2020-03-10' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.7-2020-03-10:

amdgpu:
- SR-IOV fixes
- Fix up fallout from drm load/unload callback removal
- Navi, renoir power management watermark fixes
- Refactor smu parameter handling
- Display FEC fixes
- Display DCC fixes
- HDCP fixes
- Add support for USB-C PD firmware updates
- Pollock detection fix
- Rework compute ring priority handling
- RAS fixes
- Misc cleanups

amdkfd:
- Consolidate more gfx config details in amdgpu
- Consolidate bo alloc flags
- Improve code comments
- SDMA MQD fixes
- Misc cleanups

gpu scheduler:
- Add support for modifying the sched list

uapi:
- Clarify comments about GEM_CREATE flags that are not used by userspace.
  The kernel driver has always prevented userspace from using these.
  They are only used internally in the kernel driver.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200310212748.4519-1-alexander.deucher@amd.com
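The headline change for this file is the compute ring priority rework: a context's DRM scheduler priority is now mapped to a hardware compute pipe priority, which selects the scheduler list the entity runs on. As a rough stand-alone illustration of that mapping (not kernel code; the enum names and values below are simplified stand-ins for the kernel's drm_sched_priority and gfx_pipe_priority):

#include <stdio.h>

/* Simplified stand-ins for drm_sched_priority and gfx_pipe_priority. */
enum sched_prio { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH_SW, PRIO_HIGH_HW, PRIO_KERNEL };
enum pipe_prio  { PIPE_PRIO_NORMAL = 1, PIPE_PRIO_HIGH = 2 };

/* Mirrors the decision made by the new mapping helper in the diff below:
 * only hardware-high and kernel priorities land on the high-priority
 * compute pipe; everything else stays on the normal schedulers. */
static enum pipe_prio to_pipe_prio(enum sched_prio prio)
{
	switch (prio) {
	case PRIO_HIGH_HW:
	case PRIO_KERNEL:
		return PIPE_PRIO_HIGH;
	default:
		return PIPE_PRIO_NORMAL;
	}
}

int main(void)
{
	printf("kernel -> %d, normal -> %d\n",
	       to_pipe_prio(PRIO_KERNEL), to_pipe_prio(PRIO_NORMAL));
	return 0;
}

In the diff itself this decision is made by amdgpu_ctx_sched_prio_to_compute_prio(), whose result indexes adev->gfx.compute_prio_sched[] both when a context entity is created and when a priority override is applied.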
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	89
1 file changed, 78 insertions(+), 11 deletions(-)
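One implementation detail worth noting before the diff: the new amdgpu_ctx_init_compute_sched() packs normal- and high-priority compute schedulers into the two ends of a single flat array, normal rings filling from index 0 upward and high-priority rings from the last slot downward, so each priority level can later be published as a contiguous slice. A small stand-alone sketch of that fill pattern, using a hypothetical ring set in place of adev->gfx.compute_ring[]:

#include <stdio.h>
#include <stdbool.h>

#define MAX_RINGS 8	/* plays the role of AMDGPU_MAX_COMPUTE_RINGS */

int main(void)
{
	/* Hypothetical ring set: true marks a high-priority compute ring. */
	bool has_high_prio[4] = { false, true, false, true };
	int num_rings = 4;
	int ring_id[MAX_RINGS];		/* stands in for gfx.compute_sched[] */
	int lo = 0, hi = MAX_RINGS - 1;	/* fill cursors for the two ends */
	int i;

	for (i = 0; i < num_rings; i++) {
		if (!has_high_prio[i])
			ring_id[lo++] = i;	/* normal priority: fill from the front */
		else
			ring_id[hi--] = i;	/* high priority: fill from the back */
	}

	/* Normal slice is ring_id[0 .. lo-1]; high slice is ring_id[hi+1 .. MAX_RINGS-1]. */
	printf("normal slice:");
	for (i = 0; i < lo; i++)
		printf(" ring %d", ring_id[i]);
	printf("\nhigh slice:");
	for (i = hi + 1; i < MAX_RINGS; i++)
		printf(" ring %d", ring_id[i]);
	printf("\n");
	return 0;
}

In the kernel code the two slices are then exposed through adev->gfx.compute_prio_sched[] and adev->gfx.num_compute_sched[], indexed by gfx_pipe_priority.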
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42f29ea..fa575bdc03c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
+static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+{
+	switch (prio) {
+	case DRM_SCHED_PRIORITY_HIGH_HW:
+	case DRM_SCHED_PRIORITY_KERNEL:
+		return AMDGPU_GFX_PIPE_PRIO_HIGH;
+	default:
+		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+	}
+}
+
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
 	unsigned num_scheds = 0;
+	enum gfx_pipe_priority hw_prio;
 	enum drm_sched_priority priority;
 	int r;
 
@@ -85,8 +97,9 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
 		num_scheds = 1;
 		break;
 	case AMDGPU_HW_IP_COMPUTE:
-		scheds = adev->gfx.compute_sched;
-		num_scheds = adev->gfx.num_compute_sched;
+		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+		scheds = adev->gfx.compute_prio_sched[hw_prio];
+		num_scheds = adev->gfx.num_compute_sched[hw_prio];
 		break;
 	case AMDGPU_HW_IP_DMA:
 		scheds = adev->sdma.sdma_sched;
@@ -502,6 +515,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	return fence;
 }
 
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+					    struct amdgpu_ctx_entity *aentity,
+					    int hw_ip,
+					    enum drm_sched_priority priority)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	enum gfx_pipe_priority hw_prio;
+	struct drm_gpu_scheduler **scheds = NULL;
+	unsigned num_scheds;
+
+	/* set sw priority */
+	drm_sched_entity_set_priority(&aentity->entity, priority);
+
+	/* set hw priority */
+	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+		scheds = adev->gfx.compute_prio_sched[hw_prio];
+		num_scheds = adev->gfx.num_compute_sched[hw_prio];
+		drm_sched_entity_modify_sched(&aentity->entity, scheds,
+					      num_scheds);
+	}
+}
+
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 				  enum drm_sched_priority priority)
 {
@@ -514,13 +550,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 			ctx->init_priority : ctx->override_priority;
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
-			struct drm_sched_entity *entity;
-
 			if (!ctx->entities[i][j])
 				continue;
 
-			entity = &ctx->entities[i][j]->entity;
-			drm_sched_entity_set_priority(entity, ctx_prio);
+			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+						       i, ctx_prio);
 		}
 	}
 }
@@ -628,20 +662,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	mutex_destroy(&mgr->lock);
 }
 
+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+	int num_compute_sched_normal = 0;
+	int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+	int i;
+
+	/* use one drm sched array, gfx.compute_sched to store both high and
+	 * normal priority drm compute schedulers */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		if (!adev->gfx.compute_ring[i].has_high_prio)
+			adev->gfx.compute_sched[num_compute_sched_normal++] =
+				&adev->gfx.compute_ring[i].sched;
+		else
+			adev->gfx.compute_sched[num_compute_sched_high--] =
+				&adev->gfx.compute_ring[i].sched;
+	}
+
+	/* compute ring only has two priority for now */
+	i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+	adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+	if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+		/* When compute has no high priority rings then use */
+		/* normal priority sched array */
+		adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+		adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+	} else {
+		adev->gfx.compute_prio_sched[i] =
+			&adev->gfx.compute_sched[num_compute_sched_high - 1];
+		adev->gfx.num_compute_sched[i] =
+			adev->gfx.num_compute_rings - num_compute_sched_normal;
+	}
+}
+
 void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
 {
 	int i, j;
 
+	amdgpu_ctx_init_compute_sched(adev);
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
 		adev->gfx.num_gfx_sched++;
 	}
 
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
-		adev->gfx.num_compute_sched++;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
 		adev->sdma.num_sdma_sched++;