author		Linus Torvalds <torvalds@linux-foundation.org>	2020-04-10 12:38:28 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-04-10 12:38:28 -0700
commit		21c5b3c6d7579944d21ff268f241d6bec425a9b4 (patch)
tree		62538b9dd6d29ff6681fdefb3b5a41157e623ff5 /drivers/gpu/drm
parent		4aafdf688360bacd4b48c87e9a3d0c208baf31c4 (diff)
parent		74bd4f0c921cc9cf5f99ba4129dafe35496de6f3 (diff)
Merge tag 'drm-next-2020-04-10' of git://anongit.freedesktop.org/drm/drm
Pull more drm fixes from Dave Airlie:
"As expected, more fixes did turn up in the latter part of the week.
The drm_local_map build regression fix is here, along with temporary
disabling of the hugepage work due to some amdgpu-related crashes.
Otherwise it's just a bunch of i915 and amdgpu fixes.
legacy:
- fix drm_local_map.offset type
ttm:
- temporarily disable hugepages to debug amdgpu problems.
prime:
- fix sg extraction
amdgpu:
- Various Renoir fixes
- Fix gfx clockgating sequence on gfx10
- RAS fixes
- Avoid MST property creation after registration
- Various cursor/viewport fixes
- Fix a confusing log message about optional firmwares
i915:
- Flush all the reloc_gpu batch (Chris)
- Ignore readonly failures when updating relocs (Chris)
- Fill all the unused space in the GGTT (Chris)
- Return the right vswing table (Jose)
- Don't enable DDI IO power on a TypeC port in TBT mode for ICL+ (Imre)
analogix_dp:
- probe fix
virtio:
- oob fix in object create"
* tag 'drm-next-2020-04-10' of git://anongit.freedesktop.org/drm/drm: (34 commits)
drm/ttm: Temporarily disable the huge_fault() callback
drm/bridge: analogix_dp: Split bind() into probe() and real bind()
drm/legacy: Fix type for drm_local_map.offset
drm/amdgpu/display: fix warning when compiling without debugfs
drm/amdgpu: unify fw_write_wait for new gfx9 asics
drm/amd/powerplay: error out on forcing clock setting not supported
drm/amdgpu: fix gfx hang during suspend with video playback (v2)
drm/amd/display: Check for null fclk voltage when parsing clock table
drm/amd/display: Acknowledge wm_optimized_required
drm/amd/display: Make cursor source translation adjustment optional
drm/amd/display: Calculate scaling ratios on every medium/full update
drm/amd/display: Program viewport when source pos changes for DCN20 hw seq
drm/amd/display: Fix incorrect cursor pos on scaled primary plane
drm/amd/display: change default pipe_split policy for DCN1
drm/amd/display: Translate cursor position by source rect
drm/amd/display: Update stream adjust in dc_stream_adjust_vmin_vmax
drm/amd/display: Avoid create MST prop after registration
drm/amdgpu/psp: dont warn on missing optional TA's
drm/amdgpu: update RAS related dmesg print
drm/amdgpu: resolve mGPU RAS query instability
...
Diffstat (limited to 'drivers/gpu/drm')
31 files changed, 350 insertions, 189 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index faa3e7102156..559dc24ef436 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2340,8 +2340,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)
@@ -3356,6 +3354,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 		}
 	}
 
+	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
 	amdgpu_amdkfd_suspend(adev, !fbcon);
 
 	amdgpu_ras_suspend(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f197f1be0969..abe94a55ecad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -89,7 +89,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 			adev->pm.ac_power = true;
 		else
 			adev->pm.ac_power = false;
-		if (adev->powerplay.pp_funcs->enable_bapm)
+		if (adev->powerplay.pp_funcs &&
+		    adev->powerplay.pp_funcs->enable_bapm)
 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
 		mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index be50867ea644..deaa26808841 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -818,7 +818,7 @@ static int psp_ras_initialize(struct psp_context *psp)
 	if (!psp->adev->psp.ta_ras_ucode_size ||
 	    !psp->adev->psp.ta_ras_start_addr) {
-		dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
 		return 0;
 	}
@@ -902,7 +902,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 	if (!psp->adev->psp.ta_hdcp_ucode_size ||
 	    !psp->adev->psp.ta_hdcp_start_addr) {
-		dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
 		return 0;
 	}
@@ -1048,7 +1048,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
 	if (!psp->adev->psp.ta_dtm_ucode_size ||
 	    !psp->adev->psp.ta_dtm_start_addr) {
-		dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
 		return 0;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 3c32a94d2424..ab379b44679c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1424,12 +1424,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 {
 	struct amdgpu_ras *ras =
 		container_of(work, struct amdgpu_ras, recovery_work);
+	struct amdgpu_device *remote_adev = NULL;
+	struct amdgpu_device *adev = ras->adev;
+	struct list_head device_list, *device_list_handle = NULL;
+	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+
+	/* Build list of devices to query RAS related errors */
+	if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+		device_list_handle = &hive->device_list;
+	} else {
+		list_add_tail(&adev->gmc.xgmi.head, &device_list);
+		device_list_handle = &device_list;
+	}
 
-	/*
-	 * Query and print non zero error counter per IP block for
-	 * awareness before recovering GPU.
-	 */
-	amdgpu_ras_log_on_err_counter(ras->adev);
+	list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
+		amdgpu_ras_log_on_err_counter(remote_adev);
+	}
 
 	if (amdgpu_device_should_recover_gpu(ras->adev))
 		amdgpu_device_gpu_recover(ras->adev, 0);
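The recovery hunk above stops logging error counters on just the local GPU and instead walks every node of an XGMI hive, falling back to a one-entry local list when there is no hive. A minimal standalone sketch of that list-selection pattern follows; the node struct and query_errors() helper are hypothetical stand-ins for amdgpu's hive and device structures, not the driver's API.

#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

/* Walk whichever list was selected and "query" each node, mirroring
 * the list_for_each_entry() loop in the hunk above. */
static void query_errors(const struct node *head)
{
	const struct node *n;

	for (n = head; n; n = n->next)
		printf("querying RAS counters on node %d\n", n->id);
}

int main(void)
{
	struct node n2 = { NULL, 2 };
	struct node n1 = { &n2, 1 };	/* two-node "hive" */
	struct node solo = { NULL, 7 };	/* a GPU with no hive */
	int in_hive = 1;		/* pretend topology lookup result */
	const struct node *list = in_hive ? &n1 : &solo;

	query_errors(list);
	return 0;
}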
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f6e3f59efa2f..d78059fd2c72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -279,7 +279,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
 
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
-	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+	 (SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
 	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
@@ -4104,6 +4104,12 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+		/* 0 - Disable some blocks' MGCG */
+		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+		WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000);
+		WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000);
+		WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000);
+
 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4143,19 +4149,20 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
 		if (def != data)
 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
 
-		/* 2 - disable MGLS in RLC */
+		/* 2 - disable MGLS in CP */
+		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+		}
+
+		/* 3 - disable MGLS in RLC */
 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
 		}
-
-		/* 3 - disable MGLS in CP */
-		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
-		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
-			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
-			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
-		}
 	}
 }
@@ -4266,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 		/* ===  CGCG /CGLS for GFX 3D Only === */
 		gfx_v10_0_update_3d_clock_gating(adev, enable);
 		/* ===  MGCG + MGLS === */
-		gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
+		/* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
 	}
 
 	if (adev->cg_flags &
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 608ffe3b684e..e6b113ed2f40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1217,6 +1217,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 		adev->gfx.mec_fw_write_wait = true;
 		break;
 	default:
+		adev->gfx.me_fw_write_wait = true;
+		adev->gfx.mec_fw_write_wait = true;
 		break;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index cceb46faf212..dce945ef21a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -710,14 +710,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
 		sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
 		if (sec_count) {
-			DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+			dev_info(adev->dev,
+				 "Instance[%d]: SubBlock %s, SEC %d\n", i,
 				 vml2_mems[i], sec_count);
 			err_data->ce_count += sec_count;
 		}
 
 		ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
 		if (ded_count) {
-			DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+			dev_info(adev->dev,
+				 "Instance[%d]: SubBlock %s, DED %d\n", i,
 				 vml2_mems[i], ded_count);
 			err_data->ue_count += ded_count;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0d413fabd015..c0e3efcb09bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1539,8 +1539,11 @@ static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
    { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
 };
 
-static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
-	uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
+					  const struct soc15_reg_entry *reg,
+					  uint32_t value,
+					  uint32_t *sec_count,
+					  uint32_t *ded_count)
 {
 	uint32_t i;
 	uint32_t sec_cnt, ded_cnt;
@@ -1553,7 +1556,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
 				mmhub_v9_4_ras_fields[i].sec_count_mask) >>
 				mmhub_v9_4_ras_fields[i].sec_count_shift;
 		if (sec_cnt) {
-			DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+			dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
 				mmhub_v9_4_ras_fields[i].name,
 				sec_cnt);
 			*sec_count += sec_cnt;
@@ -1563,7 +1566,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
 				mmhub_v9_4_ras_fields[i].ded_count_mask) >>
 				mmhub_v9_4_ras_fields[i].ded_count_shift;
 		if (ded_cnt) {
-			DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+			dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
 				mmhub_v9_4_ras_fields[i].name,
 				ded_cnt);
 			*ded_count += ded_cnt;
@@ -1588,7 +1591,7 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
 		reg_value =
 			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
 		if (reg_value)
-			mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+			mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i],
 				reg_value, &sec_count, &ded_count);
 	}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bab587ab6e8d..f7c5cdc10a70 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4723,10 +4723,10 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 static int
 amdgpu_dm_connector_late_register(struct drm_connector *connector)
 {
+#if defined(CONFIG_DEBUG_FS)
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 		to_amdgpu_dm_connector(connector);
 
-#if defined(CONFIG_DEBUG_FS)
 	connector_debugfs_init(amdgpu_dm_connector);
 #endif
@@ -5929,7 +5929,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 				adev->mode_info.underscan_vborder_property,
 				0);
 
-	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+	if (!aconnector->mst_port)
+		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 
 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
@@ -5948,8 +5949,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 			&aconnector->base.base,
 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
 
-	drm_connector_attach_vrr_capable_property(
-		&aconnector->base);
+	if (!aconnector->mst_port)
+		drm_connector_attach_vrr_capable_property(&aconnector->base);
+
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	if (adev->dm.hdcp_workqueue)
 		drm_connector_attach_content_protection_property(&aconnector->base, true);
@@ -6272,12 +6274,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
 	    y <= -amdgpu_crtc->max_cursor_height)
 		return 0;
 
-	if (crtc->primary->state) {
-		/* avivo cursor are offset into the total surface */
-		x += crtc->primary->state->src_x >> 16;
-		y += crtc->primary->state->src_y >> 16;
-	}
-
 	if (x < 0) {
 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
 		x = 0;
@@ -6287,6 +6283,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
 		y = 0;
 	}
 	position->enable = true;
+	position->translate_by_source = true;
 	position->x = x;
 	position->y = y;
 	position->x_hotspot = xorigin;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e8208df420d9..fabbe78d5aef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -410,6 +410,14 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	drm_connector_attach_encoder(&aconnector->base,
 				     &aconnector->mst_encoder->base);
 
+	connector->max_bpc_property = master->base.max_bpc_property;
+	if (connector->max_bpc_property)
+		drm_connector_attach_max_bpc_property(connector, 8, 16);
+
+	connector->vrr_capable_property = master->base.vrr_capable_property;
+	if (connector->vrr_capable_property)
+		drm_connector_attach_vrr_capable_property(connector);
+
 	drm_object_attach_property(
 		&connector->base,
 		dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ab267ddd4abe..24c5765890fa 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
 
 	/* Find lowest DPM, FCLK is filled in reverse order*/
 	for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
-		if (clock_table->FClocks[i].Freq != 0) {
+		if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
 			j = i;
 			break;
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f21bbb295ad3..8489f1e56892 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
 	int i = 0;
 	bool ret = false;
 
+	stream->adjust = *adjust;
+
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
@@ -1859,8 +1861,9 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
 		// Else we fallback to mem compare.
 		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
 			dc->optimized_required = true;
-		} else if (dc->wm_optimized_required)
-			dc->optimized_required = true;
+		}
+
+		dc->optimized_required |= dc->wm_optimized_required;
 	}
 
 	return type;
@@ -2462,7 +2465,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
 	enum surface_update_type update_type;
 	struct dc_state *context;
 	struct dc_context *dc_ctx = dc->ctx;
-	int i;
+	int i, j;
 
 	stream_status = dc_stream_get_status(stream);
 	context = dc->current_state;
@@ -2500,6 +2503,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
 		copy_surface_update_to_plane(surface, &srf_updates[i]);
 
+		if (update_type >= UPDATE_TYPE_MED) {
+			for (j = 0; j < dc->res_pool->pipe_count; j++) {
+				struct pipe_ctx *pipe_ctx =
+					&context->res_ctx.pipe_ctx[j];
+
+				if (pipe_ctx->plane_state != surface)
+					continue;
+
+				resource_build_scaling_params(pipe_ctx);
+			}
+		}
 	}
 
 	copy_stream_update_to_stream(dc, context, stream, stream_update);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 25c50bcab9e9..a8dc3082e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -385,6 +385,8 @@ struct dc_cursor_position {
 	 */
 	bool enable;
 
+	/* Translate cursor x/y by the source rectangle for each plane. */
+	bool translate_by_source;
 };
 
 struct dc_cursor_mi_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0976e378659f..c279982947e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2685,6 +2685,23 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
 		.mirror = pipe_ctx->plane_state->horizontal_mirror
 	};
 
+	/**
+	 * If the cursor's source viewport is clipped then we need to
+	 * translate the cursor to appear in the correct position on
+	 * the screen.
+	 *
+	 * This translation isn't affected by scaling so it needs to be
+	 * done *after* we adjust the position for the scale factor.
+	 *
+	 * This is only done by opt-in for now since there are still
+	 * some usecases like tiled display that might enable the
+	 * cursor on both streams while expecting dc to clip it.
+	 */
+	if (pos_cpy.translate_by_source) {
+		pos_cpy.x += pipe_ctx->plane_state->src_rect.x;
+		pos_cpy.y += pipe_ctx->plane_state->src_rect.y;
+	}
+
 	if (pipe_ctx->plane_state->address.type
 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
 		pos_cpy.enable = false;
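The new translate_by_source flag lands in two hardware sequencers: the dce110 hunk above and the dcn10 hunk that follows. As both comment blocks explain, the cursor is first scaled from stream space into plane space and only then shifted by the source rectangle's origin, because the viewport clip offset is not subject to scaling. A freestanding sketch of that two-step transform, with made-up plane geometry for illustration:

#include <stdio.h>

struct rect {
	int x, y, width, height;
};

/* Stream-space cursor -> plane-space cursor, following the hunks above:
 * scale by src/dst first, then (opt-in) translate by the source origin,
 * since the clip offset is not affected by the scale factor. */
static void cursor_to_plane_space(int *x, int *y, int x_plane, int y_plane,
				  struct rect src, struct rect dst,
				  int translate_by_source)
{
	*x = (*x - x_plane) * src.width / dst.width;
	*y = (*y - y_plane) * src.height / dst.height;

	if (translate_by_source) {
		*x += src.x;
		*y += src.y;
	}
}

int main(void)
{
	struct rect src = { 960, 0, 960, 1080 };  /* right half of a 1920x1080 buffer */
	struct rect dst = { 0, 0, 1920, 2160 };   /* scaled 2x onto the screen */
	int x = 1000, y = 500;

	cursor_to_plane_space(&x, &y, 0, 0, src, dst, 1);
	printf("plane-space cursor: %d,%d\n", x, y);
	return 0;
}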
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0be010085575..b0357546471b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3021,12 +3021,50 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 	int x_pos = pos_cpy.x;
 	int y_pos = pos_cpy.y;
 
-	// translate cursor from stream space to plane space
+	/**
+	 * DC cursor is stream space, HW cursor is plane space and drawn
+	 * as part of the framebuffer.
+	 *
+	 * Cursor position can't be negative, but hotspot can be used to
+	 * shift cursor out of the plane bounds. Hotspot must be smaller
+	 * than the cursor size.
+	 */
+
+	/**
+	 * Translate cursor from stream space to plane space.
+	 *
+	 * If the cursor is scaled then we need to scale the position
+	 * to be in the approximately correct place. We can't do anything
+	 * about the actual size being incorrect, that's a limitation of
+	 * the hardware.
+	 */
 	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
 			pipe_ctx->plane_state->dst_rect.width;
 	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
 			pipe_ctx->plane_state->dst_rect.height;
 
+	/**
+	 * If the cursor's source viewport is clipped then we need to
+	 * translate the cursor to appear in the correct position on
+	 * the screen.
+	 *
+	 * This translation isn't affected by scaling so it needs to be
+	 * done *after* we adjust the position for the scale factor.
+	 *
+	 * This is only done by opt-in for now since there are still
+	 * some usecases like tiled display that might enable the
+	 * cursor on both streams while expecting dc to clip it.
+	 */
+	if (pos_cpy.translate_by_source) {
+		x_pos += pipe_ctx->plane_state->src_rect.x;
+		y_pos += pipe_ctx->plane_state->src_rect.y;
+	}
+
+	/**
+	 * If the position is negative then we need to add to the hotspot
+	 * to shift the cursor outside the plane.
+	 */
+
 	if (x_pos < 0) {
 		pos_cpy.x_hotspot -= x_pos;
 		x_pos = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 8b7122249ddc..07265ca7d28c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -585,7 +585,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.disable_pplib_clock_request = false,
 		.disable_pplib_wm_range = false,
 		.pplib_wm_report_mode = WM_REPORT_DEFAULT,
-		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 		.force_single_disp_pipe_split = true,
 		.disable_dcc = DCC_ENABLE,
 		.voltage_align_fclk = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 233318260da4..22f421e82733 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1373,6 +1373,7 @@ static void dcn20_update_dchubp_dpp(
 	}
 
 	if (pipe_ctx->update_flags.bits.viewport ||
+	    (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
 	    (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
 	    (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 8a87d0ed90ae..2359e88d6029 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -136,6 +136,7 @@
 #define RAVEN2_A0 0x81
 #define RAVEN1_F0 0xF0
 #define RAVEN_UNKNOWN 0xFF
+#define RENOIR_A0 0x91
 #ifndef ASICREV_IS_RAVEN
 #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
 #endif
@@ -171,8 +172,6 @@ enum {
 #define ASICREV_IS_NAVI10_P(eChipRev)        (eChipRev < NV_NAVI12_P_A0)
 #define ASICREV_IS_NAVI12_P(eChipRev)        ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
 #define ASICREV_IS_NAVI14_M(eChipRev)        ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#define RENOIR_A0 0x91
-#define DEVICE_ID_RENOIR_1636 0x1636   // Renoir
 #define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
 
 /*
@@ -183,6 +182,9 @@ enum {
 #define DEVICE_ID_TEMASH_9839 0x9839
 #define DEVICE_ID_TEMASH_983D 0x983D
 
+/* RENOIR */
+#define DEVICE_ID_RENOIR_1636 0x1636
+
 /* Asic Family IDs for different asic family. */
 #define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
 #define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index c195575366a3..2a12614a12c2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1452,7 +1452,8 @@ static int pp_get_asic_baco_state(void *handle, int *state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
+	if (!(hwmgr->not_vf && amdgpu_dpm) ||
+	    !hwmgr->hwmgr_func->get_asic_baco_state)
 		return 0;
 
 	mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 5db8c56066ee..1ef0923f7190 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -794,8 +794,21 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
 	struct arcturus_dpm_table *dpm_table;
 	struct arcturus_single_dpm_table *single_dpm_table;
 	uint32_t soft_min_level, soft_max_level;
+	uint32_t smu_version;
 	int ret = 0;
 
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret) {
+		pr_err("Failed to get smu version!\n");
+		return ret;
+	}
+
+	if (smu_version >= 0x361200) {
+		pr_err("Forcing clock level is not supported with "
+		       "54.18 and onwards SMU firmwares\n");
+		return -EOPNOTSUPP;
+	}
+
 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
 	soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1512,6 +1525,38 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 	return 0;
 }
 
+static int arcturus_set_performance_level(struct smu_context *smu,
+					  enum amd_dpm_forced_level level)
+{
+	uint32_t smu_version;
+	int ret;
+
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret) {
+		pr_err("Failed to get smu version!\n");
+		return ret;
+	}
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+	case AMD_DPM_FORCED_LEVEL_LOW:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		if (smu_version >= 0x361200) {
+			pr_err("Forcing clock level is not supported with "
+			       "54.18 and onwards SMU firmwares\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return smu_v11_0_set_performance_level(smu, level);
+}
+
 static void arcturus_dump_pptable(struct smu_context *smu)
 {
 	struct smu_table_context *table_context = &smu->smu_table;
@@ -2285,7 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
 	.get_power_profile_mode = arcturus_get_power_profile_mode,
 	.set_power_profile_mode = arcturus_set_power_profile_mode,
-	.set_performance_level = smu_v11_0_set_performance_level,
+	.set_performance_level = arcturus_set_performance_level,
 	/* debug (internal used) */
 	.dump_pptable = arcturus_dump_pptable,
 	.get_power_limit = arcturus_get_power_limit,
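Both arcturus hunks above refuse clock forcing once smu_version reaches 0x361200, which the error string identifies as firmware 54.18. The value appears to pack one version field per byte (0x36 = 54, 0x12 = 18); the decode below rests on that inference from the message text rather than on a documented format:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t smu_version = 0x361200;	/* the cutoff used in the hunks above */

	/* Assumed packing: one version field per byte, so
	 * 0x36 -> 54, 0x12 -> 18, 0x00 -> 0, i.e. firmware 54.18.0. */
	printf("SMU firmware %u.%u.%u\n",
	       (smu_version >> 16) & 0xff,
	       (smu_version >> 8) & 0xff,
	       smu_version & 0xff);
	return 0;
}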
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..ff73a735b888 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
 	SmuMetrics_t metrics;
+	bool cur_value_match_level = false;
 
 	if (!clk_table || clk_type >= SMU_CLK_COUNT)
 		return -EINVAL;
@@ -297,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 			GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
 		size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
 				cur_value == value ? "*" : "");
+		if (cur_value == value)
+			cur_value_match_level = true;
 	}
 
+	if (!cur_value_match_level)
+		size += sprintf(buf + size, "   %uMhz *\n", cur_value);
+
 	return size;
 }
 
@@ -887,6 +893,17 @@ static int renoir_read_sensor(struct smu_context *smu,
 	return ret;
 }
 
+static bool renoir_is_dpm_running(struct smu_context *smu)
+{
+	/*
+	 * Util now, the pmfw hasn't exported the interface of SMU
+	 * feature mask to APU SKU so just force on all the feature
+	 * at early initial stage.
+	 */
+	return true;
+
+}
+
 static const struct pptable_funcs renoir_ppt_funcs = {
 	.get_smu_msg_index = renoir_get_smu_msg_index,
 	.get_smu_clk_index = renoir_get_smu_clk_index,
@@ -927,6 +944,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.mode2_reset = smu_v12_0_mode2_reset,
 	.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
 	.set_driver_table_location = smu_v12_0_set_driver_table_location,
+	.is_dpm_running = renoir_is_dpm_running,
 };
 
 void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
 		freq = table->SocClocks[dpm_level].Freq;	\
 		break;						\
 	case SMU_MCLK:						\
-		freq = table->MemClocks[dpm_level].Freq;	\
+		freq = table->FClocks[dpm_level].Freq;		\
 		break;						\
 	case SMU_DCEFCLK:					\
 		freq = table->DcfClocks[dpm_level].Freq;	\
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 9ded2cef57dd..76736fb8ed94 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1652,8 +1652,7 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
 }
 
 struct analogix_dp_device *
-analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
-		 struct analogix_dp_plat_data *plat_data)
+analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct analogix_dp_device *dp;
@@ -1756,22 +1755,30 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 					irq_flags, "analogix-dp", dp);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request irq\n");
-		goto err_disable_pm_runtime;
+		return ERR_PTR(ret);
 	}
 	disable_irq(dp->irq);
 
+	return dp;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_probe);
+
+int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
+{
+	int ret;
+
 	dp->drm_dev = drm_dev;
 	dp->encoder = dp->plat_data->encoder;
 
 	dp->aux.name = "DP-AUX";
 	dp->aux.transfer = analogix_dpaux_transfer;
-	dp->aux.dev = &pdev->dev;
+	dp->aux.dev = dp->dev;
 
 	ret = drm_dp_aux_register(&dp->aux);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
-	pm_runtime_enable(dev);
+	pm_runtime_enable(dp->dev);
 
 	ret = analogix_dp_create_bridge(drm_dev, dp);
 	if (ret) {
@@ -1779,13 +1786,12 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 		goto err_disable_pm_runtime;
 	}
 
-	return dp;
+	return 0;
 
 err_disable_pm_runtime:
+	pm_runtime_disable(dp->dev);
 
-	pm_runtime_disable(dev);
-
-	return ERR_PTR(ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(analogix_dp_bind);
@@ -1802,10 +1808,15 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
 
 	drm_dp_aux_unregister(&dp->aux);
 	pm_runtime_disable(dp->dev);
-	clk_disable_unprepare(dp->clock);
 }
 EXPORT_SYMBOL_GPL(analogix_dp_unbind);
 
+void analogix_dp_remove(struct analogix_dp_device *dp)
+{
+	clk_disable_unprepare(dp->clock);
+}
+EXPORT_SYMBOL_GPL(analogix_dp_remove);
+
 #ifdef CONFIG_PM
 int analogix_dp_suspend(struct analogix_dp_device *dp)
 {
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1de2cde2277c..282774e469ac 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -962,27 +962,40 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 	unsigned count;
 	struct scatterlist *sg;
 	struct page *page;
-	u32 len, index;
+	u32 page_len, page_index;
 	dma_addr_t addr;
+	u32 dma_len, dma_index;
 
-	index = 0;
+	/*
+	 * Scatterlist elements contains both pages and DMA addresses, but
+	 * one shoud not assume 1:1 relation between them. The sg->length is
+	 * the size of the physical memory chunk described by the sg->page,
+	 * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
+	 * described by the sg_dma_address(sg).
+	 */
+	page_index = 0;
+	dma_index = 0;
 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-		len = sg_dma_len(sg);
+		page_len = sg->length;
 		page = sg_page(sg);
+		dma_len = sg_dma_len(sg);
 		addr = sg_dma_address(sg);
 
-		while (len > 0) {
-			if (WARN_ON(index >= max_entries))
+		while (pages && page_len > 0) {
+			if (WARN_ON(page_index >= max_entries))
 				return -1;
-			if (pages)
-				pages[index] = page;
-			if (addrs)
-				addrs[index] = addr;
-
+			pages[page_index] = page;
 			page++;
+			page_len -= PAGE_SIZE;
+			page_index++;
+		}
+		while (addrs && dma_len > 0) {
+			if (WARN_ON(dma_index >= max_entries))
+				return -1;
+			addrs[dma_index] = addr;
 			addr += PAGE_SIZE;
-			len -= PAGE_SIZE;
-			index++;
+			dma_len -= PAGE_SIZE;
+			dma_index++;
 		}
 	}
 	return 0;
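The drm_prime change above splits one index into independent page and DMA cursors because, as the new comment notes, sg->length (CPU pages) and sg_dma_len() (IOVA extent) need not match once an IOMMU coalesces segments. A userspace-style sketch of the two-cursor walk over mock segments; struct seg is a stand-in for struct scatterlist:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Mock scatterlist segment: the CPU-side length and the DMA-side length
 * can differ once an IOMMU coalesces pages into one IOVA range. */
struct seg {
	unsigned int page_len;	/* like sg->length */
	unsigned int dma_len;	/* like sg_dma_len(sg) */
};

int main(void)
{
	/* All four CPU pages coalesced into the first segment's DMA
	 * mapping; the second segment carries a page but no DMA extent. */
	struct seg segs[2] = {
		{ 3 * PAGE_SIZE, 4 * PAGE_SIZE },
		{ 1 * PAGE_SIZE, 0 },
	};
	unsigned int page_index = 0, dma_index = 0, off, i;

	for (i = 0; i < 2; i++) {
		for (off = 0; off < segs[i].page_len; off += PAGE_SIZE)
			page_index++;	/* one entry per CPU page */
		for (off = 0; off < segs[i].dma_len; off += PAGE_SIZE)
			dma_index++;	/* one entry per IOVA page */
	}

	printf("page entries: %u, dma entries: %u\n", page_index, dma_index);
	return 0;
}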
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index d23d3502ca91..5ee090691390 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -159,15 +159,8 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 	struct drm_device *drm_dev = data;
 	int ret;
 
-	dp->dev = dev;
 	dp->drm_dev = drm_dev;
 
-	dp->plat_data.dev_type = EXYNOS_DP;
-	dp->plat_data.power_on_start = exynos_dp_poweron;
-	dp->plat_data.power_off = exynos_dp_poweroff;
-	dp->plat_data.attach = exynos_dp_bridge_attach;
-	dp->plat_data.get_modes = exynos_dp_get_modes;
-
 	if (!dp->plat_data.panel && !dp->ptn_bridge) {
 		ret = exynos_dp_dt_parse_panel(dp);
 		if (ret)
@@ -185,13 +178,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 
 	dp->plat_data.encoder = encoder;
 
-	dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
-	if (IS_ERR(dp->adp)) {
+	ret = analogix_dp_bind(dp->adp, dp->drm_dev);
+	if (ret)
 		dp->encoder.funcs->destroy(&dp->encoder);
-		return PTR_ERR(dp->adp);
-	}
 
-	return 0;
+	return ret;
 }
 
 static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -222,6 +213,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
 	if (!dp)
 		return -ENOMEM;
 
+	dp->dev = dev;
 	/*
 	 * We just use the drvdata until driver run into component
 	 * add function, and then we would set drvdata to null, so
@@ -247,16 +239,29 @@ static int exynos_dp_probe(struct platform_device *pdev)
 
 	/* The remote port can be either a panel or a bridge */
 	dp->plat_data.panel = panel;
+	dp->plat_data.dev_type = EXYNOS_DP;
+	dp->plat_data.power_on_start = exynos_dp_poweron;
+	dp->plat_data.power_off = exynos_dp_poweroff;
+	dp->plat_data.attach = exynos_dp_bridge_attach;
+	dp->plat_data.get_modes = exynos_dp_get_modes;
 	dp->plat_data.skip_connector = !!bridge;
+
 	dp->ptn_bridge = bridge;
 
 out:
+	dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+	if (IS_ERR(dp->adp))
+		return PTR_ERR(dp->adp);
+
 	return component_add(&pdev->dev, &exynos_dp_ops);
 }
 
 static int exynos_dp_remove(struct platform_device *pdev)
 {
+	struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+
 	component_del(&pdev->dev, &exynos_dp_ops);
+	analogix_dp_remove(dp->adp);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 73d0f4648c06..2c617c98db3a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -947,7 +947,8 @@ static const struct cnl_ddi_buf_trans *
 ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 			int *n_entries)
 {
-	if (type == INTEL_OUTPUT_DP && rate > 270000) {
+	if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
+	    rate > 270000) {
 		*n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
 		return ehl_combo_phy_ddi_translations_hbr2_hbr3;
 	}
@@ -959,7 +960,7 @@ static const struct cnl_ddi_buf_trans *
 tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 			int *n_entries)
 {
-	if (type != INTEL_OUTPUT_DP) {
+	if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
 		return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
 	} else if (rate > 270000) {
 		*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
@@ -1869,7 +1870,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
 		return;
 
 	dig_port = enc_to_dig_port(encoder);
-	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+
+	if (!intel_phy_is_tc(dev_priv, phy) ||
+	    dig_port->tc_mode != TC_PORT_TBT_ALT)
+		intel_display_power_get(dev_priv,
+					dig_port->ddi_io_power_domain);
 
 	/*
	 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 36d069504836..b7440f06c5e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -896,11 +896,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
 
 static void reloc_gpu_flush(struct reloc_cache *cache)
 {
-	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
+	struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+
+	GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
 	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
 
-	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
-	i915_gem_object_unpin_map(cache->rq->batch->obj);
+	__i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+	i915_gem_object_unpin_map(obj);
 
 	intel_gt_chipset_flush(cache->rq->engine->gt);
 
@@ -1477,10 +1479,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 			 * can read from this userspace address.
			 */
 			offset = gen8_canonical_addr(offset & ~UPDATE);
-			if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
-				remain = -EFAULT;
-				goto out;
-			}
+			__put_user(offset,
+				   &urelocs[r - stack].presumed_offset);
 		}
 	} while (r++, --count);
 	urelocs += ARRAY_SIZE(stack);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index aed498a0d032..4c5a209cb669 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -191,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	struct sgt_iter sgt_iter;
-	gen8_pte_t __iomem *gtt_entries;
 	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	gen8_pte_t __iomem *gte;
+	gen8_pte_t __iomem *end;
+	struct sgt_iter iter;
 	dma_addr_t addr;
 
 	/*
@@ -202,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
	 * not to allow the user to override access to a read only page.
	 */
 
-	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
-	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
-		gen8_set_pte(gtt_entries++, pte_encode | addr);
+	gte = (gen8_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+	for_each_sgt_daddr(addr, iter, vma->pages)
+		gen8_set_pte(gte++, pte_encode | addr);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		gen8_set_pte(gte++, vm->scratch[0].encode);
 
 	/*
	 * We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     u32 flags)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+	gen6_pte_t __iomem *gte;
+	gen6_pte_t __iomem *end;
 	struct sgt_iter iter;
 	dma_addr_t addr;
 
+	gte = (gen6_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
 	for_each_sgt_daddr(addr, iter, vma->pages)
-		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+		iowrite32(vm->pte_encode(addr, level, flags), gte++);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		iowrite32(vm->scratch[0].encode, gte++);
 
 	/*
	 * We want to flush the TLBs only after we're certain all the PTE
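Both GGTT hunks above finish with the same defensive pattern: after the real PTEs are written, the rest of the reserved node is padded with scratch entries so stale mappings cannot linger in the allocated-but-unused tail. A freestanding sketch of that cursor/end pattern over a plain array, with SCRATCH standing in for the scratch-page PTE:

#include <stdio.h>

#define SCRATCH 0x0u	/* stand-in for vm->scratch[0].encode */

int main(void)
{
	unsigned int ptes[8];	/* the PTE slots reserved for the vma node */
	unsigned int used = 5;	/* slots backed by real pages */
	unsigned int *gte = ptes, *end = ptes + 8, i;

	for (i = 0; i < used; i++)
		*gte++ = 0x1000u * (i + 1);	/* the "real" PTE writes */

	/* Fill the allocated but unused tail so no stale entries remain. */
	while (gte < end)
		*gte++ = SCRATCH;

	for (i = 0; i < 8; i++)
		printf("pte[%u] = %#x\n", i, ptes[i]);
	return 0;
}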
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index f38f5e113c6b..ce98c08aa8b4 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -325,15 +325,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
 			    void *data)
 {
 	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
-	const struct rockchip_dp_chip_data *dp_data;
 	struct drm_device *drm_dev = data;
 	int ret;
 
-	dp_data = of_device_get_match_data(dev);
-	if (!dp_data)
-		return -ENODEV;
-
-	dp->data = dp_data;
 	dp->drm_dev = drm_dev;
 
 	ret = rockchip_dp_drm_create_encoder(dp);
@@ -344,16 +338,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
 
 	dp->plat_data.encoder = &dp->encoder;
 
-	dp->plat_data.dev_type = dp->data->chip_type;
-	dp->plat_data.power_on_start = rockchip_dp_poweron_start;
-	dp->plat_data.power_off = rockchip_dp_powerdown;
-	dp->plat_data.get_modes = rockchip_dp_get_modes;
-
-	dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
-	if (IS_ERR(dp->adp)) {
-		ret = PTR_ERR(dp->adp);
+	ret = analogix_dp_bind(dp->adp, drm_dev);
+	if (ret)
 		goto err_cleanup_encoder;
-	}
 
 	return 0;
 err_cleanup_encoder:
@@ -368,8 +355,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
 
 	analogix_dp_unbind(dp->adp);
 	dp->encoder.funcs->destroy(&dp->encoder);
-
-	dp->adp = ERR_PTR(-ENODEV);
 }
 
 static const struct component_ops rockchip_dp_component_ops = {
@@ -380,10 +365,15 @@ static const struct component_ops rockchip_dp_component_ops = {
 static int rockchip_dp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	const struct rockchip_dp_chip_data *dp_data;
 	struct drm_panel *panel = NULL;
 	struct rockchip_dp_device *dp;
 	int ret;
 
+	dp_data = of_device_get_match_data(dev);
+	if (!dp_data)
+		return -ENODEV;
+
 	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
 	if (ret < 0)
 		return ret;
@@ -394,7 +384,12 @@ static int rockchip_dp_probe(struct platform_device *pdev)
 
 	dp->dev = dev;
 	dp->adp = ERR_PTR(-ENODEV);
+	dp->data = dp_data;
 	dp->plat_data.panel = panel;
+	dp->plat_data.dev_type = dp->data->chip_type;
+	dp->plat_data.power_on_start = rockchip_dp_poweron_start;
+	dp->plat_data.power_off = rockchip_dp_powerdown;
+	dp->plat_data.get_modes = rockchip_dp_get_modes;
 
 	ret = rockchip_dp_of_probe(dp);
 	if (ret < 0)
@@ -402,12 +397,19 @@ static int rockchip_dp_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, dp);
 
+	dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+	if (IS_ERR(dp->adp))
+		return PTR_ERR(dp->adp);
+
 	return component_add(dev, &rockchip_dp_component_ops);
 }
 
 static int rockchip_dp_remove(struct platform_device *pdev)
 {
+	struct rockchip_dp_device *dp = platform_get_drvdata(pdev);
+
 	component_del(&pdev->dev, &rockchip_dp_component_ops);
+	analogix_dp_remove(dp->adp);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6ee3b96f0d13..0ad30b112982 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -442,66 +442,6 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/**
- * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting?
- * @prot: The page protection value
- *
- * Return: true if @prot is write-protecting. false otherwise.
- */
-static bool ttm_pgprot_is_wrprotecting(pgprot_t prot)
-{
-	/*
-	 * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic
-	 * way. Unfortunately there is no generic pgprot_wrprotect.
-	 */
-	return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) ==
-		pgprot_val(prot);
-}
-
-static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf,
-				       enum page_entry_size pe_size)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	pgprot_t prot;
-	struct ttm_buffer_object *bo = vma->vm_private_data;
-	vm_fault_t ret;
-	pgoff_t fault_page_size = 0;
-	bool write = vmf->flags & FAULT_FLAG_WRITE;
-
-	switch (pe_size) {
-	case PE_SIZE_PMD:
-		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
-		break;
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-	case PE_SIZE_PUD:
-		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
-		break;
-#endif
-	default:
-		WARN_ON_ONCE(1);
-		return VM_FAULT_FALLBACK;
-	}
-
-	/* Fallback on write dirty-tracking or COW */
-	if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot))
-		return VM_FAULT_FALLBACK;
-
-	ret = ttm_bo_vm_reserve(bo, vmf);
-	if (ret)
-		return ret;
-
-	prot = vm_get_page_prot(vma->vm_flags);
-	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
-	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-		return ret;
-
-	dma_resv_unlock(bo->base.resv);
-
-	return ret;
-}
-#endif
-
 void ttm_bo_vm_open(struct vm_area_struct *vma)
 {
 	struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -604,9 +544,6 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.open = ttm_bo_vm_open,
 	.close = ttm_bo_vm_close,
 	.access = ttm_bo_vm_access,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	.huge_fault = ttm_bo_vm_huge_fault,
-#endif
 };
 
 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 2bfb13d1932e..d9039bb7c5e3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -123,15 +123,17 @@ bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 						size_t size)
 {
-	struct virtio_gpu_object *bo;
+	struct virtio_gpu_object_shmem *shmem;
+	struct drm_gem_shmem_object *dshmem;
 
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
+	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!shmem)
 		return NULL;
 
-	bo->base.base.funcs = &virtio_gpu_shmem_funcs;
-	bo->base.map_cached = true;
-	return &bo->base.base;
+	dshmem = &shmem->base.base;
+	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
+	dshmem->map_cached = true;
+	return &dshmem->base;
 }
 
 static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
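The virtio hunk fixes the out-of-bounds access by allocating the outer virtio_gpu_object_shmem wrapper rather than only the embedded base object, so later downcasts from the base pointer stay inside the allocation. A compact illustration of why the outer struct must own the memory; the struct names here are illustrative, not the driver's:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct base {
	int funcs;
};

struct wrapper {
	struct base base;	/* embedded first member */
	int extra_state;	/* fields beyond the base object */
};

int main(void)
{
	/* Correct: allocate the outer struct, hand out the embedded base. */
	struct wrapper *w = calloc(1, sizeof(*w));
	struct base *b;
	struct wrapper *again;

	if (!w)
		return 1;
	b = &w->base;

	/* Downcasting from the base back to the wrapper is only safe
	 * because the wrapper owns the allocation; allocating just
	 * sizeof(struct base) would make this write run past the end
	 * of the buffer. */
	again = (struct wrapper *)((char *)b - offsetof(struct wrapper, base));
	again->extra_state = 42;

	printf("extra_state = %d\n", again->extra_state);
	free(w);
	return 0;
}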