mirror of https://github.com/torvalds/linux.git
Merge tag 'amd-drm-fixes-6.18-2025-11-06' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.18-2025-11-06:

amdgpu:
- Reset fixes
- Misc fixes
- Panel scaling fixes
- HDMI fix
- S0ix fixes
- Hibernation fix
- Secure display fix
- Suspend fix
- MST fix

amdkfd:
- Process cleanup fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20251106201326.807230-1-alexander.deucher@amd.com
commit b57b47741e
@@ -1267,6 +1267,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
 	(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
 
+	/* VM entity stopped if process killed, don't clear freed pt bo */
+	if (!amdgpu_vm_ready(vm))
+		return 0;
+
 	(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
 	(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);

@@ -5243,10 +5243,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_release_full_gpu(adev, false);
 
-	r = amdgpu_dpm_notify_rlc_state(adev, false);
-	if (r)
-		return r;
-
 	return 0;
 }

@@ -2632,9 +2632,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	int r;
 
-	if (amdgpu_acpi_should_gpu_reset(adev))
-		return amdgpu_asic_reset(adev);
+	if (amdgpu_acpi_should_gpu_reset(adev)) {
+		amdgpu_device_lock_reset_domain(adev->reset_domain);
+		r = amdgpu_asic_reset(adev);
+		amdgpu_device_unlock_reset_domain(adev->reset_domain);
+		return r;
+	}
 
 	return 0;
 }

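Note on the hunk above: the old path issued amdgpu_asic_reset() from the noirq suspend handler without any serialization against the driver's reset machinery; the new branch brackets the reset with the reset-domain lock/unlock helpers, presumably so it cannot race a concurrent GPU recovery. A minimal sketch of that pattern, using a hypothetical wrapper name (only the lock/reset/unlock helpers come from the hunk itself):

/* Sketch only: take the device's reset domain exclusively around an ASIC
 * reset. example_locked_asic_reset() is a hypothetical wrapper, not part of
 * the patch. */
static int example_locked_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_device_lock_reset_domain(adev->reset_domain);
	r = amdgpu_asic_reset(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);

	return r;
}
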
@@ -2355,8 +2355,11 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
 	if (!ret && !psp->securedisplay_context.context.resp_status) {
 		psp->securedisplay_context.context.initialized = true;
 		mutex_init(&psp->securedisplay_context.mutex);
-	} else
+	} else {
+		/* don't try again */
+		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
 		return ret;
+	}
 
 	mutex_lock(&psp->securedisplay_context.mutex);

@@ -407,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 		return -EINVAL;
 	}
 
-	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+	if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
+	    !adev->in_suspend)
 		flags |= AMDGPU_XCP_OPS_KFD;
 
 	if (flags & AMDGPU_XCP_OPS_KFD) {

@@ -3102,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
 			return r;
 	}
 
+	adev->gfx.gfx_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+	adev->gfx.compute_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
 	return r;
 }

@@ -4399,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
 
 	gfx_v7_0_gpu_early_init(adev);
 
+	adev->gfx.gfx_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+	adev->gfx.compute_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
 	return r;
 }

@@ -2023,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
 		if (r)
 			return r;
 
+	adev->gfx.gfx_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+	adev->gfx.compute_supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
 	return 0;
 }

@@ -2292,7 +2292,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
 
 	} else {
-		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+		if (adev->in_suspend)
+			amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
+		else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
 						    AMDGPU_XCP_FL_NONE) ==
 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
 			r = amdgpu_xcp_switch_partition_mode(

@@ -142,13 +142,37 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 	return err;
 }
 
+static int psp_v11_wait_for_tos_unload(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+	uint32_t sol_reg1, sol_reg2;
+	int retry_loop;
+
+	/* Wait for the TOS to be unloaded */
+	for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+		sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+		usleep_range(1000, 2000);
+		sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+		if (sol_reg1 == sol_reg2)
+			return 0;
+	}
+	dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x",
+		RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33),
+		RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
+
+	return -ETIME;
+}
+
 static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
 
 	int ret;
 	int retry_loop;
 
+	/* For a reset done at the end of S3, only wait for TOS to be unloaded */
+	if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+		return psp_v11_wait_for_tos_unload(psp);
+
 	for (retry_loop = 0; retry_loop < 20; retry_loop++) {
 		/* Wait for bootloader to signify that is
 		    ready having bit 31 of C2PMSG_35 set to 1 */

@@ -3563,6 +3563,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
 	/* Do mst topology probing after resuming cached state*/
 	drm_connector_list_iter_begin(ddev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
+		bool init = false;
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 			continue;
@@ -3572,7 +3573,14 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
 		    aconnector->mst_root)
 			continue;
 
-		drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+		scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+			init = !aconnector->mst_mgr.mst_primary;
+		}
+		if (init)
+			dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+							aconnector->dc_link, false);
+		else
+			drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
 	}
 	drm_connector_list_iter_end(&iter);

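A note on scoped_guard() used in the hunk above: it comes from the kernel's cleanup helpers (<linux/cleanup.h>, with the mutex guard defined alongside struct mutex) and holds the lock only for the braced statement, releasing it automatically on every exit path. A minimal self-contained sketch, with a hypothetical lock and flag standing in for mst_mgr.lock and mst_mgr.mst_primary:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);      /* hypothetical lock */
static bool example_primary_present;    /* hypothetical state it protects */

static bool example_needs_full_init(void)
{
	bool init;

	/* example_lock is acquired on entry to the braced block and dropped
	 * automatically when the block is left, whatever the exit path. */
	scoped_guard(mutex, &example_lock) {
		init = !example_primary_present;
	}

	return init;
}
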
@@ -8030,7 +8038,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 			      "mode %dx%d@%dHz is not native, enabling scaling\n",
 			      adjusted_mode->hdisplay, adjusted_mode->vdisplay,
 			      drm_mode_vrefresh(adjusted_mode));
-		dm_new_connector_state->scaling = RMX_FULL;
+		dm_new_connector_state->scaling = RMX_ASPECT;
 	}
 	return 0;
 }

@@ -1302,7 +1302,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
 	if (connector->status != connector_status_connected)
 		return -ENODEV;
 
-	if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+	if (pipe_ctx && pipe_ctx->stream_res.tg &&
+	    pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
 		pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);
 
 	seq_printf(m, "%d\n", segments);

@@ -1141,6 +1141,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
 			 !sink->edid_caps.edid_hdmi)
 			sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 		else if (dc_is_dvi_signal(sink->sink_signal) &&
+			 dc_is_dvi_signal(link->connector_signal) &&
 			 aud_support->hdmi_audio_native &&
 			 sink->edid_caps.edid_hdmi)
 			sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;

@@ -195,24 +195,6 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 	return ret;
 }
 
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
-{
-	int ret = 0;
-	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-	if (pp_funcs && pp_funcs->notify_rlc_state) {
-		mutex_lock(&adev->pm.mutex);
-
-		ret = pp_funcs->notify_rlc_state(
-				adev->powerplay.pp_handle,
-				en);
-
-		mutex_unlock(&adev->pm.mutex);
-	}
-
-	return ret;
-}
-
 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

@@ -4724,14 +4724,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		ret = devm_device_add_group(adev->dev,
 					    &amdgpu_pm_policy_attr_group);
 		if (ret)
-			goto err_out0;
+			goto err_out1;
 	}
 
 	if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
 		ret = devm_device_add_group(adev->dev,
 					    &amdgpu_board_attr_group);
 		if (ret)
-			goto err_out0;
+			goto err_out1;
 		if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
 						 (void *)&tmp) != -EOPNOTSUPP) {
 			sysfs_add_file_to_group(&adev->dev->kobj,

@@ -424,8 +424,6 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 			     enum pp_mp1_state mp1_state);
 
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
-
 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
 
 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);

@@ -2040,6 +2040,12 @@ static int smu_disable_dpms(struct smu_context *smu)
 	    smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
 		return 0;
 
+	/* vangogh s0ix */
+	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+	     amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&
+	    adev->in_s0ix)
+		return 0;
+
 	/*
 	 * For gpu reset, runpm and hibernation through BACO,
 	 * BACO feature has to be kept enabled.

@@ -2217,6 +2217,9 @@ static int vangogh_post_smu_init(struct smu_context *smu)
 	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
 		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
 
+	if (adev->in_s0ix)
+		return 0;
+
 	/* allow message will be sent after enable message on Vangogh*/
 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
 	    (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {