mirror of https://github.com/torvalds/linux.git
amd-drm-fixes-6.17-2025-09-18:

amdgpu:
- GC 11.0.1/4 cleaner shader support
- DC irq fix
- OD fix

amdkfd:
- S0ix fix
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQQgO5Idg2tXNTSZAr293/aFa7yZ2AUCaMxZIQAKCRC93/aFa7yZ
2IQIAQDDkWs0AA3iZiEGVRkbLqlN/SiDHxhociuGfn3pjWLX0AEAxjCkQP89vZaF
UNnGaScoZ+tN3jD9/ieswyk6ZkeBEgE=
=SYqV
-----END PGP SIGNATURE-----

Merge tag 'amd-drm-fixes-6.17-2025-09-18' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.17-2025-09-18:

amdgpu:
- GC 11.0.1/4 cleaner shader support
- DC irq fix
- OD fix

amdkfd:
- S0ix fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250918191428.2553105-1-alexander.deucher@amd.com
This commit is contained in:
commit feb96ccb33
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
 {
-	if (adev->kfd.dev)
-		kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+	if (adev->kfd.dev) {
+		if (adev->in_s0ix)
+			kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+		else
+			kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+	}
 }
 
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
 {
 	int r = 0;
 
-	if (adev->kfd.dev)
-		r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+	if (adev->kfd.dev) {
+		if (adev->in_s0ix)
+			r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+		else
+			r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+	}
 
 	return r;
 }
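This hunk moves the S0ix decision inside the KFD wrappers rather than leaving it at every call site: in S0ix the device keeps its context, so only the scheduler is halted instead of doing a full KFD suspend. Below is a minimal userspace sketch of that gating pattern; the struct and helper names are invented for illustration and are not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool in_s0ix;   /* modern standby: device context is retained */
	bool has_kfd;   /* compute (KFD) stack present */
};

/* stand-ins for kgd2kfd_stop_sched_all_nodes()/kgd2kfd_suspend() */
static void stop_sched_only(void)    { puts("S0ix: halt queues, keep state"); }
static void full_suspend(bool procs) { printf("full suspend, procs=%d\n", procs); }

static void kfd_suspend(struct dev_state *dev, bool suspend_proc)
{
	if (!dev->has_kfd)
		return;
	if (dev->in_s0ix)
		stop_sched_only();          /* light path: scheduler halt only */
	else
		full_suspend(suspend_proc); /* full teardown as before */
}

int main(void)
{
	struct dev_state dev = { .in_s0ix = true, .has_kfd = true };

	kfd_suspend(&dev, false);  /* S0ix entry */
	dev.in_s0ix = false;
	kfd_suspend(&dev, true);   /* ordinary S3-style suspend */
	return 0;
}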
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -426,7 +426,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
 int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
 void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
 bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
 			       bool retry_fault);
@@ -516,11 +518,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
 	return 0;
 }
 
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
 static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
 {
 	return 0;
 }
 
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
 static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
 {
 	return false;
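These inline stubs follow the usual kernel pattern for optional subsystems: when KFD support is compiled out, callers still compile and link against no-op inlines instead of sprouting #ifdefs. A compilable sketch of the pattern, with a made-up feature name and config symbol:

#include <stdio.h>

#define CONFIG_MY_FEATURE 0   /* set to 1 to build the real implementation */

#if CONFIG_MY_FEATURE
int feature_start_all(void)
{
	puts("starting feature on all nodes");
	return 0;
}
#else
/* Compiled-out build: the stub preserves the signature and a benign
 * return value, so call sites need no #ifdefs of their own. */
static inline int feature_start_all(void)
{
	return 0;
}
#endif

int main(void)
{
	return feature_start_all();   /* works in both configurations */
}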
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5136,7 +5136,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 	adev->in_suspend = true;
 
 	if (amdgpu_sriov_vf(adev)) {
-		if (!adev->in_s0ix && !adev->in_runpm)
+		if (!adev->in_runpm)
 			amdgpu_amdkfd_suspend_process(adev);
 		amdgpu_virt_fini_data_exchange(adev);
 		r = amdgpu_virt_request_full_gpu(adev, false);
@@ -5156,10 +5156,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 
 	amdgpu_device_ip_suspend_phase1(adev);
 
-	if (!adev->in_s0ix) {
-		amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
-		amdgpu_userq_suspend(adev);
-	}
+	amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+	amdgpu_userq_suspend(adev);
 
 	r = amdgpu_device_evict_resources(adev);
 	if (r)
@@ -5254,15 +5252,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
 		goto exit;
 	}
 
-	if (!adev->in_s0ix) {
-		r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
-		if (r)
-			goto exit;
-
-		r = amdgpu_userq_resume(adev);
-		if (r)
-			goto exit;
-	}
+	r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+	if (r)
+		goto exit;
+
+	r = amdgpu_userq_resume(adev);
+	if (r)
+		goto exit;
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
@@ -5275,7 +5271,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
 		amdgpu_virt_init_data_exchange(adev);
 		amdgpu_virt_release_full_gpu(adev, true);
 
-		if (!adev->in_s0ix && !r && !adev->in_runpm)
+		if (!r && !adev->in_runpm)
 			r = amdgpu_amdkfd_resume_process(adev);
 	}
 
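With the !in_s0ix guards dropped, the KFD and user-queue suspend/resume steps now run for S0ix and full suspend alike, and the wrappers shown earlier pick the light or heavy path internally. The resume side leans on the function's goto-based unwinding; here is a small model of that error-path style (the step functions are placeholders, not kernel API):

#include <stdio.h>

static int kfd_resume(void)   { return 0; }   /* pretend success */
static int userq_resume(void) { return 0; }

static int device_resume(void)
{
	int r;

	r = kfd_resume();        /* no longer guarded by !in_s0ix */
	if (r)
		goto exit;

	r = userq_resume();
	if (r)
		goto exit;

exit:
	if (r)
		fprintf(stderr, "resume failed: %d\n", r);
	return r;
}

int main(void)
{
	return device_resume();
}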
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1654,6 +1654,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
 			}
 		}
 		break;
+	case IP_VERSION(11, 0, 1):
+	case IP_VERSION(11, 0, 4):
+		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+		if (adev->gfx.pfp_fw_version >= 102 &&
+		    adev->gfx.mec_fw_version >= 66 &&
+		    adev->mes.fw_version[0] >= 128) {
+			adev->gfx.enable_cleaner_shader = true;
+			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+			if (r) {
+				adev->gfx.enable_cleaner_shader = false;
+				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+			}
+		}
+		break;
 	case IP_VERSION(11, 5, 0):
 	case IP_VERSION(11, 5, 1):
 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
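The GC 11.0.1/11.0.4 hunk only enables the cleaner shader when the PFP, MEC, and MES firmware all meet a minimum version, and falls back to disabled if buffer setup fails rather than failing sw_init. A compilable sketch of the version gate, reusing the thresholds from the hunk (the struct is a simplified stand-in):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_versions {
	uint32_t pfp, mec, mes;
};

static bool cleaner_shader_supported(const struct fw_versions *fw)
{
	/* every firmware must be new enough, otherwise stay disabled */
	return fw->pfp >= 102 && fw->mec >= 66 && fw->mes >= 128;
}

int main(void)
{
	struct fw_versions old_fw = { .pfp = 101, .mec = 66, .mes = 128 };
	struct fw_versions new_fw = { .pfp = 102, .mec = 66, .mes = 128 };

	printf("old fw: %s\n", cleaner_shader_supported(&old_fw) ? "on" : "off");
	printf("new fw: %s\n", cleaner_shader_supported(&new_fw) ? "on" : "off");
	return 0;
}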
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1550,6 +1550,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
 	return ret;
 }
 
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+	struct kfd_node *node;
+	int i, r;
+
+	if (!kfd->init_complete)
+		return 0;
+
+	for (i = 0; i < kfd->num_nodes; i++) {
+		node = kfd->nodes[i];
+		r = node->dqm->ops.unhalt(node->dqm);
+		if (r) {
+			dev_err(kfd_device, "Error in starting scheduler\n");
+			return r;
+		}
+	}
+	return 0;
+}
+
 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
 {
 	struct kfd_node *node;
@@ -1567,6 +1586,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
 	return node->dqm->ops.halt(node->dqm);
 }
 
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+	struct kfd_node *node;
+	int i, r;
+
+	if (!kfd->init_complete)
+		return 0;
+
+	for (i = 0; i < kfd->num_nodes; i++) {
+		node = kfd->nodes[i];
+		r = node->dqm->ops.halt(node->dqm);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
 {
 	struct kfd_node *node;
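Both *_all_nodes helpers walk every KFD node and bail out on the first per-node failure, leaving earlier nodes in their new state (there is no rollback pass). A userspace sketch of that fail-fast loop, with an invented per-node halt that errors on one node:

#include <stdio.h>

#define NUM_NODES 4

static int halt_node(int i)
{
	return (i == 2) ? -16 : 0;   /* pretend node 2 returns -EBUSY */
}

static int stop_sched_all_nodes(void)
{
	int i, r;

	for (i = 0; i < NUM_NODES; i++) {
		r = halt_node(i);
		if (r)
			return r;    /* fail fast; nodes 0..i-1 stay halted */
	}
	return 0;
}

int main(void)
{
	printf("stop_sched_all_nodes() = %d\n", stop_sched_all_nodes());
	return 0;
}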
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8717,7 +8717,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
 static void manage_dm_interrupts(struct amdgpu_device *adev,
 				 struct amdgpu_crtc *acrtc,
 				 struct dm_crtc_state *acrtc_state)
 {
+	/*
+	 * We cannot be sure that the frontend index maps to the same
+	 * backend index - some even map to more than one.
+	 * So we have to go through the CRTC to find the right IRQ.
+	 */
+	int irq_type = amdgpu_display_crtc_idx_to_irq_type(
+			adev,
+			acrtc->crtc_id);
 	struct drm_device *dev = adev_to_drm(adev);
 
 	struct drm_vblank_crtc_config config = {0};
 	struct dc_crtc_timing *timing;
 	int offdelay;
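As the new comment explains, a CRTC's frontend index need not equal its backend IRQ index, so the code asks amdgpu_display_crtc_idx_to_irq_type() for the mapping instead of assuming identity. A toy lookup illustrating the idea; the routing table here is invented, whereas the real function derives it from hardware state:

#include <stdio.h>

/* invented frontend-to-backend routing; real hardware supplies this */
static const int crtc_to_irq_type[] = { 5, 7, 6, 8 };

static int crtc_idx_to_irq_type(int crtc_id)
{
	int n = (int)(sizeof(crtc_to_irq_type) / sizeof(crtc_to_irq_type[0]));

	if (crtc_id < 0 || crtc_id >= n)
		return -1;   /* invalid CRTC */
	return crtc_to_irq_type[crtc_id];
}

int main(void)
{
	printf("crtc 1 uses irq type %d\n", crtc_idx_to_irq_type(1));
	return 0;
}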
@@ -8770,7 +8779,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
 
 		drm_crtc_vblank_on_config(&acrtc->base,
 					  &config);
+		/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
+		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+		case IP_VERSION(3, 0, 0):
+		case IP_VERSION(3, 0, 2):
+		case IP_VERSION(3, 0, 3):
+		case IP_VERSION(3, 2, 0):
+			if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+			if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+		}
+
 	} else {
+		/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
+		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+		case IP_VERSION(3, 0, 0):
+		case IP_VERSION(3, 0, 2):
+		case IP_VERSION(3, 0, 3):
+		case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+			if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+			if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+				drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+		}
+
 		drm_crtc_vblank_off(&acrtc->base);
 	}
 }
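The two switches must stay symmetric: every amdgpu_irq_get() taken on the vblank-on path for these DCN versions needs a matching amdgpu_irq_put() on the vblank-off path, or the IRQ reference count leaks. A minimal refcount model of that discipline (all names here are mock-ups, not the amdgpu IRQ API):

#include <stdio.h>

static int pageflip_refcount;

static int irq_get(void)
{
	pageflip_refcount++;
	return 0;
}

static int irq_put(void)
{
	if (pageflip_refcount == 0)
		return -1;            /* unbalanced put: a bug */
	pageflip_refcount--;
	return 0;
}

int main(void)
{
	if (irq_get())
		fprintf(stderr, "cannot get pageflip irq\n");
	if (irq_put())
		fprintf(stderr, "cannot put pageflip irq\n");

	printf("refcount after one on/off cycle: %d\n", pageflip_refcount);
	return 0;
}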
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2236,7 +2236,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
 		return ret;
 	}
 
-	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
 		ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
 		if (ret)
 			return ret;
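The one-line OD fix keeps smu_resume() from committing an overdrive (OD) table on boards where OD was never enabled. A reduced model of the guard; the struct and helper are simplified stand-ins for the SMU context and smu_od_edit_dpm_table():

#include <stdbool.h>
#include <stdio.h>

enum { DPM_LEVEL_AUTO, DPM_LEVEL_MANUAL };

struct smu_ctx {
	int  dpm_level;
	bool od_enabled;
};

static int od_commit_table(void)
{
	puts("committing OD table");
	return 0;
}

static int smu_resume_tail(const struct smu_ctx *smu)
{
	/* without the od_enabled check, resume could replay an OD table
	 * that was never set up on this board */
	if (smu->dpm_level == DPM_LEVEL_MANUAL && smu->od_enabled)
		return od_commit_table();
	return 0;
}

int main(void)
{
	struct smu_ctx smu = { .dpm_level = DPM_LEVEL_MANUAL, .od_enabled = false };

	return smu_resume_tail(&smu);   /* correctly skips the commit */
}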