Merge tag 'drm-next-2025-08-08' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "This is the fixes that built up in the merge window, mostly amdgpu and
  xe with one i915 display fix, seems like things are pretty good for
  rc1.

  i915:
   - DP LFPS fixes

  xe:
   - SRIOV: PF fixes and removal of the need for a module param
   - Fix driver unbind around Devcoredump
   - Mark xe driver as BROKEN if kernel page size is not 4kB

  amdgpu:
   - GC 9.5.0 fixes
   - SMU fix
   - DCE 6 DC fixes
   - mmhub client ID fixes
   - VRR fix
   - Backlight fix
   - UserQ fix
   - Legacy reset fix
   - Misc fixes

  amdkfd:
   - CRIU fix
   - Debugfs fix"

* tag 'drm-next-2025-08-08' of https://gitlab.freedesktop.org/drm/kernel: (28 commits)
  drm/amdgpu: add missing vram lost check for LEGACY RESET
  drm/amdgpu/discovery: fix fw based ip discovery
  drm/amdkfd: Destroy KFD debugfs after destroy KFD wq
  amdgpu/amdgpu_discovery: increase timeout limit for IFWI init
  drm/amdgpu: Update SDMA firmware version check for user queue support
  drm/amdgpu: Add NULL check for asic_funcs
  drm/amd/display: Revert "drm/amd/display: Fix AMDGPU_MAX_BL_LEVEL value"
  drm/amd/display: fix a Null pointer dereference vulnerability
  drm/amd/display: Add primary plane to commits for correct VRR handling
  drm/amdgpu: update mmhub 3.3 client id mappings
  drm/amdgpu: update mmhub 3.0.1 client id mappings
  drm/amdgpu: Retain job->vm in amdgpu_job_prepare_job
  drm/amd/display: Fix DCE 6.0 and 6.4 PLL programming.
  drm/amd/display: Don't overwrite dce60_clk_mgr
  drm/amdkfd: Fix checkpoint-restore on multi-xcc
  drm/amd: Restore cached manual clock settings during resume
  drm/amd: Restore cached power limit during resume
  drm/amdgpu: Update external revid for GC v9.5.0
  drm/amdgpu: Update supported modes for GC v9.5.0
  Mark xe driver as BROKEN if kernel page size is not 4kB
  ...
Linus Torvalds, 2025-08-08 06:48:14 +03:00, commit ffe8ac927d
31 changed files with 456 additions and 141 deletions

amdgpu_device.c

@@ -2570,9 +2570,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
if (adev->mman.discovery_bin)
return 0;
switch (adev->asic_type) {
default:
return 0;
@@ -2594,6 +2591,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
if (adev->mman.discovery_bin)
return 0;
chip_name = "navi12";
break;
}
@@ -3271,6 +3270,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_LEGACY:
case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:

amdgpu_discovery.c

@@ -276,7 +276,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
u32 msg;
if (!amdgpu_sriov_vf(adev)) {
/* It can take up to a second for IFWI init to complete on some dGPUs,
/* It can take up to two second for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -284,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
for (i = 0; i < 1000; i++) {
for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
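
The loop being extended here is a plain bounded poll: read the mailbox
register until the completion bit (bit 31) is set or the iteration
budget runs out. In isolation the shape looks like the sketch below,
where read_c2pmsg33() is a hypothetical stand-in for
RREG32(mmMP0_SMN_C2PMSG_33), and the roughly one-millisecond pacing per
iteration is an assumption inferred from the comment pairing 1000
iterations with "up to a second":

#include <linux/delay.h>
#include <linux/types.h>

static bool wait_ifwi_done(u32 (*read_c2pmsg33)(void), unsigned int tries)
{
	unsigned int i;

	for (i = 0; i < tries; i++) {
		if (read_c2pmsg33() & 0x80000000)
			return true;	/* IFWI init has completed */
		usleep_range(1000, 1100);	/* assumed ~1ms spacing */
	}
	return false;	/* budget exhausted; caller decides what to do */
}
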
@@ -2555,40 +2555,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
/* this is not fatal. We have a fallback below
* if the new firmwares are not present. some of
* this will be overridden below to keep things
* consistent with the current behavior.
/* This is not fatal. We only need the discovery
* binary for sysfs. We don't need it for a
* functional system.
*/
r = amdgpu_discovery_reg_base_init(adev);
if (!r) {
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
}
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r) {
drm_err(&adev->ddev, "discovery failed: %d\n", r);
return r;
}
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
break;
}
switch (adev->asic_type) {
case CHIP_VEGA10:
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2611,6 +2582,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
/* This is not fatal. We only need the discovery
* binary for sysfs. We don't need it for a
* functional system.
*/
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2633,6 +2609,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
/* This is not fatal. We only need the discovery
* binary for sysfs. We don't need it for a
* functional system.
*/
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
@@ -2674,6 +2655,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
}
break;
case CHIP_VEGA20:
/* This is not fatal. We only need the discovery
* binary for sysfs. We don't need it for a
* functional system.
*/
amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
@@ -2697,6 +2683,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
/* This is not fatal. We only need the discovery
* binary for sysfs. We don't need it for a
* functional system.
*/
amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
@@ -2725,6 +2716,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
/* This is not fatal. We only need the discovery
* binary for sysfs. We don't need it for a
* functional system.
*/
amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
@@ -2751,6 +2747,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r) {
drm_err(&adev->ddev, "discovery failed: %d\n", r);
return r;
}
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
break;
}

amdgpu_job.c

@@ -365,13 +365,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
/*
* The VM structure might be released after the VMID is
* assigned, we had multiple problems with people trying to use
* the VM pointer so better set it to NULL.
*/
if (!fence)
job->vm = NULL;
return fence;
}

amdgpu_nbio.c

@@ -55,7 +55,8 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev) || !adev->asic_funcs->get_pcie_replay_count ||
if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
!adev->asic_funcs->get_pcie_replay_count ||
(!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
return false;

aqua_vanjaram.c

@@ -227,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
@@ -250,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcp = 4;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
if (gc_ver == IP_VERSION(9, 5, 0))
*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
if (amdgpu_sriov_vf(adev))
if (gc_ver == IP_VERSION(9, 5, 0))
*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:

mmhub_v3_0_1.c

@@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = {
[0][0] = "VMC",
[1][0] = "ISPXT",
[2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPIO",
[16][0] = "HDP",
[17][0] = "LSDMA",
[18][0] = "JPEG",
[19][0] = "VCNU0",
[21][0] = "VSCH",
[22][0] = "VCNU1",
[23][0] = "VCN1",
[32+20][0] = "VCN0",
[2][1] = "DBGUNBIO",
[8][0] = "MPM",
[12][0] = "ISPTNR",
[14][0] = "ISPCRD0",
[15][0] = "ISPCRD1",
[16][0] = "ISPCRD2",
[22][0] = "HDP",
[23][0] = "LSDMA",
[24][0] = "JPEG",
[27][0] = "VSCH",
[28][0] = "VCNU",
[29][0] = "VCN",
[1][1] = "ISPXT",
[2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPIO",
[10][1] = "DBGU0",
[11][1] = "DBGU1",
[12][1] = "DBGU2",
[13][1] = "DBGU3",
[14][1] = "XDP",
[15][1] = "OSSSYS",
[16][1] = "HDP",
[17][1] = "LSDMA",
[18][1] = "JPEG",
[19][1] = "VCNU0",
[20][1] = "VCN0",
[21][1] = "VSCH",
[22][1] = "VCNU1",
[23][1] = "VCN1",
[8][1] = "MPM",
[10][1] = "ISPMWR0",
[11][1] = "ISPMWR1",
[12][1] = "ISPTNR",
[13][1] = "ISPSWR",
[14][1] = "ISPCWR0",
[15][1] = "ISPCWR1",
[16][1] = "ISPCWR2",
[17][1] = "ISPCWR3",
[18][1] = "XDP",
[21][1] = "OSSSYS",
[22][1] = "HDP",
[23][1] = "LSDMA",
[24][1] = "JPEG",
[27][1] = "VSCH",
[28][1] = "VCNU",
[29][1] = "VCN",
};
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,

mmhub_v3_3.c

@@ -39,6 +39,64 @@
#define regDAGB1_L1TLB_REG_RW_3_3_BASE_IDX 1
static const char *mmhub_client_ids_v3_3[][2] = {
[0][0] = "VMC",
[1][0] = "ISPXT",
[2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPM",
[9][0] = "ISPPDPRD",
[10][0] = "ISPCSTATRD",
[11][0] = "ISPBYRPRD",
[12][0] = "ISPRGBPRD",
[13][0] = "ISPMCFPRD",
[14][0] = "ISPMCFPRD1",
[15][0] = "ISPYUVPRD",
[16][0] = "ISPMCSCRD",
[17][0] = "ISPGDCRD",
[18][0] = "ISPLMERD",
[22][0] = "ISPXT1",
[23][0] = "ISPIXT1",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
[28][0] = "VSCH",
[29][0] = "VCNU",
[30][0] = "VCN",
[1][1] = "ISPXT",
[2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "ISPCSISWR",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPM",
[9][1] = "ISPPDPWR",
[10][1] = "ISPCSTATWR",
[11][1] = "ISPBYRPWR",
[12][1] = "ISPRGBPWR",
[13][1] = "ISPMCFPWR",
[14][1] = "ISPMWR0",
[15][1] = "ISPYUVPWR",
[16][1] = "ISPMCSCWR",
[17][1] = "ISPGDCWR",
[18][1] = "ISPLMEWR",
[20][1] = "ISPMWR2",
[21][1] = "OSSSYS",
[22][1] = "ISPXT1",
[23][1] = "ISPIXT1",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
[28][1] = "VSCH",
[29][1] = "VCNU",
[30][1] = "VCN",
};
static const char *mmhub_client_ids_v3_3_1[][2] = {
[0][0] = "VMC",
[4][0] = "DCEDMC",
[6][0] = "MP0",
@@ -46,10 +104,29 @@ static const char *mmhub_client_ids_v3_3[][2] = {
[8][0] = "MPM",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
[29][0] = "VCNU",
[30][0] = "VCN",
[26][0] = "JPEG0",
[27][0] = "VPE0",
[28][0] = "VSCH",
[29][0] = "VCNU0",
[30][0] = "VCN0",
[32+1][0] = "ISPXT",
[32+2][0] = "ISPIXT",
[32+9][0] = "ISPPDPRD",
[32+10][0] = "ISPCSTATRD",
[32+11][0] = "ISPBYRPRD",
[32+12][0] = "ISPRGBPRD",
[32+13][0] = "ISPMCFPRD",
[32+14][0] = "ISPMCFPRD1",
[32+15][0] = "ISPYUVPRD",
[32+16][0] = "ISPMCSCRD",
[32+17][0] = "ISPGDCRD",
[32+18][0] = "ISPLMERD",
[32+22][0] = "ISPXT1",
[32+23][0] = "ISPIXT1",
[32+26][0] = "JPEG1",
[32+27][0] = "VPE1",
[32+29][0] = "VCNU1",
[32+30][0] = "VCN1",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[6][1] = "MP0",
@@ -58,10 +135,32 @@ static const char *mmhub_client_ids_v3_3[][2] = {
[21][1] = "OSSSYS",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
[29][1] = "VCNU",
[30][1] = "VCN",
[26][1] = "JPEG0",
[27][1] = "VPE0",
[28][1] = "VSCH",
[29][1] = "VCNU0",
[30][1] = "VCN0",
[32+1][1] = "ISPXT",
[32+2][1] = "ISPIXT",
[32+5][1] = "ISPCSISWR",
[32+9][1] = "ISPPDPWR",
[32+10][1] = "ISPCSTATWR",
[32+11][1] = "ISPBYRPWR",
[32+12][1] = "ISPRGBPWR",
[32+13][1] = "ISPMCFPWR",
[32+14][1] = "ISPMWR0",
[32+15][1] = "ISPYUVPWR",
[32+16][1] = "ISPMCSCWR",
[32+17][1] = "ISPGDCWR",
[32+18][1] = "ISPLMEWR",
[32+19][1] = "ISPMWR1",
[32+20][1] = "ISPMWR2",
[32+22][1] = "ISPXT1",
[32+23][1] = "ISPIXT1",
[32+26][1] = "JPEG1",
[32+27][1] = "VPE1",
[32+29][1] = "VCNU1",
[32+30][1] = "VCN1",
};
static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid,
@@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
case IP_VERSION(3, 3, 1):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ?
mmhub_client_ids_v3_3_1[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
default:
mmhub_cid = NULL;
break;
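
A side note on the table syntax used throughout these client-ID arrays:
entries such as [32+30][0] are ordinary C designated initializers, so
[32+30] simply names row 62 (here, the read-side string for client id
62). A tiny standalone model of the lookup the fault handler performs,
with made-up values:

#include <stdio.h>

static const char *ids[][2] = {
	[0][0] = "VMC",
	[32 + 30][0] = "VCN1",	/* row 62, read side */
};

int main(void)
{
	unsigned int cid = 62, rw = 0;
	const char *name = cid < sizeof(ids) / sizeof(ids[0]) ?
			   ids[cid][rw] : NULL;

	printf("%s\n", name ? name : "unknown");	/* prints VCN1 */
	return 0;
}
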

sdma_v7_0.c

@@ -1353,7 +1353,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq)
if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
default:

soc15.c

@@ -1218,6 +1218,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_JPEG;
/*TODO: need a new external_rev_id for GC 9.4.4? */
adev->external_rev_id = adev->rev_id + 0x46;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->external_rev_id = adev->rev_id + 0x50;
break;
default:
/* FIXME: not supported yet */

kfd_device_queue_manager.c

@@ -2725,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
*mqd_size = mqd_mgr->mqd_size;
*mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask);
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
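
The checkpoint-size fix above scales the per-queue MQD size by the
number of XCCs (compute dies) in the partition, since a multi-XCC queue
carries one MQD per die. A minimal sketch of the arithmetic, assuming
for illustration that NUM_XCC() reduces to a population count of the
XCC mask (hypothetical helper, not the kernel's definition):

#include <linux/bitops.h>
#include <linux/types.h>

static u32 checkpoint_mqd_bytes(u32 per_xcc_mqd_size, u32 xcc_mask)
{
	/* one MQD per active XCC, stored back to back */
	return per_xcc_mqd_size * hweight32(xcc_mask);
}
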

kfd_module.c

@@ -78,8 +78,8 @@ static int kfd_init(void)
static void kfd_exit(void)
{
kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_debugfs_fini();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();

kfd_mqd_manager_v9.c

@@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac
{
struct v9_mqd *m = get_mqd(mqd);
*ctl_stack_size = m->cp_hqd_cntl_stack_size;
*ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
@@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
void *mqd,
void *mqd_dst,
void *ctl_stack_dst)
{
struct v9_mqd *m;
int xcc;
uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
m = get_mqd(mqd + size * xcc);
checkpoint_mqd(mm, m,
(uint8_t *)mqd_dst + sizeof(*m) * xcc,
(uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
}
}
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
@@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
const void *mqd_src,
const void *ctl_stack_src, u32 ctl_stack_size)
{
restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size);
if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) {
struct v9_mqd *m;
struct kfd_mem_obj xcc_mqd_mem_obj;
u32 mqd_ctl_stack_size;
struct v9_mqd *m;
u32 num_xcc;
int xcc;
m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
m->cp_hqd_pq_doorbell_control |= 1 <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
uint64_t offset = mm->mqd_stride(mm, qp);
mm->dev->dqm->current_logical_xcc_start++;
num_xcc = NUM_XCC(mm->dev->xcc_mask);
mqd_ctl_stack_size = ctl_stack_size / num_xcc;
memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
/* Set the MQD pointer and gart address to XCC0 MQD */
*mqd = mqd_mem_obj->cpu_ptr;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
for (xcc = 0; xcc < num_xcc; xcc++) {
get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
restore_mqd(mm, (void **)&m,
&xcc_mqd_mem_obj,
NULL,
qp,
(uint8_t *)mqd_src + xcc * sizeof(*m),
(uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
mqd_ctl_stack_size);
}
}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
@@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
mqd->restore_mqd = restore_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
mqd->restore_mqd = restore_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->restore_mqd = restore_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
}
break;
case KFD_MQD_TYPE_HIQ:

kfd_process_queue_manager.c

@@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
/* data stored in this order: priv_data, mqd, ctl_stack */
/*
* data stored in this order:
* priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
*/
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
@@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p,
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
struct kfd_criu_queue_priv_data *q_data)
struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
@@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
qp->ctl_stack_size = q_data->ctl_stack_size;
if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
else
qp->ctl_stack_size = q_data->ctl_stack_size;
qp->type = q_data->type;
qp->format = q_data->format;
}
@@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
/* data stored in this order: mqd, ctl_stack */
/*
* data stored in this order:
* mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
*/
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
set_queue_properties_from_criu(&qp, q_data);
set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));
print_queue_properties(&qp);
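
Given the layout spelled out in the comments above (priv_data, then all
per-XCC MQDs, then all per-XCC control stacks), offsets into the
checkpoint blob fall out mechanically. A hedged sketch with
hypothetical helper names, not the kernel's code:

#include <stddef.h>
#include <stdint.h>

/* Blob layout: priv_data | mqd[0..n-1] | ctl_stack[0..n-1] */
static void *blob_mqd(void *blob, size_t priv_size, size_t mqd_size,
		      unsigned int i)
{
	return (uint8_t *)blob + priv_size + (size_t)i * mqd_size;
}

static void *blob_ctl_stack(void *blob, size_t priv_size, size_t mqd_size,
			    size_t ctl_size, unsigned int num_xcc,
			    unsigned int i)
{
	return (uint8_t *)blob + priv_size + (size_t)num_xcc * mqd_size +
	       (size_t)i * ctl_size;
}
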

amdgpu_dm.c

@@ -4756,16 +4756,16 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */
/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
static inline u32 scale_input_to_fw(int min, int max, u64 input)
{
return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min);
return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
}
/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */
/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
static inline u32 scale_fw_to_input(int min, int max, u64 input)
{
return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL);
return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
}
static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
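
The bug behind the revert above was an asymmetric pair of rescalers:
both directions have to divide by the same full-scale constant, or a
set-then-get round trip drifts. A small user-space model of the two
helpers (MAX_LEVEL stands in for AMDGPU_MAX_BL_LEVEL, whose exact value
does not matter for the round-trip property; the kernel uses
DIV_ROUND_CLOSEST_ULL for the rounding division):

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL 255	/* placeholder for AMDGPU_MAX_BL_LEVEL */

/* map an offset in [0..max-min] onto [0..MAX_LEVEL], rounding */
static uint32_t scale_input_to_fw(int min, int max, uint64_t input)
{
	return (input * MAX_LEVEL + (max - min) / 2) / (max - min);
}

/* inverse: map [0..MAX_LEVEL] back into the panel's [min..max] */
static uint32_t scale_fw_to_input(int min, int max, uint64_t input)
{
	return min + (input * (max - min) + MAX_LEVEL / 2) / MAX_LEVEL;
}

int main(void)
{
	int min = 12, max = 512;	/* hypothetical panel range */
	int user = 312;			/* absolute brightness request */
	uint32_t fw = scale_input_to_fw(min, max, user - min);

	printf("user %d -> fw %u -> user %u\n",
	       user, fw, scale_fw_to_input(min, max, fw));
	return 0;
}
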

amdgpu_dm_crtc.c

@@ -661,6 +661,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
struct drm_plane_state *primary_state;
/* Pull in primary plane for correct VRR handling */
primary_state = drm_atomic_get_plane_state(state, crtc->primary);
if (IS_ERR(primary_state))
return PTR_ERR(primary_state);
}
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
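
The fix works because drm_atomic_get_plane_state() does more than
return a pointer: it attaches the plane to the atomic transaction, so
the primary plane gets re-checked and programmed together with the
CRTC. The same pattern in a generic atomic-check hook (sketch only,
hypothetical function name):

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <linux/err.h>

static int demo_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *primary_state;

	/* pulls crtc->primary into this commit as a side effect */
	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	return 0;
}
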

clk_mgr.c

@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif

dce_clk_mgr.c

@@ -245,6 +245,11 @@ int dce_set_clock(
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
/* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */
if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 ||
clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4)
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0;
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;

dc.c

@@ -938,17 +938,18 @@ static void dc_destruct(struct dc *dc)
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
if (dc->ctx->gpio_service)
dal_gpio_service_destroy(&dc->ctx->gpio_service);
if (dc->ctx) {
if (dc->ctx->gpio_service)
dal_gpio_service_destroy(&dc->ctx->gpio_service);
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
kfree(dc->ctx->logger);
dc_perf_trace_destroy(&dc->ctx->perf_trace);
kfree(dc->ctx->logger);
dc_perf_trace_destroy(&dc->ctx->perf_trace);
kfree(dc->ctx);
dc->ctx = NULL;
kfree(dc->ctx);
dc->ctx = NULL;
}
kfree(dc->bw_vbios);
dc->bw_vbios = NULL;

dce60_resource.c

@@ -373,7 +373,7 @@ static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
.num_stream_encoder = 6,
.num_pll = 2,
.num_pll = 3,
.num_ddc = 6,
};
@@ -389,7 +389,7 @@ static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
.num_stream_encoder = 2,
.num_pll = 2,
.num_pll = 3,
.num_ddc = 2,
};
@@ -973,21 +973,24 @@ static bool dce60_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
/* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clk_src_count = 1;
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
@@ -1365,21 +1368,24 @@ static bool dce64_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
/* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
pool->base.clk_src_count = 1;
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {

amdgpu_smu.c

@@ -77,6 +77,9 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
static int smu_od_edit_dpm_table(void *handle,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -2195,6 +2198,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2226,6 +2230,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
if (smu->current_power_limit) {
ret = smu_set_power_limit(smu, smu->current_power_limit);
if (ret && ret != -EOPNOTSUPP)
return ret;
}
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
if (ret)
return ret;
}
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;

intel_cx0_phy.c

@@ -3239,14 +3239,22 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
bool enable = intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder),
crtc_state);
intel_wakeref_t wakeref;
int i;
u8 owned_lane_mask;
if (DISPLAY_VER(display) < 20)
if (DISPLAY_VER(display) < 20 ||
!intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state))
return;
owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
wakeref = intel_cx0_phy_transaction_begin(encoder);
if (intel_encoder_is_c10phy(encoder))
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0,
C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
for (i = 0; i < 4; i++) {
int tx = i % 2 + 1;
u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
@@ -3256,9 +3264,10 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0),
CONTROL0_MAC_TRANSMIT_LFPS,
enable ? CONTROL0_MAC_TRANSMIT_LFPS : 0,
MB_WRITE_COMMITTED);
CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED);
}
intel_cx0_phy_transaction_end(encoder, wakeref);
}
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)

drivers/gpu/drm/xe/Kconfig

@@ -5,6 +5,7 @@ config DRM_XE
depends on KUNIT || !KUNIT
depends on INTEL_VSEC || !INTEL_VSEC
depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs

xe_device.c

@@ -802,10 +802,6 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
err = xe_devcoredump_init(xe);
if (err)
return err;
/*
* From here on, if a step fails, make sure a Driver-FLR is triggereed
*/
@@ -870,6 +866,10 @@ int xe_device_probe(struct xe_device *xe)
XE_WA(xe->tiles->media_gt, 15015404425_disable))
XE_DEVICE_WA_DISABLE(xe, 15015404425);
err = xe_devcoredump_init(xe);
if (err)
return err;
xe_nvm_init(xe);
err = xe_heci_gsc_init(xe);

xe_gt_sriov_pf.c

@@ -16,6 +16,7 @@
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_pm.h"
@@ -47,9 +48,16 @@ static int pf_alloc_metadata(struct xe_gt *gt)
static void pf_init_workers(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}
static void pf_fini_workers(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
disable_work_sync(&gt->sriov.pf.workers.restart);
}
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -79,6 +87,21 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
static void pf_fini_action(void *arg)
{
struct xe_gt *gt = arg;
pf_fini_workers(gt);
}
static int pf_init_late(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
xe_gt_assert(gt, IS_SRIOV_PF(xe));
return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
}
/**
* xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -95,7 +118,15 @@ int xe_gt_sriov_pf_init(struct xe_gt *gt)
if (err)
return err;
return xe_gt_sriov_pf_migration_init(gt);
err = xe_gt_sriov_pf_migration_init(gt);
if (err)
return err;
err = pf_init_late(gt);
if (err)
return err;
return 0;
}
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
@@ -230,3 +261,27 @@ void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
pf_queue_restart(gt);
}
static void pf_flush_restart(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
flush_work(&gt->sriov.pf.workers.restart);
}
/**
* xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
* @gt: the &xe_gt
*
* This function can only be called on PF.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
{
/* don't wait if there is another ongoing reset */
if (xe_guc_read_stopped(&gt->uc.guc))
return -EBUSY;
pf_flush_restart(gt);
return 0;
}
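
The worker teardown above relies on the devm action pattern:
devm_add_action_or_reset() registers a callback that runs automatically
at driver unbind, or immediately if the registration itself fails. A
minimal sketch with hypothetical names:

#include <linux/device.h>
#include <linux/workqueue.h>

struct demo_pf {			/* hypothetical driver state */
	struct work_struct restart_work;
};

static void demo_restart_func(struct work_struct *w)
{
	/* ... restart handling ... */
}

static void demo_fini(void *arg)
{
	struct demo_pf *pf = arg;

	/* like pf_fini_workers(): make sure the worker cannot run again */
	disable_work_sync(&pf->restart_work);
}

static int demo_init(struct device *dev, struct demo_pf *pf)
{
	INIT_WORK(&pf->restart_work, demo_restart_func);

	/* demo_fini() runs at unbind, or right away if this call fails */
	return devm_add_action_or_reset(dev, demo_fini, pf);
}
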

xe_gt_sriov_pf.h

@@ -11,6 +11,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);

xe_gt_sriov_pf_debugfs.c

@@ -22,6 +22,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_pm.h"
#include "xe_sriov_pf.h"
/*
* /sys/kernel/debug/dri/0/
@@ -205,7 +206,8 @@ static int CONFIG##_set(void *data, u64 val) \
return -EOVERFLOW; \
\
xe_pm_runtime_get(xe); \
err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
err = xe_sriov_pf_wait_ready(xe) ?: \
xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
xe_pm_runtime_put(xe); \
\
return err; \
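
One detail worth noting in the macro above: the a ?: b form is GNU C's
conditional with an omitted middle operand. It yields a when a is
non-zero and only otherwise evaluates b, so here the config write runs
only if the readiness wait returned 0. Spelled out with hypothetical
helpers:

/* hypothetical helpers returning 0 or a negative errno */
int first_step(void);
int second_step(void);

/* GNU ?: chaining, as in the macro above */
static int chained(void)
{
	return first_step() ?: second_step();
}

/* equivalent long-hand form */
static int long_hand(void)
{
	int err = first_step();

	if (!err)
		err = second_step();
	return err;
}
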

xe_hw_engine.c

@@ -1817,6 +1817,12 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
str_yes_no(snapshot->kernel_reserved));
for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
/*
* FIXME: During devcoredump print we should avoid accessing the
* driver pointers for gt or engine. Printing should be done only
* using the snapshot captured. Here we are accessing the gt
* pointer. It should be fixed.
*/
list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
capture_class, false);
snapshot_print_by_list_order(snapshot, p, type, list);

xe_module.c

@@ -27,6 +27,8 @@
#define DEFAULT_PROBE_DISPLAY true
#define DEFAULT_VRAM_BAR_SIZE 0
#define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE
#define DEFAULT_MAX_VFS ~0
#define DEFAULT_MAX_VFS_STR "unlimited"
#define DEFAULT_WEDGED_MODE 1
#define DEFAULT_SVM_NOTIFIER_SIZE 512
@@ -34,6 +36,9 @@ struct xe_modparam xe_modparam = {
.probe_display = DEFAULT_PROBE_DISPLAY,
.guc_log_level = DEFAULT_GUC_LOG_LEVEL,
.force_probe = DEFAULT_FORCE_PROBE,
#ifdef CONFIG_PCI_IOV
.max_vfs = DEFAULT_MAX_VFS,
#endif
.wedged_mode = DEFAULT_WEDGED_MODE,
.svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE,
/* the rest are 0 by default */
@@ -79,7 +84,8 @@ MODULE_PARM_DESC(force_probe,
module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
MODULE_PARM_DESC(max_vfs,
"Limit number of Virtual Functions (VFs) that could be managed. "
"(0 = no VFs [default]; N = allow up to N VFs)");
"(0=no VFs; N=allow up to N VFs "
"[default=" DEFAULT_MAX_VFS_STR "])");
#endif
module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
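
With the new default of ~0 ("unlimited"), the module parameter no
longer has to be set just to allow VFs; the effective cap simply
degenerates to whatever the PCI SR-IOV capability advertises. A hedged
sketch of that clamping (hypothetical helper, not the driver's exact
code):

#include <linux/minmax.h>
#include <linux/pci.h>

static unsigned int effective_vf_limit(struct pci_dev *pdev,
				       unsigned int max_vfs_param)
{
	/* totalvfs comes from the SR-IOV capability in config space */
	unsigned int totalvfs = pci_sriov_get_totalvfs(pdev);

	return min(max_vfs_param, totalvfs);
}
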

xe_pci_sriov.c

@@ -12,6 +12,7 @@
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"
@@ -138,6 +139,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_assert(xe, num_vfs <= total_vfs);
xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));
err = xe_sriov_pf_wait_ready(xe);
if (err)
goto out;
/*
* We must hold additional reference to the runtime PM to keep PF in D0
* during VFs lifetime, as our VFs do not implement the PM capability.
@@ -169,7 +174,7 @@ failed:
failed:
pf_unprovision_vfs(xe, num_vfs);
xe_pm_runtime_put(xe);
out:
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
num_vfs, str_plural(num_vfs), ERR_PTR(err));
return err;

xe_sriov_pf.c

@@ -9,6 +9,7 @@
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
@@ -102,6 +103,32 @@ int xe_sriov_pf_init_early(struct xe_device *xe)
return 0;
}
/**
* xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
* @xe: the &xe_device to test
*
* This function can only be called on PF.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_sriov_pf_wait_ready(struct xe_device *xe)
{
struct xe_gt *gt;
unsigned int id;
int err;
if (xe_device_wedged(xe))
return -ECANCELED;
for_each_gt(gt, xe, id) {
err = xe_gt_sriov_pf_wait_ready(gt);
if (err)
return err;
}
return 0;
}
/**
* xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
* @xe: the &xe_device to print info from

xe_sriov_pf.h

@@ -15,6 +15,7 @@ struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
int xe_sriov_pf_wait_ready(struct xe_device *xe);
void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else