iommu/amd: Introduce struct ivhd_dte_flags to store persistent DTE flags

During early initialization, the driver parses the IVRS IVHD block to get the
list of downstream devices along with their DTE flags (i.e. INITPass, EIntPass,
NMIPass, SysMgt, Lint0Pass, Lint1Pass). This information is currently stored
in the device's DTE and needs to be preserved when clearing and configuring
each DTE, which makes it difficult to manage.

Introduce struct ivhd_dte_flags to store the IVHD DTE settings for a device or
a range of devices. Entries are added to amd_ivhd_dev_flags_list during the
initial IVHD parsing.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20241118054937.5203-4-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
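
For context, a minimal sketch of how a later consumer could retrieve the preserved settings for a single device. Only struct ivhd_dte_flags, amd_ivhd_dev_flags_list, and the for_each_ivhd_dte_flags() iterator come from this patch; the helper name find_ivhd_dte_flags() and its use are illustrative assumptions, not part of this change:

/* Illustrative only: look up the persistent IVHD flags covering @devid in
 * IOMMU segment @segid, so the preserved bits in e->dte can be re-applied
 * when the device's DTE is cleared and reprogrammed. Assumes kernel context
 * with the definitions added by this patch.
 */
static struct ivhd_dte_flags *find_ivhd_dte_flags(u16 segid, u16 devid)
{
	struct ivhd_dte_flags *e;

	for_each_ivhd_dte_flags(e) {
		if (e->segid == segid &&
		    e->devid_first <= devid && devid <= e->devid_last)
			return e;
	}
	return NULL;
}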

drivers/iommu/amd/amd_iommu_types.h

@@ -220,6 +220,8 @@
 #define DEV_ENTRY_EX 0x67
 #define DEV_ENTRY_SYSMGT1 0x68
 #define DEV_ENTRY_SYSMGT2 0x69
+#define DTE_DATA1_SYSMGT_MASK GENMASK_ULL(41, 40)
+
 #define DEV_ENTRY_IRQ_TBL_EN 0x80
 #define DEV_ENTRY_INIT_PASS 0xb8
 #define DEV_ENTRY_EINT_PASS 0xb9
@@ -516,6 +518,9 @@ extern struct kmem_cache *amd_iommu_irq_cache;
 #define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
 	list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
 
+#define for_each_ivhd_dte_flags(entry) \
+	list_for_each_entry((entry), &amd_ivhd_dev_flags_list, list)
+
 struct amd_iommu;
 struct iommu_domain;
 struct irq_domain;
@@ -884,6 +889,17 @@ struct dev_table_entry {
 	u64 data[4];
 };
 
+/*
+ * Structure to store persistent DTE flags from IVHD
+ */
+struct ivhd_dte_flags {
+	struct list_head list;
+	u16 segid;
+	u16 devid_first;
+	u16 devid_last;
+	struct dev_table_entry dte;
+};
+
 /*
  * One entry for unity mappings parsed out of the ACPI table.
  */

drivers/iommu/amd/amd_iommu_init.c

@@ -174,8 +174,8 @@ bool amd_iommu_snp_en;
 EXPORT_SYMBOL(amd_iommu_snp_en);
 
 LIST_HEAD(amd_iommu_pci_seg_list);	/* list of all PCI segments */
-LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
-					   system */
+LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the system */
+LIST_HEAD(amd_ivhd_dev_flags_list);	/* list of all IVHD device entry settings */
 
 /* Number of IOMMUs present in the system */
 static int amd_iommus_present;
@@ -984,6 +984,14 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
 }
 
 /* sets a specific bit in the device table entry. */
+static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
+{
+	int i = (bit >> 6) & 0x03;
+	int _bit = bit & 0x3f;
+
+	dte->data[i] |= (1UL << _bit);
+}
+
 static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
 				u16 devid, u8 bit)
 {
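
As a side note on the new set_dte_bit() helper above: it treats the 256-bit DTE as four 64-bit words and splits the flat DEV_ENTRY_* bit number into a word index and a bit offset. The standalone sketch below (plain C for illustration; the show()/main() harness is not kernel code) replays that arithmetic for two of the bit numbers used in this patch, showing that DEV_ENTRY_SYSMGT1 (0x68) lands in data[1] at bit 40, which is exactly what the new DTE_DATA1_SYSMGT_MASK (GENMASK_ULL(41, 40)) covers:

#include <stdio.h>

/* Same index math as set_dte_bit(): a flat DTE bit number selects one of
 * the four 64-bit words data[0..3] and a bit position inside that word. */
static void show(const char *name, unsigned int bit)
{
	int i = (bit >> 6) & 0x03;   /* which 64-bit word of the DTE */
	int _bit = bit & 0x3f;       /* bit offset within that word */

	printf("%-20s 0x%02x -> data[%d] |= 1ULL << %d\n", name, bit, i, _bit);
}

int main(void)
{
	show("DEV_ENTRY_SYSMGT1", 0x68);   /* data[1], bit 40 (SysMgt field) */
	show("DEV_ENTRY_INIT_PASS", 0xb8); /* data[2], bit 56 */
	return 0;
}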
@@ -1136,6 +1144,19 @@ static bool copy_device_table(void)
 	return true;
 }
 
+static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last)
+{
+	struct ivhd_dte_flags *e;
+
+	for_each_ivhd_dte_flags(e) {
+		if ((e->segid == segid) &&
+		    (e->devid_first == first) &&
+		    (e->devid_last == last))
+			return true;
+	}
+	return false;
+}
+
 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
 {
 	int sysmgt;
@@ -1151,27 +1172,66 @@ void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
  * This function takes the device specific flags read from the ACPI
  * table and sets up the device table entry with that information
  */
+static void __init
+set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last,
+			      u32 flags, u32 ext_flags)
+{
+	int i;
+	struct dev_table_entry dte = {};
+
+	/* Parse IVHD DTE setting flags and store information */
+	if (flags) {
+		struct ivhd_dte_flags *d;
+
+		if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last))
+			return;
+
+		d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL);
+		if (!d)
+			return;
+
+		pr_debug("%s: devid range %#x:%#x\n", __func__, first, last);
+
+		if (flags & ACPI_DEVFLAG_INITPASS)
+			set_dte_bit(&dte, DEV_ENTRY_INIT_PASS);
+		if (flags & ACPI_DEVFLAG_EXTINT)
+			set_dte_bit(&dte, DEV_ENTRY_EINT_PASS);
+		if (flags & ACPI_DEVFLAG_NMI)
+			set_dte_bit(&dte, DEV_ENTRY_NMI_PASS);
+		if (flags & ACPI_DEVFLAG_SYSMGT1)
+			set_dte_bit(&dte, DEV_ENTRY_SYSMGT1);
+		if (flags & ACPI_DEVFLAG_SYSMGT2)
+			set_dte_bit(&dte, DEV_ENTRY_SYSMGT2);
+		if (flags & ACPI_DEVFLAG_LINT0)
+			set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS);
+		if (flags & ACPI_DEVFLAG_LINT1)
+			set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS);
+
+		/* Apply erratum 63, which needs info in initial_dte */
+		if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1)
+			dte.data[0] |= DTE_FLAG_IW;
+
+		memcpy(&d->dte, &dte, sizeof(dte));
+		d->segid = iommu->pci_seg->id;
+		d->devid_first = first;
+		d->devid_last = last;
+		list_add_tail(&d->list, &amd_ivhd_dev_flags_list);
+	}
+
+	for (i = first; i <= last; i++) {
+		if (flags) {
+			struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+			memcpy(&dev_table[i], &dte, sizeof(dte));
+		}
+		amd_iommu_set_rlookup_table(iommu, i);
+	}
+}
+
 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
 					   u16 devid, u32 flags, u32 ext_flags)
 {
-	if (flags & ACPI_DEVFLAG_INITPASS)
-		set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
-	if (flags & ACPI_DEVFLAG_EXTINT)
-		set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
-	if (flags & ACPI_DEVFLAG_NMI)
-		set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
-	if (flags & ACPI_DEVFLAG_SYSMGT1)
-		set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
-	if (flags & ACPI_DEVFLAG_SYSMGT2)
-		set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
-	if (flags & ACPI_DEVFLAG_LINT0)
-		set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
-	if (flags & ACPI_DEVFLAG_LINT1)
-		set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
-
-	amd_iommu_apply_erratum_63(iommu, devid);
-
-	amd_iommu_set_rlookup_table(iommu, devid);
+	set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags);
 }
 
 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
@@ -1332,9 +1392,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 		case IVHD_DEV_ALL:
 
 			DUMP_printk("  DEV_ALL\t\t\tsetting: %#02x\n", e->flags);
 
-			for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
-				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
+			set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0);
 			break;
 		case IVHD_DEV_SELECT:
@@ -1428,14 +1486,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 			devid = e->devid;
 
 			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
-				if (alias) {
+				if (alias)
 					pci_seg->alias_table[dev_i] = devid_to;
-					set_dev_entry_from_acpi(iommu,
-							devid_to, flags, ext_flags);
-				}
-				set_dev_entry_from_acpi(iommu, dev_i,
-							flags, ext_flags);
 			}
+			set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
+			set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
 			break;
 		case IVHD_DEV_SPECIAL: {
 			u8 handle, type;