mirror of https://github.com/torvalds/linux.git
iommu/amd: Introduce per PCI segment alias_table
This will replace the global alias table (amd_iommu_alias_table).

Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20220706113825.25582-9-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent eb21ef0227
commit 99fc4ac3d2
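The change in a nutshell: every struct amd_iommu_pci_seg now owns an alias_table that maps PCI device ids to requestor ids; it is allocated in alloc_pci_segment() and consulted wherever the driver previously indexed the global amd_iommu_alias_table. A minimal standalone sketch of that idea follows. It is plain userspace C, not kernel code: pci_seg_model, alloc_alias_table_model() and resolve_alias() are hypothetical names that only mirror the structures and helpers touched by the diff below.

/*
 * Standalone model (assumption-labeled, not kernel code) of a per-segment
 * alias table: each PCI segment carries its own devid -> requestor-id map,
 * so the same device id on two segments can resolve differently.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define LAST_BDF 0xFFFF  /* stand-in for amd_iommu_last_bdf */

struct pci_seg_model {
	uint16_t id;            /* PCI segment number */
	uint16_t *alias_table;  /* devid -> requestor id, per segment */
};

/* Mirrors alloc_alias_table(): allocate and let every entry point to itself. */
static int alloc_alias_table_model(struct pci_seg_model *seg)
{
	seg->alias_table = malloc((LAST_BDF + 1) * sizeof(*seg->alias_table));
	if (!seg->alias_table)
		return -1;
	for (uint32_t i = 0; i <= LAST_BDF; i++)
		seg->alias_table[i] = (uint16_t)i;
	return 0;
}

/* After this commit, alias lookups go through the device's segment. */
static uint16_t resolve_alias(const struct pci_seg_model *seg, uint16_t devid)
{
	return seg->alias_table[devid];
}

int main(void)
{
	struct pci_seg_model seg0 = { .id = 0 }, seg1 = { .id = 1 };

	if (alloc_alias_table_model(&seg0) || alloc_alias_table_model(&seg1))
		return 1;

	/* Same devid, different segments, independent alias entries. */
	seg0.alias_table[0x0010] = 0x0008;  /* quirk on segment 0 only */

	printf("seg0 devid 0x0010 -> 0x%04x\n", resolve_alias(&seg0, 0x0010));
	printf("seg1 devid 0x0010 -> 0x%04x\n", resolve_alias(&seg1, 0x0010));

	free(seg0.alias_table);
	free(seg1.alias_table);
	return 0;
}

The point of the model: two segments can hold different alias entries for the same device id, which a single global table could not express.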
@@ -580,6 +580,13 @@ struct amd_iommu_pci_seg {
 	 * will be copied to. It's only be used in kdump kernel.
 	 */
 	struct dev_table_entry *old_dev_tbl_cpy;
+
+	/*
+	 * The alias table is a driver specific data structure which contains the
+	 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+	 * More than one device can share the same requestor id.
+	 */
+	u16 *alias_table;
 };
 
 /*
@@ -700,6 +700,31 @@ static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 	pci_seg->irq_lookup_table = NULL;
 }
 
+static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+	int i;
+
+	pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
+					get_order(alias_table_size));
+	if (!pci_seg->alias_table)
+		return -ENOMEM;
+
+	/*
+	 * let all alias entries point to itself
+	 */
+	for (i = 0; i <= amd_iommu_last_bdf; ++i)
+		pci_seg->alias_table[i] = i;
+
+	return 0;
+}
+
+static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+	free_pages((unsigned long)pci_seg->alias_table,
+		   get_order(alias_table_size));
+	pci_seg->alias_table = NULL;
+}
+
 /*
  * Allocates the command buffer. This buffer is per AMD IOMMU. We can
  * write commands to that buffer later and the IOMMU will execute them
@@ -1268,6 +1293,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	u32 dev_i, ext_flags = 0;
 	bool alias = false;
 	struct ivhd_entry *e;
+	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
 	u32 ivhd_size;
 	int ret;
 
@@ -1349,7 +1375,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 			devid_to = e->ext >> 8;
 			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
 			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
-			amd_iommu_alias_table[devid] = devid_to;
+			pci_seg->alias_table[devid] = devid_to;
 			break;
 		case IVHD_DEV_ALIAS_RANGE:
 
@@ -1407,7 +1433,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 			devid = e->devid;
 			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
 				if (alias) {
-					amd_iommu_alias_table[dev_i] = devid_to;
+					pci_seg->alias_table[dev_i] = devid_to;
 					set_dev_entry_from_acpi(iommu,
 						devid_to, flags, ext_flags);
 				}
@@ -1542,6 +1568,8 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id)
 
 	if (alloc_dev_table(pci_seg))
 		return NULL;
+	if (alloc_alias_table(pci_seg))
+		return NULL;
 	if (alloc_rlookup_table(pci_seg))
 		return NULL;
 
@@ -1568,6 +1596,7 @@ static void __init free_pci_segments(void)
 		list_del(&pci_seg->list);
 		free_irq_lookup_table(pci_seg);
 		free_rlookup_table(pci_seg);
+		free_alias_table(pci_seg);
 		free_dev_table(pci_seg);
 		kfree(pci_seg);
 	}
@@ -2842,7 +2871,7 @@ static void __init ivinfo_init(void *ivrs)
 static int __init early_amd_iommu_init(void)
 {
 	struct acpi_table_header *ivrs_base;
-	int i, remap_cache_sz, ret;
+	int remap_cache_sz, ret;
 	acpi_status status;
 
 	if (!amd_iommu_detected)
@@ -2913,12 +2942,6 @@ static int __init early_amd_iommu_init(void)
 	if (amd_iommu_pd_alloc_bitmap == NULL)
 		goto out;
 
-	/*
-	 * let all alias entries point to itself
-	 */
-	for (i = 0; i <= amd_iommu_last_bdf; ++i)
-		amd_iommu_alias_table[i] = i;
-
 	/*
 	 * never allocate domain 0 because its used as the non-allocated and
 	 * error value placeholder
@@ -243,7 +243,7 @@ static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
 	return 0;
 }
 
-static void clone_aliases(struct device *dev)
+static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
 {
 	struct pci_dev *pdev;
 
@@ -256,14 +256,15 @@ static void clone_aliases(struct device *dev)
 	 * part of the PCI DMA aliases if it's bus differs
 	 * from the original device.
 	 */
-	clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
+	clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
 
 	pci_for_each_dma_alias(pdev, clone_alias, NULL);
 }
 
-static void setup_aliases(struct device *dev)
+static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
 	u16 ivrs_alias;
 
 	/* For ACPI HID devices, there are no aliases */
@@ -274,12 +275,12 @@ static void setup_aliases(struct device *dev)
 	 * Add the IVRS alias to the pci aliases if it is on the same
 	 * bus. The IVRS table may know about a quirk that we don't.
 	 */
-	ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+	ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
 	if (ivrs_alias != pci_dev_id(pdev) &&
 	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
 		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
 
-	clone_aliases(dev);
+	clone_aliases(iommu, dev);
 }
 
 static struct iommu_dev_data *find_dev_data(u16 devid)
@@ -371,7 +372,7 @@ static bool check_device(struct device *dev)
 	return true;
 }
 
-static int iommu_init_device(struct device *dev)
+static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
 {
 	struct iommu_dev_data *dev_data;
 	int devid;
@@ -388,7 +389,7 @@ static int iommu_init_device(struct device *dev)
 		return -ENOMEM;
 
 	dev_data->dev = dev;
-	setup_aliases(dev);
+	setup_aliases(iommu, dev);
 
 	/*
 	 * By default we use passthrough mode for IOMMUv2 capable device.
@@ -409,7 +410,7 @@ static int iommu_init_device(struct device *dev)
 	return 0;
 }
 
-static void iommu_ignore_device(struct device *dev)
+static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
 {
 	int devid;
 
@@ -420,7 +421,7 @@ static void iommu_ignore_device(struct device *dev)
 	amd_iommu_rlookup_table[devid] = NULL;
 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 
-	setup_aliases(dev);
+	setup_aliases(iommu, dev);
 }
 
 static void amd_iommu_uninit_device(struct device *dev)
@@ -1290,6 +1291,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
 {
 	struct amd_iommu *iommu;
 	struct pci_dev *pdev = NULL;
+	struct amd_iommu_pci_seg *pci_seg;
 	u16 alias;
 	int ret;
 
@@ -1306,7 +1308,8 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
 	if (ret)
 		return ret;
 
-	alias = amd_iommu_alias_table[dev_data->devid];
+	pci_seg = iommu->pci_seg;
+	alias = pci_seg->alias_table[dev_data->devid];
 	if (alias != dev_data->devid) {
 		ret = iommu_flush_dte(iommu, alias);
 		if (ret)
@@ -1622,7 +1625,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
 	/* Update device table */
 	set_dte_entry(dev_data->devid, domain,
 		      ats, dev_data->iommu_v2);
-	clone_aliases(dev_data->dev);
+	clone_aliases(iommu, dev_data->dev);
 
 	device_flush_dte(dev_data);
 }
@@ -1638,7 +1641,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
 	clear_dte_entry(dev_data->devid);
-	clone_aliases(dev_data->dev);
+	clone_aliases(iommu, dev_data->dev);
 
 	/* Flush the DTE entry */
 	device_flush_dte(dev_data);
@@ -1821,12 +1824,12 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
 	if (dev_iommu_priv_get(dev))
 		return &iommu->iommu;
 
-	ret = iommu_init_device(dev);
+	ret = iommu_init_device(iommu, dev);
 	if (ret) {
 		if (ret != -ENOTSUPP)
 			dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
 		iommu_dev = ERR_PTR(ret);
-		iommu_ignore_device(dev);
+		iommu_ignore_device(iommu, dev);
 	} else {
 		amd_iommu_set_pci_msi_domain(dev, iommu);
 		iommu_dev = &iommu->iommu;
@@ -1877,9 +1880,13 @@ static void update_device_table(struct protection_domain *domain)
 	struct iommu_dev_data *dev_data;
 
 	list_for_each_entry(dev_data, &domain->dev_list, list) {
+		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+		if (!iommu)
+			continue;
 		set_dte_entry(dev_data->devid, domain,
 			      dev_data->ats.enabled, dev_data->iommu_v2);
-		clone_aliases(dev_data->dev);
+		clone_aliases(iommu, dev_data->dev);
 	}
 }
 
@@ -2783,6 +2790,7 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
 {
 	struct irq_remap_table *table = NULL;
 	struct irq_remap_table *new_table = NULL;
+	struct amd_iommu_pci_seg *pci_seg;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 alias;
@@ -2793,11 +2801,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
 	if (!iommu)
 		goto out_unlock;
 
+	pci_seg = iommu->pci_seg;
 	table = irq_lookup_table[devid];
 	if (table)
 		goto out_unlock;
 
-	alias = amd_iommu_alias_table[devid];
+	alias = pci_seg->alias_table[devid];
 	table = irq_lookup_table[alias];
 	if (table) {
 		set_remap_table_entry(iommu, devid, table);