IOMMU Updates for Linux v6.18:
Including:
- Intel VT-d:
- IOMMU driver updated to the latest VT-d specification.
- Don't enable PRS if PDS isn't supported.
- Replace snprintf with scnprintf.
- Fix legacy mode page table dump through debugfs.
- Miscellaneous cleanups.
- AMD-Vi:
- Support kdump boot when SNP is enabled.
- Apple-DART:
- 4-level page-table support.
- RISC-V IOMMU:
- ACPI support.
- Small number of miscellaneous cleanups and fixes.
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmjeRh4ACgkQK/BELZcB
GuMiWw/+PSR0m9bD3C0b3AGfYWcVM4qTiGKmE4DAPGUw8H0YiN21XMXS5rYQdXQJ
3lRXh26wHJfjvIMPDRHofsvl5dI5gAITO1TUrvv5fcCBKxXA706MS6A30qCopLBs
OJFzqG8ZLi3X+ROLhtf+m4B/J1JjORuGUlYGlmY4S+Ye3xDeDkhwF6ju5GlmKAwj
+GmkL4tt1MxFsk1FzX0H3XwlrYMkeS9tkdRrrVfEOoWsEIiETbt9lT19gOQ7JJtX
y0mqOoPc1Jgi8lO58VU7jsXUY7uaLHDqcm62oA1H7Gvag7heDyPWKI7QUrC1vTdY
QFzLqLIGFDDD/yNXvgc/0ETAFurJw6ePQE4NmVIXi6F4XRqU3E5IVEPm3/RHwvt9
S8pXCUKfo6bVSQxnszz4BPtCk/bo6w5N/fDuKmgxYHQDKJ1NlC6Q96FcpSeXzGLQ
D76VRH1GRZCZG/nWCiG38UlWQr2RvACaqKsFomqc0S2zp+Bo3aob3owyGtQ+QDIy
B/5NcS6lbiR7CptQAOzpymn+uxBYrqWcCd3TPuVqBlxHT560yaAWOwqyWG5gtNWM
2Vtrg1/M6wFGjwEgUz1FMKgtUoaQzbBtSFkeayd0r1UzKewVdXz33Cw6TnH2N1Vj
hcKvK+ouMjauG7JGGV1saWpz4EeCbI91NC+aKEv3/c7if4ZcDb8=
=S/Vk
-----END PGP SIGNATURE-----
Merge tag 'iommu-updates-v6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux
Pull iommu updates from Joerg Roedel:
- Intel VT-d:
- IOMMU driver updated to the latest VT-d specification
- Don't enable PRS if PDS isn't supported
- Replace snprintf with scnprintf
- Fix legacy mode page table dump through debugfs
- Miscellaneous cleanups
- AMD-Vi:
- Support kdump boot when SNP is enabled
- Apple-DART:
- 4-level page-table support
- RISC-V IOMMU:
- ACPI support
- Small number of miscellaneous cleanups and fixes
* tag 'iommu-updates-v6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux: (22 commits)
iommu/vt-d: Disallow dirty tracking if incoherent page walk
iommu/vt-d: debugfs: Avoid dumping context command register
iommu/vt-d: Removal of Advanced Fault Logging
iommu/vt-d: PRS isn't usable if PDS isn't supported
iommu/vt-d: Remove LPIG from page group response descriptor
iommu/vt-d: Drop unused cap_super_offset()
iommu/vt-d: debugfs: Fix legacy mode page table dump logic
iommu/vt-d: Replace snprintf with scnprintf in dmar_latency_snapshot()
iommu/io-pgtable-dart: Fix off by one error in table index check
iommu/riscv: Add ACPI support
ACPI: scan: Add support for RISC-V in acpi_iommu_configure_id()
ACPI: RISC-V: Add support for RIMT
iommu/omap: Use int type to store negative error codes
iommu/apple-dart: Clear stream error indicator bits for T8110 DARTs
iommu/amd: Skip enabling command/event buffers for kdump
crypto: ccp: Skip SEV and SNP INIT for kdump boot
iommu/amd: Reuse device table for kdump
iommu/amd: Add support to remap/unmap IOMMU buffers for kdump
iommu/apple-dart: Add 4-level page table support
iommu/io-pgtable-dart: Add 4-level page table support
...
commit bed0653fe2
@@ -347,6 +347,7 @@ L: linux-acpi@vger.kernel.org
L: linux-riscv@lists.infradead.org
S: Maintained
F: drivers/acpi/riscv/
F: include/linux/acpi_rimt.h

ACPI PCC(Platform Communication Channel) MAILBOX DRIVER
M: Sudeep Holla <sudeep.holla@arm.com>

@@ -16,6 +16,7 @@ config RISCV
	select ACPI_MCFG if (ACPI && PCI)
	select ACPI_PPTT if ACPI
	select ACPI_REDUCED_HARDWARE_ONLY if ACPI
	select ACPI_RIMT if ACPI
	select ACPI_SPCR_TABLE if ACPI
	select ARCH_DMA_DEFAULT_COHERENT
	select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION

@@ -547,6 +547,10 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif

if RISCV
source "drivers/acpi/riscv/Kconfig"
endif

config ACPI_PPTT
	bool

@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# ACPI Configuration for RISC-V
#

config ACPI_RIMT
	bool

@@ -2,3 +2,4 @@
obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
obj-$(CONFIG_ACPI_RIMT) += rimt.o

@@ -10,4 +10,6 @@
void __init acpi_arch_init(void)
{
	riscv_acpi_init_gsi_mapping();
	if (IS_ENABLED(CONFIG_ACPI_RIMT))
		riscv_acpi_rimt_init();
}

@@ -2,3 +2,4 @@
#include <linux/init.h>

void __init riscv_acpi_init_gsi_mapping(void);
void __init riscv_acpi_rimt_init(void);

@ -0,0 +1,520 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2024-2025, Ventana Micro Systems Inc
|
||||
* Author: Sunil V L <sunilvl@ventanamicro.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "ACPI: RIMT: " fmt
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/acpi_rimt.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include "init.h"
|
||||
|
||||
struct rimt_fwnode {
|
||||
struct list_head list;
|
||||
struct acpi_rimt_node *rimt_node;
|
||||
struct fwnode_handle *fwnode;
|
||||
};
|
||||
|
||||
static LIST_HEAD(rimt_fwnode_list);
|
||||
static DEFINE_SPINLOCK(rimt_fwnode_lock);
|
||||
|
||||
#define RIMT_TYPE_MASK(type) (1 << (type))
|
||||
#define RIMT_IOMMU_TYPE BIT(0)
|
||||
|
||||
/* Root pointer to the mapped RIMT table */
|
||||
static struct acpi_table_header *rimt_table;
|
||||
|
||||
/**
|
||||
* rimt_set_fwnode() - Create rimt_fwnode and use it to register
|
||||
* iommu data in the rimt_fwnode_list
|
||||
*
|
||||
* @rimt_node: RIMT table node associated with the IOMMU
|
||||
* @fwnode: fwnode associated with the RIMT node
|
||||
*
|
||||
* Returns: 0 on success
|
||||
* <0 on failure
|
||||
*/
|
||||
static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
|
||||
struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct rimt_fwnode *np;
|
||||
|
||||
np = kzalloc(sizeof(*np), GFP_ATOMIC);
|
||||
|
||||
if (WARN_ON(!np))
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&np->list);
|
||||
np->rimt_node = rimt_node;
|
||||
np->fwnode = fwnode;
|
||||
|
||||
spin_lock(&rimt_fwnode_lock);
|
||||
list_add_tail(&np->list, &rimt_fwnode_list);
|
||||
spin_unlock(&rimt_fwnode_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
|
||||
*
|
||||
* @node: RIMT table node to be looked-up
|
||||
*
|
||||
* Returns: fwnode_handle pointer on success, NULL on failure
|
||||
*/
|
||||
static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
|
||||
{
|
||||
struct fwnode_handle *fwnode = NULL;
|
||||
struct rimt_fwnode *curr;
|
||||
|
||||
spin_lock(&rimt_fwnode_lock);
|
||||
list_for_each_entry(curr, &rimt_fwnode_list, list) {
|
||||
if (curr->rimt_node == node) {
|
||||
fwnode = curr->fwnode;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&rimt_fwnode_lock);
|
||||
|
||||
return fwnode;
|
||||
}
|
||||
|
||||
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
|
||||
void *context)
|
||||
{
|
||||
acpi_status status = AE_NOT_FOUND;
|
||||
struct device *dev = context;
|
||||
|
||||
if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
|
||||
struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
|
||||
|
||||
if (dev_is_pci(dev)) {
|
||||
struct pci_dev *pdev;
|
||||
u16 bdf;
|
||||
|
||||
pdev = to_pci_dev(dev);
|
||||
bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
|
||||
if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
|
||||
bdf == iommu_node->pcie_bdf) {
|
||||
status = AE_OK;
|
||||
} else {
|
||||
status = AE_NOT_FOUND;
|
||||
}
|
||||
} else {
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct resource *res;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (res && res->start == iommu_node->base_address)
|
||||
status = AE_OK;
|
||||
else
|
||||
status = AE_NOT_FOUND;
|
||||
}
|
||||
} else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
|
||||
struct acpi_rimt_pcie_rc *pci_rc;
|
||||
struct pci_bus *bus;
|
||||
|
||||
bus = to_pci_bus(dev);
|
||||
pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
|
||||
|
||||
/*
|
||||
* It is assumed that PCI segment numbers maps one-to-one
|
||||
* with root complexes. Each segment number can represent only
|
||||
* one root complex.
|
||||
*/
|
||||
status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
|
||||
AE_OK : AE_NOT_FOUND;
|
||||
} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
|
||||
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
struct acpi_rimt_platform_device *ncomp;
|
||||
struct device *plat_dev = dev;
|
||||
struct acpi_device *adev;
|
||||
|
||||
/*
|
||||
* Walk the device tree to find a device with an
|
||||
* ACPI companion; there is no point in scanning
|
||||
* RIMT for a device matching a platform device if
|
||||
* the device does not have an ACPI companion to
|
||||
* start with.
|
||||
*/
|
||||
do {
|
||||
adev = ACPI_COMPANION(plat_dev);
|
||||
if (adev)
|
||||
break;
|
||||
|
||||
plat_dev = plat_dev->parent;
|
||||
} while (plat_dev);
|
||||
|
||||
if (!adev)
|
||||
return status;
|
||||
|
||||
status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
dev_warn(plat_dev, "Can't get device full path name\n");
|
||||
return status;
|
||||
}
|
||||
|
||||
ncomp = (struct acpi_rimt_platform_device *)node->node_data;
|
||||
status = !strcmp(ncomp->device_name, buf.pointer) ?
|
||||
AE_OK : AE_NOT_FOUND;
|
||||
acpi_os_free(buf.pointer);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
|
||||
void *context)
|
||||
{
|
||||
struct acpi_rimt_node *rimt_node, *rimt_end;
|
||||
struct acpi_table_rimt *rimt;
|
||||
int i;
|
||||
|
||||
if (!rimt_table)
|
||||
return NULL;
|
||||
|
||||
/* Get the first RIMT node */
|
||||
rimt = (struct acpi_table_rimt *)rimt_table;
|
||||
rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
|
||||
rimt->node_offset);
|
||||
rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
|
||||
rimt_table->length);
|
||||
|
||||
for (i = 0; i < rimt->num_nodes; i++) {
|
||||
if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
|
||||
"RIMT node pointer overflows, bad table!\n"))
|
||||
return NULL;
|
||||
|
||||
if (rimt_node->type == type &&
|
||||
ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
|
||||
return rimt_node;
|
||||
|
||||
rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
|
||||
rimt_node->length);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
|
||||
{
|
||||
struct acpi_rimt_pcie_rc *pci_rc;
|
||||
|
||||
pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
|
||||
return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
|
||||
}
|
||||
|
||||
static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
|
||||
{
|
||||
struct fwnode_handle *rimt_fwnode;
|
||||
|
||||
if (!node)
|
||||
return -ENODEV;
|
||||
|
||||
rimt_fwnode = rimt_get_fwnode(node);
|
||||
|
||||
/*
|
||||
* The IOMMU drivers may not be probed yet.
|
||||
* Defer the IOMMU configuration
|
||||
*/
|
||||
if (!rimt_fwnode)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
|
||||
}
|
||||
|
||||
struct rimt_pci_alias_info {
|
||||
struct device *dev;
|
||||
struct acpi_rimt_node *node;
|
||||
const struct iommu_ops *ops;
|
||||
};
|
||||
|
||||
static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
|
||||
{
|
||||
if (rid_in < map->source_id_base ||
|
||||
(rid_in > map->source_id_base + map->num_ids))
|
||||
return -ENXIO;
|
||||
|
||||
*rid_out = map->dest_id_base + (rid_in - map->source_id_base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
|
||||
u32 *id_out, int index)
|
||||
{
|
||||
struct acpi_rimt_platform_device *plat_node;
|
||||
u32 id_mapping_offset, num_id_mapping;
|
||||
struct acpi_rimt_pcie_rc *pci_node;
|
||||
struct acpi_rimt_id_mapping *map;
|
||||
struct acpi_rimt_node *parent;
|
||||
|
||||
if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
|
||||
pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
|
||||
id_mapping_offset = pci_node->id_mapping_offset;
|
||||
num_id_mapping = pci_node->num_id_mappings;
|
||||
} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
|
||||
plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
|
||||
id_mapping_offset = plat_node->id_mapping_offset;
|
||||
num_id_mapping = plat_node->num_id_mappings;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
|
||||
return NULL;
|
||||
|
||||
map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
|
||||
id_mapping_offset + index * sizeof(*map));
|
||||
|
||||
/* Firmware bug! */
|
||||
if (!map->dest_offset) {
|
||||
pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
|
||||
node, node->type);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
|
||||
|
||||
if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
|
||||
node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
|
||||
*id_out = map->dest_id_base;
|
||||
return parent;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* RISC-V supports IOMMU as a PCI device or a platform device.
|
||||
* When it is a platform device, there should be a namespace device as
|
||||
* well along with RIMT. To create the link between RIMT information and
|
||||
* the platform device, the IOMMU driver should register itself with the
|
||||
* RIMT module. This is true for PCI based IOMMU as well.
|
||||
*/
|
||||
int rimt_iommu_register(struct device *dev)
|
||||
{
|
||||
struct fwnode_handle *rimt_fwnode;
|
||||
struct acpi_rimt_node *node;
|
||||
|
||||
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
|
||||
if (!node) {
|
||||
pr_err("Could not find IOMMU node in RIMT\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (dev_is_pci(dev)) {
|
||||
rimt_fwnode = acpi_alloc_fwnode_static();
|
||||
if (!rimt_fwnode)
|
||||
return -ENOMEM;
|
||||
|
||||
rimt_fwnode->dev = dev;
|
||||
if (!dev->fwnode)
|
||||
dev->fwnode = rimt_fwnode;
|
||||
|
||||
rimt_set_fwnode(node, rimt_fwnode);
|
||||
} else {
|
||||
rimt_set_fwnode(node, dev->fwnode);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
|
||||
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
|
||||
u32 id_in, u32 *id_out,
|
||||
u8 type_mask)
|
||||
{
|
||||
struct acpi_rimt_platform_device *plat_node;
|
||||
u32 id_mapping_offset, num_id_mapping;
|
||||
struct acpi_rimt_pcie_rc *pci_node;
|
||||
u32 id = id_in;
|
||||
|
||||
/* Parse the ID mapping tree to find specified node type */
|
||||
while (node) {
|
||||
struct acpi_rimt_id_mapping *map;
|
||||
int i, rc = 0;
|
||||
u32 map_id = id;
|
||||
|
||||
if (RIMT_TYPE_MASK(node->type) & type_mask) {
|
||||
if (id_out)
|
||||
*id_out = id;
|
||||
return node;
|
||||
}
|
||||
|
||||
if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
|
||||
pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
|
||||
id_mapping_offset = pci_node->id_mapping_offset;
|
||||
num_id_mapping = pci_node->num_id_mappings;
|
||||
} else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
|
||||
plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
|
||||
id_mapping_offset = plat_node->id_mapping_offset;
|
||||
num_id_mapping = plat_node->num_id_mappings;
|
||||
} else {
|
||||
goto fail_map;
|
||||
}
|
||||
|
||||
if (!id_mapping_offset || !num_id_mapping)
|
||||
goto fail_map;
|
||||
|
||||
map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
|
||||
id_mapping_offset);
|
||||
|
||||
/* Firmware bug! */
|
||||
if (!map->dest_offset) {
|
||||
pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
|
||||
node, node->type);
|
||||
goto fail_map;
|
||||
}
|
||||
|
||||
/* Do the ID translation */
|
||||
for (i = 0; i < num_id_mapping; i++, map++) {
|
||||
rc = rimt_id_map(map, node->type, map_id, &id);
|
||||
if (!rc)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == num_id_mapping)
|
||||
goto fail_map;
|
||||
|
||||
node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
|
||||
rc ? 0 : map->dest_offset);
|
||||
}
|
||||
|
||||
fail_map:
|
||||
/* Map input ID to output ID unchanged on mapping failure */
|
||||
if (id_out)
|
||||
*id_out = id_in;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
|
||||
u8 type_mask, int index)
|
||||
{
|
||||
struct acpi_rimt_node *parent;
|
||||
u32 id;
|
||||
|
||||
parent = rimt_node_get_id(node, &id, index);
|
||||
if (!parent)
|
||||
return NULL;
|
||||
|
||||
if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
|
||||
parent = rimt_node_map_id(parent, id, id_out, type_mask);
|
||||
else
|
||||
if (id_out)
|
||||
*id_out = id;
|
||||
|
||||
return parent;
|
||||
}
|
||||
|
||||
static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
|
||||
{
|
||||
struct rimt_pci_alias_info *info = data;
|
||||
struct acpi_rimt_node *parent;
|
||||
u32 deviceid;
|
||||
|
||||
parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
|
||||
return rimt_iommu_xlate(info->dev, parent, deviceid);
|
||||
}
|
||||
|
||||
static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
|
||||
{
|
||||
struct acpi_rimt_node *parent;
|
||||
int err = -ENODEV, i = 0;
|
||||
u32 deviceid = 0;
|
||||
|
||||
do {
|
||||
parent = rimt_node_map_platform_id(node, &deviceid,
|
||||
RIMT_IOMMU_TYPE,
|
||||
i++);
|
||||
|
||||
if (parent)
|
||||
err = rimt_iommu_xlate(dev, parent, deviceid);
|
||||
} while (parent && !err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int rimt_plat_iommu_map_id(struct device *dev,
|
||||
struct acpi_rimt_node *node,
|
||||
const u32 *in_id)
|
||||
{
|
||||
struct acpi_rimt_node *parent;
|
||||
u32 deviceid;
|
||||
|
||||
parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
|
||||
if (parent)
|
||||
return rimt_iommu_xlate(dev, parent, deviceid);
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/**
|
||||
* rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
|
||||
*
|
||||
* @dev: device to configure
|
||||
* @id_in: optional input id const value pointer
|
||||
*
|
||||
* Returns: 0 on success, <0 on failure
|
||||
*/
|
||||
int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
|
||||
{
|
||||
struct acpi_rimt_node *node;
|
||||
int err = -ENODEV;
|
||||
|
||||
if (dev_is_pci(dev)) {
|
||||
struct iommu_fwspec *fwspec;
|
||||
struct pci_bus *bus = to_pci_dev(dev)->bus;
|
||||
struct rimt_pci_alias_info info = { .dev = dev };
|
||||
|
||||
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
|
||||
if (!node)
|
||||
return -ENODEV;
|
||||
|
||||
info.node = node;
|
||||
err = pci_for_each_dma_alias(to_pci_dev(dev),
|
||||
rimt_pci_iommu_init, &info);
|
||||
|
||||
fwspec = dev_iommu_fwspec_get(dev);
|
||||
if (fwspec && rimt_pcie_rc_supports_ats(node))
|
||||
fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
|
||||
} else {
|
||||
node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
|
||||
if (!node)
|
||||
return -ENODEV;
|
||||
|
||||
err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
|
||||
rimt_plat_iommu_map(dev, node);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void __init riscv_acpi_rimt_init(void)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
/* rimt_table will be used at runtime after the rimt init,
|
||||
* so we don't need to call acpi_put_table() to release
|
||||
* the RIMT table mapping.
|
||||
*/
|
||||
status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status != AE_NOT_FOUND) {
|
||||
const char *msg = acpi_format_exception(status);
|
||||
|
||||
pr_err("Failed to get table, %s\n", msg);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
|
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/acpi_rimt.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>

@@ -1630,8 +1631,11 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
	}

	err = iort_iommu_configure_id(dev, id_in);
	if (err && err != -EPROBE_DEFER)
		err = rimt_iommu_configure_id(dev, id_in);
	if (err && err != -EPROBE_DEFER)
		err = viot_iommu_configure(dev);

	mutex_unlock(&iommu_probe_device_lock);

	return err;

@@ -28,6 +28,7 @@
#include <linux/fs_struct.h>
#include <linux/psp.h>
#include <linux/amd-iommu.h>
#include <linux/crash_dump.h>

#include <asm/smp.h>
#include <asm/cacheflush.h>

@@ -1526,6 +1527,15 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
	if (!psp_master || !psp_master->sev_data)
		return -ENODEV;

	/*
	 * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
	 * may already be initialized in the previous kernel. Since no
	 * SNP/SEV guests are run under a kdump kernel, there is no
	 * need to initialize SNP or SEV during kdump boot.
	 */
	if (is_kdump_kernel())
		return 0;

	sev = psp_master->sev_data;

	if (sev->state == SEV_STATE_INIT)

@@ -792,6 +792,11 @@ struct amd_iommu {
	u32 flags;
	volatile u64 *cmd_sem;
	atomic64_t cmd_sem_val;
	/*
	 * Track physical address to directly use it in build_completion_wait()
	 * and avoid adding any special checks and handling for kdump.
	 */
	u64 cmd_sem_paddr;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */

@ -406,6 +406,9 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
|
|||
|
||||
BUG_ON(iommu->mmio_base == NULL);
|
||||
|
||||
if (is_kdump_kernel())
|
||||
return;
|
||||
|
||||
entry = iommu_virt_to_phys(dev_table);
|
||||
entry |= (dev_table_size >> 12) - 1;
|
||||
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
|
||||
|
|
@ -646,7 +649,10 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
|
|||
|
||||
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
|
||||
{
|
||||
iommu_free_pages(pci_seg->dev_table);
|
||||
if (is_kdump_kernel())
|
||||
memunmap((void *)pci_seg->dev_table);
|
||||
else
|
||||
iommu_free_pages(pci_seg->dev_table);
|
||||
pci_seg->dev_table = NULL;
|
||||
}
|
||||
|
||||
|
|
@ -710,6 +716,26 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
|
|||
pci_seg->alias_table = NULL;
|
||||
}
|
||||
|
||||
static inline void *iommu_memremap(unsigned long paddr, size_t size)
|
||||
{
|
||||
phys_addr_t phys;
|
||||
|
||||
if (!paddr)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Obtain true physical address in kdump kernel when SME is enabled.
|
||||
* Currently, previous kernel with SME enabled and kdump kernel
|
||||
* with SME support disabled is not supported.
|
||||
*/
|
||||
phys = __sme_clr(paddr);
|
||||
|
||||
if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
|
||||
return (__force void *)ioremap_encrypted(phys, size);
|
||||
else
|
||||
return memremap(phys, size, MEMREMAP_WB);
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
|
||||
* write commands to that buffer later and the IOMMU will execute them
|
||||
|
|
@ -795,11 +821,16 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
|
|||
|
||||
BUG_ON(iommu->cmd_buf == NULL);
|
||||
|
||||
entry = iommu_virt_to_phys(iommu->cmd_buf);
|
||||
entry |= MMIO_CMD_SIZE_512;
|
||||
|
||||
memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
|
||||
&entry, sizeof(entry));
|
||||
if (!is_kdump_kernel()) {
|
||||
/*
|
||||
* Command buffer is re-used for kdump kernel and setting
|
||||
* of MMIO register is not required.
|
||||
*/
|
||||
entry = iommu_virt_to_phys(iommu->cmd_buf);
|
||||
entry |= MMIO_CMD_SIZE_512;
|
||||
memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
|
||||
&entry, sizeof(entry));
|
||||
}
|
||||
|
||||
amd_iommu_reset_cmd_buffer(iommu);
|
||||
}
|
||||
|
|
@ -850,10 +881,15 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
|
|||
|
||||
BUG_ON(iommu->evt_buf == NULL);
|
||||
|
||||
entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
|
||||
|
||||
memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
|
||||
&entry, sizeof(entry));
|
||||
if (!is_kdump_kernel()) {
|
||||
/*
|
||||
* Event buffer is re-used for kdump kernel and setting
|
||||
* of MMIO register is not required.
|
||||
*/
|
||||
entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
|
||||
memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
|
||||
&entry, sizeof(entry));
|
||||
}
|
||||
|
||||
/* set head and tail to zero manually */
|
||||
writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
|
||||
|
|
@ -942,8 +978,91 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
|
|||
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
|
||||
{
|
||||
iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
|
||||
if (!iommu->cmd_sem)
|
||||
return -ENOMEM;
|
||||
iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return iommu->cmd_sem ? 0 : -ENOMEM;
|
||||
static int __init remap_event_buffer(struct amd_iommu *iommu)
|
||||
{
|
||||
u64 paddr;
|
||||
|
||||
pr_info_once("Re-using event buffer from the previous kernel\n");
|
||||
paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
|
||||
iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
|
||||
|
||||
return iommu->evt_buf ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int __init remap_command_buffer(struct amd_iommu *iommu)
|
||||
{
|
||||
u64 paddr;
|
||||
|
||||
pr_info_once("Re-using command buffer from the previous kernel\n");
|
||||
paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK;
|
||||
iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE);
|
||||
|
||||
return iommu->cmd_buf ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu)
|
||||
{
|
||||
u64 paddr;
|
||||
|
||||
if (check_feature(FEATURE_SNP)) {
|
||||
/*
|
||||
* When SNP is enabled, the exclusion base register is used for the
|
||||
* completion wait buffer (CWB) address. Read and re-use it.
|
||||
*/
|
||||
pr_info_once("Re-using CWB buffers from the previous kernel\n");
|
||||
paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
|
||||
iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
|
||||
if (!iommu->cmd_sem)
|
||||
return -ENOMEM;
|
||||
iommu->cmd_sem_paddr = paddr;
|
||||
} else {
|
||||
return alloc_cwwb_sem(iommu);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Reuse/Remap the previous kernel's allocated completion wait
|
||||
* command and event buffers for kdump boot.
|
||||
*/
|
||||
if (is_kdump_kernel()) {
|
||||
ret = remap_or_alloc_cwwb_sem(iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = remap_command_buffer(iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = remap_event_buffer(iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
ret = alloc_cwwb_sem(iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = alloc_command_buffer(iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = alloc_event_buffer(iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init free_cwwb_sem(struct amd_iommu *iommu)
|
||||
|
|
@ -951,6 +1070,38 @@ static void __init free_cwwb_sem(struct amd_iommu *iommu)
|
|||
if (iommu->cmd_sem)
|
||||
iommu_free_pages((void *)iommu->cmd_sem);
|
||||
}
|
||||
static void __init unmap_cwwb_sem(struct amd_iommu *iommu)
|
||||
{
|
||||
if (iommu->cmd_sem) {
|
||||
if (check_feature(FEATURE_SNP))
|
||||
memunmap((void *)iommu->cmd_sem);
|
||||
else
|
||||
iommu_free_pages((void *)iommu->cmd_sem);
|
||||
}
|
||||
}
|
||||
|
||||
static void __init unmap_command_buffer(struct amd_iommu *iommu)
|
||||
{
|
||||
memunmap((void *)iommu->cmd_buf);
|
||||
}
|
||||
|
||||
static void __init unmap_event_buffer(struct amd_iommu *iommu)
|
||||
{
|
||||
memunmap(iommu->evt_buf);
|
||||
}
|
||||
|
||||
static void __init free_iommu_buffers(struct amd_iommu *iommu)
|
||||
{
|
||||
if (is_kdump_kernel()) {
|
||||
unmap_cwwb_sem(iommu);
|
||||
unmap_command_buffer(iommu);
|
||||
unmap_event_buffer(iommu);
|
||||
} else {
|
||||
free_cwwb_sem(iommu);
|
||||
free_command_buffer(iommu);
|
||||
free_event_buffer(iommu);
|
||||
}
|
||||
}
|
||||
|
||||
static void iommu_enable_xt(struct amd_iommu *iommu)
|
||||
{
|
||||
|
|
@ -982,15 +1133,12 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
|
|||
dte->data[i] |= (1UL << _bit);
|
||||
}
|
||||
|
||||
static bool __copy_device_table(struct amd_iommu *iommu)
|
||||
static bool __reuse_device_table(struct amd_iommu *iommu)
|
||||
{
|
||||
u64 int_ctl, int_tab_len, entry = 0;
|
||||
struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
|
||||
struct dev_table_entry *old_devtb = NULL;
|
||||
u32 lo, hi, devid, old_devtb_size;
|
||||
u32 lo, hi, old_devtb_size;
|
||||
phys_addr_t old_devtb_phys;
|
||||
u16 dom_id, dte_v, irq_v;
|
||||
u64 tmp;
|
||||
u64 entry;
|
||||
|
||||
/* Each IOMMU use separate device table with the same size */
|
||||
lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
|
||||
|
|
@ -1015,66 +1163,20 @@ static bool __copy_device_table(struct amd_iommu *iommu)
|
|||
pr_err("The address of old device table is above 4G, not trustworthy!\n");
|
||||
return false;
|
||||
}
|
||||
old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
|
||||
? (__force void *)ioremap_encrypted(old_devtb_phys,
|
||||
pci_seg->dev_table_size)
|
||||
: memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
|
||||
|
||||
if (!old_devtb)
|
||||
return false;
|
||||
|
||||
pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
|
||||
GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
|
||||
/*
|
||||
* Re-use the previous kernel's device table for kdump.
|
||||
*/
|
||||
pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size);
|
||||
if (pci_seg->old_dev_tbl_cpy == NULL) {
|
||||
pr_err("Failed to allocate memory for copying old device table!\n");
|
||||
memunmap(old_devtb);
|
||||
pr_err("Failed to remap memory for reusing old device table!\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
|
||||
pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
|
||||
dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
|
||||
dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
|
||||
|
||||
if (dte_v && dom_id) {
|
||||
pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
|
||||
pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
|
||||
/* Reserve the Domain IDs used by previous kernel */
|
||||
if (ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_ATOMIC) != dom_id) {
|
||||
pr_err("Failed to reserve domain ID 0x%x\n", dom_id);
|
||||
memunmap(old_devtb);
|
||||
return false;
|
||||
}
|
||||
/* If gcr3 table existed, mask it out */
|
||||
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
|
||||
tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
|
||||
pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
|
||||
tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
|
||||
pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
|
||||
}
|
||||
}
|
||||
|
||||
irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
|
||||
int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
|
||||
int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
|
||||
if (irq_v && (int_ctl || int_tab_len)) {
|
||||
if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
|
||||
(int_tab_len != DTE_INTTABLEN_512 &&
|
||||
int_tab_len != DTE_INTTABLEN_2K)) {
|
||||
pr_err("Wrong old irq remapping flag: %#x\n", devid);
|
||||
memunmap(old_devtb);
|
||||
return false;
|
||||
}
|
||||
|
||||
pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
|
||||
}
|
||||
}
|
||||
memunmap(old_devtb);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool copy_device_table(void)
|
||||
static bool reuse_device_table(void)
|
||||
{
|
||||
struct amd_iommu *iommu;
|
||||
struct amd_iommu_pci_seg *pci_seg;
|
||||
|
|
@ -1082,17 +1184,17 @@ static bool copy_device_table(void)
|
|||
if (!amd_iommu_pre_enabled)
|
||||
return false;
|
||||
|
||||
pr_warn("Translation is already enabled - trying to copy translation structures\n");
|
||||
pr_warn("Translation is already enabled - trying to reuse translation structures\n");
|
||||
|
||||
/*
|
||||
* All IOMMUs within PCI segment shares common device table.
|
||||
* Hence copy device table only once per PCI segment.
|
||||
* Hence reuse device table only once per PCI segment.
|
||||
*/
|
||||
for_each_pci_segment(pci_seg) {
|
||||
for_each_iommu(iommu) {
|
||||
if (pci_seg->id != iommu->pci_seg->id)
|
||||
continue;
|
||||
if (!__copy_device_table(iommu))
|
||||
if (!__reuse_device_table(iommu))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
|
|
@ -1655,9 +1757,7 @@ static void __init free_sysfs(struct amd_iommu *iommu)
|
|||
static void __init free_iommu_one(struct amd_iommu *iommu)
|
||||
{
|
||||
free_sysfs(iommu);
|
||||
free_cwwb_sem(iommu);
|
||||
free_command_buffer(iommu);
|
||||
free_event_buffer(iommu);
|
||||
free_iommu_buffers(iommu);
|
||||
amd_iommu_free_ppr_log(iommu);
|
||||
free_ga_log(iommu);
|
||||
iommu_unmap_mmio_space(iommu);
|
||||
|
|
@ -1821,14 +1921,9 @@ static int __init init_iommu_one_late(struct amd_iommu *iommu)
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (alloc_cwwb_sem(iommu))
|
||||
return -ENOMEM;
|
||||
|
||||
if (alloc_command_buffer(iommu))
|
||||
return -ENOMEM;
|
||||
|
||||
if (alloc_event_buffer(iommu))
|
||||
return -ENOMEM;
|
||||
ret = alloc_iommu_buffers(iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
iommu->int_enabled = false;
|
||||
|
||||
|
|
@ -2778,8 +2873,8 @@ static void early_enable_iommu(struct amd_iommu *iommu)
|
|||
* This function finally enables all IOMMUs found in the system after
|
||||
* they have been initialized.
|
||||
*
|
||||
* Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy
|
||||
* the old content of device table entries. Not this case or copy failed,
|
||||
* Or if in kdump kernel and IOMMUs are all pre-enabled, try to reuse
|
||||
* the old content of device table entries. Not this case or reuse failed,
|
||||
* just continue as normal kernel does.
|
||||
*/
|
||||
static void early_enable_iommus(void)
|
||||
|
|
@ -2787,18 +2882,25 @@ static void early_enable_iommus(void)
|
|||
struct amd_iommu *iommu;
|
||||
struct amd_iommu_pci_seg *pci_seg;
|
||||
|
||||
if (!copy_device_table()) {
|
||||
if (!reuse_device_table()) {
|
||||
/*
|
||||
* If come here because of failure in copying device table from old
|
||||
* If come here because of failure in reusing device table from old
|
||||
* kernel with all IOMMUs enabled, print error message and try to
|
||||
* free allocated old_dev_tbl_cpy.
|
||||
*/
|
||||
if (amd_iommu_pre_enabled)
|
||||
pr_err("Failed to copy DEV table from previous kernel.\n");
|
||||
if (amd_iommu_pre_enabled) {
|
||||
pr_err("Failed to reuse DEV table from previous kernel.\n");
|
||||
/*
|
||||
* Bail out early if unable to remap/reuse DEV table from
|
||||
* previous kernel if SNP enabled as IOMMU commands will
|
||||
* time out without DEV table and cause kdump boot panic.
|
||||
*/
|
||||
BUG_ON(check_feature(FEATURE_SNP));
|
||||
}
|
||||
|
||||
for_each_pci_segment(pci_seg) {
|
||||
if (pci_seg->old_dev_tbl_cpy != NULL) {
|
||||
iommu_free_pages(pci_seg->old_dev_tbl_cpy);
|
||||
memunmap((void *)pci_seg->old_dev_tbl_cpy);
|
||||
pci_seg->old_dev_tbl_cpy = NULL;
|
||||
}
|
||||
}
|
||||
|
|
@ -2808,7 +2910,7 @@ static void early_enable_iommus(void)
|
|||
early_enable_iommu(iommu);
|
||||
}
|
||||
} else {
|
||||
pr_info("Copied DEV table from previous kernel.\n");
|
||||
pr_info("Reused DEV table from previous kernel.\n");
|
||||
|
||||
for_each_pci_segment(pci_seg) {
|
||||
iommu_free_pages(pci_seg->dev_table);
|
||||
|
|
|
|||
|
|
@@ -14,6 +14,7 @@
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>

@@ -265,7 +266,7 @@ static inline int get_acpihid_device_id(struct device *dev,
		return -EINVAL;
	if (fw_bug)
		dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
			     hid_count, hid_count > 1 ? "s" : "");
			     hid_count, str_plural(hid_count));
	if (hid_count > 1)
		return -EINVAL;
	if (entry)

@@ -1195,7 +1196,7 @@ static void build_completion_wait(struct iommu_cmd *cmd,
				  struct amd_iommu *iommu,
				  u64 data)
{
	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 paddr = iommu->cmd_sem_paddr;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;

@ -122,6 +122,8 @@
|
|||
#define DART_T8110_ERROR_ADDR_LO 0x170
|
||||
#define DART_T8110_ERROR_ADDR_HI 0x174
|
||||
|
||||
#define DART_T8110_ERROR_STREAMS 0x1c0
|
||||
|
||||
#define DART_T8110_PROTECT 0x200
|
||||
#define DART_T8110_UNPROTECT 0x204
|
||||
#define DART_T8110_PROTECT_LOCK 0x208
|
||||
|
|
@ -133,6 +135,7 @@
|
|||
#define DART_T8110_TCR 0x1000
|
||||
#define DART_T8110_TCR_REMAP GENMASK(11, 8)
|
||||
#define DART_T8110_TCR_REMAP_EN BIT(7)
|
||||
#define DART_T8110_TCR_FOUR_LEVEL BIT(3)
|
||||
#define DART_T8110_TCR_BYPASS_DAPF BIT(2)
|
||||
#define DART_T8110_TCR_BYPASS_DART BIT(1)
|
||||
#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
|
||||
|
|
@ -166,22 +169,23 @@ struct apple_dart_hw {
|
|||
|
||||
int max_sid_count;
|
||||
|
||||
u64 lock;
|
||||
u64 lock_bit;
|
||||
u32 lock;
|
||||
u32 lock_bit;
|
||||
|
||||
u64 error;
|
||||
u32 error;
|
||||
|
||||
u64 enable_streams;
|
||||
u32 enable_streams;
|
||||
|
||||
u64 tcr;
|
||||
u64 tcr_enabled;
|
||||
u64 tcr_disabled;
|
||||
u64 tcr_bypass;
|
||||
u32 tcr;
|
||||
u32 tcr_enabled;
|
||||
u32 tcr_disabled;
|
||||
u32 tcr_bypass;
|
||||
u32 tcr_4level;
|
||||
|
||||
u64 ttbr;
|
||||
u64 ttbr_valid;
|
||||
u64 ttbr_addr_field_shift;
|
||||
u64 ttbr_shift;
|
||||
u32 ttbr;
|
||||
u32 ttbr_valid;
|
||||
u32 ttbr_addr_field_shift;
|
||||
u32 ttbr_shift;
|
||||
int ttbr_count;
|
||||
};
|
||||
|
||||
|
|
@ -217,6 +221,7 @@ struct apple_dart {
|
|||
u32 pgsize;
|
||||
u32 num_streams;
|
||||
u32 supports_bypass : 1;
|
||||
u32 four_level : 1;
|
||||
|
||||
struct iommu_group *sid2group[DART_MAX_STREAMS];
|
||||
struct iommu_device iommu;
|
||||
|
|
@ -305,13 +310,19 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
|
|||
}
|
||||
|
||||
static void
|
||||
apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
|
||||
apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map, int levels)
|
||||
{
|
||||
struct apple_dart *dart = stream_map->dart;
|
||||
u32 tcr = dart->hw->tcr_enabled;
|
||||
int sid;
|
||||
|
||||
if (levels == 4)
|
||||
tcr |= dart->hw->tcr_4level;
|
||||
|
||||
WARN_ON(levels != 3 && levels != 4);
|
||||
WARN_ON(levels == 4 && !dart->four_level);
|
||||
for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
|
||||
writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
|
||||
writel(tcr, dart->regs + DART_TCR(dart, sid));
|
||||
}
|
||||
|
||||
static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
|
||||
|
|
@ -569,7 +580,8 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
|
|||
for (; i < stream_map->dart->hw->ttbr_count; ++i)
|
||||
apple_dart_hw_clear_ttbr(stream_map, i);
|
||||
|
||||
apple_dart_hw_enable_translation(stream_map);
|
||||
apple_dart_hw_enable_translation(stream_map,
|
||||
pgtbl_cfg->apple_dart_cfg.n_levels);
|
||||
stream_map->dart->hw->invalidate_tlb(stream_map);
|
||||
}
|
||||
|
||||
|
|
@ -614,7 +626,7 @@ static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
|
|||
dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
|
||||
dart_domain->domain.geometry.aperture_start = 0;
|
||||
dart_domain->domain.geometry.aperture_end =
|
||||
(dma_addr_t)DMA_BIT_MASK(dart->ias);
|
||||
(dma_addr_t)DMA_BIT_MASK(pgtbl_cfg.ias);
|
||||
dart_domain->domain.geometry.force_aperture = true;
|
||||
|
||||
dart_domain->finalized = true;
|
||||
|
|
@ -807,6 +819,8 @@ static int apple_dart_of_xlate(struct device *dev,
|
|||
if (cfg_dart) {
|
||||
if (cfg_dart->pgsize != dart->pgsize)
|
||||
return -EINVAL;
|
||||
if (cfg_dart->ias != dart->ias)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cfg->supports_bypass &= dart->supports_bypass;
|
||||
|
|
@ -1077,6 +1091,9 @@ static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
|
|||
error, stream_idx, error_code, fault_name, addr);
|
||||
|
||||
writel(error, dart->regs + DART_T8110_ERROR);
|
||||
for (int i = 0; i < BITS_TO_U32(dart->num_streams); i++)
|
||||
writel(U32_MAX, dart->regs + DART_T8110_ERROR_STREAMS + 4 * i);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
|
|
@ -1137,6 +1154,7 @@ static int apple_dart_probe(struct platform_device *pdev)
|
|||
dart->ias = FIELD_GET(DART_T8110_PARAMS3_VA_WIDTH, dart_params[2]);
|
||||
dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
|
||||
dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
|
||||
dart->four_level = dart->ias > 36;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -1169,9 +1187,9 @@ static int apple_dart_probe(struct platform_device *pdev)
|
|||
|
||||
dev_info(
|
||||
&pdev->dev,
|
||||
"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
|
||||
"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d, AS %d -> %d] initialized\n",
|
||||
dart->pgsize, dart->num_streams, dart->supports_bypass,
|
||||
dart->pgsize > PAGE_SIZE);
|
||||
dart->pgsize > PAGE_SIZE, dart->ias, dart->oas);
|
||||
return 0;
|
||||
|
||||
err_sysfs_remove:
|
||||
|
|
@ -1292,6 +1310,7 @@ static const struct apple_dart_hw apple_dart_hw_t8110 = {
|
|||
.tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
|
||||
.tcr_disabled = 0,
|
||||
.tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
|
||||
.tcr_4level = DART_T8110_TCR_FOUR_LEVEL,
|
||||
|
||||
.ttbr = DART_T8110_TTBR,
|
||||
.ttbr_valid = DART_T8110_TTBR_VALID,
|
||||
|
|
|
|||
|
|
@ -62,8 +62,6 @@ static const struct iommu_regset iommu_regs_64[] = {
|
|||
IOMMU_REGSET_ENTRY(CAP),
|
||||
IOMMU_REGSET_ENTRY(ECAP),
|
||||
IOMMU_REGSET_ENTRY(RTADDR),
|
||||
IOMMU_REGSET_ENTRY(CCMD),
|
||||
IOMMU_REGSET_ENTRY(AFLOG),
|
||||
IOMMU_REGSET_ENTRY(PHMBASE),
|
||||
IOMMU_REGSET_ENTRY(PHMLIMIT),
|
||||
IOMMU_REGSET_ENTRY(IQH),
|
||||
|
|
@ -435,8 +433,21 @@ static int domain_translation_struct_show(struct seq_file *m,
|
|||
}
|
||||
pgd &= VTD_PAGE_MASK;
|
||||
} else { /* legacy mode */
|
||||
pgd = context->lo & VTD_PAGE_MASK;
|
||||
agaw = context->hi & 7;
|
||||
u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2;
|
||||
|
||||
/*
|
||||
* According to Translation Type(TT),
|
||||
* get the page table pointer(SSPTPTR).
|
||||
*/
|
||||
switch (tt) {
|
||||
case CONTEXT_TT_MULTI_LEVEL:
|
||||
case CONTEXT_TT_DEV_IOTLB:
|
||||
pgd = context->lo & VTD_PAGE_MASK;
|
||||
agaw = context->hi & 7;
|
||||
break;
|
||||
default:
|
||||
goto iommu_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
seq_printf(m, "Device %04x:%02x:%02x.%x ",
|
||||
|
|
@ -648,17 +659,11 @@ DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
|
|||
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
|
||||
struct dmar_drhd_unit *drhd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
|
||||
iommu->name, drhd->reg_base_addr);
|
||||
|
||||
ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
|
||||
if (ret < 0)
|
||||
seq_puts(m, "Failed to get latency snapshot");
|
||||
else
|
||||
seq_puts(m, debug_buf);
|
||||
seq_puts(m, "\n");
|
||||
dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
|
||||
seq_printf(m, "%s\n", debug_buf);
|
||||
}
|
||||
|
||||
static int latency_show(struct seq_file *m, void *v)
|
||||
|
|
|
|||
|
|
@@ -3817,7 +3817,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
		}

		if (info->ats_supported && ecap_prs(iommu->ecap) &&
		    pci_pri_supported(pdev))
		    ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
			info->pri_supported = 1;
	}
}

@@ -77,7 +77,6 @@
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */

@@ -173,8 +172,6 @@
#define cap_pgsel_inv(c) (((c) >> 39) & 1)

#define cap_super_page_val(c) (((c) >> 34) & 0xf)
#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
		* OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \

@@ -462,7 +459,6 @@ enum {
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)

/* Page group response descriptor QW1 */
#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)

@@ -541,7 +537,8 @@ enum {
#define pasid_supported(iommu) (sm_supported(iommu) && \
		ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu) (sm_supported(iommu) && \
		ecap_slads((iommu)->ecap))
		ecap_slads((iommu)->ecap) && \
		ecap_smpwc(iommu->ecap))
#define nested_supported(iommu) (sm_supported(iommu) && \
		ecap_nest((iommu)->ecap))

@ -113,7 +113,7 @@ static char *latency_type_names[] = {
|
|||
" svm_prq"
|
||||
};
|
||||
|
||||
int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
|
||||
void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
|
||||
{
|
||||
struct latency_statistic *lstat = iommu->perf_statistic;
|
||||
unsigned long flags;
|
||||
|
|
@ -122,7 +122,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
|
|||
memset(str, 0, size);
|
||||
|
||||
for (i = 0; i < COUNTS_NUM; i++)
|
||||
bytes += snprintf(str + bytes, size - bytes,
|
||||
bytes += scnprintf(str + bytes, size - bytes,
|
||||
"%s", latency_counter_names[i]);
|
||||
|
||||
spin_lock_irqsave(&latency_lock, flags);
|
||||
|
|
@ -130,7 +130,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
|
|||
if (!dmar_latency_enabled(iommu, i))
|
||||
continue;
|
||||
|
||||
bytes += snprintf(str + bytes, size - bytes,
|
||||
bytes += scnprintf(str + bytes, size - bytes,
|
||||
"\n%s", latency_type_names[i]);
|
||||
|
||||
for (j = 0; j < COUNTS_NUM; j++) {
|
||||
|
|
@ -156,11 +156,9 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
|
|||
break;
|
||||
}
|
||||
|
||||
bytes += snprintf(str + bytes, size - bytes,
|
||||
bytes += scnprintf(str + bytes, size - bytes,
|
||||
"%12lld", val);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&latency_lock, flags);
|
||||
|
||||
return bytes;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type);
|
|||
bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type);
|
||||
void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type,
|
||||
u64 latency);
|
||||
int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
|
||||
void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
|
||||
#else
|
||||
static inline int
|
||||
dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
|
||||
|
|
@ -64,9 +64,8 @@ dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 laten
|
|||
{
|
||||
}
|
||||
|
||||
static inline int
|
||||
static inline void
|
||||
dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_DMAR_PERF */
|
||||
|
|
|
|||
|
|
@ -151,8 +151,7 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
|
|||
QI_PGRP_PASID_P(req->pasid_present) |
|
||||
QI_PGRP_RESP_CODE(result) |
|
||||
QI_PGRP_RESP_TYPE;
|
||||
desc.qw1 = QI_PGRP_IDX(req->prg_index) |
|
||||
QI_PGRP_LPIG(req->lpig);
|
||||
desc.qw1 = QI_PGRP_IDX(req->prg_index);
|
||||
|
||||
qi_submit_sync(iommu, &desc, 1, 0);
|
||||
}
|
||||
|
|
@ -379,19 +378,17 @@ void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
|
|||
struct iommu_fault_page_request *prm;
|
||||
struct qi_desc desc;
|
||||
bool pasid_present;
|
||||
bool last_page;
|
||||
u16 sid;
|
||||
|
||||
prm = &evt->fault.prm;
|
||||
sid = PCI_DEVID(bus, devfn);
|
||||
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
|
||||
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
|
||||
|
||||
desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
|
||||
QI_PGRP_PASID_P(pasid_present) |
|
||||
QI_PGRP_RESP_CODE(msg->code) |
|
||||
QI_PGRP_RESP_TYPE;
|
||||
desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
|
||||
desc.qw1 = QI_PGRP_IDX(prm->grpid);
|
||||
desc.qw2 = 0;
|
||||
desc.qw3 = 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -27,8 +27,9 @@
|
|||
|
||||
#define DART1_MAX_ADDR_BITS 36
|
||||
|
||||
#define DART_MAX_TABLES 4
|
||||
#define DART_LEVELS 2
|
||||
#define DART_MAX_TABLE_BITS 2
|
||||
#define DART_MAX_TABLES BIT(DART_MAX_TABLE_BITS)
|
||||
#define DART_MAX_LEVELS 4 /* Includes TTBR level */
|
||||
|
||||
/* Struct accessors */
|
||||
#define io_pgtable_to_data(x) \
|
||||
|
|
@ -68,6 +69,7 @@
|
|||
struct dart_io_pgtable {
|
||||
struct io_pgtable iop;
|
||||
|
||||
int levels;
|
||||
int tbl_bits;
|
||||
int bits_per_level;
|
||||
|
||||
|
|
@ -156,44 +158,45 @@ static dart_iopte dart_install_table(dart_iopte *table,
|
|||
return old;
|
||||
}
|
||||
|
||||
static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
|
||||
static int dart_get_index(struct dart_io_pgtable *data, unsigned long iova, int level)
|
||||
{
|
||||
return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
|
||||
((1 << data->tbl_bits) - 1);
|
||||
return (iova >> (level * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
|
||||
((1 << data->bits_per_level) - 1);
|
||||
}
|
||||
|
||||
static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
|
||||
{
|
||||
|
||||
return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
|
||||
((1 << data->bits_per_level) - 1);
|
||||
}
|
||||
|
||||
static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
|
||||
static int dart_get_last_index(struct dart_io_pgtable *data, unsigned long iova)
|
||||
{
|
||||
|
||||
return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
|
||||
((1 << data->bits_per_level) - 1);
|
||||
}
|
||||
|
||||
static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
|
||||
static dart_iopte *dart_get_last(struct dart_io_pgtable *data, unsigned long iova)
|
||||
{
|
||||
dart_iopte pte, *ptep;
|
||||
int tbl = dart_get_table(data, iova);
|
||||
int level = data->levels;
|
||||
int tbl = dart_get_index(data, iova, level);
|
||||
|
||||
if (tbl >= (1 << data->tbl_bits))
|
||||
return NULL;
|
||||
|
||||
ptep = data->pgd[tbl];
|
||||
if (!ptep)
|
||||
return NULL;
|
||||
|
||||
ptep += dart_get_l1_index(data, iova);
|
||||
pte = READ_ONCE(*ptep);
|
||||
while (--level > 1) {
|
||||
ptep += dart_get_index(data, iova, level);
|
||||
pte = READ_ONCE(*ptep);
|
||||
|
||||
/* Valid entry? */
|
||||
if (!pte)
|
||||
return NULL;
|
||||
/* Valid entry? */
|
||||
if (!pte)
|
||||
return NULL;
|
||||
|
||||
/* Deref to get level 2 table */
|
||||
return iopte_deref(pte, data);
|
||||
/* Deref to get next level table */
|
||||
ptep = iopte_deref(pte, data);
|
||||
}
|
||||
|
||||
return ptep;
|
||||
}
|
||||
|
||||
static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
|
||||
|
|
@ -230,6 +233,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
|
|||
int ret = 0, tbl, num_entries, max_entries, map_idx_start;
|
||||
dart_iopte pte, *cptep, *ptep;
|
||||
dart_iopte prot;
|
||||
int level = data->levels;
|
||||
|
||||
if (WARN_ON(pgsize != cfg->pgsize_bitmap))
|
||||
return -EINVAL;
|
||||
|
|
@ -240,31 +244,36 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
|
|||
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
|
||||
return -EINVAL;
|
||||
|
||||
tbl = dart_get_table(data, iova);
|
||||
tbl = dart_get_index(data, iova, level);
|
||||
|
||||
if (tbl >= (1 << data->tbl_bits))
|
||||
return -ENOMEM;
|
||||
|
||||
ptep = data->pgd[tbl];
|
||||
ptep += dart_get_l1_index(data, iova);
|
||||
pte = READ_ONCE(*ptep);
|
||||
|
||||
/* no L2 table present */
|
||||
if (!pte) {
|
||||
cptep = iommu_alloc_pages_sz(gfp, tblsz);
|
||||
if (!cptep)
|
||||
return -ENOMEM;
|
||||
|
||||
pte = dart_install_table(cptep, ptep, 0, data);
|
||||
if (pte)
|
||||
iommu_free_pages(cptep);
|
||||
|
||||
/* L2 table is present (now) */
|
||||
while (--level > 1) {
|
||||
ptep += dart_get_index(data, iova, level);
|
||||
pte = READ_ONCE(*ptep);
|
||||
}
|
||||
|
||||
ptep = iopte_deref(pte, data);
|
||||
/* no table present */
|
||||
if (!pte) {
|
||||
cptep = iommu_alloc_pages_sz(gfp, tblsz);
|
||||
if (!cptep)
|
||||
return -ENOMEM;
|
||||
|
||||
pte = dart_install_table(cptep, ptep, 0, data);
|
||||
if (pte)
|
||||
iommu_free_pages(cptep);
|
||||
|
||||
/* L2 table is present (now) */
|
||||
pte = READ_ONCE(*ptep);
|
||||
}
|
||||
|
||||
ptep = iopte_deref(pte, data);
|
||||
}
|
||||
|
||||
/* install a leaf entries into L2 table */
|
||||
prot = dart_prot_to_pte(data, iommu_prot);
|
||||
map_idx_start = dart_get_l2_index(data, iova);
|
||||
map_idx_start = dart_get_last_index(data, iova);
|
||||
max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
|
||||
num_entries = min_t(int, pgcount, max_entries);
|
||||
ptep += map_idx_start;
|
||||
|
|
@ -293,13 +302,13 @@ static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
|
|||
if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
|
||||
return 0;
|
||||
|
||||
ptep = dart_get_l2(data, iova);
|
||||
ptep = dart_get_last(data, iova);
|
||||
|
||||
/* Valid L2 IOPTE pointer? */
|
||||
if (WARN_ON(!ptep))
|
||||
return 0;
|
||||
|
||||
unmap_idx_start = dart_get_l2_index(data, iova);
|
||||
unmap_idx_start = dart_get_last_index(data, iova);
|
||||
ptep += unmap_idx_start;
|
||||
|
||||
max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
|
||||
|
@@ -330,13 +339,13 @@ static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
         struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
         dart_iopte pte, *ptep;

-        ptep = dart_get_l2(data, iova);
+        ptep = dart_get_last(data, iova);

         /* Valid L2 IOPTE pointer? */
         if (!ptep)
                 return 0;

-        ptep += dart_get_l2_index(data, iova);
+        ptep += dart_get_last_index(data, iova);

         pte = READ_ONCE(*ptep);
         /* Found translation */
@@ -353,21 +362,37 @@ static struct dart_io_pgtable *
 dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
 {
         struct dart_io_pgtable *data;
-        int tbl_bits, bits_per_level, va_bits, pg_shift;
+        int levels, max_tbl_bits, tbl_bits, bits_per_level, va_bits, pg_shift;
+
+        /*
+         * Old 4K page DARTs can use up to 4 top-level tables.
+         * Newer ones only ever use a maximum of 1.
+         */
+        if (cfg->pgsize_bitmap == SZ_4K)
+                max_tbl_bits = DART_MAX_TABLE_BITS;
+        else
+                max_tbl_bits = 0;

         pg_shift = __ffs(cfg->pgsize_bitmap);
         bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));

         va_bits = cfg->ias - pg_shift;

-        tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
-        if ((1 << tbl_bits) > DART_MAX_TABLES)
+        levels = max_t(int, 2, (va_bits - max_tbl_bits + bits_per_level - 1) / bits_per_level);
+
+        if (levels > (DART_MAX_LEVELS - 1))
                 return NULL;

+        tbl_bits = max_t(int, 0, va_bits - (bits_per_level * levels));
+
+        if (tbl_bits > max_tbl_bits)
+                return NULL;
+
         data = kzalloc(sizeof(*data), GFP_KERNEL);
         if (!data)
                 return NULL;

+        data->levels = levels + 1; /* Table level counts as one level */
         data->tbl_bits = tbl_bits;
         data->bits_per_level = bits_per_level;
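To see what the geometry calculation above produces, here is the same arithmetic evaluated stand-alone for one assumed configuration (16 KiB pages, 8-byte PTEs, a 38-bit input address space, and a non-4K DART, so max_tbl_bits is 0); the inputs are illustrative, not taken from a real device:

#include <stdio.h>

int main(void)
{
        /* Assumed example inputs, not real hardware values. */
        int pg_shift = 14;              /* 16 KiB pages */
        int pte_size_log2 = 3;          /* 8-byte dart_iopte */
        int ias = 38;                   /* input address bits */
        int max_tbl_bits = 0;           /* newer DARTs: a single top-level table */

        int bits_per_level = pg_shift - pte_size_log2;         /* 11 */
        int va_bits = ias - pg_shift;                           /* 24 */
        int levels = (va_bits - max_tbl_bits + bits_per_level - 1) / bits_per_level;
        int tbl_bits;

        if (levels < 2)
                levels = 2;
        tbl_bits = va_bits - bits_per_level * levels;
        if (tbl_bits < 0)
                tbl_bits = 0;

        /* Mirrors data->levels = levels + 1: the top table counts as a level. */
        printf("levels=%d tbl_bits=%d data->levels=%d\n", levels, tbl_bits, levels + 1);
        return 0;
}

With these inputs the result is levels=3, tbl_bits=0, data->levels=4, i.e. exactly the 4-level case this series enables; the kernel version additionally rejects configurations where levels exceeds DART_MAX_LEVELS - 1 or tbl_bits exceeds max_tbl_bits.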
@@ -403,6 +428,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
                 return NULL;

         cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+        cfg->apple_dart_cfg.n_levels = data->levels;

         for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
                 data->pgd[i] =
@@ -422,24 +448,31 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
                 return NULL;
         }

-static void apple_dart_free_pgtable(struct io_pgtable *iop)
+static void apple_dart_free_pgtables(struct dart_io_pgtable *data, dart_iopte *ptep, int level)
 {
-        struct dart_io_pgtable *data = io_pgtable_to_data(iop);
-        dart_iopte *ptep, *end;
-        int i;
+        dart_iopte *end;
+        dart_iopte *start = ptep;

-        for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
-                ptep = data->pgd[i];
+        if (level > 1) {
                 end = (void *)ptep + DART_GRANULE(data);

                 while (ptep != end) {
                         dart_iopte pte = *ptep++;

                         if (pte)
-                                iommu_free_pages(iopte_deref(pte, data));
+                                apple_dart_free_pgtables(data, iopte_deref(pte, data), level - 1);
                 }
-                iommu_free_pages(data->pgd[i]);
         }
+        iommu_free_pages(start);
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+        struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+        int i;
+
+        for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i)
+                apple_dart_free_pgtables(data, data->pgd[i], data->levels - 1);

         kfree(data);
 }
drivers/iommu/omap-iommu.c:

@@ -1303,8 +1303,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
         struct omap_iommu_device *iommu;
         struct omap_iommu *oiommu;
         struct iotlb_entry e;
+        int ret = -EINVAL;
         int omap_pgsz;
-        u32 ret = -EINVAL;
         int i;

         omap_pgsz = bytes_to_iopgsz(bytes);
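The omap type change above matters because an unsigned variable can never usefully hold a negative error code; a minimal stand-alone illustration of the trap being removed:

#include <errno.h>
#include <stdio.h>

int main(void)
{
        unsigned int bad = -EINVAL;     /* silently wraps to a huge positive value */
        int good = -EINVAL;

        if (bad < 0)                    /* always false: unsigned values are never negative */
                printf("never printed\n");
        if (good < 0)
                printf("good reads back as an error: %d\n", good);

        printf("bad reads back as %u\n", bad);
        return 0;
}

Callers that check for ret < 0 therefore only behave as intended once ret is a signed int.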
drivers/iommu/riscv/iommu-platform.c:

@@ -10,6 +10,8 @@
  * Tomasz Jeznach <tjeznach@rivosinc.com>
  */

+#include <linux/acpi.h>
+#include <linux/irqchip/riscv-imsic.h>
 #include <linux/kernel.h>
 #include <linux/msi.h>
 #include <linux/of_irq.h>
@@ -46,6 +48,7 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
         enum riscv_iommu_igs_settings igs;
         struct device *dev = &pdev->dev;
         struct riscv_iommu_device *iommu = NULL;
+        struct irq_domain *msi_domain;
         struct resource *res = NULL;
         int vec, ret;
@@ -76,8 +79,13 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
         switch (igs) {
         case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
         case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
-                if (is_of_node(dev->fwnode))
+                if (is_of_node(dev_fwnode(dev))) {
                         of_msi_configure(dev, to_of_node(dev->fwnode));
+                } else {
+                        msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+                                                              DOMAIN_BUS_PLATFORM_MSI);
+                        dev_set_msi_domain(dev, msi_domain);
+                }

                 if (!dev_get_msi_domain(dev)) {
                         dev_warn(dev, "failed to find an MSI domain\n");
@@ -150,6 +158,12 @@ static const struct of_device_id riscv_iommu_of_match[] = {
         {},
 };

+static const struct acpi_device_id riscv_iommu_acpi_match[] = {
+        { "RSCV0004", 0 },
+        {}
+};
+MODULE_DEVICE_TABLE(acpi, riscv_iommu_acpi_match);
+
 static struct platform_driver riscv_iommu_platform_driver = {
         .probe = riscv_iommu_platform_probe,
         .remove = riscv_iommu_platform_remove,
@@ -158,6 +172,7 @@ static struct platform_driver riscv_iommu_platform_driver = {
                 .name = "riscv,iommu",
                 .of_match_table = riscv_iommu_of_match,
                 .suppress_bind_attrs = true,
+                .acpi_match_table = riscv_iommu_acpi_match,
         },
 };
drivers/iommu/riscv/iommu.c:

@@ -12,6 +12,8 @@

 #define pr_fmt(fmt) "riscv-iommu: " fmt

+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
 #include <linux/compiler.h>
 #include <linux/crash_dump.h>
 #include <linux/init.h>
@@ -1650,6 +1652,14 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
                 goto err_iodir_off;
         }

+        if (!acpi_disabled) {
+                rc = rimt_iommu_register(iommu->dev);
+                if (rc) {
+                        dev_err_probe(iommu->dev, rc, "cannot register iommu with RIMT\n");
+                        goto err_remove_sysfs;
+                }
+        }
+
         rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev);
         if (rc) {
                 dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n");
include/linux/acpi_rimt.h (new file):

@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc.
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#ifndef _ACPI_RIMT_H
+#define _ACPI_RIMT_H
+
+#ifdef CONFIG_ACPI_RIMT
+int rimt_iommu_register(struct device *dev);
+#else
+static inline int rimt_iommu_register(struct device *dev)
+{
+        return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_IOMMU_API) && defined(CONFIG_ACPI_RIMT)
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in);
+#else
+static inline int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+        return -ENODEV;
+}
+#endif
+
+#endif /* _ACPI_RIMT_H */
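The header follows the usual config-stub pattern: with CONFIG_ACPI_RIMT disabled, the inline versions still compile and simply return -ENODEV, so callers need no #ifdefs of their own. A sketch of a caller, mirroring the acpi_disabled guard used in the riscv_iommu_init() hunk above (the function name is invented and error handling is simplified):

/* Sketch only: register a device's IOMMU with RIMT when booted via ACPI. */
#include <linux/acpi.h>
#include <linux/acpi_rimt.h>
#include <linux/device.h>

static int example_register_with_rimt(struct device *dev)
{
        int rc;

        if (acpi_disabled)
                return 0;               /* DT boot: RIMT does not apply */

        rc = rimt_iommu_register(dev);  /* stub returns -ENODEV without CONFIG_ACPI_RIMT */
        if (rc)
                return dev_err_probe(dev, rc, "cannot register with RIMT\n");

        return 0;
}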
include/linux/io-pgtable.h:

@@ -180,6 +180,7 @@ struct io_pgtable_cfg {
                 struct {
                         u64     ttbr[4];
                         u32     n_ttbrs;
+                        u32     n_levels;
                 } apple_dart_cfg;

                 struct {
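The new n_levels field sits beside n_ttbrs so the DART driver can read back the geometry the page-table code chose. A minimal sketch of a consumer, assuming the generic alloc_io_pgtable_ops() entry point and the APPLE_DART2 format (everything outside this hunk is an assumption; error handling is simplified):

/* Sketch: read the DART geometry filled in by apple_dart_alloc_pgtable(). */
#include <linux/errno.h>
#include <linux/io-pgtable.h>
#include <linux/printk.h>

static int example_setup_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
        struct io_pgtable_ops *ops;

        ops = alloc_io_pgtable_ops(APPLE_DART2, cfg, cookie);
        if (!ops)
                return -ENOMEM;

        pr_info("DART: %u ttbrs, %u levels\n",
                cfg->apple_dart_cfg.n_ttbrs,
                cfg->apple_dart_cfg.n_levels);

        return 0;
}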