iommu/dma: implement DMA_ATTR_MMIO for dma_iova_link().

This will replace the hacky use of DMA_ATTR_SKIP_CPU_SYNC to avoid
touching MMIO memory, which may not have a kernel virtual address (KVA).

Also correct the caching attribute used for the IOMMU mapping: MMIO
memory must not be mapped as cacheable inside the IOMMU, or it can
cause system problems. Set IOMMU_MMIO when DMA_ATTR_MMIO is given.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/17ba63991aeaf8a80d5aca9ba5d028f1daa58f62.1757423202.git.leonro@nvidia.com
Leon Romanovsky, 2025-09-09 16:27:30 +03:00; committed by Marek Szyprowski
parent eadaa8b255
commit c288d657dd
1 changed file with 14 additions and 4 deletions

--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -724,7 +724,12 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 		unsigned long attrs)
 {
-	int prot = coherent ? IOMMU_CACHE : 0;
+	int prot;
+
+	if (attrs & DMA_ATTR_MMIO)
+		prot = IOMMU_MMIO;
+	else
+		prot = coherent ? IOMMU_CACHE : 0;
 
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
@@ -1838,12 +1843,13 @@ static int __dma_iova_link(struct device *dev, dma_addr_t addr,
 		unsigned long attrs)
 {
 	bool coherent = dev_is_dma_coherent(dev);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
 
-	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+	if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
 		arch_sync_dma_for_device(phys, size, dir);
 
 	return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
-			dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+			prot, GFP_ATOMIC);
 }
 
 static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
@@ -1949,9 +1955,13 @@ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
 		return -EIO;
 
 	if (dev_use_swiotlb(dev, size, dir) &&
-	    iova_unaligned(iovad, phys, size))
+	    iova_unaligned(iovad, phys, size)) {
+		if (attrs & DMA_ATTR_MMIO)
+			return -EPERM;
+
 		return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
 				size, dir, attrs);
+	}
 
 	return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
 			phys - iova_start_pad,
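
For illustration, a minimal sketch of how a caller might exercise the
updated path. The helper name, device, and MMIO physical address are
hypothetical; it assumes @state was already populated by
dma_iova_try_alloc(), and error handling is abbreviated (real code
would also unlink and destroy the IOVA space on failure):

#include <linux/dma-mapping.h>

/*
 * Hypothetical helper: link a physical MMIO range (for example a peer
 * device's BAR) into an already-allocated IOVA space. DMA_ATTR_MMIO
 * makes __dma_iova_link() skip CPU cache maintenance and map the range
 * with IOMMU_MMIO instead of IOMMU_CACHE.
 */
static dma_addr_t mmio_link_example(struct device *dev,
				    struct dma_iova_state *state,
				    phys_addr_t mmio_phys, size_t size)
{
	int ret;

	ret = dma_iova_link(dev, state, mmio_phys, 0, size,
			    DMA_BIDIRECTIONAL, DMA_ATTR_MMIO);
	if (ret)
		return DMA_MAPPING_ERROR;

	/* Make the newly linked mappings visible to the device. */
	ret = dma_iova_sync(dev, state, 0, size);
	if (ret)
		return DMA_MAPPING_ERROR;

	return state->addr;
}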