mirror of https://github.com/torvalds/linux.git
iommu/dma: rename iommu_dma_*map_page to iommu_dma_*map_phys
Rename the IOMMU DMA mapping functions to better reflect their actual calling convention. The functions iommu_dma_map_page() and iommu_dma_unmap_page() are renamed to iommu_dma_map_phys() and iommu_dma_unmap_phys() respectively, as they already operate on physical addresses rather than page structures. The calling convention changes from accepting (struct page *page, unsigned long offset) to (phys_addr_t phys), which eliminates the need for page-to-physical address conversion within the functions. This renaming prepares for the broader DMA API conversion from page-based to physical address-based mapping throughout the kernel. All callers are updated to pass physical addresses directly, including dma_map_page_attrs(), scatterlist mapping functions, and DMA page allocation helpers. The change simplifies the code by removing the page_to_phys() + offset calculation that was previously done inside the IOMMU functions. Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Link: https://lore.kernel.org/r/ed172f95f8f57782beae04f782813366894e98df.1757423202.git.leonro@nvidia.com
This commit is contained in:
parent
76bb7c49f5
commit
513559f737
|
|
@@ -1195,11 +1195,9 @@ static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
|
||||||
return iova_offset(iovad, phys | size);
|
return iova_offset(iovad, phys | size);
|
||||||
}
|
}
|
||||||
|
|
||||||
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
|
dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
|
||||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
enum dma_data_direction dir, unsigned long attrs)
|
||||||
unsigned long attrs)
|
|
||||||
{
|
{
|
||||||
phys_addr_t phys = page_to_phys(page) + offset;
|
|
||||||
bool coherent = dev_is_dma_coherent(dev);
|
bool coherent = dev_is_dma_coherent(dev);
|
||||||
int prot = dma_info_to_prot(dir, coherent, attrs);
|
int prot = dma_info_to_prot(dir, coherent, attrs);
|
||||||
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
||||||
|
|
@@ -1227,7 +1225,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
|
||||||
return iova;
|
return iova;
|
||||||
}
|
}
|
||||||
|
|
||||||
void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
|
void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
|
||||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||||
{
|
{
|
||||||
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
||||||
|
|
@@ -1346,7 +1344,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for_each_sg(sg, s, nents, i)
|
for_each_sg(sg, s, nents, i)
|
||||||
iommu_dma_unmap_page(dev, sg_dma_address(s),
|
iommu_dma_unmap_phys(dev, sg_dma_address(s),
|
||||||
sg_dma_len(s), dir, attrs);
|
sg_dma_len(s), dir, attrs);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -1359,8 +1357,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
|
||||||
sg_dma_mark_swiotlb(sg);
|
sg_dma_mark_swiotlb(sg);
|
||||||
|
|
||||||
for_each_sg(sg, s, nents, i) {
|
for_each_sg(sg, s, nents, i) {
|
||||||
sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
|
sg_dma_address(s) = iommu_dma_map_phys(dev, sg_phys(s),
|
||||||
s->offset, s->length, dir, attrs);
|
s->length, dir, attrs);
|
||||||
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
|
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
|
||||||
goto out_unmap;
|
goto out_unmap;
|
||||||
sg_dma_len(s) = s->length;
|
sg_dma_len(s) = s->length;
|
||||||
|
|
|
||||||
|
|
@@ -21,10 +21,9 @@ static inline bool use_dma_iommu(struct device *dev)
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_IOMMU_DMA */
|
#endif /* CONFIG_IOMMU_DMA */
|
||||||
|
|
||||||
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
|
dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
|
||||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
enum dma_data_direction dir, unsigned long attrs);
|
||||||
unsigned long attrs);
|
void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
|
||||||
void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
|
|
||||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||||
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||||
enum dma_data_direction dir, unsigned long attrs);
|
enum dma_data_direction dir, unsigned long attrs);
|
||||||
|
|
|
||||||
|
|
@@ -169,7 +169,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
|
||||||
arch_dma_map_page_direct(dev, phys + size))
|
arch_dma_map_page_direct(dev, phys + size))
|
||||||
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
|
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
|
||||||
else if (use_dma_iommu(dev))
|
else if (use_dma_iommu(dev))
|
||||||
addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
|
addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
|
||||||
else
|
else
|
||||||
addr = ops->map_page(dev, page, offset, size, dir, attrs);
|
addr = ops->map_page(dev, page, offset, size, dir, attrs);
|
||||||
kmsan_handle_dma(page, offset, size, dir);
|
kmsan_handle_dma(page, offset, size, dir);
|
||||||
|
|
@@ -190,7 +190,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
|
||||||
arch_dma_unmap_page_direct(dev, addr + size))
|
arch_dma_unmap_page_direct(dev, addr + size))
|
||||||
dma_direct_unmap_page(dev, addr, size, dir, attrs);
|
dma_direct_unmap_page(dev, addr, size, dir, attrs);
|
||||||
else if (use_dma_iommu(dev))
|
else if (use_dma_iommu(dev))
|
||||||
iommu_dma_unmap_page(dev, addr, size, dir, attrs);
|
iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
|
||||||
else
|
else
|
||||||
ops->unmap_page(dev, addr, size, dir, attrs);
|
ops->unmap_page(dev, addr, size, dir, attrs);
|
||||||
trace_dma_unmap_phys(dev, addr, size, dir, attrs);
|
trace_dma_unmap_phys(dev, addr, size, dir, attrs);
|
||||||
|
|
|
||||||
|
|
@@ -72,8 +72,8 @@ struct page *dma_common_alloc_pages(struct device *dev, size_t size,
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
if (use_dma_iommu(dev))
|
if (use_dma_iommu(dev))
|
||||||
*dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
|
*dma_handle = iommu_dma_map_phys(dev, page_to_phys(page), size,
|
||||||
DMA_ATTR_SKIP_CPU_SYNC);
|
dir, DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
else
|
else
|
||||||
*dma_handle = ops->map_page(dev, page, 0, size, dir,
|
*dma_handle = ops->map_page(dev, page, 0, size, dir,
|
||||||
DMA_ATTR_SKIP_CPU_SYNC);
|
DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
|
|
@@ -92,7 +92,7 @@ void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
|
||||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
if (use_dma_iommu(dev))
|
if (use_dma_iommu(dev))
|
||||||
iommu_dma_unmap_page(dev, dma_handle, size, dir,
|
iommu_dma_unmap_phys(dev, dma_handle, size, dir,
|
||||||
DMA_ATTR_SKIP_CPU_SYNC);
|
DMA_ATTR_SKIP_CPU_SYNC);
|
||||||
else if (ops->unmap_page)
|
else if (ops->unmap_page)
|
||||||
ops->unmap_page(dev, dma_handle, size, dir,
|
ops->unmap_page(dev, dma_handle, size, dir,
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue