mirror of https://github.com/torvalds/linux.git
vfio/type1: move iova increment to unmap_unpin_*() caller
Move incrementing iova to the caller of these functions as part of
preparing to handle end of address space map/unmap.
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Fixes: 73fa0d10d0 ("vfio: Type1 IOMMU implementation")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251028-fix-unmap-v6-2-2542b96bcc8e@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
This commit is contained in:
parent
6012379ede
commit
1196f1f897
|
|
@@ -1083,7 +1083,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
|
|||
#define VFIO_IOMMU_TLB_SYNC_MAX 512
|
||||
|
||||
static size_t unmap_unpin_fast(struct vfio_domain *domain,
|
||||
struct vfio_dma *dma, dma_addr_t *iova,
|
||||
struct vfio_dma *dma, dma_addr_t iova,
|
||||
size_t len, phys_addr_t phys, long *unlocked,
|
||||
struct list_head *unmapped_list,
|
||||
int *unmapped_cnt,
|
||||
|
|
@@ -1093,18 +1093,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
|
|||
struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
||||
|
||||
if (entry) {
|
||||
unmapped = iommu_unmap_fast(domain->domain, *iova, len,
|
||||
unmapped = iommu_unmap_fast(domain->domain, iova, len,
|
||||
iotlb_gather);
|
||||
|
||||
if (!unmapped) {
|
||||
kfree(entry);
|
||||
} else {
|
||||
entry->iova = *iova;
|
||||
entry->iova = iova;
|
||||
entry->phys = phys;
|
||||
entry->len = unmapped;
|
||||
list_add_tail(&entry->list, unmapped_list);
|
||||
|
||||
*iova += unmapped;
|
||||
(*unmapped_cnt)++;
|
||||
}
|
||||
}
|
||||
|
|
@@ -1123,18 +1122,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
|
|||
}
|
||||
|
||||
static size_t unmap_unpin_slow(struct vfio_domain *domain,
|
||||
struct vfio_dma *dma, dma_addr_t *iova,
|
||||
struct vfio_dma *dma, dma_addr_t iova,
|
||||
size_t len, phys_addr_t phys,
|
||||
long *unlocked)
|
||||
{
|
||||
size_t unmapped = iommu_unmap(domain->domain, *iova, len);
|
||||
size_t unmapped = iommu_unmap(domain->domain, iova, len);
|
||||
|
||||
if (unmapped) {
|
||||
*unlocked += vfio_unpin_pages_remote(dma, *iova,
|
||||
*unlocked += vfio_unpin_pages_remote(dma, iova,
|
||||
phys >> PAGE_SHIFT,
|
||||
unmapped >> PAGE_SHIFT,
|
||||
false);
|
||||
*iova += unmapped;
|
||||
cond_resched();
|
||||
}
|
||||
return unmapped;
|
||||
|
|
@@ -1197,16 +1195,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
|
|||
* First, try to use fast unmap/unpin. In case of failure,
|
||||
* switch to slow unmap/unpin path.
|
||||
*/
|
||||
unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
|
||||
unmapped = unmap_unpin_fast(domain, dma, iova, len, phys,
|
||||
&unlocked, &unmapped_region_list,
|
||||
&unmapped_region_cnt,
|
||||
&iotlb_gather);
|
||||
if (!unmapped) {
|
||||
unmapped = unmap_unpin_slow(domain, dma, &iova, len,
|
||||
unmapped = unmap_unpin_slow(domain, dma, iova, len,
|
||||
phys, &unlocked);
|
||||
if (WARN_ON(!unmapped))
|
||||
break;
|
||||
}
|
||||
|
||||
iova += unmapped;
|
||||
}
|
||||
|
||||
dma->iommu_mapped = false;
|
||||
|
|
|
|||
Loading…
Reference in New Issue