vfio: selftests: replace iova=vaddr with allocated iovas

vfio_dma_mapping_test and vfio_pci_driver_test currently use iova=vaddr
as part of DMA mapping operations. However, not all IOMMUs support the
same virtual address width as the processor. For instance, older Intel
consumer platforms only support 39 bits of IOMMU address space. On such
platforms, using the virtual address as the IOVA fails.

Make the tests more robust by using an iova_allocator to vend IOVAs
from the legally accessible ranges reported by the underlying IOMMUFD
or VFIO container.
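
For orientation, the pattern the diff applies is roughly the following
sketch. It uses only the helpers visible in the hunks below; "device"
and "size" stand in for the fixture's vfio_pci_device and mapping size,
and fixture plumbing and error checks are elided, so this is
illustrative rather than the literal test code:

	/* Vend IOVAs from the IOMMU's usable ranges instead of reusing vaddr. */
	struct iova_allocator *iova_allocator = iova_allocator_init(device);
	struct vfio_dma_region region;

	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	region.iova = iova_allocator_alloc(iova_allocator, size); /* was (u64)region.vaddr */
	region.size = size;
	vfio_pci_dma_map(device, &region);

	/* ... exercise the mapping ... */

	iova_allocator_cleanup(iova_allocator);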

Reviewed-by: David Matlack <dmatlack@google.com>
Tested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251111-iova-ranges-v3-4-7960244642c5@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
commit d323ad7396 (parent ce0e3c403e)
Authored by Alex Mastro on 2025-11-11 10:48:27 -08:00; committed by Alex Williamson

 2 files changed, 12 insertions(+), 5 deletions(-)


@@ -95,6 +95,7 @@ static int iommu_mapping_get(const char *bdf, u64 iova,
 
 FIXTURE(vfio_dma_mapping_test) {
 	struct vfio_pci_device *device;
+	struct iova_allocator *iova_allocator;
 };
 
 FIXTURE_VARIANT(vfio_dma_mapping_test) {
@@ -119,10 +120,12 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB |
 FIXTURE_SETUP(vfio_dma_mapping_test)
 {
 	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	self->iova_allocator = iova_allocator_init(self->device);
 }
 
 FIXTURE_TEARDOWN(vfio_dma_mapping_test)
 {
+	iova_allocator_cleanup(self->iova_allocator);
 	vfio_pci_device_cleanup(self->device);
 }
 
@@ -144,7 +147,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
 	else
 		ASSERT_NE(region.vaddr, MAP_FAILED);
 
-	region.iova = (u64)region.vaddr;
+	region.iova = iova_allocator_alloc(self->iova_allocator, size);
 	region.size = size;
 
 	vfio_pci_dma_map(self->device, &region);

@@ -19,6 +19,7 @@ static const char *device_bdf;
 } while (0)
 
 static void region_setup(struct vfio_pci_device *device,
+			 struct iova_allocator *iova_allocator,
 			 struct vfio_dma_region *region, u64 size)
 {
 	const int flags = MAP_SHARED | MAP_ANONYMOUS;
@@ -29,7 +30,7 @@ static void region_setup(struct vfio_pci_device *device,
 	VFIO_ASSERT_NE(vaddr, MAP_FAILED);
 
 	region->vaddr = vaddr;
-	region->iova = (u64)vaddr;
+	region->iova = iova_allocator_alloc(iova_allocator, size);
 	region->size = size;
 
 	vfio_pci_dma_map(device, region);
@@ -44,6 +45,7 @@ static void region_teardown(struct vfio_pci_device *device,
 
 FIXTURE(vfio_pci_driver_test) {
 	struct vfio_pci_device *device;
+	struct iova_allocator *iova_allocator;
 	struct vfio_dma_region memcpy_region;
 	void *vaddr;
 	int msi_fd;
@@ -72,14 +74,15 @@ FIXTURE_SETUP(vfio_pci_driver_test)
 	struct vfio_pci_driver *driver;
 
 	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	self->iova_allocator = iova_allocator_init(self->device);
 
 	driver = &self->device->driver;
 
-	region_setup(self->device, &self->memcpy_region, SZ_1G);
-	region_setup(self->device, &driver->region, SZ_2M);
+	region_setup(self->device, self->iova_allocator, &self->memcpy_region, SZ_1G);
+	region_setup(self->device, self->iova_allocator, &driver->region, SZ_2M);
 
 	/* Any IOVA that doesn't overlap memcpy_region and driver->region. */
-	self->unmapped_iova = 8UL * SZ_1G;
+	self->unmapped_iova = iova_allocator_alloc(self->iova_allocator, SZ_1G);
 
 	vfio_pci_driver_init(self->device);
 	self->msi_fd = self->device->msi_eventfds[driver->msi];
@@ -108,6 +111,7 @@ FIXTURE_TEARDOWN(vfio_pci_driver_test)
 	region_teardown(self->device, &self->memcpy_region);
 	region_teardown(self->device, &driver->region);
 
+	iova_allocator_cleanup(self->iova_allocator);
 	vfio_pci_device_cleanup(self->device);
 }