iommu/pages: Remove iommu_alloc_page_node()

Use iommu_alloc_pages_node_sz() instead.

AMD and Intel both use 4K pages for these structures, since those
drivers only support a 4K PAGE_SIZE.

The riscv IOMMU is also specified to use SZ_4K.
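
The conversion is mechanical: each caller now passes the table size
explicitly instead of relying on PAGE_SIZE. The pattern, taken from the
AMD v1 hunk below, is:

-	pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
+	pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);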

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/21-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 5087f663c2 (parent 28024569e8)
Jason Gunthorpe, 2025-04-08 13:54:09 -03:00; committed by Joerg Roedel
7 files changed, 22 insertions(+), 28 deletions(-)

@@ -114,7 +114,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
         bool ret = true;
         u64 *pte;
 
-        pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
+        pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);
         if (!pte)
                 return false;
@@ -206,7 +206,8 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
                 if (!IOMMU_PTE_PRESENT(__pte) ||
                     pte_level == PAGE_MODE_NONE) {
-                        page = iommu_alloc_page_node(cfg->amd.nid, gfp);
+                        page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp,
+                                                         SZ_4K);
                         if (!page)
                                 return NULL;
@@ -535,7 +536,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
         struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
 
-        pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+        pgtable->root =
+                iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
         if (!pgtable->root)
                 return NULL;
         pgtable->mode = PAGE_MODE_3_LEVEL;

@@ -152,7 +152,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
                 }
 
                 if (!IOMMU_PTE_PRESENT(__pte)) {
-                        page = iommu_alloc_page_node(nid, gfp);
+                        page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
                         if (!page)
                                 return NULL;
@@ -346,7 +346,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
         struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
         int ias = IOMMU_IN_ADDR_BIT_SIZE;
 
-        pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+        pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
         if (!pgtable->pgd)
                 return NULL;

@@ -1884,7 +1884,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
                 return -ENOSPC;
         gcr3_info->domid = domid;
 
-        gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
+        gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
         if (gcr3_info->gcr3_tbl == NULL) {
                 pdom_id_free(domid);
                 return -ENOMEM;

@@ -397,7 +397,8 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
                 if (!alloc)
                         return NULL;
 
-                context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+                context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+                                                    SZ_4K);
                 if (!context)
                         return NULL;
@@ -731,7 +732,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                 if (!dma_pte_present(pte)) {
                         uint64_t pteval, tmp;
 
-                        tmp_page = iommu_alloc_page_node(domain->nid, gfp);
+                        tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
+                                                             SZ_4K);
                         if (!tmp_page)
                                 return NULL;
@@ -982,7 +984,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
         struct root_entry *root;
 
-        root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+        root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
         if (!root) {
                 pr_err("Allocating root entry for %s failed\n",
                        iommu->name);
@@ -2026,7 +2028,8 @@ static int copy_context_table(struct intel_iommu *iommu,
                         if (!old_ce)
                                 goto out;
 
-                        new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
+                        new_ce = iommu_alloc_pages_node_sz(iommu->node,
+                                                           GFP_KERNEL, SZ_4K);
                         if (!new_ce)
                                 goto out_unmap;
@@ -3359,7 +3362,7 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
         domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
 
         /* always allocate the top pgd */
-        domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+        domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
         if (!domain->pgd) {
                 kfree(domain);
                 return ERR_PTR(-ENOMEM);

@@ -147,7 +147,8 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
         if (!entries) {
                 u64 tmp;
 
-                entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
+                entries = iommu_alloc_pages_node_sz(info->iommu->node,
+                                                    GFP_ATOMIC, SZ_4K);
                 if (!entries)
                         return NULL;

@@ -114,17 +114,4 @@ static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size)
         return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size);
 }
 
-/**
- * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- *
- * returns the virtual address of the allocated page
- * Prefer to use iommu_alloc_pages_node_lg2()
- */
-static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
-{
-        return iommu_alloc_pages_node_sz(nid, gfp, PAGE_SIZE);
-}
-
 #endif /* __IOMMU_PAGES_H */

@@ -1144,7 +1144,8 @@ static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain,
                  * page table. This might race with other mappings, retry.
                  */
                 if (_io_pte_none(pte)) {
-                        addr = iommu_alloc_page_node(domain->numa_node, gfp);
+                        addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
+                                                         SZ_4K);
                         if (!addr)
                                 return NULL;
                         old = pte;
@@ -1385,8 +1386,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
         domain->numa_node = dev_to_node(iommu->dev);
         domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
         domain->pgd_mode = pgd_mode;
-        domain->pgd_root = iommu_alloc_page_node(domain->numa_node,
-                                                 GFP_KERNEL_ACCOUNT);
+        domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
+                                                     GFP_KERNEL_ACCOUNT, SZ_4K);
         if (!domain->pgd_root) {
                 kfree(domain);
                 return ERR_PTR(-ENOMEM);