mm/zone_device: rename page_free callback to folio_free
Change page_free() to folio_free() to make folio support for zone
device-private memory more consistent.  The PCI P2PDMA callback has been
updated to folio_free() as well.  For drivers that do not support folios
(yet), the current callback implementations convert the folio back into a
page via &folio->page and use the page as before.

Link: https://lkml.kernel.org/r/20251001065707.920170-3-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3a5a065545
parent d245f9b4ab
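The conversion is mechanical, and the same shape repeats in every driver hunk below: the callback's signature changes from struct page * to struct folio *, and a driver that is not folio-aware yet recovers the page at the top of the function. A minimal sketch of that before/after pattern, with hypothetical mydrv_* names (including the mydrv_put_backing() helper) standing in for any of the drivers touched here:

/* Before: the dev_pagemap_ops callback received the page directly. */
static void mydrv_page_free(struct page *page)
{
	mydrv_put_backing(page->zone_device_data);	/* hypothetical helper */
}

/* After: the callback receives a folio; a driver without folio support
 * converts back via &folio->page and continues unchanged. */
static void mydrv_folio_free(struct folio *folio)
{
	struct page *page = &folio->page;

	mydrv_put_backing(page->zone_device_data);	/* hypothetical helper */
}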
--- a/Documentation/mm/memory-model.rst
+++ b/Documentation/mm/memory-model.rst
@@ -165,7 +165,7 @@ The users of `ZONE_DEVICE` are:
 * pmem: Map platform persistent memory to be used as a direct-I/O target
   via DAX mappings.
 
-* hmm: Extend `ZONE_DEVICE` with `->page_fault()` and `->page_free()`
+* hmm: Extend `ZONE_DEVICE` with `->page_fault()` and `->folio_free()`
   event callbacks to allow a device-driver to coordinate memory management
   events related to device-memory, typically GPU memory. See
   Documentation/mm/hmm.rst.
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -1014,8 +1014,9 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
  * to a normal PFN during H_SVM_PAGE_OUT.
  * Gets called with kvm->arch.uvmem_lock held.
  */
-static void kvmppc_uvmem_page_free(struct page *page)
+static void kvmppc_uvmem_folio_free(struct folio *folio)
 {
+	struct page *page = &folio->page;
 	unsigned long pfn = page_to_pfn(page) -
 			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
 	struct kvmppc_uvmem_page_pvt *pvt;
@@ -1034,7 +1035,7 @@ static void kvmppc_uvmem_page_free(struct page *page)
 }
 
 static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
-	.page_free = kvmppc_uvmem_page_free,
+	.folio_free = kvmppc_uvmem_folio_free,
 	.migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
 };
 
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -568,8 +568,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	return r < 0 ? r : 0;
 }
 
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct svm_range_bo *svm_bo = page->zone_device_data;
 
 	if (svm_bo) {
@@ -1009,7 +1010,7 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 }
 
 static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
-	.page_free = svm_migrate_page_free,
+	.folio_free = svm_migrate_folio_free,
 	.migrate_to_ram = svm_migrate_to_ram,
 };
 
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -752,15 +752,15 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
 }
 
 /**
- * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
- * @page: Pointer to the page
+ * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
+ * @folio: Pointer to the folio
  *
  * This function is a callback used to put the GPU SVM zone device data
  * associated with a page when it is being released.
  */
-static void drm_pagemap_page_free(struct page *page)
+static void drm_pagemap_folio_free(struct folio *folio)
 {
-	drm_pagemap_zdd_put(page->zone_device_data);
+	drm_pagemap_zdd_put(folio->page.zone_device_data);
 }
 
 /**
@@ -788,7 +788,7 @@ static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
 }
 
 static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
-	.page_free = drm_pagemap_page_free,
+	.folio_free = drm_pagemap_folio_free,
 	.migrate_to_ram = drm_pagemap_migrate_to_ram,
 };
 
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -108,8 +108,9 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
 	return chunk->bo->offset + off;
 }
 
-static void nouveau_dmem_page_free(struct page *page)
+static void nouveau_dmem_folio_free(struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
 	struct nouveau_dmem *dmem = chunk->drm->dmem;
 
@@ -220,7 +221,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 }
 
 static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
-	.page_free = nouveau_dmem_page_free,
+	.folio_free = nouveau_dmem_folio_free,
 	.migrate_to_ram = nouveau_dmem_migrate_to_ram,
 };
 
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -200,8 +200,9 @@ static const struct attribute_group p2pmem_group = {
 	.name = "p2pmem",
 };
 
-static void p2pdma_page_free(struct page *page)
+static void p2pdma_folio_free(struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
 	/* safe to dereference while a reference is held to the percpu ref */
 	struct pci_p2pdma *p2pdma =
@@ -214,7 +215,7 @@ static void p2pdma_page_free(struct page *page)
 }
 
 static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
-	.page_free = p2pdma_page_free,
+	.folio_free = p2pdma_folio_free,
 };
 
 static void pci_p2pdma_release(void *data)
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -77,11 +77,11 @@ enum memory_type {
 
 struct dev_pagemap_ops {
 	/*
-	 * Called once the page refcount reaches 0. The reference count will be
+	 * Called once the folio refcount reaches 0. The reference count will be
 	 * reset to one by the core code after the method is called to prepare
-	 * for handing out the page again.
+	 * for handing out the folio again.
 	 */
-	void (*page_free)(struct page *page);
+	void (*folio_free)(struct folio *folio);
 
 	/*
 	 * Used for private (un-addressable) device memory only. Must migrate
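Passing the folio itself means the callback can eventually act on the whole unit, since a folio carries its own order; with the old signature the core had to pick a single page to hand over. A speculative sketch, not part of this patch, of what a folio-aware callback could look like once a driver supports large device folios (all mydrv_* names are hypothetical):

static void mydrv_folio_free(struct folio *folio)
{
	unsigned long first_pfn = folio_pfn(folio);
	unsigned long npages = folio_nr_pages(folio);	/* 1 for order-0 */

	/* Release the device backing store for the whole folio at once;
	 * mydrv_release_range() is a hypothetical driver helper. */
	mydrv_release_range(first_pfn, npages);
}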
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1374,8 +1374,9 @@ static const struct file_operations dmirror_fops = {
 	.owner = THIS_MODULE,
 };
 
-static void dmirror_devmem_free(struct page *page)
+static void dmirror_devmem_free(struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct page *rpage = BACKING_PAGE(page);
 	struct dmirror_device *mdevice;
 
@@ -1438,7 +1439,7 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 }
 
 static const struct dev_pagemap_ops dmirror_devmem_ops = {
-	.page_free = dmirror_devmem_free,
+	.folio_free = dmirror_devmem_free,
 	.migrate_to_ram = dmirror_devmem_fault,
 };
 
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -289,8 +289,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		WARN(1, "Missing migrate_to_ram method\n");
 		return ERR_PTR(-EINVAL);
 	}
-	if (!pgmap->ops->page_free) {
-		WARN(1, "Missing page_free method\n");
+	if (!pgmap->ops->folio_free) {
+		WARN(1, "Missing folio_free method\n");
 		return ERR_PTR(-EINVAL);
 	}
 	if (!pgmap->owner) {
@@ -299,8 +299,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 		}
 		break;
 	case MEMORY_DEVICE_COHERENT:
-		if (!pgmap->ops->page_free) {
-			WARN(1, "Missing page_free method\n");
+		if (!pgmap->ops->folio_free) {
+			WARN(1, "Missing folio_free method\n");
 			return ERR_PTR(-EINVAL);
 		}
 		if (!pgmap->owner) {
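Both checks run at registration time, so a device-private or device-coherent pagemap that still wires up only a page_free callback now fails memremap_pages() with -EINVAL instead of misbehaving later. A hedged sketch of a registration that satisfies the updated checks (mydrv_* names are hypothetical; mydrv_pgmap_ops would supply .folio_free and .migrate_to_ram as in the hunks above):

static struct dev_pagemap mydrv_pagemap;

static int mydrv_register_devmem(struct device *dev, struct range *range)
{
	void *addr;

	mydrv_pagemap.type = MEMORY_DEVICE_PRIVATE;
	mydrv_pagemap.range = *range;
	mydrv_pagemap.nr_range = 1;
	mydrv_pagemap.ops = &mydrv_pgmap_ops;	/* .folio_free is now mandatory */
	mydrv_pagemap.owner = &mydrv_pagemap;	/* any distinct, non-NULL token */

	addr = memremap_pages(&mydrv_pagemap, dev_to_node(dev));
	return IS_ERR(addr) ? PTR_ERR(addr) : 0;
}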
@@ -453,9 +453,9 @@ void free_zone_device_folio(struct folio *folio)
 	switch (pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
 	case MEMORY_DEVICE_COHERENT:
-		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
+		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free))
 			break;
-		pgmap->ops->page_free(&folio->page);
+		pgmap->ops->folio_free(folio);
 		percpu_ref_put_many(&folio->pgmap->ref, nr);
 		break;
 
@@ -472,9 +472,9 @@ void free_zone_device_folio(struct folio *folio)
 		break;
 
 	case MEMORY_DEVICE_PCI_P2PDMA:
-		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
+		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free))
 			break;
-		pgmap->ops->page_free(folio_page(folio, 0));
+		pgmap->ops->folio_free(folio);
 		break;
 	}
 }