mm/memremap: remove unused get_dev_pagemap() parameter

GUP no longer uses get_dev_pagemap().  As GUP was the only user of the
get_dev_pagemap() pgmap caching feature, that feature can now be removed.

Link: https://lkml.kernel.org/r/20250903225926.34702-2-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 614d850efd (parent d3f7922b92)
Author: Alistair Popple <apopple@nvidia.com>, 2025-09-04 08:59:26 +10:00; committed by Andrew Morton
4 changed files, 8 insertions(+), 24 deletions(-)
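
For context on what the removed "caching feature" did: the old second argument let a caller hand back a pgmap it already held a reference on, and the xarray lookup was skipped whenever that pgmap covered the new pfn (per the kernel-doc deleted below). A rough before/after sketch of the calling convention; the pfn loop is hypothetical, for illustration only:

	/* Before: the second argument cached the pgmap across lookups. */
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		/* Reuses pgmap as-is if it covers pfn, else drops it and looks up. */
		pgmap = get_dev_pagemap(pfn, pgmap);
	if (pgmap)
		put_dev_pagemap(pgmap);

	/* After: every call does the lookup and takes its own reference. */
	struct dev_pagemap *pgmap = get_dev_pagemap(pfn);

	if (pgmap) {
		/* ... use pgmap ... */
		put_dev_pagemap(pgmap);
	}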

include/linux/memremap.h
@@ -211,8 +211,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-		struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
 
 unsigned long memremap_compat_align(void);
@@ -234,8 +233,7 @@ static inline void devm_memunmap_pages(struct device *dev,
 {
 }
 
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-		struct dev_pagemap *pgmap)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
 {
 	return NULL;
 }

mm/memory-failure.c
@@ -2194,7 +2194,7 @@ int memory_failure(unsigned long pfn, int flags)
 		goto unlock_mutex;
 
 	if (pfn_valid(pfn)) {
-		pgmap = get_dev_pagemap(pfn, NULL);
+		pgmap = get_dev_pagemap(pfn);
 		put_ref_page(pfn, flags);
 		if (pgmap) {
 			res = memory_failure_dev_pagemap(pfn, flags,

mm/memory_hotplug.c
@@ -375,7 +375,7 @@ struct page *pfn_to_online_page(unsigned long pfn)
 	 * the section may be 'offline' but 'valid'. Only
 	 * get_dev_pagemap() can determine sub-section online status.
 	 */
-	pgmap = get_dev_pagemap(pfn, NULL);
+	pgmap = get_dev_pagemap(pfn);
 	put_dev_pagemap(pgmap);
 
 	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */

mm/memremap.c
@@ -153,14 +153,14 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 			"altmap not supported for multiple ranges\n"))
 		return -EINVAL;
 
-	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
+	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start));
 	if (conflict_pgmap) {
 		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
 		return -ENOMEM;
 	}
 
-	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
+	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end));
 	if (conflict_pgmap) {
 		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
@@ -397,26 +397,12 @@ EXPORT_SYMBOL_GPL(devm_memunmap_pages);
 /**
  * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
  * @pfn: page frame number to lookup page_map
- * @pgmap: optional known pgmap that already has a reference
- *
- * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
- * is non-NULL but does not cover @pfn the reference to it will be released.
  */
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-		struct dev_pagemap *pgmap)
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
 {
+	struct dev_pagemap *pgmap;
 	resource_size_t phys = PFN_PHYS(pfn);
 
-	/*
-	 * In the cached case we're already holding a live reference.
-	 */
-	if (pgmap) {
-		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
-			return pgmap;
-		put_dev_pagemap(pgmap);
-	}
-
-	/* fall back to slow path lookup */
 	rcu_read_lock();
 	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
 	if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
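
Putting the hunk above back together, the post-patch lookup reads roughly as below. The hunk is truncated at the tryget line, so the failure branch and the unlock/return tail are an assumption based on the usual rcu_read_lock()/percpu_ref pattern, not part of the quoted diff:

	struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
	{
		struct dev_pagemap *pgmap;
		resource_size_t phys = PFN_PHYS(pfn);

		rcu_read_lock();
		pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
		/* Assumed tail: treat a pgmap whose refcount is dying as absent. */
		if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
			pgmap = NULL;
		rcu_read_unlock();

		return pgmap;
	}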