mm/filemap: Add NUMA mempolicy support to filemap_alloc_folio()

Add a mempolicy parameter to filemap_alloc_folio() to enable NUMA-aware
page cache allocations. This will be used by upcoming changes to
support NUMA policies in guest-memfd, where guest memory needs to be
allocated according to the NUMA policy specified by the VMM.

All existing users pass NULL, maintaining the current behavior.
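A minimal sketch of how a caller might use the new parameter; the helper
below is hypothetical, and only filemap_alloc_folio()'s third argument
comes from this patch:

	#include <linux/pagemap.h>
	#include <linux/mempolicy.h>

	/*
	 * Hypothetical guest-memfd-style caller: plumb a VMM-provided
	 * mempolicy into the page cache allocator. Passing NULL keeps
	 * the existing cpuset-spread behavior.
	 */
	static struct folio *gmem_alloc_folio(struct address_space *mapping,
					      struct mempolicy *policy)
	{
		return filemap_alloc_folio(mapping_gfp_mask(mapping), 0, policy);
	}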

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
Tested-by: Ashish Kalra <ashish.kalra@amd.com>
Link: https://lore.kernel.org/r/20250827175247.83322-4-shivankg@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit 7f3779a3ac
parent 211ddde082
Author: Matthew Wilcox, 2025-08-27 17:52:43 +0000; committed by Sean Christopherson
7 files changed, 20 insertions(+), 14 deletions(-)

fs/btrfs/compression.c

@@ -491,8 +491,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			continue;
 		}
 
-		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
-						~__GFP_FS), 0);
+		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS),
+					    0, NULL);
 		if (!folio)
 			break;

fs/btrfs/verity.c

@@ -742,7 +742,7 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
 	}
 
 	folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
-				    0);
+				    0, NULL);
 	if (!folio)
 		return ERR_PTR(-ENOMEM);

fs/erofs/zdata.c

@@ -562,7 +562,7 @@ static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
 		 * Allocate a managed folio for cached I/O, or it may be
 		 * then filled with a file-backed folio for in-place I/O
 		 */
-		newfolio = filemap_alloc_folio(gfp, 0);
+		newfolio = filemap_alloc_folio(gfp, 0, NULL);
 		if (!newfolio)
 			continue;
 		newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;

fs/f2fs/compress.c

@@ -1947,7 +1947,7 @@ static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
 		return;
 	}
 
-	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
+	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0, NULL);
 	if (!cfolio)
 		return;

include/linux/pagemap.h

@@ -654,9 +654,11 @@ static inline void *detach_page_private(struct page *page)
 }
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *policy);
 #else
-static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *policy)
 {
 	return folio_alloc_noprof(gfp, order);
 }
@@ -667,7 +669,7 @@ static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int o
 
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-	return &filemap_alloc_folio(gfp, 0)->page;
+	return &filemap_alloc_folio(gfp, 0, NULL)->page;
 }
 
 static inline gfp_t readahead_gfp_mask(struct address_space *x)

mm/filemap.c

@@ -1002,11 +1002,16 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 EXPORT_SYMBOL_GPL(filemap_add_folio);
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+		struct mempolicy *policy)
 {
 	int n;
 	struct folio *folio;
 
+	if (policy)
+		return folio_alloc_mpol_noprof(gfp, order, policy,
+				NO_INTERLEAVE_INDEX, numa_node_id());
+
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
 
 		do {
@@ -2009,7 +2014,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 			err = -ENOMEM;
 			if (order > min_order)
 				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
-			folio = filemap_alloc_folio(alloc_gfp, order);
+			folio = filemap_alloc_folio(alloc_gfp, order, NULL);
 			if (!folio)
 				continue;
@@ -2551,7 +2556,7 @@ static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch)
 	if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
 		return -EAGAIN;
 
-	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
+	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order, NULL);
 	if (!folio)
 		return -ENOMEM;
 	if (iocb->ki_flags & IOCB_DONTCACHE)
@@ -3983,8 +3988,7 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
 repeat:
 	folio = filemap_get_folio(mapping, index);
 	if (IS_ERR(folio)) {
-		folio = filemap_alloc_folio(gfp,
-				mapping_min_folio_order(mapping));
+		folio = filemap_alloc_folio(gfp, mapping_min_folio_order(mapping), NULL);
 		if (!folio)
 			return ERR_PTR(-ENOMEM);
 		index = mapping_align_index(mapping, index);
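For context on the hunk above: when a non-NULL policy is supplied, the
cpuset page-spreading path is skipped and allocation goes straight to
folio_alloc_mpol_noprof(); NO_INTERLEAVE_INDEX indicates that interleaved
policies are not keyed to a page-cache index here. A one-line sketch,
assuming pol is an existing MPOL_BIND policy restricted to a single node:

	/* Order-0 page cache folio placed per pol's nodemask (sketch). */
	struct folio *folio = filemap_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, pol);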

mm/readahead.c

@@ -186,7 +186,7 @@ static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
 {
 	struct folio *folio;
 
-	folio = filemap_alloc_folio(gfp_mask, order);
+	folio = filemap_alloc_folio(gfp_mask, order, NULL);
 	if (folio && ractl->dropbehind)
 		__folio_set_dropbehind(folio);