mm: Introduce alloc_frozen_pages_nolock()

Split alloc_pages_nolock() and introduce alloc_frozen_pages_nolock()
to be used by alloc_slab_page().

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
Alexei Starovoitov 2025-09-08 18:00:04 -07:00 committed by Vlastimil Babka
parent 99253de51f
commit d7242af864
2 changed files with 32 additions and 21 deletions

View File

@ -842,6 +842,10 @@ static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int ord
#define alloc_frozen_pages(...) \
	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_frozen_pages_nolock(...) \
alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

View File

@ -7478,23 +7478,7 @@ static bool __free_unaccepted(struct page *page)
#endif /* CONFIG_UNACCEPTED_MEMORY */
/** struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
* alloc_pages_nolock - opportunistic reentrant allocation from any context
* @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed.
* @nid: node to allocate from
* @order: allocation order size
*
* Allocates pages of a given order from the given node. This is safe to
* call from any context (from atomic, NMI, and also reentrant
* allocator -> tracepoint -> alloc_pages_nolock_noprof).
* Allocation is best effort and to be expected to fail easily so nobody should
* rely on the success. Failures are not reported via warn_alloc().
* See always fail conditions below.
*
* Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
* It means ENOMEM. There is no reason to call it again and expect !NULL.
*/
struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
{
	/*
	 * Do not specify __GFP_DIRECT_RECLAIM, since direct claim is not allowed.
@ -7557,15 +7541,38 @@ struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int or
	/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
if (page)
set_page_refcounted(page);
	if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) &&
	    unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
free_pages_nolock(page, order); __free_frozen_pages(page, order, FPI_TRYLOCK);
		page = NULL;
	}
	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
	kmsan_alloc_page(page, order, alloc_gfp);
	return page;
}
/**
 * alloc_pages_nolock - opportunistic reentrant allocation from any context
 * @gfp_flags: GFP flags. Only __GFP_ACCOUNT allowed.
 * @nid: node to allocate from
 * @order: allocation order size
 *
 * Best-effort allocation of pages of the given order from the given node.
 * Safe to call from any context: atomic sections, NMI, and even reentrancy
 * into the allocator itself (allocator -> tracepoint ->
 * alloc_pages_nolock_noprof). Callers must be prepared for it to fail
 * easily and must not rely on success; failures are never reported via
 * warn_alloc(). See the always-fail conditions below.
 *
 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or
 * EAGAIN. It means ENOMEM. There is no reason to call it again and
 * expect !NULL.
 */
struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order)
{
	/* Grab a frozen page, then hand back a normally-refcounted one. */
	struct page *page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order);

	if (page)
		set_page_refcounted(page);
	return page;
}
EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);