mm/cma: refuse handing out non-contiguous page ranges
Let's disallow handing out PFN ranges with non-contiguous pages, so we
can remove the nth-page usage in __cma_alloc(), and so any callers don't
have to worry about that either when wanting to blindly iterate pages.

This is really only a problem in configs with SPARSEMEM but without
SPARSEMEM_VMEMMAP, and only when we would cross memory sections in some
cases.

Will this cause harm? Probably not, because it's mostly 32bit that does
not support SPARSEMEM_VMEMMAP. If this ever becomes a problem we could
look into allocating the memmap for the memory sections spanned by a
single CMA region in one go from memblock.

[david@redhat.com: we can have NOMMU configs with SPARSEMEM enabled]
Link: https://lkml.kernel.org/r/6ec933b1-b3f7-41c0-95d8-e518bb87375e@redhat.com
Link: https://lkml.kernel.org/r/20250901150359.867252-23-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 6972706f95
parent b71ddc9ecc
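The problem the patch guards against is easiest to see outside the kernel. The following is a minimal userspace sketch, not kernel code: struct page, PAGES_PER_SECTION, pfn_to_page() and range_contiguous() below are illustrative stand-ins with made-up sizes. It models a SPARSEMEM-without-VMEMMAP layout where each memory section has its own separately allocated memmap chunk, so page + i stops matching pfn_to_page(pfn + i) once the range crosses into a section whose chunk lives elsewhere; a check in the spirit of page_range_contiguous() detects exactly that case.

/*
 * Userspace model only: "struct page", PAGES_PER_SECTION and the helpers
 * below are illustrative stand-ins, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_SECTION 8UL	/* toy value, must be a power of two here */
#define NR_SECTIONS	  2UL

struct page { unsigned long flags; };

/*
 * One memmap chunk per section, deliberately padded so the chunks are not
 * adjacent in memory -- the situation SPARSEMEM without VMEMMAP can produce.
 */
static struct page backing[NR_SECTIONS][PAGES_PER_SECTION + 1];
static struct page *section_memmap[NR_SECTIONS] = { backing[0], backing[1] };

static struct page *pfn_to_page(unsigned long pfn)
{
	return &section_memmap[pfn / PAGES_PER_SECTION][pfn % PAGES_PER_SECTION];
}

/* same idea as the new page_range_contiguous(): one check per crossed section */
static bool range_contiguous(struct page *page, unsigned long start_pfn,
			     unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = (start_pfn + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1);
	     pfn < start_pfn + nr_pages; pfn += PAGES_PER_SECTION)
		if (page + (pfn - start_pfn) != pfn_to_page(pfn))
			return false;
	return true;
}

int main(void)
{
	/* four pages starting at pfn 6 cross from section 0 into section 1 */
	struct page *page = pfn_to_page(6);

	printf("page + 2 == pfn_to_page(8)? %s\n",
	       page + 2 == pfn_to_page(8) ? "yes" : "no");
	printf("range_contiguous(pfn 6, 4 pages) = %s\n",
	       range_contiguous(page, 6, 4) ? "true" : "false");
	return 0;
}

Compiled and run, this prints "no" and "false" for the crossing range, which is exactly the kind of range cma_range_alloc() now refuses to hand out.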
include/linux/mm.h
@@ -209,9 +209,15 @@ extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 #else
 #define nth_page(page,n) ((page) + (n))
+static inline bool page_range_contiguous(const struct page *page,
+		unsigned long nr_pages)
+{
+	return true;
+}
 #endif
 
 /* to align the pointer to the (next) page boundary */

mm/cma.c | 37
@@ -780,10 +780,8 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 			   unsigned long count, unsigned int align,
 			   struct page **pagep, gfp_t gfp)
 {
-	unsigned long mask, offset;
-	unsigned long pfn = -1;
-	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+	unsigned long start, pfn, mask, offset;
 	int ret = -EBUSY;
 	struct page *page = NULL;
 
@@ -795,7 +793,7 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 	if (bitmap_count > bitmap_maxno)
 		goto out;
 
-	for (;;) {
+	for (start = 0; ; start = bitmap_no + mask + 1) {
 		spin_lock_irq(&cma->lock);
 		/*
 		 * If the request is larger than the available number
@@ -812,6 +810,22 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 			spin_unlock_irq(&cma->lock);
 			break;
 		}
+
+		pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
+		page = pfn_to_page(pfn);
+
+		/*
+		 * Do not hand out page ranges that are not contiguous, so
+		 * callers can just iterate the pages without having to worry
+		 * about these corner cases.
+		 */
+		if (!page_range_contiguous(page, count)) {
+			spin_unlock_irq(&cma->lock);
+			pr_warn_ratelimited("%s: %s: skipping incompatible area [0x%lx-0x%lx]",
+					    __func__, cma->name, pfn, pfn + count - 1);
+			continue;
+		}
+
 		bitmap_set(cmr->bitmap, bitmap_no, bitmap_count);
 		cma->available_count -= count;
 		/*
@@ -821,28 +835,23 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 		 */
 		spin_unlock_irq(&cma->lock);
 
-		pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma->alloc_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
 		mutex_unlock(&cma->alloc_mutex);
-		if (ret == 0) {
-			page = pfn_to_page(pfn);
+		if (!ret)
 			break;
-		}
 
 		cma_clear_bitmap(cma, cmr, pfn, count);
 		if (ret != -EBUSY)
 			break;
 
 		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
-			 __func__, pfn, pfn_to_page(pfn));
+			 __func__, pfn, page);
 
-		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
-					   count, align);
-		/* try again with a bit different memory target */
-		start = bitmap_no + mask + 1;
+		trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align);
 	}
 out:
-	*pagep = page;
+	if (!ret)
+		*pagep = page;
 	return ret;
 }
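The reshaped retry loop in cma_range_alloc() now reads as: find a candidate bit range, validate it, then either commit it in the bitmap or skip past it and try again. Below is a standalone sketch of that control flow; it is an approximation, not the kernel API: the single bitmap word, find_zero_area() and range_ok() are simplified stand-ins, and the mask/offset alignment handling of the real code is omitted (the skip step uses bit + 1 instead of bitmap_no + mask + 1).

/*
 * Userspace sketch of the reshaped loop: the bitmap, find_zero_area() and
 * range_ok() are simplified stand-ins, and alignment (mask/offset) is left out.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBITS 32UL

static unsigned long bitmap = 0x3ffUL;	/* bits 0..9 already allocated */

/* first run of 'count' clear bits at or after 'start', or NBITS if none */
static unsigned long find_zero_area(unsigned long start, unsigned long count)
{
	unsigned long run = 0, bit;

	for (bit = start; bit < NBITS; bit++) {
		if (bitmap & (1UL << bit))
			run = 0;
		else if (++run == count)
			return bit - count + 1;
	}
	return NBITS;
}

/* stand-in for page_range_contiguous(): pretend bits 16..23 map to a
 * section whose memmap is not contiguous with its neighbours */
static bool range_ok(unsigned long bit, unsigned long count)
{
	return bit + count <= 16 || bit >= 24;
}

int main(void)
{
	const unsigned long count = 8;
	unsigned long start, bit;

	for (start = 0; ; start = bit + 1) {
		bit = find_zero_area(start, count);
		if (bit == NBITS) {
			puts("-EBUSY: no usable range");
			return 1;
		}
		if (!range_ok(bit, count)) {
			printf("skipping incompatible range at bit %lu\n", bit);
			continue;	/* nothing was committed, retry further on */
		}
		bitmap |= ((1UL << count) - 1) << bit;	/* commit the range */
		printf("allocated bits %lu..%lu\n", bit, bit + count - 1);
		return 0;
	}
}

The point of the structure is that the incompatible area is detected before bitmap_set(), so nothing has to be rolled back; the loop simply moves on, just as it already did for alloc_contig_range() failures.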
@@ -882,7 +891,7 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
 	 */
 	if (page) {
 		for (i = 0; i < count; i++)
-			page_kasan_tag_reset(nth_page(page, i));
+			page_kasan_tag_reset(page + i);
 	}
 
 	if (ret && !(gfp & __GFP_NOWARN)) {

mm/util.c | 35
@@ -1281,3 +1281,38 @@ unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
 	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, 0);
 }
 #endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/**
+ * page_range_contiguous - test whether the page range is contiguous
+ * @page: the start of the page range.
+ * @nr_pages: the number of pages in the range.
+ *
+ * Test whether the page range is contiguous, such that they can be iterated
+ * naively, corresponding to iterating a contiguous PFN range.
+ *
+ * This function should primarily only be used for debug checks, or when
+ * working with page ranges that are not naturally contiguous (e.g., pages
+ * within a folio are).
+ *
+ * Returns true if contiguous, otherwise false.
+ */
+bool page_range_contiguous(const struct page *page, unsigned long nr_pages)
+{
+	const unsigned long start_pfn = page_to_pfn(page);
+	const unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long pfn;
+
+	/*
+	 * The memmap is allocated per memory section, so no need to check
+	 * within the first section. However, we need to check each other
+	 * spanned memory section once, making sure the first page in a
+	 * section could similarly be reached by just iterating pages.
+	 */
+	for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
+	     pfn < end_pfn; pfn += PAGES_PER_SECTION)
+		if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn)))
+			return false;
+	return true;
+}
+#endif
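As a quick way to see which PFNs the new helper actually compares: only the first PFN of each additional section spanned by the range is checked, never the interior of the first section, since the memmap within a single section is contiguous by construction. The sketch below is a standalone illustration, not kernel code; it uses a toy PAGES_PER_SECTION and open-codes ALIGN() the way the kernel macro behaves for power-of-two alignments.

#include <stdio.h>

#define PAGES_PER_SECTION 256UL		/* toy value; must be a power of two */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* print the PFNs page_range_contiguous() would compare for this range */
static void show_checked_pfns(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	printf("range [%lu, %lu): ", start_pfn, end_pfn);
	for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
	     pfn < end_pfn; pfn += PAGES_PER_SECTION)
		printf("%lu ", pfn);
	printf("\n");
}

int main(void)
{
	show_checked_pfns(10, 100);	/* stays in one section: nothing to check */
	show_checked_pfns(200, 100);	/* crosses into the next section: checks 256 */
	show_checked_pfns(100, 600);	/* spans three sections: checks 256 and 512 */
	return 0;
}

This also shows why the cost is negligible: the loop runs once per crossed section boundary, not once per page.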