mm/migrate_device: add THP splitting during migration

Implement migrate_vma_split_pages() to handle THP splitting during the
migration process when the destination cannot allocate compound pages.

This addresses the common scenario where migrate_vma_setup() succeeds with
MIGRATE_PFN_COMPOUND pages, but the destination device cannot allocate
large pages during the migration phase.
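
For context, a rough driver-side sketch of that scenario (illustration only,
not part of the patch: dev, dev_alloc_huge_page() and dev_alloc_page() are
hypothetical driver pieces, and setup, copy and error handling are elided):

  unsigned long i;
  struct migrate_vma args = {
          .vma         = vma,
          .start       = addr,
          .end         = addr + HPAGE_PMD_SIZE,
          .src         = src_pfns,    /* HPAGE_PMD_NR entries */
          .dst         = dst_pfns,    /* HPAGE_PMD_NR entries */
          .pgmap_owner = dev,
          .flags       = MIGRATE_VMA_SELECT_SYSTEM,
  };

  if (migrate_vma_setup(&args))
          return -EFAULT;

  if ((args.src[0] & MIGRATE_PFN_MIGRATE) &&
      (args.src[0] & MIGRATE_PFN_COMPOUND)) {
          struct page *dpage = dev_alloc_huge_page(dev);

          if (dpage) {
                  args.dst[0] = migrate_pfn(page_to_pfn(dpage)) |
                                MIGRATE_PFN_COMPOUND;
          } else {
                  /*
                   * No large page available on the device: hand back base
                   * pages.  With this patch the core splits the source THP
                   * in __migrate_device_pages() instead of failing.
                   */
                  for (i = 0; i < HPAGE_PMD_NR; i++)
                          args.dst[i] = migrate_pfn(
                                  page_to_pfn(dev_alloc_page(dev)));
          }
  }

  /* ... copy source data to the device pages ... */
  migrate_vma_pages(&args);
  migrate_vma_finalize(&args);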

Key changes:
- migrate_vma_split_pages(): Split already-isolated pages during migration
- Extended __folio_split() and __split_huge_page_to_list_to_order() with an
  'unmapped' parameter so folios whose PTEs already hold migration entries
  skip the redundant unmap/remap steps (see the sketch after this list)
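
For reference, the split entry points after this change (mirroring the
include/linux/huge_mm.h and mm/migrate_device.c hunks below; existing callers
keep the old behaviour via the inline wrapper):

  int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                  unsigned int new_order, bool unmapped);

  /* Existing callers: the folio is still mapped, so unmapped = false. */
  static inline int split_huge_page_to_list_to_order(struct page *page,
                  struct list_head *list, unsigned int new_order)
  {
          return __split_huge_page_to_list_to_order(page, list, new_order, false);
  }

  /*
   * Migration path: the folio's PTEs already hold migration entries, so
   * pass unmapped = true and skip the unmap/remap and LRU handling.
   */
  ret = __split_huge_page_to_list_to_order(folio_page(folio, 0), NULL, 0, true);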

This provides a fallback mechanism to ensure migration succeeds even when
large page allocation fails at the destination.
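
A hedged sketch of how the fallback is driven from __migrate_device_pages()
(condensed from the mm/migrate_device.c hunk below):

  /* Source is a THP but the driver supplied no compound destination. */
  if ((src_pfns[i] & MIGRATE_PFN_COMPOUND) &&
      !(dst_pfns[i] & MIGRATE_PFN_COMPOUND)) {
          addr = migrate->start + i * PAGE_SIZE;
          if (migrate_vma_split_unmapped_folio(migrate, i, addr, folio)) {
                  /* Split failed: give up on this entry only. */
                  src_pfns[i] &= ~(MIGRATE_PFN_MIGRATE | MIGRATE_PFN_COMPOUND);
                  goto next;
          }
          /* src_pfns[i .. i + HPAGE_PMD_NR - 1] now describe base pages. */
  }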

[matthew.brost@intel.com: add THP splitting during migration]
  Link: https://lkml.kernel.org/r/20251120230825.181072-2-matthew.brost@intel.com
Link: https://lkml.kernel.org/r/20251001065707.920170-12-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Balbir Singh 2025-10-01 16:57:02 +10:00 committed by Andrew Morton
parent 56ef398996
commit 4265d67e40
4 changed files with 119 additions and 34 deletions

include/linux/huge_mm.h

@ -365,8 +365,8 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
vm_flags_t vm_flags);
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order, bool unmapped);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
bool uniform_split_supported(struct folio *folio, unsigned int new_order,
@ -375,6 +375,13 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
bool warns);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
struct list_head *list);
static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order)
{
return __split_huge_page_to_list_to_order(page, list, new_order, false);
}
/*
* try_folio_split_to_order - try to split a @folio at @page to @new_order using
* non uniform split.

lib/test_hmm.c

@ -1612,6 +1612,15 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
order = folio_order(page_folio(vmf->page));
nr = 1 << order;
/*
* When folios are partially mapped, we can't rely on the folio
* order of vmf->page as the folio might not be fully split yet
*/
if (vmf->pte) {
order = 0;
nr = 1;
}
/*
* Consider a per-cpu cache of src and dst pfns, but with
* large number of cpus that might not scale well.

mm/huge_memory.c

@ -3452,15 +3452,6 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
new_folio->mapping = folio->mapping;
new_folio->index = folio->index + i;
/*
* page->private should not be set in tail pages. Fix up and warn once
* if private is unexpectedly set.
*/
if (unlikely(new_folio->private)) {
VM_WARN_ON_ONCE_PAGE(true, new_head);
new_folio->private = NULL;
}
if (folio_test_swapcache(folio))
new_folio->swap.val = folio->swap.val + i;
@ -3661,6 +3652,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
* @lock_at: a page within @folio to be left locked to caller
* @list: after-split folios will be put on it if non NULL
* @uniform_split: perform uniform split or not (non-uniform split)
* @unmapped: The pages are already unmapped; their PTEs hold migration entries.
*
* It calls __split_unmapped_folio() to perform uniform and non-uniform split.
* It is in charge of checking whether the split is supported or not and
@ -3676,7 +3668,7 @@ bool uniform_split_supported(struct folio *folio, unsigned int new_order,
*/
static int __folio_split(struct folio *folio, unsigned int new_order,
struct page *split_at, struct page *lock_at,
struct list_head *list, bool uniform_split)
struct list_head *list, bool uniform_split, bool unmapped)
{
struct deferred_split *ds_queue = get_deferred_split_queue(folio);
XA_STATE(xas, &folio->mapping->i_pages, folio->index);
@ -3736,13 +3728,15 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
* is taken to serialise against parallel split or collapse
* operations.
*/
if (!unmapped) {
anon_vma = folio_get_anon_vma(folio);
if (!anon_vma) {
ret = -EBUSY;
goto out;
}
mapping = NULL;
anon_vma_lock_write(anon_vma);
}
mapping = NULL;
} else {
unsigned int min_order;
gfp_t gfp;
@ -3795,6 +3789,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
goto out_unlock;
}
if (!unmapped)
unmap_folio(folio);
/* block interrupt reentry in xa_lock and spinlock */
@ -3882,9 +3877,12 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
next = folio_next(new_folio);
zone_device_private_split_cb(folio, new_folio);
expected_refs = folio_expected_ref_count(new_folio) + 1;
folio_ref_unfreeze(new_folio, expected_refs);
if (!unmapped)
lru_add_split_folio(folio, new_folio, lruvec, list);
/*
@ -3916,6 +3914,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
__filemap_remove_folio(new_folio, NULL);
folio_put_refs(new_folio, nr_pages);
}
zone_device_private_split_cb(folio, NULL);
/*
* Unfreeze @folio only after all page cache entries, which
* used to point to it, have been updated with new folios.
@ -3939,6 +3939,9 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
local_irq_enable();
if (unmapped)
return ret;
if (nr_shmem_dropped)
shmem_uncharge(mapping->host, nr_shmem_dropped);
@ -4029,12 +4032,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
* Returns -EINVAL when trying to split to an order that is incompatible
* with the folio. Splitting to order 0 is compatible with all folios.
*/
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order)
int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order, bool unmapped)
{
struct folio *folio = page_folio(page);
return __folio_split(folio, new_order, &folio->page, page, list, true);
return __folio_split(folio, new_order, &folio->page, page, list, true,
unmapped);
}
/*
@ -4063,7 +4067,7 @@ int folio_split(struct folio *folio, unsigned int new_order,
struct page *split_at, struct list_head *list)
{
return __folio_split(folio, new_order, split_at, &folio->page, list,
false);
false, false);
}
int min_order_for_split(struct folio *folio)

mm/migrate_device.c

@ -309,6 +309,25 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
pgmap->owner != migrate->pgmap_owner)
goto next;
folio = page_folio(page);
if (folio_test_large(folio)) {
int ret;
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(ptep, ptl);
ret = migrate_vma_split_folio(folio,
migrate->fault_page);
if (ret) {
if (unmapped)
flush_tlb_range(walk->vma, start, end);
return migrate_vma_collect_skip(addr, end, walk);
}
goto again;
}
mpfn = migrate_pfn(page_to_pfn(page)) |
MIGRATE_PFN_MIGRATE;
if (is_writable_device_private_entry(entry))
@ -885,6 +904,29 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
src[i] &= ~MIGRATE_PFN_MIGRATE;
return 0;
}
static int migrate_vma_split_unmapped_folio(struct migrate_vma *migrate,
unsigned long idx, unsigned long addr,
struct folio *folio)
{
unsigned long i;
unsigned long pfn;
unsigned long flags;
int ret = 0;
folio_get(folio);
split_huge_pmd_address(migrate->vma, addr, true);
ret = __split_huge_page_to_list_to_order(folio_page(folio, 0), NULL,
0, true);
if (ret)
return ret;
migrate->src[idx] &= ~MIGRATE_PFN_COMPOUND;
flags = migrate->src[idx] & ((1UL << MIGRATE_PFN_SHIFT) - 1);
pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT;
for (i = 1; i < HPAGE_PMD_NR; i++)
migrate->src[i+idx] = migrate_pfn(pfn + i) | flags;
return ret;
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
unsigned long addr,
@ -894,6 +936,13 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
{
return 0;
}
static int migrate_vma_split_unmapped_folio(struct migrate_vma *migrate,
unsigned long idx, unsigned long addr,
struct folio *folio)
{
return 0;
}
#endif
static unsigned long migrate_vma_nr_pages(unsigned long *src)
@ -1055,8 +1104,9 @@ static void __migrate_device_pages(unsigned long *src_pfns,
struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
unsigned long i;
unsigned long i, j;
bool notified = false;
unsigned long addr;
for (i = 0; i < npages; ) {
struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
@ -1098,12 +1148,16 @@ static void __migrate_device_pages(unsigned long *src_pfns,
(!(dst_pfns[i] & MIGRATE_PFN_COMPOUND))) {
nr = migrate_vma_nr_pages(&src_pfns[i]);
src_pfns[i] &= ~MIGRATE_PFN_COMPOUND;
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
goto next;
} else {
nr = 1;
}
migrate_vma_insert_page(migrate, addr, &dst_pfns[i],
&src_pfns[i]);
for (j = 0; j < nr && i + j < npages; j++) {
src_pfns[i+j] |= MIGRATE_PFN_MIGRATE;
migrate_vma_insert_page(migrate,
addr + j * PAGE_SIZE,
&dst_pfns[i+j], &src_pfns[i+j]);
}
goto next;
}
@ -1125,7 +1179,13 @@ static void __migrate_device_pages(unsigned long *src_pfns,
MIGRATE_PFN_COMPOUND);
goto next;
}
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
nr = 1 << folio_order(folio);
addr = migrate->start + i * PAGE_SIZE;
if (migrate_vma_split_unmapped_folio(migrate, i, addr, folio)) {
src_pfns[i] &= ~(MIGRATE_PFN_MIGRATE |
MIGRATE_PFN_COMPOUND);
goto next;
}
} else if ((src_pfns[i] & MIGRATE_PFN_MIGRATE) &&
(dst_pfns[i] & MIGRATE_PFN_COMPOUND) &&
!(src_pfns[i] & MIGRATE_PFN_COMPOUND)) {
@ -1161,11 +1221,16 @@ static void __migrate_device_pages(unsigned long *src_pfns,
if (migrate && migrate->fault_page == page)
extra_cnt = 1;
for (j = 0; j < nr && i + j < npages; j++) {
folio = page_folio(migrate_pfn_to_page(src_pfns[i+j]));
newfolio = page_folio(migrate_pfn_to_page(dst_pfns[i+j]));
r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
if (r)
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
src_pfns[i+j] &= ~MIGRATE_PFN_MIGRATE;
else
folio_migrate_flags(newfolio, folio);
}
next:
i += nr;
}