lib/test_hmm: add large page allocation failure testing

Add HMM_DMIRROR_FLAG_FAIL_ALLOC flag to simulate large page allocation
failures, enabling testing of split migration code paths.

This test flag allows validation of the fallback behavior when the
destination device cannot allocate compound pages, which is useful for
exercising the split migration functionality.
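
For example, a userspace test arms the flag through the new
HMM_DMIRROR_FLAGS ioctl before requesting a migration.  A minimal sketch
(assuming the /dev/hmm_dmirror0 node created by the test_hmm module, plus
the hmm_dmirror_cmd layout and the HMM_DMIRROR_MIGRATE_TO_DEV ioctl from
the same uapi header; error handling omitted):

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include "test_hmm_uapi.h"

  int main(void)
  {
  	unsigned long size = 2UL << 20;		/* one PMD-sized range */
  	struct hmm_dmirror_cmd cmd = { 0 };
  	int fd = open("/dev/hmm_dmirror0", O_RDWR);
  	char *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
  			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  	char *mirror = malloc(size);	/* read-back buffer for cmd.ptr */

  	madvise(buf, size, MADV_HUGEPAGE);	/* ask for THP backing */
  	memset(buf, 1, size);			/* populate the range */

  	/*
  	 * Arm the one-shot failure.  The flag value travels in
  	 * cmd.npages; the driver stores it in dmirror->flags and clears
  	 * it after the first refused large allocation, so it must be
  	 * re-armed for each migration that should fail.
  	 */
  	cmd.npages = HMM_DMIRROR_FLAG_FAIL_ALLOC;
  	ioctl(fd, HMM_DMIRROR_FLAGS, &cmd);

  	/* This migration now has to fall back to smaller pages. */
  	cmd.addr = (uintptr_t)buf;
  	cmd.ptr = (uintptr_t)mirror;
  	cmd.npages = size >> 12;	/* base pages, assuming 4KiB PAGE_SIZE */
  	ioctl(fd, HMM_DMIRROR_MIGRATE_TO_DEV, &cmd);

  	return 0;
  }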

Link: https://lkml.kernel.org/r/20251001065707.920170-13-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit aa3ade4295 (parent 4265d67e40)
Author:    Balbir Singh <balbirs@nvidia.com>
Date:      2025-10-01 16:57:03 +10:00
Committer: Andrew Morton <akpm@linux-foundation.org>

2 changed files with 44 additions and 20 deletions

diff --git a/lib/test_hmm.c b/lib/test_hmm.c
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c

@@ -92,6 +92,7 @@ struct dmirror {
 	struct xarray			pt;
 	struct mmu_interval_notifier	notifier;
 	struct mutex			mutex;
+	__u64				flags;
 };
 
 /*
@@ -699,7 +700,12 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
 						   page_to_pfn(spage)))
 			goto next;
 
-		dpage = dmirror_devmem_alloc_page(dmirror, is_large);
+		if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+			dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+			dpage = NULL;
+		} else
+			dpage = dmirror_devmem_alloc_page(dmirror, is_large);
+
 		if (!dpage) {
 			struct folio *folio;
 			unsigned long i;
@@ -959,44 +965,55 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 		spage = BACKING_PAGE(spage);
 		order = folio_order(page_folio(spage));
 
 		if (order)
+			*dst = MIGRATE_PFN_COMPOUND;
+		if (*src & MIGRATE_PFN_WRITE)
+			*dst |= MIGRATE_PFN_WRITE;
+
+		if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+			dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+			*dst &= ~MIGRATE_PFN_COMPOUND;
+			dpage = NULL;
+		} else if (order) {
 			dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER_MOVABLE,
 						order, args->vma, addr), 0);
-		else
+		} else {
 			dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
-
-		/* Try with smaller pages if large allocation fails */
-		if (!dpage && order) {
-			dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
-			if (!dpage)
-				return VM_FAULT_OOM;
-			order = 0;
 		}
 
+		if (!dpage && !order)
+			return VM_FAULT_OOM;
+
 		pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
 			 page_to_pfn(spage), page_to_pfn(dpage));
-		lock_page(dpage);
-		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
-		copy_highpage(dpage, spage);
-		*dst = migrate_pfn(page_to_pfn(dpage));
-		if (*src & MIGRATE_PFN_WRITE)
-			*dst |= MIGRATE_PFN_WRITE;
-		if (order)
-			*dst |= MIGRATE_PFN_COMPOUND;
+
+		if (dpage) {
+			lock_page(dpage);
+			*dst |= migrate_pfn(page_to_pfn(dpage));
+		}
 
 		for (i = 0; i < (1 << order); i++) {
 			struct page *src_page;
 			struct page *dst_page;
 
+			/* Try with smaller pages if large allocation fails */
+			if (!dpage && order) {
+				dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+				lock_page(dpage);
+				dst[i] = migrate_pfn(page_to_pfn(dpage));
+				dst_page = pfn_to_page(page_to_pfn(dpage));
+				dpage = NULL; /* For the next iteration */
+			} else {
+				dst_page = pfn_to_page(page_to_pfn(dpage) + i);
+			}
+
 			src_page = pfn_to_page(page_to_pfn(spage) + i);
-			dst_page = pfn_to_page(page_to_pfn(dpage) + i);
 
 			xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
+			addr += PAGE_SIZE;
 			copy_highpage(dst_page, src_page);
 		}
 next:
-		addr += PAGE_SIZE << order;
 		src += 1 << order;
 		dst += 1 << order;
 	}
@@ -1514,6 +1531,10 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
 		dmirror_device_remove_chunks(dmirror->mdevice);
 		ret = 0;
 		break;
+	case HMM_DMIRROR_FLAGS:
+		dmirror->flags = cmd.npages;
+		ret = 0;
+		break;
 	default:
 		return -EINVAL;

diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h

@@ -37,6 +37,9 @@ struct hmm_dmirror_cmd {
 #define HMM_DMIRROR_EXCLUSIVE		_IOWR('H', 0x05, struct hmm_dmirror_cmd)
 #define HMM_DMIRROR_CHECK_EXCLUSIVE	_IOWR('H', 0x06, struct hmm_dmirror_cmd)
 #define HMM_DMIRROR_RELEASE		_IOWR('H', 0x07, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_FLAGS		_IOWR('H', 0x08, struct hmm_dmirror_cmd)
+
+#define HMM_DMIRROR_FLAG_FAIL_ALLOC	(1ULL << 0)
 
 /*
  * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.