mm: replace pmd_to_swp_entry() with softleaf_from_pmd()

Introduce softleaf_from_pmd() to perform the equivalent operation for PMDs
that softleaf_from_pte() performs for PTEs, and cascade the change through
the code base accordingly, introducing helpers as necessary.

We are then able to eliminate pmd_to_swp_entry(),
is_pmd_migration_entry(), is_pmd_device_private_entry() and
is_pmd_non_present_folio_entry().
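
For example, a call site that previously open-coded the check via
swp_entry_t now uses the PMD helper directly.  This is an illustrative
sketch of the pattern (compare the clear_soft_dirty_pmd() and
__handle_mm_fault() hunks below), not a literal hunk from the patch:

	/* Before: */
	swp_entry_t entry = pmd_to_swp_entry(pmd);

	if (is_migration_entry(entry))
		pmd_migration_entry_wait(mm, pmdp);

	/* After: */
	if (pmd_is_migration_entry(pmd))
		pmd_migration_entry_wait(mm, pmdp);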

This further establishes the use of leaf operations throughout the code
base and lays the foundations for eliminating is_swap_pmd().

No functional change intended.

[lorenzo.stoakes@oracle.com: check writable, not readable/writable, per Vlastimil]
  Link: https://lkml.kernel.org/r/cd97b6ec-00f9-45a4-9ae0-8f009c212a94@lucifer.local
Link: https://lkml.kernel.org/r/3fb431699639ded8fdc63d2210aa77a38c8891f1.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -1065,10 +1065,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
page = vm_normal_page_pmd(vma, addr, *pmd);
present = true;
} else if (unlikely(thp_migration_supported())) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
const softleaf_t entry = softleaf_from_pmd(*pmd);
if (is_pfn_swap_entry(entry))
page = pfn_swap_entry_to_page(entry);
if (softleaf_has_pfn(entry))
page = softleaf_to_page(entry);
}
if (IS_ERR_OR_NULL(page))
return;
@@ -1655,7 +1655,7 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
pmd = pmd_clear_soft_dirty(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
} else if (pmd_is_migration_entry(pmd)) {
pmd = pmd_swp_clear_soft_dirty(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
@@ -2016,12 +2016,12 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
if (pm->show_pfn)
frame = pmd_pfn(pmd) + idx;
} else if (thp_migration_supported()) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
const softleaf_t entry = softleaf_from_pmd(pmd);
unsigned long offset;
if (pm->show_pfn) {
if (is_pfn_swap_entry(entry))
offset = swp_offset_pfn(entry) + idx;
if (softleaf_has_pfn(entry))
offset = softleaf_to_pfn(entry) + idx;
else
offset = swp_offset(entry) + idx;
frame = swp_type(entry) |
@@ -2032,7 +2032,7 @@ static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
flags |= PM_SOFT_DIRTY;
if (pmd_swp_uffd_wp(pmd))
flags |= PM_UFFD_WP;
VM_WARN_ON_ONCE(!is_pmd_migration_entry(pmd));
VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd));
page = pfn_swap_entry_to_page(entry);
}
@@ -2426,8 +2426,6 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
if (pmd_soft_dirty(pmd))
categories |= PAGE_IS_SOFT_DIRTY;
} else {
swp_entry_t swp;
categories |= PAGE_IS_SWAPPED;
if (!pmd_swp_uffd_wp(pmd))
categories |= PAGE_IS_WRITTEN;
@@ -2435,9 +2433,10 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
categories |= PAGE_IS_SOFT_DIRTY;
if (p->masks_of_interest & PAGE_IS_FILE) {
swp = pmd_to_swp_entry(pmd);
if (is_pfn_swap_entry(swp) &&
!folio_test_anon(pfn_swap_entry_folio(swp)))
const softleaf_t entry = softleaf_from_pmd(pmd);
if (softleaf_has_pfn(entry) &&
!folio_test_anon(softleaf_to_folio(entry)))
categories |= PAGE_IS_FILE;
}
}
@@ -2454,7 +2453,7 @@ static void make_uffd_wp_pmd(struct vm_area_struct *vma,
old = pmdp_invalidate_ad(vma, addr, pmdp);
pmd = pmd_mkuffd_wp(old);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
} else if (pmd_is_migration_entry(pmd)) {
pmd = pmd_swp_mkuffd_wp(pmd);
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}


@@ -61,6 +61,57 @@ static inline softleaf_t softleaf_from_pte(pte_t pte)
return pte_to_swp_entry(pte);
}
/**
* softleaf_to_pte() - Obtain a PTE entry from a leaf entry.
* @entry: Leaf entry.
*
* This generates an architecture-specific PTE entry that can be utilised to
* encode the metadata the leaf entry encodes.
*
* Returns: Architecture-specific PTE entry encoding leaf entry.
*/
static inline pte_t softleaf_to_pte(softleaf_t entry)
{
/* Temporary until swp_entry_t eliminated. */
return swp_entry_to_pte(entry);
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
/**
* softleaf_from_pmd() - Obtain a leaf entry from a PMD entry.
* @pmd: PMD entry.
*
* If @pmd is present (therefore not a leaf entry) the function returns an empty
* leaf entry. Otherwise, it returns a leaf entry.
*
* Returns: Leaf entry.
*/
static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
{
softleaf_t arch_entry;
if (pmd_present(pmd) || pmd_none(pmd))
return softleaf_mk_none();
if (pmd_swp_soft_dirty(pmd))
pmd = pmd_swp_clear_soft_dirty(pmd);
if (pmd_swp_uffd_wp(pmd))
pmd = pmd_swp_clear_uffd_wp(pmd);
arch_entry = __pmd_to_swp_entry(pmd);
/* Temporary until swp_entry_t eliminated. */
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
#else
static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
{
return softleaf_mk_none();
}
#endif
/**
* softleaf_is_none() - Is the leaf entry empty?
* @entry: Leaf entry.
@@ -134,6 +185,43 @@ static inline bool softleaf_is_swap(softleaf_t entry)
return softleaf_type(entry) == SOFTLEAF_SWAP;
}
/**
* softleaf_is_migration_write() - Is this leaf entry a writable migration entry?
* @entry: Leaf entry.
*
* Returns: true if the leaf entry is a writable migration entry, otherwise
* false.
*/
static inline bool softleaf_is_migration_write(softleaf_t entry)
{
return softleaf_type(entry) == SOFTLEAF_MIGRATION_WRITE;
}
/**
* softleaf_is_migration_read() - Is this leaf entry a readable migration entry?
* @entry: Leaf entry.
*
* Returns: true if the leaf entry is a readable migration entry, otherwise
* false.
*/
static inline bool softleaf_is_migration_read(softleaf_t entry)
{
return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ;
}
/**
* softleaf_is_migration_read_exclusive() - Is this leaf entry an exclusive
* readable migration entry?
* @entry: Leaf entry.
*
* Returns: true if the leaf entry is an exclusive readable migration entry,
* otherwise false.
*/
static inline bool softleaf_is_migration_read_exclusive(softleaf_t entry)
{
return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
}
/**
* softleaf_is_migration() - Is this leaf entry a migration entry?
* @entry: Leaf entry.
@@ -152,6 +240,19 @@ static inline bool softleaf_is_migration(softleaf_t entry)
}
}
/**
* softleaf_is_device_private_write() - Is this leaf entry a device private
* writable entry?
* @entry: Leaf entry.
*
* Returns: true if the leaf entry is a device private writable entry, otherwise
* false.
*/
static inline bool softleaf_is_device_private_write(softleaf_t entry)
{
return softleaf_type(entry) == SOFTLEAF_DEVICE_PRIVATE_WRITE;
}
/**
* softleaf_is_device_private() - Is this leaf entry a device private entry?
* @entry: Leaf entry.
@@ -170,10 +271,10 @@ static inline bool softleaf_is_device_private(softleaf_t entry)
}
/**
* softleaf_is_device_exclusive() - Is this leaf entry a device exclusive entry?
* softleaf_is_device_exclusive() - Is this leaf entry a device-exclusive entry?
* @entry: Leaf entry.
*
* Returns: true if the leaf entry is a device exclusive entry, otherwise false.
* Returns: true if the leaf entry is a device-exclusive entry, otherwise false.
*/
static inline bool softleaf_is_device_exclusive(softleaf_t entry)
{
@@ -332,6 +433,61 @@ static inline bool softleaf_is_uffd_wp_marker(softleaf_t entry)
return softleaf_to_marker(entry) & PTE_MARKER_UFFD_WP;
}
#ifdef CONFIG_MIGRATION
/**
* softleaf_is_migration_young() - Does this migration entry contain an accessed
* bit?
* @entry: Leaf entry.
*
* If the architecture can support storing A/D bits in migration entries, this
* determines whether the accessed (or 'young') bit was set on the migrated page
* table entry.
*
* Returns: true if the entry contains an accessed bit, otherwise false.
*/
static inline bool softleaf_is_migration_young(softleaf_t entry)
{
VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
if (migration_entry_supports_ad())
return swp_offset(entry) & SWP_MIG_YOUNG;
/* Keep the old behavior of aging page after migration */
return false;
}
/**
* softleaf_is_migration_dirty() - Does this migration entry contain a dirty bit?
* @entry: Leaf entry.
*
* If the architecture can support storing A/D bits in migration entries, this
* determines whether the dirty bit was set on the migrated page table entry.
*
* Returns: true if the entry contains a dirty bit, otherwise false.
*/
static inline bool softleaf_is_migration_dirty(softleaf_t entry)
{
VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
if (migration_entry_supports_ad())
return swp_offset(entry) & SWP_MIG_DIRTY;
/* Keep the old behavior of clean page after migration */
return false;
}
#else /* CONFIG_MIGRATION */
static inline bool softleaf_is_migration_young(softleaf_t entry)
{
return false;
}
static inline bool softleaf_is_migration_dirty(softleaf_t entry)
{
return false;
}
#endif /* CONFIG_MIGRATION */
/**
* pte_is_marker() - Does the PTE entry encode a marker leaf entry?
* @pte: PTE entry.
@@ -383,5 +539,63 @@ static inline bool pte_is_uffd_marker(pte_t pte)
return false;
}
#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
/**
* pmd_is_device_private_entry() - Check if PMD contains a device private swap
* entry.
* @pmd: The PMD to check.
*
* Returns true if the PMD contains a swap entry that represents a device private
* page mapping. This is used for zone device private pages that have been
* swapped out but still need special handling during various memory management
* operations.
*
* Return: true if PMD contains device private entry, false otherwise
*/
static inline bool pmd_is_device_private_entry(pmd_t pmd)
{
return softleaf_is_device_private(softleaf_from_pmd(pmd));
}
#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline bool pmd_is_device_private_entry(pmd_t pmd)
{
return false;
}
#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
/**
* pmd_is_migration_entry() - Does this PMD entry encode a migration entry?
* @pmd: PMD entry.
*
* Returns: true if the PMD encodes a migration entry, otherwise false.
*/
static inline bool pmd_is_migration_entry(pmd_t pmd)
{
return softleaf_is_migration(softleaf_from_pmd(pmd));
}
/**
* pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry?
* @pmd: PMD entry.
*
* PMD leaf entries are valid only if they are device private or migration
* entries. This function asserts that a PMD leaf entry is valid in this
* respect.
*
* Returns: true if the PMD entry is a valid leaf entry, otherwise false.
*/
static inline bool pmd_is_valid_softleaf(pmd_t pmd)
{
const softleaf_t entry = softleaf_from_pmd(pmd);
/* Only device private, migration entries valid for PMD. */
return softleaf_is_device_private(entry) ||
softleaf_is_migration(entry);
}
#endif /* CONFIG_MMU */
#endif /* _LINUX_LEAFOPS_H */
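
As a rough usage sketch, assuming only the helpers defined in the header
above (the function below is hypothetical and not part of the patch):

	#include <linux/leafops.h>

	/* Hypothetical caller: classify a non-present PMD. */
	static bool pmd_softleaf_needs_migration_wait(pmd_t pmd)
	{
		const softleaf_t entry = softleaf_from_pmd(pmd);

		/* Present (or empty) PMDs yield an empty leaf entry. */
		if (softleaf_is_none(entry))
			return false;

		return softleaf_is_migration(entry);
	}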


@@ -65,7 +65,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,


@@ -283,14 +283,6 @@ static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
return entry;
}
static inline bool is_migration_entry_young(swp_entry_t entry)
{
if (migration_entry_supports_ad())
return swp_offset(entry) & SWP_MIG_YOUNG;
/* Keep the old behavior of aging page after migration */
return false;
}
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
if (migration_entry_supports_ad())
@@ -299,14 +291,6 @@ static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
return entry;
}
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
if (migration_entry_supports_ad())
return swp_offset(entry) & SWP_MIG_DIRTY;
/* Keep the old behavior of clean page after migration */
return false;
}
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
@@ -349,20 +333,11 @@ static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
return entry;
}
static inline bool is_migration_entry_young(swp_entry_t entry)
{
return false;
}
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
return entry;
}
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
return false;
}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
@@ -487,18 +462,6 @@ extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
swp_entry_t arch_entry;
if (pmd_swp_soft_dirty(pmd))
pmd = pmd_swp_clear_soft_dirty(pmd);
if (pmd_swp_uffd_wp(pmd))
pmd = pmd_swp_clear_uffd_wp(pmd);
arch_entry = __pmd_to_swp_entry(pmd);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
swp_entry_t arch_entry;
@@ -507,23 +470,7 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
return __swp_entry_to_pmd(arch_entry);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
swp_entry_t entry;
if (pmd_present(pmd))
return 0;
entry = pmd_to_swp_entry(pmd);
return is_migration_entry(entry);
}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
BUILD_BUG();
}
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
struct page *new)
{
@@ -532,64 +479,17 @@ static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
return swp_entry(0, 0);
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
return __pmd(0);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
/**
* is_pmd_device_private_entry() - Check if PMD contains a device private swap entry
* @pmd: The PMD to check
*
* Returns true if the PMD contains a swap entry that represents a device private
* page mapping. This is used for zone device private pages that have been
* swapped out but still need special handling during various memory management
* operations.
*
* Return: 1 if PMD contains device private entry, 0 otherwise
*/
static inline int is_pmd_device_private_entry(pmd_t pmd)
{
swp_entry_t entry;
if (pmd_present(pmd))
return 0;
entry = pmd_to_swp_entry(pmd);
return is_device_private_entry(entry);
}
#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int is_pmd_device_private_entry(pmd_t pmd)
{
return 0;
}
#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
}
static inline int is_pmd_non_present_folio_entry(pmd_t pmd)
{
return is_pmd_migration_entry(pmd) || is_pmd_device_private_entry(pmd);
}
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */


@@ -11,7 +11,7 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include "../internal.h"
#include "ops-common.h"
@@ -51,7 +51,7 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
if (likely(pte_present(pteval)))
pfn = pte_pfn(pteval);
else
pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
pfn = softleaf_to_pfn(softleaf_from_pte(pteval));
folio = damon_get_folio(pfn);
if (!folio)
@@ -83,7 +83,7 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
if (likely(pmd_present(pmdval)))
pfn = pmd_pfn(pmdval);
else
pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
folio = damon_get_folio(pfn);
if (!folio)


@@ -21,7 +21,7 @@
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
@@ -1402,7 +1402,7 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
* This follows the same logic as folio_wait_bit_common() so see the comments
* there.
*/
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
__releases(ptl)
{
struct wait_page_queue wait_page;
@@ -1411,7 +1411,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
unsigned long pflags;
bool in_thrashing;
wait_queue_head_t *q;
struct folio *folio = pfn_swap_entry_folio(entry);
struct folio *folio = softleaf_to_folio(entry);
q = folio_waitqueue(folio);
if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {


@@ -18,7 +18,7 @@
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
@@ -339,19 +339,19 @@ static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
unsigned long npages = (end - start) >> PAGE_SHIFT;
const softleaf_t entry = softleaf_from_pmd(pmd);
unsigned long addr = start;
swp_entry_t entry = pmd_to_swp_entry(pmd);
unsigned int required_fault;
if (is_device_private_entry(entry) &&
pfn_swap_entry_folio(entry)->pgmap->owner ==
if (softleaf_is_device_private(entry) &&
softleaf_to_folio(entry)->pgmap->owner ==
range->dev_private_owner) {
unsigned long cpu_flags = HMM_PFN_VALID |
hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
unsigned long pfn = swp_offset_pfn(entry);
unsigned long pfn = softleaf_to_pfn(entry);
unsigned long i;
if (is_writable_device_private_entry(entry))
if (softleaf_is_device_private_write(entry))
cpu_flags |= HMM_PFN_WRITE;
/*
@@ -370,7 +370,7 @@ static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
npages, 0);
if (required_fault) {
if (is_device_private_entry(entry))
if (softleaf_is_device_private(entry))
return hmm_vma_fault(addr, end, required_fault, walk);
else
return -EFAULT;
@@ -412,7 +412,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
if (pmd_none(pmd))
return hmm_vma_walk_hole(start, end, -1, walk);
if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
if (thp_migration_supported() && pmd_is_migration_entry(pmd)) {
if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
hmm_vma_walk->last = addr;
pmd_migration_entry_wait(walk->mm, pmdp);


@@ -1299,7 +1299,7 @@ vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret = 0;
spinlock_t *ptl;
swp_entry_t swp_entry;
softleaf_t entry;
struct page *page;
struct folio *folio;
@@ -1314,8 +1314,8 @@ vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
return 0;
}
swp_entry = pmd_to_swp_entry(vmf->orig_pmd);
page = pfn_swap_entry_to_page(swp_entry);
entry = softleaf_from_pmd(vmf->orig_pmd);
page = softleaf_to_page(entry);
folio = page_folio(page);
vmf->page = page;
vmf->pte = NULL;
@@ -1705,13 +1705,13 @@ static void copy_huge_non_present_pmd(
struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pmd_t pmd, pgtable_t pgtable)
{
swp_entry_t entry = pmd_to_swp_entry(pmd);
softleaf_t entry = softleaf_from_pmd(pmd);
struct folio *src_folio;
VM_WARN_ON(!is_pmd_non_present_folio_entry(pmd));
VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd));
if (is_writable_migration_entry(entry) ||
is_readable_exclusive_migration_entry(entry)) {
if (softleaf_is_migration_write(entry) ||
softleaf_is_migration_read_exclusive(entry)) {
entry = make_readable_migration_entry(swp_offset(entry));
pmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*src_pmd))
@@ -1719,12 +1719,12 @@ static void copy_huge_non_present_pmd(
if (pmd_swp_uffd_wp(*src_pmd))
pmd = pmd_swp_mkuffd_wp(pmd);
set_pmd_at(src_mm, addr, src_pmd, pmd);
} else if (is_device_private_entry(entry)) {
} else if (softleaf_is_device_private(entry)) {
/*
* For device private entries, since there are no
* read exclusive entries, writable = !readable
*/
if (is_writable_device_private_entry(entry)) {
if (softleaf_is_device_private_write(entry)) {
entry = make_readable_device_private_entry(swp_offset(entry));
pmd = swp_entry_to_pmd(entry);
@@ -1735,7 +1735,7 @@ static void copy_huge_non_present_pmd(
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
src_folio = pfn_swap_entry_folio(entry);
src_folio = softleaf_to_folio(entry);
VM_WARN_ON(!folio_test_large(src_folio));
folio_get(src_folio);
@@ -2195,7 +2195,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (unlikely(!pmd_present(orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(orig_pmd));
!pmd_is_migration_entry(orig_pmd));
goto out;
}
@@ -2293,11 +2293,10 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
folio_remove_rmap_pmd(folio, page, vma);
WARN_ON_ONCE(folio_mapcount(folio) < 0);
VM_BUG_ON_PAGE(!PageHead(page), page);
} else if (is_pmd_non_present_folio_entry(orig_pmd)) {
swp_entry_t entry;
} else if (pmd_is_valid_softleaf(orig_pmd)) {
const softleaf_t entry = softleaf_from_pmd(orig_pmd);
entry = pmd_to_swp_entry(orig_pmd);
folio = pfn_swap_entry_folio(entry);
folio = softleaf_to_folio(entry);
flush_needed = 0;
if (!thp_migration_supported())
@@ -2353,7 +2352,7 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
if (unlikely(is_pmd_migration_entry(pmd)))
if (unlikely(pmd_is_migration_entry(pmd)))
pmd = pmd_swp_mksoft_dirty(pmd);
else if (pmd_present(pmd))
pmd = pmd_mksoft_dirty(pmd);
@@ -2428,12 +2427,12 @@ static void change_non_present_huge_pmd(struct mm_struct *mm,
unsigned long addr, pmd_t *pmd, bool uffd_wp,
bool uffd_wp_resolve)
{
swp_entry_t entry = pmd_to_swp_entry(*pmd);
struct folio *folio = pfn_swap_entry_folio(entry);
softleaf_t entry = softleaf_from_pmd(*pmd);
const struct folio *folio = softleaf_to_folio(entry);
pmd_t newpmd;
VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd));
if (is_writable_migration_entry(entry)) {
VM_WARN_ON(!pmd_is_valid_softleaf(*pmd));
if (softleaf_is_migration_write(entry)) {
/*
* A protection check is difficult so
* just be safe and disable write
@@ -2445,7 +2444,7 @@ static void change_non_present_huge_pmd(struct mm_struct *mm,
newpmd = swp_entry_to_pmd(entry);
if (pmd_swp_soft_dirty(*pmd))
newpmd = pmd_swp_mksoft_dirty(newpmd);
} else if (is_writable_device_private_entry(entry)) {
} else if (softleaf_is_device_private_write(entry)) {
entry = make_readable_device_private_entry(swp_offset(entry));
newpmd = swp_entry_to_pmd(entry);
} else {
@@ -2643,7 +2642,7 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
if (!pmd_trans_huge(src_pmdval)) {
spin_unlock(src_ptl);
if (is_pmd_migration_entry(src_pmdval)) {
if (pmd_is_migration_entry(src_pmdval)) {
pmd_migration_entry_wait(mm, &src_pmdval);
return -EAGAIN;
}
@@ -2908,13 +2907,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr;
pte_t *pte;
int i;
swp_entry_t entry;
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
VM_WARN_ON(!is_pmd_non_present_folio_entry(*pmd) && !pmd_trans_huge(*pmd));
VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd));
count_vm_event(THP_SPLIT_PMD);
@@ -2928,11 +2926,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
zap_deposited_table(mm, pmd);
if (!vma_is_dax(vma) && vma_is_special_huge(vma))
return;
if (unlikely(is_pmd_migration_entry(old_pmd))) {
swp_entry_t entry;
if (unlikely(pmd_is_migration_entry(old_pmd))) {
const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
entry = pmd_to_swp_entry(old_pmd);
folio = pfn_swap_entry_folio(entry);
folio = softleaf_to_folio(old_entry);
} else if (is_huge_zero_pmd(old_pmd)) {
return;
} else {
@@ -2962,31 +2959,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
return __split_huge_zero_page_pmd(vma, haddr, pmd);
}
if (pmd_is_migration_entry(*pmd)) {
softleaf_t entry;
if (is_pmd_migration_entry(*pmd)) {
old_pmd = *pmd;
entry = pmd_to_swp_entry(old_pmd);
page = pfn_swap_entry_to_page(entry);
entry = softleaf_from_pmd(old_pmd);
page = softleaf_to_page(entry);
folio = page_folio(page);
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
write = is_writable_migration_entry(entry);
write = softleaf_is_migration_write(entry);
if (PageAnon(page))
anon_exclusive = is_readable_exclusive_migration_entry(entry);
young = is_migration_entry_young(entry);
dirty = is_migration_entry_dirty(entry);
} else if (is_pmd_device_private_entry(*pmd)) {
anon_exclusive = softleaf_is_migration_read_exclusive(entry);
young = softleaf_is_migration_young(entry);
dirty = softleaf_is_migration_dirty(entry);
} else if (pmd_is_device_private_entry(*pmd)) {
softleaf_t entry;
old_pmd = *pmd;
entry = pmd_to_swp_entry(old_pmd);
page = pfn_swap_entry_to_page(entry);
entry = softleaf_from_pmd(old_pmd);
page = softleaf_to_page(entry);
folio = page_folio(page);
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
write = is_writable_device_private_entry(entry);
write = softleaf_is_device_private_write(entry);
anon_exclusive = PageAnonExclusive(page);
/*
@@ -3090,7 +3090,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
* Note that NUMA hinting access restrictions are not transferred to
* avoid any possibility of altering permissions across VMAs.
*/
if (freeze || is_pmd_migration_entry(old_pmd)) {
if (freeze || pmd_is_migration_entry(old_pmd)) {
pte_t entry;
swp_entry_t swp_entry;
@@ -3116,7 +3116,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
VM_WARN_ON(!pte_none(ptep_get(pte + i)));
set_pte_at(mm, addr, pte + i, entry);
}
} else if (is_pmd_device_private_entry(old_pmd)) {
} else if (pmd_is_device_private_entry(old_pmd)) {
pte_t entry;
swp_entry_t swp_entry;
@@ -3166,7 +3166,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
}
pte_unmap(pte);
if (!is_pmd_migration_entry(*pmd))
if (!pmd_is_migration_entry(*pmd))
folio_remove_rmap_pmd(folio, page, vma);
if (freeze)
put_page(page);
@@ -3179,7 +3179,7 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmd, bool freeze)
{
VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
if (pmd_trans_huge(*pmd) || is_pmd_non_present_folio_entry(*pmd))
if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
__split_huge_pmd_locked(vma, pmd, address, freeze);
}
@@ -4749,25 +4749,25 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
unsigned long address = pvmw->address;
unsigned long haddr = address & HPAGE_PMD_MASK;
pmd_t pmde;
swp_entry_t entry;
softleaf_t entry;
if (!(pvmw->pmd && !pvmw->pte))
return;
entry = pmd_to_swp_entry(*pvmw->pmd);
entry = softleaf_from_pmd(*pvmw->pmd);
folio_get(folio);
pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
if (softleaf_is_migration_write(entry))
pmde = pmd_mkwrite(pmde, vma);
if (pmd_swp_uffd_wp(*pvmw->pmd))
pmde = pmd_mkuffd_wp(pmde);
if (!is_migration_entry_young(entry))
if (!softleaf_is_migration_young(entry))
pmde = pmd_mkold(pmde);
/* NOTE: this may contain setting soft-dirty on some archs */
if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
pmde = pmd_mkdirty(pmde);
if (folio_is_device_private(folio)) {
@@ -4790,7 +4790,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
if (folio_test_anon(folio)) {
rmap_t rmap_flags = RMAP_NONE;
if (!is_readable_migration_entry(entry))
if (!softleaf_is_migration_read(entry))
rmap_flags |= RMAP_EXCLUSIVE;
folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);


@@ -17,7 +17,7 @@
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate_wait.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/dax.h>
#include <linux/ksm.h>
@@ -941,7 +941,7 @@ static inline int check_pmd_state(pmd_t *pmd)
* collapse it. Migration success or failure will eventually end
* up with a present PMD mapping a folio again.
*/
if (is_pmd_migration_entry(pmde))
if (pmd_is_migration_entry(pmde))
return SCAN_PMD_MAPPED;
if (!pmd_present(pmde))
return SCAN_PMD_NULL;


@@ -390,7 +390,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
if (unlikely(!pmd_present(orig_pmd))) {
VM_BUG_ON(thp_migration_supported() &&
!is_pmd_migration_entry(orig_pmd));
!pmd_is_migration_entry(orig_pmd));
goto huge_unlock;
}


@@ -6352,10 +6352,10 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
goto fallback;
if (unlikely(!pmd_present(vmf.orig_pmd))) {
if (is_pmd_device_private_entry(vmf.orig_pmd))
if (pmd_is_device_private_entry(vmf.orig_pmd))
return do_huge_pmd_device_private(&vmf);
if (is_pmd_migration_entry(vmf.orig_pmd))
if (pmd_is_migration_entry(vmf.orig_pmd))
pmd_migration_entry_wait(mm, vmf.pmd);
return 0;
}


@@ -110,7 +110,7 @@
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <linux/gcd.h>
#include <asm/tlbflush.h>
@@ -647,7 +647,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
struct folio *folio;
struct queue_pages *qp = walk->private;
if (unlikely(is_pmd_migration_entry(*pmd))) {
if (unlikely(pmd_is_migration_entry(*pmd))) {
qp->nr_failed++;
return;
}


@@ -16,7 +16,7 @@
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
@@ -353,7 +353,7 @@ static bool remove_migration_pte(struct folio *folio,
rmap_t rmap_flags = RMAP_NONE;
pte_t old_pte;
pte_t pte;
swp_entry_t entry;
softleaf_t entry;
struct page *new;
unsigned long idx = 0;
@@ -379,22 +379,22 @@ static bool remove_migration_pte(struct folio *folio,
folio_get(folio);
pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
entry = pte_to_swp_entry(old_pte);
if (!is_migration_entry_young(entry))
entry = softleaf_from_pte(old_pte);
if (!softleaf_is_migration_young(entry))
pte = pte_mkold(pte);
if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
pte = pte_mkdirty(pte);
if (pte_swp_soft_dirty(old_pte))
pte = pte_mksoft_dirty(pte);
else
pte = pte_clear_soft_dirty(pte);
if (is_writable_migration_entry(entry))
if (softleaf_is_migration_write(entry))
pte = pte_mkwrite(pte, vma);
else if (pte_swp_uffd_wp(old_pte))
pte = pte_mkuffd_wp(pte);
if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
if (folio_test_anon(folio) && !softleaf_is_migration_read(entry))
rmap_flags |= RMAP_EXCLUSIVE;
if (unlikely(is_device_private_page(new))) {
@@ -404,7 +404,7 @@ static bool remove_migration_pte(struct folio *folio,
else
entry = make_readable_device_private_entry(
page_to_pfn(new));
pte = swp_entry_to_pte(entry);
pte = softleaf_to_pte(entry);
if (pte_swp_soft_dirty(old_pte))
pte = pte_swp_mksoft_dirty(pte);
if (pte_swp_uffd_wp(old_pte))
@@ -543,9 +543,9 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
if (!is_pmd_migration_entry(*pmd))
if (!pmd_is_migration_entry(*pmd))
goto unlock;
migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
migration_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl);
return;
unlock:
spin_unlock(ptl);


@@ -13,7 +13,7 @@
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <linux/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -141,7 +141,6 @@ static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
struct folio *folio;
struct migrate_vma *migrate = walk->private;
spinlock_t *ptl;
swp_entry_t entry;
int ret;
unsigned long write = 0;
@@ -165,23 +164,24 @@ static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
if (pmd_write(*pmdp))
write = MIGRATE_PFN_WRITE;
} else if (!pmd_present(*pmdp)) {
entry = pmd_to_swp_entry(*pmdp);
folio = pfn_swap_entry_folio(entry);
const softleaf_t entry = softleaf_from_pmd(*pmdp);
if (!is_device_private_entry(entry) ||
folio = softleaf_to_folio(entry);
if (!softleaf_is_device_private(entry) ||
!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
(folio->pgmap->owner != migrate->pgmap_owner)) {
spin_unlock(ptl);
return migrate_vma_collect_skip(start, end, walk);
}
if (is_migration_entry(entry)) {
if (softleaf_is_migration(entry)) {
migration_entry_wait_on_locked(entry, ptl);
spin_unlock(ptl);
return -EAGAIN;
}
if (is_writable_device_private_entry(entry))
if (softleaf_is_device_private_write(entry))
write = MIGRATE_PFN_WRITE;
} else {
spin_unlock(ptl);


@@ -8,7 +8,7 @@
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#undef pr_fmt
#define pr_fmt(fmt) "page_table_check: " fmt
@@ -179,10 +179,10 @@ void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
EXPORT_SYMBOL(__page_table_check_pud_clear);
/* Whether the swap entry cached writable information */
static inline bool swap_cached_writable(swp_entry_t entry)
static inline bool softleaf_cached_writable(softleaf_t entry)
{
return is_writable_device_private_entry(entry) ||
is_writable_migration_entry(entry);
return softleaf_is_device_private_write(entry) ||
softleaf_is_migration_write(entry);
}
static void page_table_check_pte_flags(pte_t pte)
@@ -190,9 +190,9 @@ static void page_table_check_pte_flags(pte_t pte)
if (pte_present(pte)) {
WARN_ON_ONCE(pte_uffd_wp(pte) && pte_write(pte));
} else if (pte_swp_uffd_wp(pte)) {
const swp_entry_t entry = pte_to_swp_entry(pte);
const softleaf_t entry = softleaf_from_pte(pte);
WARN_ON_ONCE(swap_cached_writable(entry));
WARN_ON_ONCE(softleaf_cached_writable(entry));
}
}
@@ -219,9 +219,9 @@ static inline void page_table_check_pmd_flags(pmd_t pmd)
if (pmd_uffd_wp(pmd))
WARN_ON_ONCE(pmd_write(pmd));
} else if (pmd_swp_uffd_wp(pmd)) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
const softleaf_t entry = softleaf_from_pmd(pmd);
WARN_ON_ONCE(swap_cached_writable(entry));
WARN_ON_ONCE(softleaf_cached_writable(entry));
}
}


@@ -242,18 +242,19 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
*/
pmde = pmdp_get_lockless(pvmw->pmd);
if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
if (pmd_trans_huge(pmde) || pmd_is_migration_entry(pmde)) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
pmde = *pvmw->pmd;
if (!pmd_present(pmde)) {
swp_entry_t entry;
softleaf_t entry;
if (!thp_migration_supported() ||
!(pvmw->flags & PVMW_MIGRATION))
return not_found(pvmw);
entry = pmd_to_swp_entry(pmde);
if (!is_migration_entry(entry) ||
!check_pmd(swp_offset_pfn(entry), pvmw))
entry = softleaf_from_pmd(pmde);
if (!softleaf_is_migration(entry) ||
!check_pmd(softleaf_to_pfn(entry), pvmw))
return not_found(pvmw);
return true;
}
@@ -273,9 +274,9 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
* cannot return prematurely, while zap_huge_pmd() has
* cleared *pmd but not decremented compound_mapcount().
*/
swp_entry_t entry = pmd_to_swp_entry(pmde);
const softleaf_t entry = softleaf_from_pmd(pmde);
if (is_device_private_entry(entry)) {
if (softleaf_is_device_private(entry)) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
return true;
}


@@ -5,7 +5,7 @@
#include <linux/hugetlb.h>
#include <linux/mmu_context.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <asm/tlbflush.h>
@@ -973,10 +973,10 @@ struct folio *folio_walk_start(struct folio_walk *fw,
goto found;
}
} else if ((flags & FW_MIGRATION) &&
is_pmd_migration_entry(pmd)) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
pmd_is_migration_entry(pmd)) {
const softleaf_t entry = softleaf_from_pmd(pmd);
page = pfn_swap_entry_to_page(entry);
page = softleaf_to_page(entry);
expose_page = false;
goto found;
}


@@ -57,7 +57,7 @@
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/leafops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
@@ -2341,7 +2341,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (likely(pmd_present(pmdval)))
pfn = pmd_pfn(pmdval);
else
pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
subpage = folio_page(folio, pfn - folio_pfn(folio));