mm: replace remaining pte_to_swp_entry() with softleaf_from_pte()
There are straggler invocations of pte_to_swp_entry() lying around; replace all of these with the software leaf entry equivalent, softleaf_from_pte(). With those removed, eliminate pte_to_swp_entry() altogether.

No functional change intended.

Link: https://lkml.kernel.org/r/d8ee5ccefe4c42d7c4fe1a2e46f285ac40421cd3.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 93976a2034
commit a3a3e215c9
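Every hunk below applies the same pattern: a non-present PTE is decoded through the software leaf entry API instead of pte_to_swp_entry(). A minimal sketch of a hypothetical caller, using only helper names visible in the hunks (softleaf_from_pte(), softleaf_to_pfn()); example_leaf_pfn() itself is invented for illustration:

/* Illustrative only: decode a non-present PTE the softleaf way. */
static unsigned long example_leaf_pfn(pte_t pteval)
{
	/* Previously written as: softleaf_to_pfn(pte_to_swp_entry(pteval)). */
	const softleaf_t entry = softleaf_from_pte(pteval);

	return softleaf_to_pfn(entry);
}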
@@ -54,11 +54,16 @@ static inline softleaf_t softleaf_mk_none(void)
  */
 static inline softleaf_t softleaf_from_pte(pte_t pte)
 {
+	softleaf_t arch_entry;
+
 	if (pte_present(pte) || pte_none(pte))
 		return softleaf_mk_none();
 
+	pte = pte_swp_clear_flags(pte);
+	arch_entry = __pte_to_swp_entry(pte);
+
 	/* Temporary until swp_entry_t eliminated. */
-	return pte_to_swp_entry(pte);
+	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
 
 /**
@@ -107,19 +107,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
 	return entry.val & SWP_OFFSET_MASK;
 }
 
-/*
- * Convert the arch-dependent pte representation of a swp_entry_t into an
- * arch-independent swp_entry_t.
- */
-static inline swp_entry_t pte_to_swp_entry(pte_t pte)
-{
-	swp_entry_t arch_entry;
-
-	pte = pte_swp_clear_flags(pte);
-	arch_entry = __pte_to_swp_entry(pte);
-	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
 /*
  * Convert the arch-independent representation of a swp_entry_t into the
  * arch-dependent pte representation.
@@ -1229,7 +1229,7 @@ static int __init init_args(struct pgtable_debug_args *args)
 	init_fixed_pfns(args);
 
 	/* See generic_max_swapfile_size(): probe the maximum offset */
-	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
+	max_swap_offset = swp_offset(softleaf_from_pte(softleaf_to_pte(swp_entry(0, ~0UL))));
 	/* Create a swp entry with all possible bits set while still being swap. */
 	args->swp_entry = swp_entry(MAX_SWAPFILES - 1, max_swap_offset);
 	/* Create a non-present migration entry. */
@@ -334,7 +334,7 @@ unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
  */
 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
 {
-	swp_entry_t entry = pte_to_swp_entry(pte);
+	const softleaf_t entry = softleaf_from_pte(pte);
 	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
 						   (swp_offset(entry) + delta)));
 
@@ -389,11 +389,14 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
 
 	cgroup_id = lookup_swap_cgroup_id(entry);
 	while (ptep < end_ptep) {
+		softleaf_t entry;
+
 		pte = ptep_get(ptep);
 
 		if (!pte_same(pte, expected_pte))
 			break;
-		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
+		entry = softleaf_from_pte(pte);
+		if (lookup_swap_cgroup_id(entry) != cgroup_id)
 			break;
 		expected_pte = pte_next_swp_offset(expected_pte);
 		ptep++;
@@ -51,7 +51,7 @@
 #include <linux/backing-dev.h>
 #include <linux/migrate.h>
 #include <linux/slab.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm_inline.h>
mm/memory.c (16 changes)
@@ -1218,7 +1218,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress, max_nr, ret = 0;
 	int rss[NR_MM_COUNTERS];
-	swp_entry_t entry = (swp_entry_t){0};
+	softleaf_t entry = softleaf_mk_none();
 	struct folio *prealloc = NULL;
 	int nr;
 
@@ -1282,7 +1282,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 						  dst_vma, src_vma,
 						  addr, rss);
 			if (ret == -EIO) {
-				entry = pte_to_swp_entry(ptep_get(src_pte));
+				entry = softleaf_from_pte(ptep_get(src_pte));
 				break;
 			} else if (ret == -EBUSY) {
 				break;
@@ -4446,13 +4446,13 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct folio *folio;
-	swp_entry_t entry;
+	softleaf_t entry;
 
 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address);
 	if (!folio)
 		return NULL;
 
-	entry = pte_to_swp_entry(vmf->orig_pte);
+	entry = softleaf_from_pte(vmf->orig_pte);
 	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
 					   GFP_KERNEL, entry)) {
 		folio_put(folio);
@@ -4470,7 +4470,7 @@ static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
 {
 	unsigned long addr;
-	swp_entry_t entry;
+	softleaf_t entry;
 	int idx;
 	pte_t pte;
 
@@ -4480,7 +4480,7 @@ static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
 
 	if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
 		return false;
-	entry = pte_to_swp_entry(pte);
+	entry = softleaf_from_pte(pte);
 	if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
 		return false;
 
@@ -4526,7 +4526,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
 	unsigned long orders;
 	struct folio *folio;
 	unsigned long addr;
-	swp_entry_t entry;
+	softleaf_t entry;
 	spinlock_t *ptl;
 	pte_t *pte;
 	gfp_t gfp;
@@ -4547,7 +4547,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
 	if (!zswap_never_enabled())
 		goto fallback;
 
-	entry = pte_to_swp_entry(vmf->orig_pte);
+	entry = softleaf_from_pte(vmf->orig_pte);
 	/*
 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
 	 * and suitable for swapping THP.
@@ -534,7 +534,7 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, p
 	 * lock release in migration_entry_wait_on_locked().
 	 */
 	hugetlb_vma_unlock_read(vma);
-	migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
+	migration_entry_wait_on_locked(entry, ptl);
 	return;
 }
 
@@ -202,7 +202,9 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 				for (i = 0; i < step; i++)
 					vec[i] = 1;
 			} else { /* pte is a swap entry */
-				*vec = mincore_swap(pte_to_swp_entry(pte), false);
+				const softleaf_t entry = softleaf_from_pte(pte);
+
+				*vec = mincore_swap(entry, false);
 			}
 			vec += step;
 		}
@@ -1969,7 +1969,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
+			const softleaf_t entry = softleaf_from_pte(pteval);
+
+			pfn = softleaf_to_pfn(entry);
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
@@ -2368,7 +2370,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		if (likely(pte_present(pteval))) {
 			pfn = pte_pfn(pteval);
 		} else {
-			pfn = softleaf_to_pfn(pte_to_swp_entry(pteval));
+			const softleaf_t entry = softleaf_from_pte(pteval);
+
+			pfn = softleaf_to_pfn(entry);
 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 		}
 
@@ -3202,8 +3202,17 @@ static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
  */
 unsigned long generic_max_swapfile_size(void)
 {
-	return swp_offset(pte_to_swp_entry(
-			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+	swp_entry_t entry = swp_entry(0, ~0UL);
+	const pte_t pte = softleaf_to_pte(entry);
+
+	/*
+	 * Since the PTE can be an invalid softleaf entry (e.g. the none PTE),
+	 * we need to do this manually.
+	 */
+	entry = __pte_to_swp_entry(pte);
+	entry = swp_entry(__swp_type(entry), __swp_offset(entry));
+
+	return swp_offset(entry) + 1;
 }
 
 /* Can be overridden by an architecture for additional checks. */
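For intuition on the generic_max_swapfile_size() probe above: an all-ones offset is pushed through the arch swap-PTE encoding and read back, so any offset bits the PTE format cannot store fall away, and the surviving offset + 1 is the maximum swapfile size in pages. A standalone sketch of the same round-trip idea; the 32-bit offset field is an invented stand-in for the real __pte_to_swp_entry()/__swp_offset() machinery:

#include <stdio.h>

/* Invented stand-in: pretend the architecture keeps 32 offset bits in
 * its swap PTE format. */
#define FAKE_ARCH_OFFSET_MASK ((1ULL << 32) - 1)

/* Encoding to a "PTE" and decoding again drops unstorable bits. */
static unsigned long long pte_offset_round_trip(unsigned long long offset)
{
	return offset & FAKE_ARCH_OFFSET_MASK;
}

int main(void)
{
	/* Probe with all bits set, as swp_entry(0, ~0UL) does. */
	unsigned long long max_pages = pte_offset_round_trip(~0ULL) + 1;

	printf("max swapfile size: %llu pages\n", max_pages);
	return 0;
}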