mm: remove devmap related functions and page table bits

Now that DAX and all other reference counts to ZONE_DEVICE pages are
managed normally, there is no need for the special devmap PTE/PMD/PUD page
table bits.  So drop all references to these, freeing up a software-defined
page table bit on the architectures that support it.
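
For code that walks page tables, the practical effect is that the
pXd_devmap() checks simply disappear: a huge ZONE_DEVICE entry is now
recognised by the ordinary pXd_trans_huge()/pXd_leaf() tests alone, as the
x86 and mm/madvise.c hunks below show.  A minimal user-space sketch of that
simplification follows; the FAKE_* flag values, the local pmdval_t typedef
and the *_old/*_new helper names are invented for illustration and are not
the real kernel definitions.

/* Illustrative mock only -- not kernel code. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t pmdval_t;

#define FAKE_PSE    (1ULL << 7)   /* "huge page" hardware bit */
#define FAKE_DEVMAP (1ULL << 57)  /* stand-in for the removed software bit */

/*
 * Before: the trans-huge test masked out the devmap software bit, so a
 * huge ZONE_DEVICE entry was *not* trans-huge and callers had to test both.
 */
static int pmd_trans_huge_old(pmdval_t pmd)
{
        return (pmd & (FAKE_PSE | FAKE_DEVMAP)) == FAKE_PSE;
}

static int pmd_devmap_old(pmdval_t pmd)
{
        return !!(pmd & FAKE_DEVMAP);
}

/* After: the software bit is gone; the plain huge-page test is enough. */
static int pmd_trans_huge_new(pmdval_t pmd)
{
        return (pmd & FAKE_PSE) == FAKE_PSE;
}

int main(void)
{
        pmdval_t old_entry = FAKE_PSE | FAKE_DEVMAP; /* old encoding */
        pmdval_t new_entry = FAKE_PSE;               /* new encoding */

        /* Old callers: pmd_trans_huge(x) || pmd_devmap(x) */
        assert(!pmd_trans_huge_old(old_entry) && pmd_devmap_old(old_entry));

        /* New callers: pmd_trans_huge(x) on its own */
        assert(pmd_trans_huge_new(new_entry));
        return 0;
}

This mirrors the caller-side changes in the diff, e.g. the madvise guard
page walkers dropping "|| pmd_devmap(...)" and x86's pmd_trans_huge() no
longer masking out a devmap bit.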

Link: https://lkml.kernel.org/r/6389398c32cc9daa3dfcaa9f79c7972525d310ce.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Acked-by: Will Deacon <will@kernel.org> # arm64
Acked-by: David Hildenbrand <david@redhat.com>
Suggested-by: Chunyan Zhang <zhang.lyra@gmail.com>
Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Alistair Popple, 2025-06-19 18:58:03 +10:00 (committed by Andrew Morton)
Commit: d438d27341, parent: 28dc88c39e
25 changed files with 17 additions and 319 deletions

View File

@@ -30,8 +30,6 @@ PTE Page Table Helpers
 +---------------------------+--------------------------------------------------+
 | pte_protnone              | Tests a PROT_NONE PTE                            |
 +---------------------------+--------------------------------------------------+
-| pte_devmap                | Tests a ZONE_DEVICE mapped PTE                   |
-+---------------------------+--------------------------------------------------+
 | pte_soft_dirty            | Tests a soft dirty PTE                           |
 +---------------------------+--------------------------------------------------+
 | pte_swp_soft_dirty        | Tests a soft dirty swapped PTE                   |
@@ -104,8 +102,6 @@ PMD Page Table Helpers
 +---------------------------+--------------------------------------------------+
 | pmd_protnone              | Tests a PROT_NONE PMD                            |
 +---------------------------+--------------------------------------------------+
-| pmd_devmap                | Tests a ZONE_DEVICE mapped PMD                   |
-+---------------------------+--------------------------------------------------+
 | pmd_soft_dirty            | Tests a soft dirty PMD                           |
 +---------------------------+--------------------------------------------------+
 | pmd_swp_soft_dirty        | Tests a soft dirty swapped PMD                   |
@@ -177,8 +173,6 @@ PUD Page Table Helpers
 +---------------------------+--------------------------------------------------+
 | pud_write                 | Tests a writable PUD                             |
 +---------------------------+--------------------------------------------------+
-| pud_devmap                | Tests a ZONE_DEVICE mapped PUD                   |
-+---------------------------+--------------------------------------------------+
 | pud_mkyoung               | Creates a young PUD                              |
 +---------------------------+--------------------------------------------------+
 | pud_mkold                 | Creates an old PUD                               |

View File

@@ -44,7 +44,6 @@ config ARM64
         select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
         select ARCH_HAS_PREEMPT_LAZY
         select ARCH_HAS_PTDUMP
-        select ARCH_HAS_PTE_DEVMAP
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_HW_PTE_YOUNG
         select ARCH_HAS_SETUP_DMA_OPS

View File

@@ -17,7 +17,6 @@
 #define PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 2) /* only for swp ptes */
 #define PTE_DIRTY (_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-#define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
 
 /*
  * PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be

View File

@@ -190,7 +190,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 #define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
 #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
-#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
 #define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
                          PTE_ATTRINDX(MT_NORMAL_TAGGED))
 
@@ -372,11 +371,6 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
         return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
 }
 
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
-        return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
-}
-
 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
 static inline int pte_uffd_wp(pte_t pte)
 {
@@ -653,14 +647,6 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
         return __pmd((pmd_val(pmd) & ~mask) | val);
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
-#endif
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
-        return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
-}
-
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 #define pmd_special(pte) (!!((pmd_val(pte) & PTE_SPECIAL)))
 static inline pmd_t pmd_mkspecial(pmd_t pmd)
@@ -1302,16 +1288,6 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
         return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
                                        pmd_pte(entry), dirty);
 }
-
-static inline int pud_devmap(pud_t pud)
-{
-        return 0;
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
-        return 0;
-}
 #endif
 
 #ifdef CONFIG_PAGE_TABLE_CHECK

View File

@@ -25,7 +25,6 @@ config LOONGARCH
         select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
         select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
         select ARCH_HAS_PREEMPT_LAZY
-        select ARCH_HAS_PTE_DEVMAP
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_SET_MEMORY
         select ARCH_HAS_SET_DIRECT_MAP

View File

@@ -22,7 +22,6 @@
 #define _PAGE_PFN_SHIFT 12
 #define _PAGE_SWP_EXCLUSIVE_SHIFT 23
 #define _PAGE_PFN_END_SHIFT 48
-#define _PAGE_DEVMAP_SHIFT 59
 #define _PAGE_PRESENT_INVALID_SHIFT 60
 #define _PAGE_NO_READ_SHIFT 61
 #define _PAGE_NO_EXEC_SHIFT 62
@@ -36,7 +35,6 @@
 #define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
 #define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
 #define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
-#define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
 
 /* We borrow bit 23 to store the exclusive marker in swap PTEs. */
 #define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
@@ -76,8 +74,8 @@
 #define __READABLE (_PAGE_VALID)
 #define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
 
-#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
-#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
 
 #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
                            _PAGE_USER | _CACHE_CC)

View File

@@ -409,9 +409,6 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL;
 static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
-static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); }
-static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; }
-
 #define pte_accessible pte_accessible
 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
@@ -540,17 +537,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
         return pmd;
 }
 
-static inline int pmd_devmap(pmd_t pmd)
-{
-        return !!(pmd_val(pmd) & _PAGE_DEVMAP);
-}
-
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
-        pmd_val(pmd) |= _PAGE_DEVMAP;
-        return pmd;
-}
-
 static inline struct page *pmd_page(pmd_t pmd)
 {
         if (pmd_trans_huge(pmd))
@@ -606,11 +592,6 @@ static inline long pmd_protnone(pmd_t pmd)
 #define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
 #define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pud_devmap(pud) (0)
-#define pgd_devmap(pgd) (0)
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
 /*
  * We provide our own get_unmapped area to cope with the virtual aliasing
  * constraints placed on us by the cache architecture.

View File

@@ -149,7 +149,6 @@ config PPC
         select ARCH_HAS_PMEM_API
         select ARCH_HAS_PREEMPT_LAZY
         select ARCH_HAS_PTDUMP
-        select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
         select ARCH_HAS_SET_MEMORY

View File

@@ -168,12 +168,6 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
 extern int hash__has_transparent_hugepage(void);
 #endif
 
-static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
-{
-        BUG();
-        return pmd;
-}
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */

View File

@@ -259,7 +259,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
  */
 static inline int hash__pmd_trans_huge(pmd_t pmd)
 {
-        return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
+        return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
                   (_PAGE_PTE | H_PAGE_THP_HUGE));
 }
 
@@ -281,11 +281,6 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
 extern int hash__has_transparent_hugepage(void);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
-{
-        return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
-}
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */

View File

@@ -88,7 +88,6 @@
 #define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
 #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
-#define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */
 
 /*
  * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
@@ -109,7 +108,7 @@
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
                          _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
-                         _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+                         _PAGE_SOFT_DIRTY)
 /*
  * user access blocked by key
  */
@@ -123,7 +122,7 @@
  */
 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
-                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+                        _PAGE_SOFT_DIRTY)
 
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
@@ -609,24 +608,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
         return pte;
 }
 
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
-        return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
-}
-
-/*
- * This is potentially called with a pmd as the argument, in which case it's not
- * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
- * That's because the bit we use for _PAGE_DEVMAP is not reserved for software
- * use in page directory entries (ie. non-ptes).
- */
-static inline int pte_devmap(pte_t pte)
-{
-        __be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
-
-        return (pte_raw(pte) & mask) == mask;
-}
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
         /* FIXME!! check whether this need to be a conditional */
@@ -1379,36 +1360,6 @@ static inline bool arch_needs_pgtable_deposit(void)
 }
 
 extern void serialize_against_pte_lookup(struct mm_struct *mm);
-
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
-        if (radix_enabled())
-                return radix__pmd_mkdevmap(pmd);
-        return hash__pmd_mkdevmap(pmd);
-}
-
-static inline pud_t pud_mkdevmap(pud_t pud)
-{
-        if (radix_enabled())
-                return radix__pud_mkdevmap(pud);
-        BUG();
-        return pud;
-}
-
-static inline int pmd_devmap(pmd_t pmd)
-{
-        return pte_devmap(pmd_pte(pmd));
-}
-
-static inline int pud_devmap(pud_t pud)
-{
-        return pte_devmap(pud_pte(pud));
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
-        return 0;
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION

View File

@@ -264,7 +264,7 @@ static inline int radix__p4d_bad(p4d_t p4d)
 static inline int radix__pmd_trans_huge(pmd_t pmd)
 {
-        return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
+        return (pmd_val(pmd) & _PAGE_PTE) == _PAGE_PTE;
 }
 
@@ -274,7 +274,7 @@ static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
 
 static inline int radix__pud_trans_huge(pud_t pud)
 {
-        return (pud_val(pud) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
+        return (pud_val(pud) & _PAGE_PTE) == _PAGE_PTE;
 }
 
 static inline pud_t radix__pud_mkhuge(pud_t pud)
@@ -315,16 +315,6 @@ static inline int radix__has_transparent_pud_hugepage(void)
 }
 #endif
 
-static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
-{
-        return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
-}
-
-static inline pud_t radix__pud_mkdevmap(pud_t pud)
-{
-        return __pud(pud_val(pud) | (_PAGE_PTE | _PAGE_DEVMAP));
-}
-
 struct vmem_altmap;
 struct dev_pagemap;
 extern int __meminit radix__vmemmap_create_mapping(unsigned long start,

View File

@@ -46,7 +46,6 @@ config RISCV
         select ARCH_HAS_PREEMPT_LAZY
         select ARCH_HAS_PREPARE_SYNC_CORE_CMD
         select ARCH_HAS_PTDUMP if MMU
-        select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_SET_DIRECT_MAP if MMU
         select ARCH_HAS_SET_MEMORY if MMU

View File

@@ -397,24 +397,8 @@ static inline struct page *pgd_page(pgd_t pgd)
 p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pte_devmap(pte_t pte);
 static inline pte_t pmd_pte(pmd_t pmd);
 static inline pte_t pud_pte(pud_t pud);
-
-static inline int pmd_devmap(pmd_t pmd)
-{
-        return pte_devmap(pmd_pte(pmd));
-}
-
-static inline int pud_devmap(pud_t pud)
-{
-        return pte_devmap(pud_pte(pud));
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
-        return 0;
-}
 #endif
 
 #endif /* _ASM_RISCV_PGTABLE_64_H */

View File

@@ -19,7 +19,6 @@
 #define _PAGE_SOFT (3 << 8) /* Reserved for software */
 
 #define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */
-#define _PAGE_DEVMAP (1 << 9) /* RSW, devmap */
 #define _PAGE_TABLE _PAGE_PRESENT
 
 /*

View File

@@ -409,13 +409,6 @@ static inline int pte_special(pte_t pte)
         return pte_val(pte) & _PAGE_SPECIAL;
 }
 
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
-{
-        return pte_val(pte) & _PAGE_DEVMAP;
-}
-#endif
-
 /* static inline pte_t pte_rdprotect(pte_t pte) */
 
 static inline pte_t pte_wrprotect(pte_t pte)
@@ -457,11 +450,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
         return __pte(pte_val(pte) | _PAGE_SPECIAL);
 }
 
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
-        return __pte(pte_val(pte) | _PAGE_DEVMAP);
-}
-
 static inline pte_t pte_mkhuge(pte_t pte)
 {
         return pte;
@@ -790,11 +778,6 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
         return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
 }
 
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
-        return pte_pmd(pte_mkdevmap(pmd_pte(pmd)));
-}
-
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 static inline bool pmd_special(pmd_t pmd)
 {
@@ -946,11 +929,6 @@ static inline pud_t pud_mkhuge(pud_t pud)
         return pud;
 }
 
-static inline pud_t pud_mkdevmap(pud_t pud)
-{
-        return pte_pud(pte_mkdevmap(pud_pte(pud)));
-}
-
 static inline int pudp_set_access_flags(struct vm_area_struct *vma,
                                         unsigned long address, pud_t *pudp,
                                         pud_t entry, int dirty)

View File

@@ -101,7 +101,6 @@ config X86
         select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
         select ARCH_HAS_PMEM_API if X86_64
         select ARCH_HAS_PREEMPT_LAZY
-        select ARCH_HAS_PTE_DEVMAP if X86_64
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_HW_PTE_YOUNG
         select ARCH_HAS_NONLEAF_PMD_YOUNG if PGTABLE_LEVELS > 2

View File

@@ -301,16 +301,15 @@ static inline bool pmd_leaf(pmd_t pte)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */
 static inline int pmd_trans_huge(pmd_t pmd)
 {
-        return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+        return (pmd_val(pmd) & _PAGE_PSE) == _PAGE_PSE;
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static inline int pud_trans_huge(pud_t pud)
 {
-        return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+        return (pud_val(pud) & _PAGE_PSE) == _PAGE_PSE;
 }
 #endif
 
@@ -320,24 +319,6 @@ static inline int has_transparent_hugepage(void)
         return boot_cpu_has(X86_FEATURE_PSE);
 }
 
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pmd_devmap(pmd_t pmd)
-{
-        return !!(pmd_val(pmd) & _PAGE_DEVMAP);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline int pud_devmap(pud_t pud)
-{
-        return !!(pud_val(pud) & _PAGE_DEVMAP);
-}
-#else
-static inline int pud_devmap(pud_t pud)
-{
-        return 0;
-}
-#endif
-
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 static inline bool pmd_special(pmd_t pmd)
 {
@@ -361,12 +342,6 @@ static inline pud_t pud_mkspecial(pud_t pud)
         return pud_set_flags(pud, _PAGE_SPECIAL);
 }
 #endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-
-static inline int pgd_devmap(pgd_t pgd)
-{
-        return 0;
-}
-#endif
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
@@ -527,11 +502,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
         return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
-        return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
-}
-
 /* See comments above mksaveddirty_shift() */
 static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
 {
@@ -603,11 +573,6 @@ static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
         return pmd_set_flags(pmd, _PAGE_DIRTY);
 }
 
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
-        return pmd_set_flags(pmd, _PAGE_DEVMAP);
-}
-
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
         return pmd_set_flags(pmd, _PAGE_PSE);
@@ -673,11 +638,6 @@ static inline pud_t pud_mkdirty(pud_t pud)
         return pud_mksaveddirty(pud);
 }
 
-static inline pud_t pud_mkdevmap(pud_t pud)
-{
-        return pud_set_flags(pud, _PAGE_DEVMAP);
-}
-
 static inline pud_t pud_mkhuge(pud_t pud)
 {
         return pud_set_flags(pud, _PAGE_PSE);
@@ -1008,13 +968,6 @@ static inline int pte_present(pte_t a)
         return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t a)
-{
-        return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
-}
-#endif
-
 #define pte_accessible pte_accessible
 static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {

View File

@@ -34,7 +34,6 @@
 #define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
 #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
 #define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */
-#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
 
 #ifdef CONFIG_X86_64
 #define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
@@ -121,11 +120,9 @@
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
 #define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4)
 #else
 #define _PAGE_NX (_AT(pteval_t, 0))
-#define _PAGE_DEVMAP (_AT(pteval_t, 0))
 #define _PAGE_SOFTW4 (_AT(pteval_t, 0))
 #endif
 
@@ -154,7 +151,7 @@
 #define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
                                _PAGE_SPECIAL | _PAGE_ACCESSED | \
                                _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \
-                               _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
+                               _PAGE_CC | _PAGE_UFFD_WP)
 #define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
 #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)

View File

@@ -2704,13 +2704,6 @@ static inline pud_t pud_mkspecial(pud_t pud)
 }
 #endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
 
-#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
-{
-        return 0;
-}
-#endif
-
 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                                spinlock_t **ptl);
 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,

View File

@@ -1643,21 +1643,6 @@ static inline int pud_write(pud_t pud)
 }
 #endif /* pud_write */
 
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
-        return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
-        return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
-        return 0;
-}
-#endif
-
 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
         !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
 static inline int pud_trans_huge(pud_t pud)
@@ -1912,8 +1897,8 @@ typedef unsigned int pgtbl_mod_mask;
  * - It should contain a huge PFN, which points to a huge page larger than
  *   PAGE_SIZE of the platform. The PFN format isn't important here.
  *
- * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(),
- *   pXd_devmap(), or hugetlb mappings).
+ * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
+ *   or hugetlb mappings).
  */
 #ifndef pgd_leaf
 #define pgd_leaf(x) false

View File

@@ -1117,9 +1117,6 @@ config ARCH_HAS_CURRENT_STACK_POINTER
           register alias named "current_stack_pointer", this config can be
           selected.
 
-config ARCH_HAS_PTE_DEVMAP
-        bool
-
 config ARCH_HAS_ZONE_DMA_SET
         bool
 
@@ -1137,7 +1134,6 @@ config ZONE_DEVICE
         depends on MEMORY_HOTPLUG
         depends on MEMORY_HOTREMOVE
         depends on SPARSEMEM_VMEMMAP
-        depends on ARCH_HAS_PTE_DEVMAP
         select XARRAY_MULTI
 
         help

View File

@@ -348,12 +348,6 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
         vaddr &= HPAGE_PUD_MASK;
 
         pud = pfn_pud(args->pud_pfn, args->page_prot);
-        /*
-         * Some architectures have debug checks to make sure
-         * huge pud mapping are only found with devmap entries
-         * For now test with only devmap entries.
-         */
-        pud = pud_mkdevmap(pud);
         set_pud_at(args->mm, vaddr, args->pudp, pud);
         flush_dcache_page(page);
         pudp_set_wrprotect(args->mm, vaddr, args->pudp);
@@ -366,7 +360,6 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
         WARN_ON(!pud_none(pud));
 #endif /* __PAGETABLE_PMD_FOLDED */
         pud = pfn_pud(args->pud_pfn, args->page_prot);
-        pud = pud_mkdevmap(pud);
         pud = pud_wrprotect(pud);
         pud = pud_mkclean(pud);
         set_pud_at(args->mm, vaddr, args->pudp, pud);
@@ -384,7 +377,6 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
         pud = pfn_pud(args->pud_pfn, args->page_prot);
-        pud = pud_mkdevmap(pud);
         pud = pud_mkyoung(pud);
         set_pud_at(args->mm, vaddr, args->pudp, pud);
         flush_dcache_page(page);
@@ -693,53 +685,6 @@ static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
 static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static void __init pte_devmap_tests(struct pgtable_debug_args *args)
-{
-        pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
-
-        pr_debug("Validating PTE devmap\n");
-        WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
-{
-        pmd_t pmd;
-
-        if (!has_transparent_hugepage())
-                return;
-
-        pr_debug("Validating PMD devmap\n");
-        pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
-        WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static void __init pud_devmap_tests(struct pgtable_debug_args *args)
-{
-        pud_t pud;
-
-        if (!has_transparent_pud_hugepage())
-                return;
-
-        pr_debug("Validating PUD devmap\n");
-        pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
-        WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
-}
-#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
-#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
-static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#else
-static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
-static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
-static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
-#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
-
 static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
 {
         pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
@@ -1333,10 +1278,6 @@ static int __init debug_vm_pgtable(void)
         pte_protnone_tests(&args);
         pmd_protnone_tests(&args);
 
-        pte_devmap_tests(&args);
-        pmd_devmap_tests(&args);
-        pud_devmap_tests(&args);
-
         pte_soft_dirty_tests(&args);
         pmd_soft_dirty_tests(&args);
         pte_swap_soft_dirty_tests(&args);

View File

@@ -405,8 +405,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
         return 0;
 }
 
-#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
-    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+#if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
                                                  pud_t pud)
 {

View File

@@ -1069,7 +1069,7 @@ static int guard_install_pud_entry(pud_t *pud, unsigned long addr,
         pud_t pudval = pudp_get(pud);
 
         /* If huge return >0 so we abort the operation + zap. */
-        return pud_trans_huge(pudval) || pud_devmap(pudval);
+        return pud_trans_huge(pudval);
 }
 
 static int guard_install_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -1078,7 +1078,7 @@ static int guard_install_pmd_entry(pmd_t *pmd, unsigned long addr,
         pmd_t pmdval = pmdp_get(pmd);
 
         /* If huge return >0 so we abort the operation + zap. */
-        return pmd_trans_huge(pmdval) || pmd_devmap(pmdval);
+        return pmd_trans_huge(pmdval);
 }
 
 static int guard_install_pte_entry(pte_t *pte, unsigned long addr,
@@ -1189,7 +1189,7 @@ static int guard_remove_pud_entry(pud_t *pud, unsigned long addr,
         pud_t pudval = pudp_get(pud);
 
         /* If huge, cannot have guard pages present, so no-op - skip. */
-        if (pud_trans_huge(pudval) || pud_devmap(pudval))
+        if (pud_trans_huge(pudval))
                 walk->action = ACTION_CONTINUE;
 
         return 0;
@@ -1201,7 +1201,7 @@ static int guard_remove_pmd_entry(pmd_t *pmd, unsigned long addr,
         pmd_t pmdval = pmdp_get(pmd);
 
         /* If huge, cannot have guard pages present, so no-op - skip. */
-        if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
+        if (pmd_trans_huge(pmdval))
                 walk->action = ACTION_CONTINUE;
 
         return 0;