riscv: mm: add userfaultfd write-protect support

The Svrsw60t59b extension frees the reserved PTE bits 60 and 59 for
software use; this patch uses bit 60 for uffd-wp tracking.

Additionally, for tracking the uffd-wp state as a PTE swap bit, we borrow
bit 4, which is not involved in swap entry computation.

Link: https://lkml.kernel.org/r/20251113072806.795029-6-zhangchunyan@iscas.ac.cn
Signed-off-by: Chunyan Zhang <zhangchunyan@iscas.ac.cn>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Conor Dooley <conor.dooley@microchip.com>
Cc: Conor Dooley <conor@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Chunyan Zhang 2025-11-13 15:28:05 +08:00 committed by Andrew Morton
parent 2a3ebad4db
commit c64da3950c
3 changed files with 87 additions and 0 deletions

View File

@ -148,6 +148,7 @@ config RISCV
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if 64BIT && MMU
select HAVE_ARCH_USERFAULTFD_MINOR if 64BIT && USERFAULTFD
select HAVE_ARCH_USERFAULTFD_WP if 64BIT && MMU && USERFAULTFD && RISCV_ISA_SVRSW60T59B
select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING_USER

View File

@ -38,6 +38,24 @@
#define _PAGE_SWP_SOFT_DIRTY 0
#endif /* CONFIG_MEM_SOFT_DIRTY */
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
/*
 * ext_svrsw60t59b: Bit(60) for uffd-wp tracking.
 *
 * Both markers are runtime-conditional: when the Svrsw60t59b extension
 * is not present the macros evaluate to 0, so setting/clearing them is
 * a no-op and testing them is always false.
 */
#define _PAGE_UFFD_WP \
((riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)) ? \
(1UL << 60) : 0)
/*
 * Bit 4 (_PAGE_USER) is not involved in swap entry computation, so we
 * can borrow it for swap page uffd-wp tracking.
 */
#define _PAGE_SWP_UFFD_WP \
((riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)) ? \
_PAGE_USER : 0)
#else
/* uffd-wp not configured: both markers compile away to nothing. */
#define _PAGE_UFFD_WP 0
#define _PAGE_SWP_UFFD_WP 0
#endif
#define _PAGE_TABLE _PAGE_PRESENT
/*

View File

@ -417,6 +417,41 @@ static inline pte_t pte_wrprotect(pte_t pte)
return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
/*
 * uffd-wp needs a spare PTE bit for its marker, which only the
 * Svrsw60t59b extension provides; report its availability at runtime.
 */
#define pgtable_supports_uffd_wp() \
riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)
/* True when the uffd write-protect marker is set on a present PTE. */
static inline bool pte_uffd_wp(pte_t pte)
{
	return (pte_val(pte) & _PAGE_UFFD_WP) != 0;
}
/*
 * Set the uffd write-protect marker on a present PTE. The entry is also
 * write-protected so the next write access faults and userfaultfd can
 * intercept it.
 */
static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	pte_t marked = __pte(pte_val(pte) | _PAGE_UFFD_WP);

	return pte_wrprotect(marked);
}
/* Drop the uffd write-protect marker from a present PTE. */
static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	unsigned long val = pte_val(pte);

	return __pte(val & ~_PAGE_UFFD_WP);
}
/* True when the uffd write-protect marker is set on a swap PTE. */
static inline bool pte_swp_uffd_wp(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWP_UFFD_WP) != 0;
}
/* Set the uffd write-protect marker on a swap PTE. */
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	unsigned long val = pte_val(pte);

	return __pte(val | _PAGE_SWP_UFFD_WP);
}
/* Drop the uffd write-protect marker from a swap PTE. */
static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	unsigned long val = pte_val(pte);

	return __pte(val & ~_PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
/* static inline pte_t pte_mkread(pte_t pte) */
static inline pte_t pte_mkwrite_novma(pte_t pte)
@ -841,6 +876,38 @@ static inline pud_t pud_mkspecial(pud_t pud)
}
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
/* PMD flavour of pte_uffd_wp(): test the marker on a huge mapping. */
static inline bool pmd_uffd_wp(pmd_t pmd)
{
	pte_t pte = pmd_pte(pmd);

	return pte_uffd_wp(pte);
}
/* PMD flavour of pte_mkuffd_wp(): mark and write-protect a huge mapping. */
static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	pte_t pte = pte_mkuffd_wp(pmd_pte(pmd));

	return pte_pmd(pte);
}
/* PMD flavour of pte_clear_uffd_wp(): drop the marker from a huge mapping. */
static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	pte_t pte = pte_clear_uffd_wp(pmd_pte(pmd));

	return pte_pmd(pte);
}
/* PMD flavour of pte_swp_uffd_wp(): test the marker on a swap PMD. */
static inline bool pmd_swp_uffd_wp(pmd_t pmd)
{
	pte_t pte = pmd_pte(pmd);

	return pte_swp_uffd_wp(pte);
}
/* PMD flavour of pte_swp_mkuffd_wp(): set the marker on a swap PMD. */
static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	pte_t pte = pte_swp_mkuffd_wp(pmd_pte(pmd));

	return pte_pmd(pte);
}
/* PMD flavour of pte_swp_clear_uffd_wp(): drop the marker from a swap PMD. */
static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	pte_t pte = pte_swp_clear_uffd_wp(pmd_pte(pmd));

	return pte_pmd(pte);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pmd_soft_dirty(pmd_t pmd)
{
@ -1075,6 +1142,7 @@ static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
* bit 0: _PAGE_PRESENT (zero)
* bit 1 to 2: (zero)
* bit 3: _PAGE_SWP_SOFT_DIRTY
* bit 4: _PAGE_SWP_UFFD_WP
* bit 5: _PAGE_PROT_NONE (zero)
* bit 6: exclusive marker
* bits 7 to 11: swap type