mirror of https://github.com/torvalds/linux.git
riscv: pgtable: Use riscv_has_extension_unlikely
Use riscv_has_extension_unlikely() to check for RISCV_ISA_EXT_SVVPTC,
replacing the use of asm goto with ALTERNATIVE.
The "unlikely" variant is used to match the behavior of the original
implementation using ALTERNATIVE("nop", "j %l[svvptc]", ...).
Note that this makes the check for RISCV_ISA_EXT_SVVPTC a runtime one if
RISCV_ALTERNATIVE=n, but it should still be worthwhile to do so given
that TLB flushes are relatively slow.
Signed-off-by: Vivian Wang <wangruikang@iscas.ac.cn>
Link: https://patch.msgid.link/20251020-riscv-altn-helper-wip-v4-1-ef941c87669a@iscas.ac.cn
Signed-off-by: Paul Walmsley <pjw@kernel.org>
This commit is contained in: commit 0a067ae21b (parent c9a7161044)
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -496,8 +496,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 		struct vm_area_struct *vma, unsigned long address,
 		pte_t *ptep, unsigned int nr)
 {
-	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
-		 : : : : svvptc);
+	/*
+	 * Svvptc guarantees that the new valid pte will be visible within
+	 * a bounded timeframe, so when the uarch does not cache invalid
+	 * entries, we don't have to do anything.
+	 */
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
+		return;
 
 	/*
 	 * The kernel assumes that TLBs don't cache invalid entries, but
@@ -509,12 +514,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 	while (nr--)
 		local_flush_tlb_page(address + nr * PAGE_SIZE);
 
-svvptc:;
-	/*
-	 * Svvptc guarantees that the new valid pte will be visible within
-	 * a bounded timeframe, so when the uarch does not cache invalid
-	 * entries, we don't have to do anything.
-	 */
 }
 
 #define update_mmu_cache(vma, addr, ptep) \
 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -9,18 +9,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pte_t *ptep,
 			  pte_t entry, int dirty)
 {
-	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
-		 : : : : svvptc);
-
-	if (!pte_same(ptep_get(ptep), entry))
-		__set_pte_at(vma->vm_mm, ptep, entry);
-	/*
-	 * update_mmu_cache will unconditionally execute, handling both
-	 * the case that the PTE changed and the spurious fault case.
-	 */
-	return true;
-
-svvptc:
-	if (!pte_same(ptep_get(ptep), entry)) {
-		__set_pte_at(vma->vm_mm, ptep, entry);
-		/* Here only not svadu is impacted */
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC)) {
+		if (!pte_same(ptep_get(ptep), entry)) {
+			__set_pte_at(vma->vm_mm, ptep, entry);
+			/* Here only not svadu is impacted */
@@ -29,6 +18,15 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
-	}
+		}
 
-	return false;
+		return false;
+	}
+
+	if (!pte_same(ptep_get(ptep), entry))
+		__set_pte_at(vma->vm_mm, ptep, entry);
+	/*
+	 * update_mmu_cache will unconditionally execute, handling both
+	 * the case that the PTE changed and the spurious fault case.
+	 */
+	return true;
 }
 
 int ptep_test_and_clear_young(struct vm_area_struct *vma,