LoongArch fixes for v6.17-rc7

-----BEGIN PGP SIGNATURE-----
 
 iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmjMFIQWHGNoZW5odWFj
 YWlAa2VybmVsLm9yZwAKCRAChivD8uImeljpD/9U5QOKD2EpTpRJ2gaOxvAQe3r7
 GRqA8Y5wSHDL//eg3/gyDi/uWkS69boHxkEGqGpfeeXLBH/nR3/6r41DJ44PIwqE
 FW5foMmwZbGHzyo9jqmoyXU+TfF4yJX09YOvGWsZAnhuE7li3UndWzm07B/tfzMP
 xKh8+dOz/5BsoORw9hPym3/4QtpKEnpkUpvFBoJ0dcG3YSk7+1lotIfPrc0X9zSm
 GbP46P/X4Cz5cgnNADajuPr3O856yZadqAAfi1+gUF85G9y92TwjcloWif5LtQtu
 1JdonjW51x6RlS+uBr4VW0Il6X1US1Rs058TmoiIDf+lRPpYVuGmBX4Ekxp3D92j
 09hnZEoSs1cxwDdMM0fttJpmS7IZEA28tJiO+20CRk3EuvBKGPNdXsQ+169OHDD9
 /jnGA8ksukCN6e1UBsjA0VzxNUTT5u0cw9sXMVaQcfCWzIa2J8PtRMLoJEtDeJ7/
 gXSrPtk6a9AK6QcgX5MGAlMtR+HJnZTCSJBv/i7T0E743BdWClwBq5RgTfklFHTw
 FXEFUX1RbgIdi4CyqZ+ilwbpIah8+7BdIK5NMNALojsLEWM5y6v88PPOIz9hsdqp
 PgxQKGxZh00p04WWJun3DmIO4KNvn+AYM/uMpd4TCa9Rtqi+hrm2pBDS+9K4hHDi
 mT+7ZweDnRX8YPt44Q==
 =aJ23
 -----END PGP SIGNATURE-----

Merge tag 'loongarch-fixes-6.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Fix some build warnings for RUST-enabled objtool check, align ACPI
  structures for ARCH_STRICT_ALIGN, fix an unreliable stack for live
  patching, add some NULL pointer checkings, and fix some bugs around
  KVM"

* tag 'loongarch-fixes-6.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Avoid copy_*_user() with lock hold in kvm_pch_pic_regs_access()
  LoongArch: KVM: Avoid copy_*_user() with lock hold in kvm_eiointc_sw_status_access()
  LoongArch: KVM: Avoid copy_*_user() with lock hold in kvm_eiointc_regs_access()
  LoongArch: KVM: Avoid copy_*_user() with lock hold in kvm_eiointc_ctrl_access()
  LoongArch: KVM: Fix VM migration failure with PTW enabled
  LoongArch: KVM: Remove unused returns and semicolons
  LoongArch: vDSO: Check kcalloc() result in init_vdso()
  LoongArch: Fix unreliable stack for live patching
  LoongArch: Replace sprintf() with sysfs_emit()
  LoongArch: Check the return value when creating kobj
  LoongArch: Align ACPI structures if ARCH_STRICT_ALIGN enabled
  LoongArch: Update help info of ARCH_STRICT_ALIGN
  LoongArch: Handle jump tables options for RUST
  LoongArch: Make LTO case independent in Makefile
  objtool/LoongArch: Mark special atomic instruction as INSN_BUG type
  objtool/LoongArch: Mark types based on break immediate code
Commit 39879e3a41 by Linus Torvalds, 2025-09-19 10:06:51 -07:00: 13 changed files with 162 additions and 69 deletions


@@ -298,6 +298,10 @@ config AS_HAS_LVZ_EXTENSION
 config CC_HAS_ANNOTATE_TABLEJUMP
 	def_bool $(cc-option,-mannotate-tablejump)
 
+config RUSTC_HAS_ANNOTATE_TABLEJUMP
+	depends on RUST
+	def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump)
+
 menu "Kernel type and options"
 
 source "kernel/Kconfig.hz"
@@ -563,10 +567,14 @@ config ARCH_STRICT_ALIGN
 	  -mstrict-align build parameter to prevent unaligned accesses.
 
 	  CPUs with h/w unaligned access support:
-	  Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
+	  Loongson-2K2000/2K3000 and all of Loongson-3 series processors
+	  based on LoongArch.
 
 	  CPUs without h/w unaligned access support:
-	  Loongson-2K500/2K1000.
+	  Loongson-2K0300/2K0500/2K1000.
+
+	  If you want to make sure whether to support unaligned memory access
+	  on your hardware, please read the bit 20 (UAL) of CPUCFG1 register.
 
 	  This option is enabled by default to make the kernel be able to run
 	  on all LoongArch systems. But you can disable it manually if you want
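
The updated help text points at CPUCFG1 bit 20 (UAL) as the way to confirm whether a core handles unaligned accesses in hardware. Purely as an illustration of that check (not part of this pull), a kernel-side probe could look like the sketch below; it assumes the read_cpucfg() helper and the LOONGARCH_CPUCFG1 / CPUCFG1_UAL definitions from <asm/loongarch.h>:

    /* Illustrative sketch only: report h/w unaligned access support.
     * Assumes read_cpucfg(), LOONGARCH_CPUCFG1 and CPUCFG1_UAL (bit 20)
     * as provided by <asm/loongarch.h>.
     */
    #include <linux/types.h>
    #include <asm/loongarch.h>

    static bool cpu_supports_unaligned_access(void)
    {
            /* CPUCFG1.UAL is set when unaligned loads/stores work in hardware */
            return read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_UAL;
    }

On cores that set UAL, ARCH_STRICT_ALIGN can be disabled to drop the -mstrict-align penalty; on the others it has to stay enabled.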


@@ -102,16 +102,21 @@ KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
 
 ifdef CONFIG_OBJTOOL
 ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_CFLAGS += -mannotate-tablejump
+else
+KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
+endif
+ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
+else
+KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers
+endif
+ifdef CONFIG_LTO_CLANG
 # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
 # Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
 # be passed via '-mllvm' to ld.lld.
-KBUILD_CFLAGS += -mannotate-tablejump
-ifdef CONFIG_LTO_CLANG
 KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump
 endif
-else
-KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
-endif
 endif
 
 KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small


@@ -10,9 +10,8 @@
 #ifndef _ASM_LOONGARCH_ACENV_H
 #define _ASM_LOONGARCH_ACENV_H
 
-/*
- * This header is required by ACPI core, but we have nothing to fill in
- * right now. Will be updated later when needed.
- */
+#ifdef CONFIG_ARCH_STRICT_ALIGN
+#define ACPI_MISALIGNMENT_NOT_SUPPORTED
+#endif /* CONFIG_ARCH_STRICT_ALIGN */
 
 #endif /* _ASM_LOONGARCH_ACENV_H */


@@ -16,6 +16,13 @@
  */
 #define KVM_MMU_CACHE_MIN_PAGES	(CONFIG_PGTABLE_LEVELS - 1)
 
+/*
+ * _PAGE_MODIFIED is a SW pte bit, it records page ever written on host
+ * kernel, on secondary MMU it records the page writeable attribute, in
+ * order for fast path handling.
+ */
+#define KVM_PAGE_WRITEABLE	_PAGE_MODIFIED
+
 #define _KVM_FLUSH_PGTABLE	0x1
 #define _KVM_HAS_PGMASK		0x2
 #define kvm_pfn_pte(pfn, prot)	(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
@@ -52,10 +59,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
 	WRITE_ONCE(*ptep, val);
 }
 
-static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
-static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
 static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
 static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
+static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; }
+static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; }
 
 static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
 {
@@ -69,12 +76,12 @@ static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
 
 static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
 {
-	return pte | _PAGE_DIRTY;
+	return pte | __WRITEABLE;
 }
 
 static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
 {
-	return pte & ~_PAGE_DIRTY;
+	return pte & ~__WRITEABLE;
 }
 
 static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
@@ -87,6 +94,11 @@ static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
 	return pte & ~_PAGE_HUGE;
 }
 
+static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
+{
+	return pte | KVM_PAGE_WRITEABLE;
+}
+
 static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
 {
 	return ctx->flag & _KVM_FLUSH_PGTABLE;


@@ -86,7 +86,7 @@ late_initcall(fdt_cpu_clk_init);
 static ssize_t boardinfo_show(struct kobject *kobj,
 			      struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf,
+	return sysfs_emit(buf,
 		"BIOS Information\n"
 		"Vendor\t\t\t: %s\n"
 		"Version\t\t\t: %s\n"
@@ -109,6 +109,8 @@ static int __init boardinfo_init(void)
 	struct kobject *loongson_kobj;
 
 	loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+	if (!loongson_kobj)
+		return -ENOMEM;
 
 	return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
 }


@@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 	if (task == current) {
 		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
 		regs->csr_era = (unsigned long)__builtin_return_address(0);
+		regs->regs[22] = 0;
 	} else {
 		regs->regs[3] = thread_saved_fp(task);
 		regs->csr_era = thread_saved_ra(task);
+		regs->regs[22] = task->thread.reg22;
 	}
 	regs->regs[1] = 0;
-	regs->regs[22] = 0;
 
 	for (unwind_start(&state, task, regs);
 	     !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {


@@ -54,6 +54,9 @@ static int __init init_vdso(void)
 
 	vdso_info.code_mapping.pages =
 		kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
+	if (!vdso_info.code_mapping.pages)
+		return -ENOMEM;
+
 	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
 	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
 		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);


@@ -778,10 +778,8 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu)
 		return 0;
 	default:
 		return KVM_HCALL_INVALID_CODE;
-	};
-
-	return KVM_HCALL_INVALID_CODE;
-};
+	}
+}
 
 /*
  * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.


@@ -426,21 +426,26 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
 	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;
 
 	data = (void __user *)attr->addr;
+	switch (type) {
+	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
+	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
+		if (copy_from_user(&val, data, 4))
+			return -EFAULT;
+		break;
+	default:
+		break;
+	}
+
 	spin_lock_irqsave(&s->lock, flags);
 	switch (type) {
 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
-		if (copy_from_user(&val, data, 4))
-			ret = -EFAULT;
-		else {
-			if (val >= EIOINTC_ROUTE_MAX_VCPUS)
-				ret = -EINVAL;
-			else
-				s->num_cpu = val;
-		}
+		if (val >= EIOINTC_ROUTE_MAX_VCPUS)
+			ret = -EINVAL;
+		else
+			s->num_cpu = val;
 		break;
 	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
-		if (copy_from_user(&s->features, data, 4))
-			ret = -EFAULT;
+		s->features = val;
 		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
 			s->status |= BIT(EIOINTC_ENABLE);
 		break;
@@ -462,19 +467,17 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
 
 static int kvm_eiointc_regs_access(struct kvm_device *dev,
 					struct kvm_device_attr *attr,
-					bool is_write)
+					bool is_write, int *data)
 {
 	int addr, cpu, offset, ret = 0;
 	unsigned long flags;
 	void *p = NULL;
-	void __user *data;
 	struct loongarch_eiointc *s;
 
 	s = dev->kvm->arch.eiointc;
 	addr = attr->attr;
 	cpu = addr >> 16;
 	addr &= 0xffff;
-	data = (void __user *)attr->addr;
 	switch (addr) {
 	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
 		offset = (addr - EIOINTC_NODETYPE_START) / 4;
@@ -513,13 +516,10 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
 	}
 
 	spin_lock_irqsave(&s->lock, flags);
-	if (is_write) {
-		if (copy_from_user(p, data, 4))
-			ret = -EFAULT;
-	} else {
-		if (copy_to_user(data, p, 4))
-			ret = -EFAULT;
-	}
+	if (is_write)
+		memcpy(p, data, 4);
+	else
+		memcpy(data, p, 4);
 	spin_unlock_irqrestore(&s->lock, flags);
 
 	return ret;
@@ -527,19 +527,17 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
 }
 
 static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
 					struct kvm_device_attr *attr,
-					bool is_write)
+					bool is_write, int *data)
 {
 	int addr, ret = 0;
 	unsigned long flags;
 	void *p = NULL;
-	void __user *data;
 	struct loongarch_eiointc *s;
 
 	s = dev->kvm->arch.eiointc;
 	addr = attr->attr;
 	addr &= 0xffff;
-	data = (void __user *)attr->addr;
 	switch (addr) {
 	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
 		if (is_write)
@@ -561,13 +559,10 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
 		return -EINVAL;
 	}
 	spin_lock_irqsave(&s->lock, flags);
-	if (is_write) {
-		if (copy_from_user(p, data, 4))
-			ret = -EFAULT;
-	} else {
-		if (copy_to_user(data, p, 4))
-			ret = -EFAULT;
-	}
+	if (is_write)
+		memcpy(p, data, 4);
+	else
+		memcpy(data, p, 4);
 	spin_unlock_irqrestore(&s->lock, flags);
 
 	return ret;
@@ -576,11 +571,27 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
 static int kvm_eiointc_get_attr(struct kvm_device *dev,
 				struct kvm_device_attr *attr)
 {
+	int ret, data;
+
 	switch (attr->group) {
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
-		return kvm_eiointc_regs_access(dev, attr, false);
+		ret = kvm_eiointc_regs_access(dev, attr, false, &data);
+		if (ret)
+			return ret;
+
+		if (copy_to_user((void __user *)attr->addr, &data, 4))
+			ret = -EFAULT;
+
+		return ret;
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
-		return kvm_eiointc_sw_status_access(dev, attr, false);
+		ret = kvm_eiointc_sw_status_access(dev, attr, false, &data);
+		if (ret)
+			return ret;
+
+		if (copy_to_user((void __user *)attr->addr, &data, 4))
+			ret = -EFAULT;
+
+		return ret;
 	default:
 		return -EINVAL;
 	}
@@ -589,13 +600,21 @@ static int kvm_eiointc_get_attr(struct kvm_device *dev,
 static int kvm_eiointc_set_attr(struct kvm_device *dev,
 				struct kvm_device_attr *attr)
 {
+	int data;
+
 	switch (attr->group) {
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
 		return kvm_eiointc_ctrl_access(dev, attr);
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
-		return kvm_eiointc_regs_access(dev, attr, true);
+		if (copy_from_user(&data, (void __user *)attr->addr, 4))
+			return -EFAULT;
+
+		return kvm_eiointc_regs_access(dev, attr, true, &data);
 	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
-		return kvm_eiointc_sw_status_access(dev, attr, true);
+		if (copy_from_user(&data, (void __user *)attr->addr, 4))
+			return -EFAULT;
+
+		return kvm_eiointc_sw_status_access(dev, attr, true, &data);
 	default:
 		return -EINVAL;
 	}


@@ -348,6 +348,7 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev,
 				   struct kvm_device_attr *attr,
 				   bool is_write)
 {
+	char buf[8];
 	int addr, offset, len = 8, ret = 0;
 	void __user *data;
 	void *p = NULL;
@@ -397,17 +398,23 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev,
 		return -EINVAL;
 	}
 
-	spin_lock(&s->lock);
-	/* write or read value according to is_write */
 	if (is_write) {
-		if (copy_from_user(p, data, len))
-			ret = -EFAULT;
-	} else {
-		if (copy_to_user(data, p, len))
-			ret = -EFAULT;
+		if (copy_from_user(buf, data, len))
+			return -EFAULT;
 	}
+
+	spin_lock(&s->lock);
+	if (is_write)
+		memcpy(p, buf, len);
+	else
+		memcpy(buf, p, len);
 	spin_unlock(&s->lock);
 
+	if (!is_write) {
+		if (copy_to_user(data, buf, len))
+			return -EFAULT;
+	}
+
 	return ret;
 }
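
The four "Avoid copy_*_user() with lock hold" commits above all apply the same transformation: copy_from_user()/copy_to_user() may fault and sleep, which is not allowed inside a spinlock-protected section, so the user copy is staged through a local kernel buffer outside the critical section and only memcpy()/plain assignment runs under the lock. A minimal sketch of the pattern, with illustrative names rather than the kernel's own:

    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/uaccess.h>

    struct demo_state {                 /* stand-in for the irqchip state */
            spinlock_t lock;
            u32 reg;
    };

    static int demo_reg_access(struct demo_state *s, void __user *uptr, bool is_write)
    {
            u32 buf;

            /* Fault-able user copy happens before the lock is taken. */
            if (is_write && copy_from_user(&buf, uptr, sizeof(buf)))
                    return -EFAULT;

            spin_lock(&s->lock);
            if (is_write)
                    s->reg = buf;       /* only memory-to-memory work under the lock */
            else
                    buf = s->reg;
            spin_unlock(&s->lock);

            /* ...and the copy back out happens after it is dropped. */
            if (!is_write && copy_to_user(uptr, &buf, sizeof(buf)))
                    return -EFAULT;

            return 0;
    }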


@@ -569,7 +569,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	/* Track access to pages marked old */
 	new = kvm_pte_mkyoung(*ptep);
 	if (write && !kvm_pte_dirty(new)) {
-		if (!kvm_pte_write(new)) {
+		if (!kvm_pte_writeable(new)) {
 			ret = -EFAULT;
 			goto out;
 		}
@@ -856,9 +856,9 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 		prot_bits |= _CACHE_SUC;
 
 	if (writeable) {
-		prot_bits |= _PAGE_WRITE;
+		prot_bits = kvm_pte_mkwriteable(prot_bits);
 		if (write)
-			prot_bits |= __WRITEABLE;
+			prot_bits = kvm_pte_mkdirty(prot_bits);
 	}
 
 	/* Disable dirty logging on HugePages */
@@ -904,7 +904,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	kvm_release_faultin_page(kvm, page, false, writeable);
 	spin_unlock(&kvm->mmu_lock);
 
-	if (prot_bits & _PAGE_DIRTY)
+	if (kvm_pte_dirty(prot_bits))
 		mark_page_dirty_in_slot(kvm, memslot, gfn);
 
 out:


@@ -51,6 +51,10 @@ enum reg2i16_op {
 	bgeu_op		= 0x1b,
 };
 
+enum reg3_op {
+	amswapw_op	= 0x70c0,
+};
+
 struct reg0i15_format {
 	unsigned int immediate : 15;
 	unsigned int opcode : 17;
@@ -96,6 +100,13 @@ struct reg2i16_format {
 	unsigned int opcode : 6;
 };
 
+struct reg3_format {
+	unsigned int rd : 5;
+	unsigned int rj : 5;
+	unsigned int rk : 5;
+	unsigned int opcode : 17;
+};
+
 union loongarch_instruction {
 	unsigned int word;
 	struct reg0i15_format reg0i15_format;
@@ -105,6 +116,7 @@ union loongarch_instruction {
 	struct reg2i12_format reg2i12_format;
 	struct reg2i14_format reg2i14_format;
 	struct reg2i16_format reg2i16_format;
+	struct reg3_format reg3_format;
 };
 
 #define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
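
The new reg3_format view splits a 32-bit instruction into rd (bits 4:0), rj (9:5), rk (14:10) and a 17-bit opcode (31:15), assuming the little-endian bitfield layout the kernel relies on for these formats; that is how amswap.w ends up with amswapw_op = 0x70c0. As a stand-alone illustration of the same decode done with plain shifts (my own sketch, not from the patch; the 0x38600400 word is arithmetic derived from those field positions, so treat it as an assumption):

    #include <stdio.h>

    /* Field layout mirrors struct reg3_format: rd[4:0] rj[9:5] rk[14:10] opcode[31:15]. */
    #define REG3_OPCODE(w)  ((w) >> 15)
    #define REG3_RK(w)      (((w) >> 10) & 0x1f)
    #define REG3_RJ(w)      (((w) >> 5) & 0x1f)
    #define REG3_RD(w)      ((w) & 0x1f)

    int main(void)
    {
            /* amswap.w $zero, $ra, $zero: opcode 0x70c0, rk = $ra (r1), rd = rj = $zero (r0) */
            unsigned int word = (0x70c0u << 15) | (1u << 10);   /* computes to 0x38600400 */

            printf("word=%#x opcode=%#x rd=%u rj=%u rk=%u\n", word,
                   REG3_OPCODE(word), REG3_RD(word), REG3_RJ(word), REG3_RK(word));
            return 0;
    }

objtool's new decode_insn_reg3_fomat() in the hunk below marks exactly this rd == rj == $zero, rk == $ra pattern as INSN_BUG.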


@@ -278,6 +278,25 @@ static bool decode_insn_reg2i16_fomat(union loongarch_instruction inst,
 	return true;
 }
 
+static bool decode_insn_reg3_fomat(union loongarch_instruction inst,
+				   struct instruction *insn)
+{
+	switch (inst.reg3_format.opcode) {
+	case amswapw_op:
+		if (inst.reg3_format.rd == LOONGARCH_GPR_ZERO &&
+		    inst.reg3_format.rk == LOONGARCH_GPR_RA &&
+		    inst.reg3_format.rj == LOONGARCH_GPR_ZERO) {
+			/* amswap.w $zero, $ra, $zero */
+			insn->type = INSN_BUG;
+		}
+		break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
 int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
 			    struct instruction *insn)
@@ -309,11 +328,19 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
 		return 0;
 	if (decode_insn_reg2i16_fomat(inst, insn))
 		return 0;
+	if (decode_insn_reg3_fomat(inst, insn))
+		return 0;
 
-	if (inst.word == 0)
+	if (inst.word == 0) {
+		/* andi $zero, $zero, 0x0 */
 		insn->type = INSN_NOP;
-	else if (inst.reg0i15_format.opcode == break_op) {
-		/* break */
+	} else if (inst.reg0i15_format.opcode == break_op &&
+		   inst.reg0i15_format.immediate == 0x0) {
+		/* break 0x0 */
+		insn->type = INSN_TRAP;
+	} else if (inst.reg0i15_format.opcode == break_op &&
+		   inst.reg0i15_format.immediate == 0x1) {
+		/* break 0x1 */
 		insn->type = INSN_BUG;
 	} else if (inst.reg2_format.opcode == ertn_op) {
 		/* ertn */