mirror of https://github.com/torvalds/linux.git
KVM: selftests: Validate more arch-events in pmu_counters_test
Add support for 5 new architectural events (4 topdown level 1 metrics events and LBR inserts event) that will first show up in Intel's Clearwater Forest CPUs. Detailed info about the new events can be found in SDM section 21.2.7 "Pre-defined Architectural Performance Events". Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Tested-by: Yi Lai <yi1.lai@intel.com> [sean: drop "unavailable_mask" changes] Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Link: https://lore.kernel.org/r/20250919214648.1585683-5-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
parent
1fcd3053aa
commit
2922b59588
|
|
@@ -61,6 +61,11 @@
|
|||
#define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00)
|
||||
#define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 0x00)
|
||||
#define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01)
|
||||
#define INTEL_ARCH_TOPDOWN_BE_BOUND RAW_EVENT(0xa4, 0x02)
|
||||
#define INTEL_ARCH_TOPDOWN_BAD_SPEC RAW_EVENT(0x73, 0x00)
|
||||
#define INTEL_ARCH_TOPDOWN_FE_BOUND RAW_EVENT(0x9c, 0x01)
|
||||
#define INTEL_ARCH_TOPDOWN_RETIRING RAW_EVENT(0xc2, 0x02)
|
||||
#define INTEL_ARCH_LBR_INSERTS RAW_EVENT(0xe4, 0x01)
|
||||
|
||||
#define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00)
|
||||
#define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
|
||||
|
|
@@ -80,6 +85,11 @@ enum intel_pmu_architectural_events {
|
|||
INTEL_ARCH_BRANCHES_RETIRED_INDEX,
|
||||
INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
|
||||
INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
|
||||
INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
|
||||
INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
|
||||
INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
|
||||
INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
|
||||
INTEL_ARCH_LBR_INSERTS_INDEX,
|
||||
NR_INTEL_ARCH_EVENTS,
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -265,7 +265,7 @@ struct kvm_x86_cpu_property {
|
|||
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
|
||||
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
|
||||
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
|
||||
#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
|
||||
#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
|
||||
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
|
||||
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
|
||||
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
|
||||
|
|
@@ -332,6 +332,11 @@ struct kvm_x86_pmu_feature {
|
|||
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5)
|
||||
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6)
|
||||
#define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7)
|
||||
#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND KVM_X86_PMU_FEATURE(EBX, 8)
|
||||
#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC KVM_X86_PMU_FEATURE(EBX, 9)
|
||||
#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND KVM_X86_PMU_FEATURE(EBX, 10)
|
||||
#define X86_PMU_FEATURE_TOPDOWN_RETIRING KVM_X86_PMU_FEATURE(EBX, 11)
|
||||
#define X86_PMU_FEATURE_LBR_INSERTS KVM_X86_PMU_FEATURE(EBX, 12)
|
||||
|
||||
#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0)
|
||||
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1)
|
||||
|
|
|
|||
|
|
@@ -19,6 +19,11 @@ const uint64_t intel_pmu_arch_events[] = {
|
|||
INTEL_ARCH_BRANCHES_RETIRED,
|
||||
INTEL_ARCH_BRANCHES_MISPREDICTED,
|
||||
INTEL_ARCH_TOPDOWN_SLOTS,
|
||||
INTEL_ARCH_TOPDOWN_BE_BOUND,
|
||||
INTEL_ARCH_TOPDOWN_BAD_SPEC,
|
||||
INTEL_ARCH_TOPDOWN_FE_BOUND,
|
||||
INTEL_ARCH_TOPDOWN_RETIRING,
|
||||
INTEL_ARCH_LBR_INSERTS,
|
||||
};
|
||||
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
|
||||
|
||||
|
|
|
|||
|
|
@@ -75,6 +75,11 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
|
|||
[INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
|
||||
[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
|
||||
[INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
|
||||
[INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BE_BOUND, X86_PMU_FEATURE_NULL },
|
||||
[INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BAD_SPEC, X86_PMU_FEATURE_NULL },
|
||||
[INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_FE_BOUND, X86_PMU_FEATURE_NULL },
|
||||
[INTEL_ARCH_TOPDOWN_RETIRING_INDEX] = { X86_PMU_FEATURE_TOPDOWN_RETIRING, X86_PMU_FEATURE_NULL },
|
||||
[INTEL_ARCH_LBR_INSERTS_INDEX] = { X86_PMU_FEATURE_LBR_INSERTS, X86_PMU_FEATURE_NULL },
|
||||
};
|
||||
|
||||
kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
|
||||
|
|
@@ -171,9 +176,12 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr
|
|||
fallthrough;
|
||||
case INTEL_ARCH_CPU_CYCLES_INDEX:
|
||||
case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
|
||||
case INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX:
|
||||
case INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX:
|
||||
GUEST_ASSERT_NE(count, 0);
|
||||
break;
|
||||
case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
|
||||
case INTEL_ARCH_TOPDOWN_RETIRING_INDEX:
|
||||
__GUEST_ASSERT(count >= NUM_INSNS_RETIRED,
|
||||
"Expected top-down slots >= %u, got count = %lu",
|
||||
NUM_INSNS_RETIRED, count);
|
||||
|
|
|
|||
Loading…
Reference in New Issue