Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
120 commits
Select commit Hold shift + click to select a range
b9bb896
KVM: x86/pmu: Do not mask LVTPC when handling a PMI on AMD platforms
sandip4n Apr 5, 2024
06477fa
KVM: x86: Snapshot if a vCPU's vendor model is AMD vs. Intel compatible
sean-jc Apr 5, 2024
bbe3e86
KVM: x86: Manually retrieve CPUID.0x1 when getting FMS for RESET/INIT
sean-jc Sep 29, 2021
15650b8
KVM: vPMU: Fill get_msr MSR_CORE_PERF_GLOBAL_OVF_CTRL w/ 0
Oct 19, 2021
065bdc2
KVM: x86: Drop current_vcpu for kvm_running_vcpu + kvm_arch_vcpu vari…
sean-jc Nov 11, 2021
0baf69e
KVM: Move x86's perf guest info callbacks to generic KVM
sean-jc Nov 11, 2021
689f6d1
KVM: x86/svm: Add module param to control PMU virtualization
Nov 17, 2021
4f17fb1
KVM: x86: avoid out of bounds indices for fixed performance counters
bonzini Dec 9, 2021
65ac830
KVM: x86/pmu: Setup pmc->eventsel for fixed PMCs
Nov 30, 2021
bc4d8be
KVM: x86/pmu: Reuse pmc_perf_hw_id() and drop find_fixed_event()
Nov 30, 2021
45822f3
KVM: x86/pmu: Add pmc->intr to refactor kvm_perf_overflow{_intr}()
Nov 30, 2021
3ec65bc
KVM: x86: Update vPMCs when retiring instructions
ehankland Nov 30, 2021
a839a97
KVM: SVM: include CR3 in initial VMSA state for SEV-ES guests
mdroth Dec 16, 2021
9b1055f
KVM: x86: Do runtime CPUID update before updating vcpu->arch.cpuid_en…
vittyvk Jan 17, 2022
5b9fef6
KVM: x86: Making the module parameter of vPMU more common
Jan 11, 2022
5d7ac14
KVM: x86/pmu: Use binary search to check filtered events
jsmattsonjr Jan 15, 2022
80ff672
KVM: x86: Remove defunct pre_block/post_block kvm_x86_ops hooks
sean-jc Dec 8, 2021
e394c70
KVM: x86: Move CPUID.(EAX=0x12,ECX=1) mangling to __kvm_update_cpuid_…
vittyvk Jan 24, 2022
7ff7551
KVM: x86: skip host CPUID call for hypervisor leaves
bonzini Oct 28, 2021
c190316
KVM: x86: Drop export for .tlb_flush_current() static_call key
sean-jc Jan 28, 2022
0a132d2
KVM: x86: Rename kvm_x86_ops pointers to align w/ preferred vendor names
sean-jc Jan 28, 2022
43b1779
KVM: nVMX: Refactor PMU refresh to avoid referencing kvm_x86_ops.pmu_ops
sean-jc Jan 28, 2022
626d0d9
KVM: x86: Use more verbose names for mem encrypt kvm_x86_ops hooks
sean-jc Jan 28, 2022
6a1ddb0
KVM: x86: return 1 unconditionally for availability of KVM_CAP_VAPIC
bonzini Feb 15, 2022
5526ccb
KVM: x86: use static_call_cond for optional callbacks
bonzini Feb 1, 2022
73771eb
KVM: x86: remove KVM_X86_OP_NULL and mark optional kvm_x86_ops
bonzini Dec 9, 2021
ba2c69b
KVM: x86: warn on incorrectly NULL members of kvm_x86_ops
bonzini Dec 9, 2021
50696fb
KVM: x86: allow defining return-0 static calls
bonzini Feb 15, 2022
481ae8b
KVM: x86: Fix pointer mistmatch warning when patching RET0 static calls
sean-jc Feb 23, 2022
c7f1184
KVM: x86: add support for CPUID leaf 0x80000021
bonzini Oct 28, 2021
d033ce5
KVM: x86: synthesize CPUID leaf 0x80000021h if useful
bonzini Oct 21, 2021
cfdc787
KVM: x86: Fix clang -Wimplicit-fallthrough in do_host_cpuid()
nathanchance Mar 22, 2022
8e15d78
KVM: x86: Move lookup of indexed CPUID leafs to helper
mdroth Feb 24, 2022
6ac0f64
KVM: x86: Move kvm_ops_static_call_update() to x86.c
Mar 29, 2022
d0f446d
KVM: x86: Copy kvm_pmu_ops by value to eliminate layer of indirection
Mar 29, 2022
a93af2e
KVM: x86: Move .pmu_ops to kvm_x86_init_ops and tag as __initdata
Mar 29, 2022
3d5304a
KVM: x86: Use static calls to reduce kvm_pmu_ops overhead
Mar 29, 2022
5bdaf00
KVM: x86: work around QEMU issue with synthetic CPUID leaves
bonzini Apr 29, 2022
58e417f
kvm: x86/pmu: Fix the compare function used by the pmu event filter
suomilewis May 17, 2022
0508bde
KVM: x86/pmu: Add IA32_PEBS_ENABLE MSR emulation for extended PEBS
Apr 11, 2022
e24353d
KVM: x86/pmu: Reprogram PEBS event to emulate guest PEBS counter
Apr 11, 2022
2b3ba8a
KVM: x86/pmu: Add IA32_DS_AREA MSR emulation to support guest DS
Apr 11, 2022
6a2eb1b
KVM: x86/pmu: Add PEBS_DATA_CFG MSR emulation to support adaptive PEBS
Apr 11, 2022
5ea4f6a
KVM: x86/pmu: Move pmc_speculative_in_use() to arch/x86/kvm/pmu.h
Apr 11, 2022
4c224c1
KVM: x86/pmu: Disable guest PEBS temporarily in two rare situations
Apr 11, 2022
85058c3
KVM: x86/pmu: Add kvm_pmu_cap to optimize perf_get_x86_pmu_capability
Apr 11, 2022
ba68602
KVM: x86/pmu: remove useless prototype
bonzini May 20, 2022
cc0a5bb
KVM: x86/pmu: Don't overwrite the pmu->global_ctrl when refreshing
May 10, 2022
ae72d73
KVM: x86: always allow host-initiated writes to PMU MSRs
bonzini May 25, 2022
c53fe82
KVM: x86/pmu: Extract check_pmu_event_filter() handling both GP and f…
May 18, 2022
887728c
KVM: x86/pmu: Pass only "struct kvm_pmc *pmc" to reprogram_counter()
May 18, 2022
77e6533
KVM: x86/pmu: Drop "u64 eventsel" for reprogram_gp_counter()
May 18, 2022
ca71f8c
KVM: x86/pmu: Drop "u8 ctrl, int idx" for reprogram_fixed_counter()
May 18, 2022
f19cfed
KVM: x86/pmu: Use only the uniform interface reprogram_counter()
bonzini May 25, 2022
3852adc
KVM: x86/pmu: Use PERF_TYPE_RAW to merge reprogram_{gp,fixed}counter()
May 18, 2022
4e304d8
KVM: x86/pmu: Update global enable_pmu when PMU is undetected
May 18, 2022
bee5ae1
KVM: x86/pmu: Restrict advanced features based on module enable_pmu
Jun 1, 2022
289fdce
Revert "KVM: x86: always allow host-initiated writes to PMU MSRs"
sean-jc Jun 11, 2022
1b5ae32
KVM: VMX: Use vcpu_get_perf_capabilities() to get guest-visible value
sean-jc Jun 11, 2022
b5ba1a0
KVM: x86: Ignore benign host accesses to "unsupported" PEBS and BTS MSRs
sean-jc Jun 11, 2022
07bbb2a
KVM: x86: Provide per VM capability for disabling PMU virtualization
Feb 23, 2022
b4211a7
KVM: x86: Add dedicated helper to get CPUID entry with significant index
sean-jc Jul 12, 2022
e88a189
KVM: x86: Refresh PMU after writes to MSR_IA32_PERF_CAPABILITIES
sean-jc Jul 27, 2022
3f6e267
perf/x86/core: Completely disable guest PEBS via guest's global_ctrl
Aug 31, 2022
e88f3dd
KVM: x86/pmu: Avoid setting BIT_ULL(-1) to pmu->host_cross_mapped_mask
Aug 31, 2022
24586b2
KVM: x86/pmu: Don't generate PEBS records for emulated instructions
Aug 31, 2022
ca032a7
KVM: x86/pmu: Refactor PERF_GLOBAL_CTRL update helper for reuse by PEBS
Sep 22, 2022
dfb3f5b
KVM: x86/pmu: Avoid using PEBS perf_events for normal counters
Aug 31, 2022
c0a38b3
KVM: x86/pmu: Limit the maximum number of supported Intel GP counters
Sep 19, 2022
e938c68
KVM: x86/pmu: Limit the maximum number of supported AMD GP counters
Sep 19, 2022
d238017
KVM: x86/pmu: Defer reprogram_counter() to kvm_pmu_handle_event()
Sep 23, 2022
4b2c440
KVM: x86/pmu: Clear "reprogram" bit if counter is disabled or disallowed
sean-jc Sep 23, 2022
c9311e6
KVM: x86/pmu: Defer counter emulated overflow via pmc->prev_counter
Sep 23, 2022
3735770
KVM: x86: Update KVM-only leaf handling to allow for 100% KVM-only leafs
sean-jc Nov 25, 2022
0c925f4
KVM: x86: Advertise that the SMM_CTL MSR is not supported
jsmattsonjr Oct 7, 2022
9ea1900
KVM: x86: Move Intel Processor Trace interrupt handler to vmx.c
sean-jc Nov 11, 2021
87f4331
KVM: x86: Move hardware setup/unsetup to init/exit
sean-jc Nov 30, 2022
ff51bce
KVM: x86: Move guts of kvm_arch_init() to standalone helper
sean-jc Nov 30, 2022
1562777
KVM: x86: Serialize vendor module initialization (hardware setup)
sean-jc Nov 30, 2022
1b908fa
KVM: x86: Move open-coded CPUID leaf 0x80000021 EAX bit propagation code
kimphillamd Jan 24, 2023
13c34a7
x86/cpu, kvm: Add the Null Selector Clears Base feature
kimphillamd Jan 24, 2023
3d55887
x86/cpu: Support AMD Automatic IBRS
kimphillamd Jan 24, 2023
3128bcf
KVM: x86: Propagate the AMD Automatic IBRS feature to the guest
kimphillamd Jan 24, 2023
bd2de9c
KVM: x86/pmu: Cap kvm_pmu_cap.num_counters_gp at KVM's internal max
sean-jc Jan 24, 2023
4770c2f
KVM: x86/pmu: Use separate array for defining "PMU MSRs to save"
sean-jc Jan 24, 2023
c69fec7
docs: kvm: x86: Fix broken field list
zulinx86 Mar 31, 2023
650bb0d
KVM: x86/pmu: Rename pmc_is_enabled() to pmc_is_globally_enabled()
Feb 14, 2023
0c7b90e
KVM: VMX: Refactor intel_pmu_{g,}set_msr() to align with other helpers
sean-jc Jan 27, 2023
9e4068f
KVM: x86/pmu: Rewrite reprogram_counters() to improve performance
Feb 14, 2023
0bd1d8f
KVM: x86/pmu: Fix a typo in kvm_pmu_request_counter_reprogam()
Mar 10, 2023
69aa68e
KVM: x86/pmu: Prevent the PMU from counting disallowed events
suomilewis Mar 7, 2023
1781620
KVM: x86/pmu: Rename global_ovf_ctrl_mask to global_status_mask
sean-jc Jun 3, 2023
cc6aed8
KVM: x86/pmu: Move reprogram_counters() to pmu.h
Jun 3, 2023
e5cac0e
KVM: x86/pmu: Reject userspace attempts to set reserved GLOBAL_STATUS…
Jun 3, 2023
b819b15
KVM: x86/pmu: Move handling PERF_GLOBAL_CTRL and friends to common x86
Jun 3, 2023
f6927af
KVM: x86/pmu: Provide Intel PMU's pmc_is_enabled() as generic x86 code
Jun 3, 2023
a9e0df8
KVM: x86: Explicitly zero cpuid "0xa" leaf when PMU is disabled
Jun 3, 2023
b381765
KVM: x86/pmu: Disable vPMU if the minimum num of counters isn't met
Jun 3, 2023
a848f7e
KVM: x86/pmu: Advertise PERFCTR_CORE iff the min nr of counters is met
Jun 3, 2023
23912f4
KVM: x86/pmu: Constrain the num of guest counters with kvm_pmu_cap
Jun 3, 2023
821d1f3
KVM: x86/cpuid: Add a KVM-only leaf to redirect AMD PerfMonV2 flag
Jun 3, 2023
7184c38
KVM: x86/svm/pmu: Add AMD PerfMonV2 support
Jun 3, 2023
a801530
KVM: x86/cpuid: Add AMD CPUID ExtPerfMonAndDbg leaf 0x80000022
Jun 3, 2023
4293e10
x86/cpu: Enable STIBP on AMD if Automatic IBRS is enabled
kimphillamd Jul 20, 2023
4c92111
KVM: x86: Acquire SRCU read lock when handling fastpath MSR writes
sean-jc Jul 21, 2023
5e24dfc
KVM: x86/pmu: Truncate counter value to allowed width on write
May 4, 2023
37b9b3c
KVM: x86: Get CPL directly when checking if loaded vCPU is in kernel …
Nov 23, 2023
9e0db4b
KVM: x86/pmu: fix masking logic for MSR_CORE_PERF_GLOBAL_CTRL
bonzini Jan 4, 2024
03f92a4
KVM: x86/pmu: Zero out pmu->all_valid_pmc_idx each time it's refreshed
Apr 4, 2023
481c9ae
KVM: x86/pmu: WARN and bug the VM if PMU is refreshed after vCPU has run
sean-jc Mar 11, 2023
7f339d6
KVM: x86/pmu: Zero out PMU metadata on AMD if PMU is disabled
sean-jc Nov 10, 2023
e694576
KVM: x86/pmu: Fix type length error when reading pmu->fixed_ctr_ctrl
mzhang3579 Jan 23, 2024
2f14160
KVM: x86/pmu: Synthesize at most one PMI per VM-exit
jsmattsonjr Sep 25, 2023
20ed35e
KVM: x86: Use actual kvm_cpuid.base for clearing KVM_FEATURE_PV_UNHALT
vittyvk Feb 28, 2024
25c7a9e
KVM: x86/pmu: Expose CPUIDs feature bits PDCM, DS, DTES64
Apr 11, 2022
dc8fd6c
KVM: x86/pmu: Disable support for adaptive PEBS
sean-jc Mar 7, 2024
b40d73f
KVM: x86: Fix errant brace in KVM capability handling
Jun 13, 2022
c558c52
KVM: x86/cpuid: generalize kvm_update_kvm_cpuid_base() and also captu…
Jan 6, 2023
355bcda
KVM: x86: Introduce __kvm_get_hypervisor_cpuid() helper
vittyvk Feb 28, 2024
e7235ad
KVM: x86/cpuid: Refactor host/guest CPU model consistency check
Apr 11, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 10 additions & 7 deletions Documentation/admin-guide/hw-vuln/spectre.rst
Original file line number Diff line number Diff line change
Expand Up @@ -484,11 +484,14 @@ Spectre variant 2

Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
Spectre v2 variant attacks, including cross-thread branch target injections
on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
Spectre v2 variant attacks.

Legacy IBRS systems clear the IBRS bit on exit to userspace and
therefore explicitly enable STIBP for that
On Intel's enhanced IBRS systems, this includes cross-thread branch target
injections on SMT systems (STIBP). In other words, Intel eIBRS enables
STIBP, too.

AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
the IBRS bit on exit to userspace, therefore both explicitly enable STIBP.

The retpoline mitigation is turned on by default on vulnerable
CPUs. It can be forced on or off by the administrator
Expand Down Expand Up @@ -622,9 +625,9 @@ kernel command line.
retpoline,generic Retpolines
retpoline,lfence LFENCE; indirect branch
retpoline,amd alias for retpoline,lfence
eibrs enhanced IBRS
eibrs,retpoline enhanced IBRS + Retpolines
eibrs,lfence enhanced IBRS + LFENCE
eibrs Enhanced/Auto IBRS
eibrs,retpoline Enhanced/Auto IBRS + Retpolines
eibrs,lfence Enhanced/Auto IBRS + LFENCE

Not specifying this option is equivalent to
spectre_v2=auto.
Expand Down
6 changes: 3 additions & 3 deletions Documentation/admin-guide/kernel-parameters.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5403,9 +5403,9 @@
retpoline,generic - Retpolines
retpoline,lfence - LFENCE; indirect branch
retpoline,amd - alias for retpoline,lfence
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
eibrs - Enhanced/Auto IBRS
eibrs,retpoline - Enhanced/Auto IBRS + Retpolines
eibrs,lfence - Enhanced/Auto IBRS + LFENCE
ibrs - use IBRS to protect kernel

Not specifying this option is equivalent to
Expand Down
22 changes: 22 additions & 0 deletions Documentation/virt/kvm/api.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7424,6 +7424,28 @@ of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
the hypercalls whose corresponding bit is in the argument, and return
ENOSYS for the others.

8.35 KVM_CAP_PMU_CAPABILITY
---------------------------

:Capability: KVM_CAP_PMU_CAPABILITY
:Architectures: x86
:Type: vm
:Parameters: arg[0] is bitmask of PMU virtualization capabilities.
:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits

This capability alters PMU virtualization in KVM.

Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of
PMU virtualization capabilities that can be adjusted on a VM.

The argument to KVM_ENABLE_CAP is also a bitmask and selects specific
PMU virtualization capabilities to be applied to the VM. This can
only be invoked on a VM prior to the creation of VCPUs.

At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting
this capability will disable PMU virtualization for that VM. Usermode
should adjust CPUID leaf 0xA to reflect that the PMU is disabled.

9. Known KVM API problems
=========================

Expand Down
6 changes: 6 additions & 0 deletions Documentation/virt/kvm/locking.rst
Original file line number Diff line number Diff line change
Expand Up @@ -257,3 +257,9 @@ time it will be set using the Dirty tracking mechanism described above.
wakeup notification event since external interrupts from the
assigned devices happens, we will find the vCPU on the list to
wakeup.

``vendor_module_lock``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:Type: mutex
:Arch: x86
:Protects: loading a vendor module (kvm_amd or kvm_intel)
10 changes: 10 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -765,6 +765,16 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
void kvm_perf_init(void);
void kvm_perf_teardown(void);

/*
* Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
* arrived in guest context. For arm64, any event that arrives while a vCPU is
* loaded is considered to be "in guest".
*/
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
Expand Down
5 changes: 5 additions & 0 deletions arch/arm64/kvm/arm.c
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu_mode_priv(vcpu);
}

/* Return the guest's current program counter for perf callchain/IP reporting. */
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	unsigned long pc = *vcpu_pc(vcpu);

	return pc;
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
Expand Down
104 changes: 86 additions & 18 deletions arch/x86/events/intel/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
Expand Down Expand Up @@ -3970,31 +3971,98 @@ static int intel_pmu_hw_config(struct perf_event *event)
return 0;
}

/*
* Currently, the only caller of this function is the atomic_switch_perf_msrs().
 * The host perf context helps to prepare the values of the real hardware for
* a set of msrs that need to be switched atomically in a vmx transaction.
*
* For example, the pseudocode needed to add a new msr should look like:
*
* arr[(*nr)++] = (struct perf_guest_switch_msr){
* .msr = the hardware msr address,
* .host = the value the hardware has when it doesn't run a guest,
* .guest = the value the hardware has when it runs a guest,
* };
*
* These values have nothing to do with the emulated values the guest sees
* when it uses {RD,WR}MSR, which should be handled by the KVM context,
* specifically in the intel_pmu_{get,set}_msr().
*/
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
int global_ctrl, pebs_enable;

arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
arr[0].guest &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
*nr = 1;
/*
* In addition to obeying exclude_guest/exclude_host, remove bits being
* used for PEBS when running a guest, because PEBS writes to virtual
* addresses (not physical addresses).
*/
*nr = 0;
global_ctrl = (*nr)++;
arr[global_ctrl] = (struct perf_guest_switch_msr){
.msr = MSR_CORE_PERF_GLOBAL_CTRL,
.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
};

if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
/*
* If PMU counter has PEBS enabled it is not enough to
* disable counter on a guest entry since PEBS memory
* write can overshoot guest entry and corrupt guest
* memory. Disabling PEBS solves the problem.
*
* Don't do this if the CPU already enforces it.
*/
arr[1].msr = MSR_IA32_PEBS_ENABLE;
arr[1].host = cpuc->pebs_enabled;
arr[1].guest = 0;
*nr = 2;
if (!x86_pmu.pebs)
return arr;

/*
* If PMU counter has PEBS enabled it is not enough to
* disable counter on a guest entry since PEBS memory
* write can overshoot guest entry and corrupt guest
* memory. Disabling PEBS solves the problem.
*
* Don't do this if the CPU already enforces it.
*/
if (x86_pmu.pebs_no_isolation) {
arr[(*nr)++] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_PEBS_ENABLE,
.host = cpuc->pebs_enabled,
.guest = 0,
};
return arr;
}

if (!kvm_pmu || !x86_pmu.pebs_ept)
return arr;
arr[(*nr)++] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_DS_AREA,
.host = (unsigned long)cpuc->ds,
.guest = kvm_pmu->ds_area,
};

if (x86_pmu.intel_cap.pebs_baseline) {
arr[(*nr)++] = (struct perf_guest_switch_msr){
.msr = MSR_PEBS_DATA_CFG,
.host = cpuc->pebs_data_cfg,
.guest = kvm_pmu->pebs_data_cfg,
};
}

pebs_enable = (*nr)++;

arr[pebs_enable] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_PEBS_ENABLE,
.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
};

if (arr[pebs_enable].host) {
/* Disable guest PEBS if host PEBS is enabled. */
arr[pebs_enable].guest = 0;
} else {
/* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
arr[global_ctrl].guest |= arr[pebs_enable].guest;
}

return arr;
Expand Down
2 changes: 2 additions & 0 deletions arch/x86/include/asm/cpufeatures.h
Original file line number Diff line number Diff line change
Expand Up @@ -444,6 +444,8 @@
* Reuse free bits when adding new feature flags!
*/
#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */

/*
* BUG word(s)
Expand Down
34 changes: 34 additions & 0 deletions arch/x86/include/asm/cpuid.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* CPUID-related helpers/definitions
*
* Derived from arch/x86/kvm/cpuid.c
*/

#ifndef _ASM_X86_CPUID_H
#define _ASM_X86_CPUID_H

/*
 * Return true if CPUID leaf @function is sub-leaf indexed, i.e. its output
 * depends on the value provided in ECX as well as EAX.
 */
static __always_inline bool cpuid_function_is_indexed(u32 function)
{
	bool indexed;

	switch (function) {
	case 0x4:
	case 0x7:
	case 0xb:
	case 0xd:
	case 0xf:
	case 0x10:
	case 0x12:
	case 0x14:
	case 0x17:
	case 0x18:
	case 0x1d:
	case 0x1e:
	case 0x1f:
	case 0x8000001d:
		indexed = true;
		break;
	default:
		indexed = false;
		break;
	}

	return indexed;
}

#endif /* _ASM_X86_CPUID_H */
Loading