[Xen-changelog] [xen master] x86/VPMU: set MSR bitmaps only for HVM/PVH guests
commit b72b471b08804d2ec1da01b5bf887f347c4fea04
Author: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
AuthorDate: Fri Jan 23 17:51:15 2015 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Jan 23 17:51:15 2015 +0100
x86/VPMU: set MSR bitmaps only for HVM/PVH guests
In preparation for making VPMU code shared with PV, make sure that we
update MSR bitmaps only for HVM/PVH guests
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
Tested-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
---
xen/arch/x86/hvm/svm/vpmu.c | 21 +++++++++++++--------
xen/arch/x86/hvm/vmx/vpmu_core2.c | 8 +++++---
2 files changed, 18 insertions(+), 11 deletions(-)
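
[Illustrative note, not part of the commit: every hunk below applies the
same guard. The following is a minimal standalone C sketch of that
pattern; struct vcpu, has_hvm_container_vcpu() and the bitmap helper are
simplified stand-ins for the real Xen definitions, not literal excerpts.]

    /* Minimal sketch of the guard this patch adds: only touch MSR bitmaps
     * for vcpus that actually have an HVM container (HVM/PVH), so shared
     * VPMU code stays safe for PV guests. Types/helpers are stand-ins. */
    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu {
        bool hvm_container;   /* true for HVM/PVH vcpus, false for PV */
        bool msr_bitmap_set;  /* has an MSR bitmap been programmed?   */
    };

    /* Stand-in for Xen's has_hvm_container_vcpu() predicate. */
    static bool has_hvm_container_vcpu(const struct vcpu *v)
    {
        return v->hvm_container;
    }

    /* Stand-in for amd_vpmu_set_msr_bitmap()/core2_vpmu_set_msr_bitmap(). */
    static void vpmu_set_msr_bitmap(struct vcpu *v)
    {
        v->msr_bitmap_set = true;
    }

    int main(void)
    {
        struct vcpu pv = { false, false }, hvm = { true, false };

        /* Guard pattern from the hunks below: check for an HVM container
         * before touching the (HVM-only) MSR bitmap. */
        if ( has_hvm_container_vcpu(&pv) && !pv.msr_bitmap_set )
            vpmu_set_msr_bitmap(&pv);
        if ( has_hvm_container_vcpu(&hvm) && !hvm.msr_bitmap_set )
            vpmu_set_msr_bitmap(&hvm);

        printf("pv: %d, hvm: %d\n", pv.msr_bitmap_set, hvm.msr_bitmap_set);
        return 0;
    }
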
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 4c448bb..19777e3 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -244,7 +244,8 @@ static int amd_vpmu_save(struct vcpu *v)
context_save(v);
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
+ has_hvm_container_vcpu(v) && ctx->msr_bitmap_set )
amd_vpmu_unset_msr_bitmap(v);
return 1;
@@ -287,8 +288,9 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
ASSERT(!supported);
/* For all counters, enable guest only mode for HVM guest */
- if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
- !(is_guest_mode(msr_content)) )
+ if ( has_hvm_container_vcpu(v) &&
+ (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
+ !is_guest_mode(msr_content) )
{
set_guest_mode(msr_content);
}
@@ -303,8 +305,9 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
- if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
- amd_vpmu_set_msr_bitmap(v);
+ if ( has_hvm_container_vcpu(v) &&
+ !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ amd_vpmu_set_msr_bitmap(v);
}
/* stop saving & restore if guest stops first counter */
@@ -314,8 +317,9 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
vpmu_reset(vpmu, VPMU_RUNNING);
- if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
- amd_vpmu_unset_msr_bitmap(v);
+ if ( has_hvm_container_vcpu(v) &&
+ ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ amd_vpmu_unset_msr_bitmap(v);
release_pmu_ownship(PMU_OWNER_HVM);
}
@@ -403,7 +407,8 @@ static void amd_vpmu_destroy(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ if ( has_hvm_container_vcpu(v) &&
+ ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
amd_vpmu_unset_msr_bitmap(v);
xfree(vpmu->context);
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 590c2a9..c9fb202 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -335,7 +335,8 @@ static int core2_vpmu_save(struct vcpu *v)
__core2_vpmu_save(v);
/* Unset PMU MSR bitmap to trap lazy load. */
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
+ has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
return 1;
@@ -448,7 +449,8 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
{
__core2_vpmu_load(current);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
- if ( cpu_has_vmx_msr_bitmap )
+ if ( has_hvm_container_vcpu(current) &&
+ cpu_has_vmx_msr_bitmap )
core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
}
return 1;
@@ -820,7 +822,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
xfree(core2_vpmu_cxt->pmu_enable);
xfree(vpmu->context);
- if ( cpu_has_vmx_msr_bitmap )
+ if ( has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
release_pmu_ownship(PMU_OWNER_HVM);
vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog