[Xen-devel] [PATCH v2 2/9] x86/AMD: Do not intercept access to performance counters MSRs
Access to performance counters and reads of event selects don't
always need to be intercepted.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c  |  2 +-
 xen/arch/x86/hvm/svm/vpmu.c | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 8ce37c9..89e47b3 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1052,8 +1052,8 @@ static int svm_vcpu_initialise(struct vcpu *v)
 
 static void svm_vcpu_destroy(struct vcpu *v)
 {
-    svm_destroy_vmcb(v);
     vpmu_destroy(v);
+    svm_destroy_vmcb(v);
     passive_domain_destroy(v);
 }
 
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 16170da..1e54497 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -88,6 +88,7 @@ struct amd_vpmu_context {
     u64 counters[MAX_NUM_COUNTERS];
     u64 ctrls[MAX_NUM_COUNTERS];
     u32 hw_lapic_lvtpc;
+    bool_t msr_bitmap_set;
 };
 
 static inline int get_pmu_reg_type(u32 addr)
@@ -138,6 +139,36 @@ static inline u32 get_fam15h_addr(u32 addr)
     return addr;
 }
 
+static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
+{
+    unsigned int i;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct amd_vpmu_context *ctxt = vpmu->context;
+
+    for ( i = 0; i < num_counters; i++ )
+    {
+        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
+        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
+    }
+
+    ctxt->msr_bitmap_set = 1;
+}
+
+static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
+{
+    unsigned int i;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct amd_vpmu_context *ctxt = vpmu->context;
+
+    for ( i = 0; i < num_counters; i++ )
+    {
+        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
+        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
+    }
+
+    ctxt->msr_bitmap_set = 0;
+}
+
 static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -219,6 +250,10 @@ static void amd_vpmu_save(struct vcpu *v)
         return;
 
     context_save(v);
+
+    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
+        amd_vpmu_unset_msr_bitmap(v);
+
     ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
     apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
 }
@@ -267,6 +302,9 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             return 1;
         vpmu_set(vpmu, VPMU_RUNNING);
         apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
+
+        if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+            amd_vpmu_set_msr_bitmap(v);
     }
 
     /* stop saving & restore if guest stops first counter */
@@ -275,6 +313,8 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     {
         apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
         vpmu_reset(vpmu, VPMU_RUNNING);
+        if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+            amd_vpmu_unset_msr_bitmap(v);
         release_pmu_ownship(PMU_OWNER_HVM);
     }
 
@@ -345,6 +385,9 @@ static void amd_vpmu_destroy(struct vcpu *v)
     if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
         return;
 
+    if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+        amd_vpmu_unset_msr_bitmap(v);
+
     xfree(vpmu->context);
     vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
 
-- 
1.8.1.2
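The intercept lifecycle this patch introduces is easiest to see in
isolation. Below is a minimal, self-contained C sketch of that state
machine; the enum values, the toy_vcpu structure, and NUM_COUNTERS are
illustrative stand-ins, not Xen's real definitions (the real code
drives svm_intercept_msr() against the hardware MSR permission
bitmap):

/*
 * Toy model (not Xen code) of the patch's intercept lifecycle:
 * while the guest has a counter running, counter MSRs are passed
 * through entirely and event selects trap on write only; once the
 * guest stops its last counter, full read/write interception is
 * restored.
 */
#include <stdio.h>
#include <stdbool.h>

enum intercept { INTERCEPT_NONE, INTERCEPT_WRITE, INTERCEPT_RW };

#define NUM_COUNTERS 4  /* stand-in for the per-family counter count */

struct toy_vcpu {
    enum intercept counter_msr[NUM_COUNTERS]; /* performance counters */
    enum intercept ctrl_msr[NUM_COUNTERS];    /* event selects */
    bool msr_bitmap_set;
};

/* Guest enabled its first counter: stop intercepting hot accesses. */
static void set_msr_bitmap(struct toy_vcpu *v)
{
    for ( int i = 0; i < NUM_COUNTERS; i++ )
    {
        v->counter_msr[i] = INTERCEPT_NONE;   /* reads+writes pass through */
        v->ctrl_msr[i] = INTERCEPT_WRITE;     /* reads pass, writes trap */
    }
    v->msr_bitmap_set = true;
}

/* Guest stopped its last counter: intercept everything again. */
static void unset_msr_bitmap(struct toy_vcpu *v)
{
    for ( int i = 0; i < NUM_COUNTERS; i++ )
        v->counter_msr[i] = v->ctrl_msr[i] = INTERCEPT_RW;
    v->msr_bitmap_set = false;
}

int main(void)
{
    struct toy_vcpu v = { .msr_bitmap_set = false };

    unset_msr_bitmap(&v);   /* initial state: fully intercepted */
    set_msr_bitmap(&v);     /* guest starts its first counter */
    printf("running: ctr=%d sel=%d\n", v.counter_msr[0], v.ctrl_msr[0]);
    unset_msr_bitmap(&v);   /* guest stops its last counter */
    printf("stopped: ctr=%d sel=%d\n", v.counter_msr[0], v.ctrl_msr[0]);
    return 0;
}

The asymmetry is the point of the patch: event-select writes stay
intercepted because amd_vpmu_do_wrmsr() must see them to track
VPMU_RUNNING and toggle the bitmap, while counter reads/writes and
event-select reads carry no state the hypervisor needs to observe, so
they can go straight to hardware.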