Re: [Xen-devel] [PATCH] Always save/restore performance counters when HVM guest switching VCPU
On 03/01/2013 03:49 PM, suravee.suthikulpanit@xxxxxxx wrote:

From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

Currently, the performance counter registers are saved/restored when the
HVM guest switches VCPUs only if the counters are running. However, perf
has a check where it writes an MSR and reads the value back to verify
that the MSR is working. This check has been shown to fail when the VCPU
is moved between the wrmsr and the rdmsr, because the value read back
differs from the value written.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

Description may need to be cleaned up a bit but other than that

Acked-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>

---
 xen/arch/x86/hvm/svm/vpmu.c | 62 ++++++++++++++++++++++++-------------------
 1 file changed, 35 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index bf186fe..4854cf3 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -172,12 +172,16 @@ static inline void context_restore(struct vcpu *v)
     {
         wrmsrl(counters[i], ctxt->counters[i]);
 
+        if ( !vpmu_is_set(vpmu, VPMU_RUNNING) )
+            continue;
+
         /* Force an interrupt to allow guest reset the counter,
            if the value is positive */
         if ( is_overflowed(ctxt->counters[i]) && (ctxt->counters[i] > 0) )
         {
             gdprintk(XENLOG_WARNING, "VPMU: Force a performance counter "
-                "overflow interrupt!\n");
+                "overflow interrupt! (counter:%u value:0x%lx)\n",
+                i, ctxt->counters[i]);
             amd_vpmu_do_interrupt(0);
         }
     }
@@ -188,12 +192,13 @@ static void amd_vpmu_restore(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED)) )
         return;
 
     context_restore(v);
-    apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
+
+    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
+        apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
 }
 
 static inline void context_save(struct vcpu *v)
@@ -214,13 +219,16 @@ static void amd_vpmu_save(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctx = vpmu->context;
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
+    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED)) )
         return;
 
     context_save(v);
-    ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
-    apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
+
+    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
+    {
+        ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
+        apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
+    }
 }
 
 static void context_update(unsigned int msr, u64 msr_content)
@@ -303,25 +311,25 @@ static int amd_vpmu_initialise(struct vcpu *v)
 
     if ( counters == NULL )
     {
-        switch ( family )
-        {
-        case 0x15:
-            num_counters = F15H_NUM_COUNTERS;
-            counters = AMD_F15H_COUNTERS;
-            ctrls = AMD_F15H_CTRLS;
-            k7_counters_mirrored = 1;
-            break;
-        case 0x10:
-        case 0x12:
-        case 0x14:
-        case 0x16:
-        default:
-            num_counters = F10H_NUM_COUNTERS;
-            counters = AMD_F10H_COUNTERS;
-            ctrls = AMD_F10H_CTRLS;
-            k7_counters_mirrored = 0;
-            break;
-        }
+        switch ( family )
+        {
+        case 0x15:
+            num_counters = F15H_NUM_COUNTERS;
+            counters = AMD_F15H_COUNTERS;
+            ctrls = AMD_F15H_CTRLS;
+            k7_counters_mirrored = 1;
+            break;
+        case 0x10:
+        case 0x12:
+        case 0x14:
+        case 0x16:
+        default:
+            num_counters = F10H_NUM_COUNTERS;
+            counters = AMD_F10H_COUNTERS;
+            ctrls = AMD_F10H_CTRLS;
+            k7_counters_mirrored = 0;
+            break;
+        }
     }
 
     ctxt = xzalloc(struct amd_vpmu_context);
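For anyone reading along, here is a rough, self-contained user-space model of
the failure mode the commit message describes. It is not Xen or Linux source:
vcpu_ctx, pcpu_msr, migrate_vcpu() and probe() are made-up names, and the perf
write-then-read-back check is only paraphrased. It just illustrates why
saving/restoring counter state only when VPMU_RUNNING is set can break a probe
that straddles a VCPU migration:

/*
 * Hypothetical model of the race described above -- not Xen or Linux code.
 * A vCPU writes a counter MSR, is migrated to another pCPU, then reads the
 * MSR back.  If per-vCPU counter state is only saved/restored while the
 * VPMU is marked "running", the read-back value differs and the probe
 * wrongly concludes the counters are broken.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct vcpu_ctx {
    uint64_t saved_counter;   /* counter value saved at deschedule time */
    bool     vpmu_running;    /* guest has already enabled a counter (VPMU_RUNNING) */
};

static uint64_t pcpu_msr;     /* the physical counter MSR on the current pCPU */

/* Guest-visible wrmsr/rdmsr on whichever pCPU the vCPU currently runs on. */
static void guest_wrmsr(uint64_t val) { pcpu_msr = val; }
static uint64_t guest_rdmsr(void)     { return pcpu_msr; }

/* Old behaviour: context is saved/restored only if VPMU_RUNNING is set.
 * New behaviour (always_save_restore): save/restore whenever context exists. */
static void migrate_vcpu(struct vcpu_ctx *v, bool always_save_restore)
{
    if ( always_save_restore || v->vpmu_running )
        v->saved_counter = pcpu_msr;      /* models amd_vpmu_save()    */
    pcpu_msr = 0xdeadbeef;                /* another guest ran on this pCPU */
    if ( always_save_restore || v->vpmu_running )
        pcpu_msr = v->saved_counter;      /* models amd_vpmu_restore() */
}

/* The perf-style probe: write a value, read it back, compare. */
static bool probe(struct vcpu_ctx *v, bool always_save_restore)
{
    const uint64_t val = 0xabcdUL;
    guest_wrmsr(val);
    migrate_vcpu(v, always_save_restore);   /* vCPU moved between wrmsr and rdmsr */
    return guest_rdmsr() == val;
}

int main(void)
{
    struct vcpu_ctx v = { .saved_counter = 0, .vpmu_running = false };

    printf("save/restore only when running:  probe %s\n",
           probe(&v, false) ? "passes" : "fails");
    printf("save/restore whenever allocated: probe %s\n",
           probe(&v, true) ? "passes" : "fails");
    return 0;
}

Built with gcc, the first line prints "fails" and the second "passes", which
mirrors what the patch does: the counter context is saved/restored whenever it
is allocated, while the LVTPC manipulation stays gated on VPMU_RUNNING.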
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel