[Xen-devel] [PATCH v14 for-xen-4.5 18/21] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr
The two routines share most of their logic.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vpmu.c        | 69 +++++++++++++++++-------------------
 xen/include/asm-x86/hvm/vpmu.h | 14 +++++++--
 2 files changed, 39 insertions(+), 44 deletions(-)

diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 0bd95ee..14f969c 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -92,57 +92,42 @@ void vpmu_lvtpc_update(uint32_t val)
     apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
 }
 
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t supported)
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
+                uint64_t supported, bool_t is_write)
 {
-    struct vcpu *curr = current;
-    struct vpmu_struct *vpmu = vcpu_vpmu(curr);
+    struct vcpu *curr;
+    struct vpmu_struct *vpmu;
+    struct arch_vpmu_ops *ops;
+    int ret = 0;
 
     if ( !(vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
         return 0;
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
-    {
-        int ret = vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content, supported);
-
-        /*
-         * We may have received a PMU interrupt during WRMSR handling
-         * and since do_wrmsr may load VPMU context we should save
-         * (and unload) it again.
-         */
-        if ( !is_hvm_vcpu(curr) && vpmu->xenpmu_data &&
-             (vpmu->xenpmu_data->pmu.pmu_flags & PMU_CACHED) )
-        {
-            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
-            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
-            vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
-        }
-        return ret;
-    }
-    return 0;
-}
-
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
-    struct vcpu *curr = current;
-    struct vpmu_struct *vpmu = vcpu_vpmu(curr);
-
-    if ( !(vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
+    curr = current;
+    vpmu = vcpu_vpmu(curr);
+    ops = vpmu->arch_vpmu_ops;
+    if ( !ops )
        return 0;
 
-    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
-    {
-        int ret = vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
+    if ( is_write && ops->do_wrmsr )
+        ret = ops->do_wrmsr(msr, *msr_content, supported);
+    else if ( !is_write && ops->do_rdmsr )
+        ret = ops->do_rdmsr(msr, msr_content);
 
-        if ( !is_hvm_vcpu(curr) && vpmu->xenpmu_data &&
-             (vpmu->xenpmu_data->pmu.pmu_flags & PMU_CACHED) )
-        {
-            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
-            vpmu->arch_vpmu_ops->arch_vpmu_save(curr);
-            vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
-        }
-        return ret;
+    /*
+     * We may have received a PMU interrupt while handling MSR access
+     * and since do_wr/rdmsr may load VPMU context we should save
+     * (and unload) it again.
+     */
+    if ( !is_hvm_vcpu(curr) &&
+         vpmu->xenpmu_data && (vpmu->xenpmu_data->pmu.pmu_flags & PMU_CACHED) )
+    {
+        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
+        ops->arch_vpmu_save(curr);
+        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
     }
-    return 0;
+
+    return ret;
 }
 
 static struct vcpu *choose_hwdom_vcpu(void)
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 8a9a337..3efba21 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -96,8 +96,8 @@ static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
 }
 void vpmu_lvtpc_update(uint32_t val);
-int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t supported);
-int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
+int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
+                uint64_t supported, bool_t is_write);
 void vpmu_do_interrupt(struct cpu_user_regs *regs);
 void vpmu_do_cpuid(unsigned int input,
                    unsigned int *eax, unsigned int *ebx,
                    unsigned int *ecx, unsigned int *edx);
@@ -107,6 +107,16 @@ void vpmu_save(struct vcpu *v);
 void vpmu_load(struct vcpu *v);
 void vpmu_dump(struct vcpu *v);
 
+static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
+                                uint64_t supported)
+{
+    return vpmu_do_msr(msr, &msr_content, supported, 1);
+}
+static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+{
+    return vpmu_do_msr(msr, msr_content, 0, 0);
+}
+
 extern int acquire_pmu_ownership(int pmu_ownership);
 extern void release_pmu_ownership(int pmu_ownership);
 
-- 
1.8.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
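For readers outside the Xen tree, the shape of the refactoring may be worth spelling out: the read and write paths collapse into a single handler that takes the MSR value by pointer plus an is_write flag, and the former entry points survive as thin inline wrappers so existing call sites stay untouched. Below is a minimal standalone sketch of that pattern, not Xen code; all names here (demo_ops, do_msr, fake_msr, ...) are hypothetical.

/*
 * Standalone sketch of the merge pattern used by this patch:
 * one common handler, value passed by pointer, is_write flag,
 * and thin inline wrappers preserving the old calling convention.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ops {
    int (*do_wrmsr)(unsigned int msr, uint64_t val);
    int (*do_rdmsr)(unsigned int msr, uint64_t *val);
};

static uint64_t fake_msr;   /* stands in for real PMU register state */

static int demo_wrmsr(unsigned int msr, uint64_t val)
{
    fake_msr = val;
    return 1;               /* non-zero == handled */
}

static int demo_rdmsr(unsigned int msr, uint64_t *val)
{
    *val = fake_msr;
    return 1;
}

static const struct demo_ops ops = { demo_wrmsr, demo_rdmsr };

/* Common path: reads and writes share the dispatch and any common epilogue. */
static int do_msr(unsigned int msr, uint64_t *val, int is_write)
{
    int ret = 0;

    if ( is_write && ops.do_wrmsr )
        ret = ops.do_wrmsr(msr, *val);
    else if ( !is_write && ops.do_rdmsr )
        ret = ops.do_rdmsr(msr, val);

    /* Shared clean-up would go here (the patch re-saves cached vPMU state). */
    return ret;
}

/* Thin wrappers keep the old signatures for existing call sites. */
static inline int do_wrmsr(unsigned int msr, uint64_t val)
{
    return do_msr(msr, &val, 1);   /* write: pass the value's address */
}

static inline int do_rdmsr(unsigned int msr, uint64_t *val)
{
    return do_msr(msr, val, 0);    /* read: pass the caller's pointer through */
}

int main(void)
{
    uint64_t v = 0;

    do_wrmsr(0xC1, 0x1234);
    do_rdmsr(0xC1, &v);
    printf("read back %#llx\n", (unsigned long long)v);
    return 0;
}

The write wrapper passes the address of its by-value parameter, which is how the common handler can treat both directions uniformly through a single uint64_t * argument, mirroring what the patch's vpmu_do_wrmsr() inline does with &msr_content.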