
Re: [Xen-devel] [PATCH v6 03/19] x86/VPMU: Minor VPMU cleanup



> From: Boris Ostrovsky [mailto:boris.ostrovsky@xxxxxxxxxx]
> Sent: Tuesday, May 13, 2014 11:53 PM
> 
> Update macros that modify VPMU flags to allow changing multiple bits at
> once.
> 
> Make sure that we only touch the MSR bitmap on HVM guests (both VMX and
> SVM). This is needed by subsequent PMU patches.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
> Tested-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
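
One note for anyone skimming the diff below: the existing vpmu_is_set()
is an any-bit test, so handing it a combined mask succeeds as soon as
one of the flags is set, while the new vpmu_is_set_all() only succeeds
when every bit in the mask is set. A minimal standalone sketch of the
difference (illustrative flag values, not the full Xen header):

#include <assert.h>
#include <stdio.h>

/* Illustrative flag values in the style of the Xen header. */
#define VPMU_CONTEXT_ALLOCATED  0x1
#define VPMU_CONTEXT_LOADED     0x2

struct vpmu_struct { unsigned int flags; };

#define vpmu_is_set(_vpmu, _x)      ((_vpmu)->flags & (_x))
#define vpmu_is_set_all(_vpmu, _x)  (((_vpmu)->flags & (_x)) == (_x))

int main(void)
{
    struct vpmu_struct v = { .flags = VPMU_CONTEXT_ALLOCATED };

    /* Any-bit test: passes although VPMU_CONTEXT_LOADED is still clear. */
    assert(vpmu_is_set(&v, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED));

    /* All-bits test: fails until both flags are set. */
    assert(!vpmu_is_set_all(&v, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED));

    v.flags |= VPMU_CONTEXT_LOADED;
    assert(vpmu_is_set_all(&v, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED));

    puts("flag-test semantics behave as expected");
    return 0;
}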

> ---
>  xen/arch/x86/hvm/svm/vpmu.c       | 14 +++++++++-----
>  xen/arch/x86/hvm/vmx/vpmu_core2.c | 12 +++++-------
>  xen/arch/x86/hvm/vpmu.c           |  3 +--
>  xen/include/asm-x86/hvm/vpmu.h    |  9 +++++----
>  4 files changed, 20 insertions(+), 18 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> index 3ac7d53..3666915 100644
> --- a/xen/arch/x86/hvm/svm/vpmu.c
> +++ b/xen/arch/x86/hvm/svm/vpmu.c
> @@ -244,7 +244,8 @@ static int amd_vpmu_save(struct vcpu *v)
> 
>      context_save(v);
> 
> -    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
> +    if ( is_hvm_domain(v->domain) &&
> +        !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
>          amd_vpmu_unset_msr_bitmap(v);
> 
>      return 1;
> @@ -284,7 +285,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> 
>      /* For all counters, enable guest only mode for HVM guest */
> -    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
> +    if ( is_hvm_domain(v->domain) && (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
>          !(is_guest_mode(msr_content)) )
>      {
>          set_guest_mode(msr_content);
> @@ -300,7 +301,8 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
>          vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
> 
> -        if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
> +        if ( is_hvm_domain(v->domain) &&
> +             !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
>              amd_vpmu_set_msr_bitmap(v);
>      }
> 
> @@ -311,7 +313,8 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
>          vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
>          vpmu_reset(vpmu, VPMU_RUNNING);
> -        if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
> +        if ( is_hvm_domain(v->domain) &&
> +             ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
>              amd_vpmu_unset_msr_bitmap(v);
>          release_pmu_ownship(PMU_OWNER_HVM);
>      }
> @@ -403,7 +406,8 @@ static void amd_vpmu_destroy(struct vcpu *v)
>      if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
>          return;
> 
> -    if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
> +    if ( is_hvm_domain(v->domain) &&
> +         ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
>          amd_vpmu_unset_msr_bitmap(v);
> 
>      xfree(vpmu->context);
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index ccd14d9..a3fb458 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -326,16 +326,14 @@ static int core2_vpmu_save(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> 
> -    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
> -        return 0;
> -
> -    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
> +    if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
>          return 0;
> 
>      __core2_vpmu_save(v);
> 
>      /* Unset PMU MSR bitmap to trap lazy load. */
> -    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
> +    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_domain(v->domain) &&
> +         cpu_has_vmx_msr_bitmap )
>          core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
> 
>      return 1;
> @@ -448,7 +446,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
>      {
>          __core2_vpmu_load(current);
>          vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
> -        if ( cpu_has_vmx_msr_bitmap )
> +        if ( cpu_has_vmx_msr_bitmap && is_hvm_domain(current->domain) )
>              core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
>      }
>      return 1;
> @@ -815,7 +813,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
>          return;
>      xfree(core2_vpmu_cxt->pmu_enable);
>      xfree(vpmu->context);
> -    if ( cpu_has_vmx_msr_bitmap )
> +    if ( cpu_has_vmx_msr_bitmap && is_hvm_domain(v->domain) )
>          core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
>      release_pmu_ownship(PMU_OWNER_HVM);
>      vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
> diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
> index 63765fa..a48dae2 100644
> --- a/xen/arch/x86/hvm/vpmu.c
> +++ b/xen/arch/x86/hvm/vpmu.c
> @@ -143,8 +143,7 @@ void vpmu_save(struct vcpu *v)
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>      int pcpu = smp_processor_id();
> 
> -    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
> -           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
> +    if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) )
>         return;
> 
>      vpmu->last_pcpu = pcpu;
> diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
> index 40f63fb..2a713be 100644
> --- a/xen/include/asm-x86/hvm/vpmu.h
> +++ b/xen/include/asm-x86/hvm/vpmu.h
> @@ -81,10 +81,11 @@ struct vpmu_struct {
>  #define VPMU_CPU_HAS_BTS                    0x200 /* Has Branch Trace Store */
> 
> 
> -#define vpmu_set(_vpmu, _x)    ((_vpmu)->flags |= (_x))
> -#define vpmu_reset(_vpmu, _x)  ((_vpmu)->flags &= ~(_x))
> -#define vpmu_is_set(_vpmu, _x) ((_vpmu)->flags & (_x))
> -#define vpmu_clear(_vpmu)      ((_vpmu)->flags = 0)
> +#define vpmu_set(_vpmu, _x)         ((_vpmu)->flags |= (_x))
> +#define vpmu_reset(_vpmu, _x)       ((_vpmu)->flags &= ~(_x))
> +#define vpmu_is_set(_vpmu, _x)      ((_vpmu)->flags & (_x))
> +#define vpmu_is_set_all(_vpmu, _x)  (((_vpmu)->flags & (_x)) == (_x))
> +#define vpmu_clear(_vpmu)           ((_vpmu)->flags = 0)
> 
>  int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
>  int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
> --
> 1.8.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel