[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 05/13] intel/VPMU: Clean up Intel VPMU code



On Fri, Sep 20, 2013 at 05:42:04AM -0400, Boris Ostrovsky wrote:
> Remove struct pmumsr and core2_pmu_enable. Replace static MSR structures with
> fields in core2_vpmu_context.
> 
> Call core2_get_pmc_count() once, during initialization.
> 
> Properly clean up when core2_vpmu_alloc_resource() fails and add routines
> to remove MSRs from VMCS.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/vmx/vmcs.c              |  59 +++++++
>  xen/arch/x86/hvm/vmx/vpmu_core2.c        | 289 
> ++++++++++++++-----------------
>  xen/include/asm-x86/hvm/vmx/vmcs.h       |   2 +
>  xen/include/asm-x86/hvm/vmx/vpmu_core2.h |  19 --
>  4 files changed, 191 insertions(+), 178 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index de9f592..756bc13 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1136,6 +1136,36 @@ int vmx_add_guest_msr(u32 msr)
>      return 0;
>  }
>  
> +void vmx_rm_guest_msr(u32 msr)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int i, idx, msr_count = curr->arch.hvm_vmx.msr_count;
> +    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
> +
> +    if ( msr_area == NULL )
> +        return;
> +    
> +    for ( idx = 0; idx < msr_count; idx++ )
> +        if ( msr_area[idx].index == msr )
> +            break;
> +
> +    if ( idx == msr_count )
> +        return;
> +    
> +    for ( i = idx; i < msr_count - 1; i++ )
> +    {
> +        msr_area[i].index = msr_area[i + 1].index;
> +        rdmsrl(msr_area[i].index, msr_area[i].data);
> +    }
> +    msr_area[msr_count - 1].index = 0;
> +
> +    curr->arch.hvm_vmx.msr_count = --msr_count;
> +    __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
> +    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
> +
> +    return;
> +}
> +
>  int vmx_add_host_load_msr(u32 msr)
>  {
>      struct vcpu *curr = current;
> @@ -1166,6 +1196,35 @@ int vmx_add_host_load_msr(u32 msr)
>      return 0;
>  }
>  
> +void vmx_rm_host_load_msr(u32 msr)
> +{
> +    struct vcpu *curr = current;
> +    unsigned int i, idx,  msr_count = curr->arch.hvm_vmx.host_msr_count;
> +    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
> +
> +    if ( msr_area == NULL )
> +        return;
> +
> +    for ( idx = 0; idx < msr_count; idx++ )
> +        if ( msr_area[idx].index == msr )
> +            break;
> +
> +    if ( idx == msr_count )
> +        return;
> +    
> +    for ( i = idx; i < msr_count - 1; i++ )
> +    {
> +        msr_area[i].index = msr_area[i + 1].index;
> +        rdmsrl(msr_area[i].index, msr_area[i].data);
> +    }
> +    msr_area[msr_count - 1].index = 0;    
> +
> +    curr->arch.hvm_vmx.host_msr_count = --msr_count;
> +    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
> +
> +    return;
> +}
> +
>  void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
>  {
>      int index, offset, changed;
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c 
> b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 101888d..50f784f 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -65,6 +65,26 @@
>  #define PMU_FIXED_WIDTH_MASK     (((1 << PMU_FIXED_WIDTH_BITS) -1) << 
> PMU_FIXED_WIDTH_SHIFT)
>  
>  /*
> + * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
> + * counters. 4 bits for every counter.
> + */
> +#define FIXED_CTR_CTRL_BITS 4
> +#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
> +
> +#define VPMU_CORE2_MAX_FIXED_PMCS     4
> +struct core2_vpmu_context {
> +    u64 fixed_ctrl;
> +    u64 ds_area;
> +    u64 pebs_enable;
> +    u64 global_ovf_status;
> +    u64 fix_counters[VPMU_CORE2_MAX_FIXED_PMCS];
> +    struct arch_msr_pair arch_msr_pair[1];
> +};
> +
> +static int arch_pmc_cnt; /* Number of general-purpose performance counters */
> +static int fixed_pmc_cnt; /* Number of fixed performance counters */
> +
> +/*
>   * QUIRK to workaround an issue on various family 6 cpus.
>   * The issue leads to endless PMC interrupt loops on the processor.
>   * If the interrupt handler is running and a pmc reaches the value 0, this
> @@ -84,11 +104,8 @@ static void check_pmc_quirk(void)
>          is_pmc_quirk = 0;    
>  }
>  
> -static int core2_get_pmc_count(void);
>  static void handle_pmc_quirk(u64 msr_content)
>  {
> -    int num_gen_pmc = core2_get_pmc_count();
> -    int num_fix_pmc  = 3;
>      int i;
>      u64 val;
>  
> @@ -96,7 +113,7 @@ static void handle_pmc_quirk(u64 msr_content)
>          return;
>  
>      val = msr_content;
> -    for ( i = 0; i < num_gen_pmc; i++ )
> +    for ( i = 0; i < arch_pmc_cnt; i++ )
>      {
>          if ( val & 0x1 )
>          {
> @@ -108,7 +125,7 @@ static void handle_pmc_quirk(u64 msr_content)
>          val >>= 1;
>      }
>      val = msr_content >> 32;
> -    for ( i = 0; i < num_fix_pmc; i++ )
> +    for ( i = 0; i < fixed_pmc_cnt; i++ )
>      {
>          if ( val & 0x1 )
>          {
> @@ -121,65 +138,32 @@ static void handle_pmc_quirk(u64 msr_content)
>      }
>  }
>  
> -static const u32 core2_fix_counters_msr[] = {
> -    MSR_CORE_PERF_FIXED_CTR0,
> -    MSR_CORE_PERF_FIXED_CTR1,
> -    MSR_CORE_PERF_FIXED_CTR2
> -};
> -
>  /*
> - * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
> - * counters. 4 bits for every counter.
> + * Read the number of general counters via CPUID.EAX[0xa].EAX[8..15]
>   */
> -#define FIXED_CTR_CTRL_BITS 4
> -#define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
> -
> -/* The index into the core2_ctrls_msr[] of this MSR used in 
> core2_vpmu_dump() */
> -#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0
> -
> -/* Core 2 Non-architectual Performance Control MSRs. */
> -static const u32 core2_ctrls_msr[] = {
> -    MSR_CORE_PERF_FIXED_CTR_CTRL,
> -    MSR_IA32_PEBS_ENABLE,
> -    MSR_IA32_DS_AREA
> -};
> -
> -struct pmumsr {
> -    unsigned int num;
> -    const u32 *msr;
> -};
> -
> -static const struct pmumsr core2_fix_counters = {
> -    VPMU_CORE2_NUM_FIXED,
> -    core2_fix_counters_msr
> -};
> +static int core2_get_arch_pmc_count(void)
> +{
> +    u32 eax, ebx, ecx, edx;
>  
> -static const struct pmumsr core2_ctrls = {
> -    VPMU_CORE2_NUM_CTRLS,
> -    core2_ctrls_msr
> -};
> -static int arch_pmc_cnt;
> +    cpuid(0xa, &eax, &ebx, &ecx, &edx);

Could you use cpuid_eax() here?

> +    return ( (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT );
> +}
>  
>  /*
> - * Read the number of general counters via CPUID.EAX[0xa].EAX[8..15]
> + * Read the number of fixed counters via CPUID.EDX[0xa].EDX[0..4]
>   */
> -static int core2_get_pmc_count(void)
> +static int core2_get_fixed_pmc_count(void)
>  {
>      u32 eax, ebx, ecx, edx;
>  
> -    if ( arch_pmc_cnt == 0 )
> -    {
> -        cpuid(0xa, &eax, &ebx, &ecx, &edx);
> -        arch_pmc_cnt = (eax & PMU_GENERAL_NR_MASK) >> PMU_GENERAL_NR_SHIFT;
> -    }
> -
> -    return arch_pmc_cnt;
> +    cpuid(0xa, &eax, &ebx, &ecx, &edx);

Ditto here.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with Rackspace, who monitor our
servers 24x7x365 and back them with Rackspace's Fanatical Support®.