
Re: [Xen-devel] [PATCH v3 05/16] x86/VPMU: Handle APIC_LVTPC accesses



>>> On 06.01.14 at 20:24, Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> wrote:
> Update APIC_LVTPC vector when HVM guest writes to it.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/svm/vpmu.c       |  4 ----
>  xen/arch/x86/hvm/vlapic.c         |  5 ++++-
>  xen/arch/x86/hvm/vmx/vpmu_core2.c | 17 -----------------
>  xen/arch/x86/hvm/vpmu.c           | 16 +++++++++++++---
>  xen/include/asm-x86/hvm/vpmu.h    |  1 +
>  5 files changed, 18 insertions(+), 25 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> index 842bce7..1f7d6b7 100644
> --- a/xen/arch/x86/hvm/svm/vpmu.c
> +++ b/xen/arch/x86/hvm/svm/vpmu.c
> @@ -290,8 +290,6 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
>              return 1;
>          vpmu_set(vpmu, VPMU_RUNNING);
> -        apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
> -        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
>  
>          if ( is_hvm_domain(v->domain) &&
>               !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
> @@ -302,8 +300,6 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>      if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
>          (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
>      {
> -        apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
> -        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
>          vpmu_reset(vpmu, VPMU_RUNNING);
>          if ( is_hvm_domain(v->domain) &&
>               ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
> diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
> index bc06010..d954f4f 100644
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -38,6 +38,7 @@
>  #include <asm/hvm/support.h>
>  #include <asm/hvm/vmx/vmx.h>
>  #include <asm/hvm/nestedhvm.h>
> +#include <asm/hvm/vpmu.h>
>  #include <public/hvm/ioreq.h>
>  #include <public/hvm/params.h>
>  
> @@ -732,8 +733,10 @@ static int vlapic_reg_write(struct vcpu *v,
>              vlapic_adjust_i8259_target(v->domain);
>              pt_may_unmask_irq(v->domain, NULL);
>          }
> -        if ( (offset == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
> +        else if ( (offset == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
>              pt_may_unmask_irq(NULL, &vlapic->pt);
> +        else if ( offset == APIC_LVTPC )
> +            vpmu_lvtpc_update(val);
>          break;
>  
>      case APIC_TMICT:
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 89212ec..7fd2420 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -532,19 +532,6 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>      else
>          vpmu_reset(vpmu, VPMU_RUNNING);
>  
> -    /* Setup LVTPC in local apic */
> -    if ( vpmu_is_set(vpmu, VPMU_RUNNING) &&
> -         is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
> -    {
> -        apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR);
> -        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
> -    }
> -    else
> -    {
> -        apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
> -        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
> -    }
> -
>      if ( type != MSR_TYPE_GLOBAL )
>      {
>          u64 mask;
> @@ -710,10 +697,6 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
>              return 0;
>      }
>  
> -    /* HW sets the MASK bit when performance counter interrupt occurs*/
> -    vpmu->hw_lapic_lvtpc = apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED;
> -    apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
> -
>      return 1;
>  }
>  
> diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
> index d6a9ff6..2c1201b 100644
> --- a/xen/arch/x86/hvm/vpmu.c
> +++ b/xen/arch/x86/hvm/vpmu.c
> @@ -64,6 +64,14 @@ static void __init parse_vpmu_param(char *s)
>      }
>  }
>  
> +void vpmu_lvtpc_update(uint32_t val)
> +{
> +    struct vpmu_struct *vpmu = vcpu_vpmu(current);
> +
> +    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);
> +    apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
> +}
> +
>  int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(current);
> @@ -227,19 +235,21 @@ void vpmu_initialise(struct vcpu *v)
>      case X86_VENDOR_AMD:
>          if ( svm_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
>              opt_vpmu_enabled = 0;
> -        break;
> +        return;
>  
>      case X86_VENDOR_INTEL:
>          if ( vmx_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
>              opt_vpmu_enabled = 0;
> -        break;
> +        return;
>  
>      default:
>          printk("VPMU: Initialization failed. "
>                 "Unknown CPU vendor %d\n", vendor);
>          opt_vpmu_enabled = 0;
> -        break;
> +        return;
>      }
> +
> +    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
>  }

So what is this good for? All code paths above use "return" now,
so how would execution ever get here?
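
To make that concrete: with every case ending in "return", the trailing
assignment is dead code. A sketch of the only shape in which it would be
reachable (illustration only, built from the identifiers in the hunk
above, not a suggested fix):

    /* Sketch only, not the submitted patch: with "break" on the
     * vendor-specific paths, control reaches the common statement
     * after the switch; with "return" (as posted) it never does.
     */
    switch ( vendor )
    {
    case X86_VENDOR_AMD:
        if ( svm_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
            opt_vpmu_enabled = 0;
        break;                      /* fall out to the common tail */

    case X86_VENDOR_INTEL:
        if ( vmx_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
            opt_vpmu_enabled = 0;
        break;                      /* fall out to the common tail */

    default:
        printk("VPMU: Initialization failed. "
               "Unknown CPU vendor %d\n", vendor);
        opt_vpmu_enabled = 0;
        return;                     /* nothing further to set up */
    }

    /* Reached only via "break" above, never via "return". */
    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;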

Jan
