
Re: [Xen-devel] [PATCH v6 11/19] x86/VPMU: Initialize PMU for PV guests



>>> On 13.05.14 at 17:53, <boris.ostrovsky@xxxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1150,7 +1150,8 @@ static int svm_vcpu_initialise(struct vcpu *v)
>          return rc;
>      }
>  
> -    vpmu_initialise(v);
> +    if ( is_hvm_domain(v->domain) )
> +        vpmu_initialise(v);

Why?

> @@ -1159,7 +1160,8 @@ static int svm_vcpu_initialise(struct vcpu *v)
>  
>  static void svm_vcpu_destroy(struct vcpu *v)
>  {
> -    vpmu_destroy(v);
> +    if ( is_hvm_domain(v->domain) )
> +        vpmu_destroy(v);

Again.

> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -115,7 +115,8 @@ static int vmx_vcpu_initialise(struct vcpu *v)
>          return rc;
>      }
>  
> -    vpmu_initialise(v);
> +    if ( is_hvm_domain(v->domain) )
> +        vpmu_initialise(v);

Same here.

> @@ -129,7 +130,8 @@ static int vmx_vcpu_initialise(struct vcpu *v)
>  static void vmx_vcpu_destroy(struct vcpu *v)
>  {
>      vmx_destroy_vmcs(v);
> -    vpmu_destroy(v);
> +    if ( is_hvm_domain(v->domain) )
> +        vpmu_destroy(v);

And here.

> @@ -753,6 +763,10 @@ func_out:
>      fixed_pmc_cnt = core2_get_fixed_pmc_count();
>      check_pmc_quirk();
>  
> +    /* PV domains can allocate resources immediately */
> +    if ( is_pv_domain(v->domain) && !core2_vpmu_alloc_resource(v) )
> +            return 1;
> +

Broken indentation.
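Presumably this wants to be indented one level only, i.e. (just a
sketch of the indentation fix, code otherwise unchanged):

    /* PV domains can allocate resources immediately */
    if ( is_pv_domain(v->domain) && !core2_vpmu_alloc_resource(v) )
        return 1;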

> @@ -764,11 +778,17 @@ static void core2_vpmu_destroy(struct vcpu *v)
>          return;
>  
>      xfree(vpmu->context);
> -    xfree(vpmu->priv_context);
> -    if ( cpu_has_vmx_msr_bitmap && is_hvm_domain(v->domain) )
> -        core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
> -    release_pmu_ownship(PMU_OWNER_HVM);
> -    vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
> +
> +    if ( is_hvm_domain(v->domain) )
> +    {
> +        xfree(vpmu->priv_context);
> +        if ( cpu_has_vmx_msr_bitmap )
> +            core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
> +        release_pmu_ownship(PMU_OWNER_HVM);
> +    }

Looks like this is a case where has_hvm_container_domain() would
be more appropriate. Since PVH - iiuc - doesn't work with vPMU anyway
until patch 18, let's at least limit the churn this series does.
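I.e. something along the lines of (untested sketch, with the rest of
the hunk left as in your patch):

    if ( has_hvm_container_domain(v->domain) )
    {
        xfree(vpmu->priv_context);
        if ( cpu_has_vmx_msr_bitmap )
            core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
        release_pmu_ownship(PMU_OWNER_HVM);
    }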

> @@ -267,7 +271,13 @@ void vpmu_destroy(struct vcpu *v)
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>  
>      if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
> +    {
> +        /* Unload VPMU first. This will stop counters */
> +        on_selected_cpus(cpumask_of(vcpu_vpmu(v)->last_pcpu),
> +                         vpmu_save_force, (void *)v, 1);

Pointless cast.
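vpmu_save_force() is an on_selected_cpus() callback taking void *, so
the vcpu pointer converts implicitly and this can simply be

    on_selected_cpus(cpumask_of(vcpu_vpmu(v)->last_pcpu),
                     vpmu_save_force, v, 1);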

> @@ -312,6 +322,67 @@ static void vpmu_unload_all(void)
>      }
>  }
>  
> +static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
> +{
> +    struct vcpu *v;
> +    struct page_info *page;
> +    uint64_t gfn = params->d.val;
> +
> +    if ( !is_pv_domain(d) )
> +        return -EINVAL;
> +
> +    if ( params->vcpu < 0 || params->vcpu >= d->max_vcpus )

Is ->vcpu a signed field? If so, why?
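If it were unsigned (which would seem more natural for a vCPU index),
the check could simply become

    if ( params->vcpu >= d->max_vcpus )
        return -EINVAL;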

> +        return -EINVAL;
> +
> +    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
> +    if ( !page )
> +        return -EINVAL;
> +
> +    if ( !get_page_type(page, PGT_writable_page) )
> +    {
> +        put_page(page);
> +        return -EINVAL;
> +    }
> +
> +    v = d->vcpu[params->vcpu];

There's no check above that this wouldn't yield NULL.
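Something like the following would seem to be needed (sketch only;
presumably put_page_and_type() is the right way to undo the references
taken above):

    v = d->vcpu[params->vcpu];
    if ( v == NULL )
    {
        /* Drop the type and page references obtained above. */
        put_page_and_type(page);
        return -EINVAL;
    }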

> +static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
> +{
> +    struct vcpu *v;
> +    uint64_t mfn;
> +
> +    if ( params->vcpu < 0 || params->vcpu >= d->max_vcpus )
> +        return;
> +
> +    v = d->vcpu[params->vcpu];
> +    if (v != current)

Same comments as above apply here, plus a coding style issue: Xen style
wants blanks immediately inside the parentheses, i.e. "if ( v != current )".

Jan

