[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v1 01/14] x86/np2m: refactor p2m_get_nestedp2m()



On 09/04/2017 09:14 AM, Sergey Dyasli wrote:
> 1. Add a helper function assign_np2m()
> 2. Remove useless volatile
> 3. Update function's comment in the header
> 4. Minor style fixes ('\n' and d)
> 
> Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>

Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>

> ---
>  xen/arch/x86/mm/p2m.c     | 31 ++++++++++++++++++-------------
>  xen/include/asm-x86/p2m.h |  6 +++---
>  2 files changed, 21 insertions(+), 16 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index e8a57d118c..b8c8bba421 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1773,14 +1773,24 @@ p2m_flush_nestedp2m(struct domain *d)
>          p2m_flush_table(d->arch.nested_p2m[i]);
>  }
>  
> +static void assign_np2m(struct vcpu *v, struct p2m_domain *p2m)
> +{
> +    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
> +    struct domain *d = v->domain;
> +
> +    /* Bring this np2m to the top of the LRU list */
> +    p2m_getlru_nestedp2m(d, p2m);
> +
> +    nv->nv_flushp2m = 0;
> +    nv->nv_p2m = p2m;
> +    cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
> +}
> +
>  struct p2m_domain *
>  p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base)
>  {
> -    /* Use volatile to prevent gcc to cache nv->nv_p2m in a cpu register as
> -     * this may change within the loop by an other (v)cpu.
> -     */
> -    volatile struct nestedvcpu *nv = &vcpu_nestedhvm(v);
> -    struct domain *d;
> +    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
> +    struct domain *d = v->domain;
>      struct p2m_domain *p2m;
>  
>      /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
> @@ -1790,7 +1800,6 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base)
>          nv->nv_p2m = NULL;
>      }
>  
> -    d = v->domain;
>      nestedp2m_lock(d);
>      p2m = nv->nv_p2m;
>      if ( p2m ) 
> @@ -1798,15 +1807,13 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base)
>          p2m_lock(p2m);
>          if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
>          {
> -            nv->nv_flushp2m = 0;
> -            p2m_getlru_nestedp2m(d, p2m);
> -            nv->nv_p2m = p2m;
>              if ( p2m->np2m_base == P2M_BASE_EADDR )
>                  hvm_asid_flush_vcpu(v);
>              p2m->np2m_base = np2m_base;
> -            cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
> +            assign_np2m(v, p2m);
>              p2m_unlock(p2m);
>              nestedp2m_unlock(d);
> +
>              return p2m;
>          }
>          p2m_unlock(p2m);
> @@ -1817,11 +1824,9 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base)
>      p2m = p2m_getlru_nestedp2m(d, NULL);
>      p2m_flush_table(p2m);
>      p2m_lock(p2m);
> -    nv->nv_p2m = p2m;
>      p2m->np2m_base = np2m_base;
> -    nv->nv_flushp2m = 0;
>      hvm_asid_flush_vcpu(v);
> -    cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
> +    assign_np2m(v, p2m);
>      p2m_unlock(p2m);
>      nestedp2m_unlock(d);
>  
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 6395e8fd1d..9086bb35dc 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -359,9 +359,9 @@ struct p2m_domain {
>  /* get host p2m table */
>  #define p2m_get_hostp2m(d)      ((d)->arch.p2m)
>  
> -/* Get p2m table (re)usable for specified np2m base.
> - * Automatically destroys and re-initializes a p2m if none found.
> - * If np2m_base == 0 then v->arch.hvm_vcpu.guest_cr[3] is used.
> +/*
> + * Assigns an np2m with the specified np2m_base to the specified vCPU
> + * and returns that np2m.
>   */
>  struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base);
>  
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.