
Re: [Xen-devel] [PATCH 3 of 3] RFC: xenpaging: use waitqueue in ept_get



>>> On 22.11.11 at 22:13, Olaf Hering <olaf@xxxxxxxxx> wrote:
> # HG changeset patch
> # User Olaf Hering <olaf@xxxxxxxxx>
> # Date 1321996199 -3600
> # Node ID 9d63ecd3969bb7a2e39841f6c859b4c23f750642
> # Parent  de6860cb9205b68d1287482288d1b7b9d0255609
> RFC: xenpaging: use waitqueue in ept_get
> 
> Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
> 
> diff -r de6860cb9205 -r 9d63ecd3969b xen/arch/x86/mm/p2m-ept.c
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -505,7 +505,7 @@ out:
>  }
>  
>  /* Read ept p2m entries */
> -static mfn_t ept_get_entry(struct p2m_domain *p2m,
> +static unsigned long _ept_get_entry(struct p2m_domain *p2m,

Looking at the rest of the patch I can't see why the type change here
and below is necessary. It is my understanding that the wrapped
types exist to reduce the chance of mistakes, so they should be kept
whenever possible.
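
Just to illustrate the point (a rough, untested sketch based only on the code
above, not a tested replacement): the helper could keep the wrapped type by
making the out-parameter mfn_t as well, with _ept_get_entry() here standing
for the original function with its mfn_t return type left unchanged:

static int
ept_get_entry_wait(mfn_t *mfn, int *populated,
                   struct p2m_domain *p2m, unsigned long gfn,
                   p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                   unsigned int *page_order)
{
    int ret = 1;

    /* _ept_get_entry() keeps returning mfn_t as before. */
    *mfn = _ept_get_entry(p2m, gfn, t, a, q, page_order);

    /* No further action in case of query */
    if ( q == p2m_query )
        return ret;

    /* Populate the page once in case of guest access, then go to sleep */
    if ( p2m_is_paging(*t) && current->domain == p2m->domain )
    {
        if ( !*populated )
        {
            *populated = 1;
            p2m_mem_paging_populate(p2m->domain, gfn);
        }
        ret = 0;
    }

    return ret;
}

static mfn_t
ept_get_entry(struct p2m_domain *p2m, unsigned long gfn,
              p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
              unsigned int *page_order)
{
    mfn_t mfn;
    int populated = 0;

    /* Condition is evaluated at least once, so mfn is always set. */
    wait_event(p2m->wq, ept_get_entry_wait(&mfn, &populated, p2m, gfn,
                                           t, a, q, page_order));
    return mfn;
}

That way none of the existing _mfn()/mfn_x() conversions inside the lookup
path need to change.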

Jan

>                             unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
>                             p2m_query_t q, unsigned int *page_order)
>  {
> @@ -516,7 +516,7 @@ static mfn_t ept_get_entry(struct p2m_do
>      u32 index;
>      int i;
>      int ret = 0;
> -    mfn_t mfn = _mfn(INVALID_MFN);
> +    unsigned long mfn = INVALID_MFN;
>  
>      *t = p2m_mmio_dm;
>      *a = p2m_access_n;
> @@ -582,17 +582,14 @@ static mfn_t ept_get_entry(struct p2m_do
>          *t = ept_entry->sa_p2mt;
>          *a = ept_entry->access;
>  
> -        mfn = _mfn(ept_entry->mfn);
> +        mfn = ept_entry->mfn;
>          if ( i )
>          {
>              /* 
>               * We may meet super pages, and to split into 4k pages
>               * to emulate p2m table
>               */
> -            unsigned long split_mfn = mfn_x(mfn) +
> -                (gfn_remainder &
> -                 ((1 << (i * EPT_TABLE_ORDER)) - 1));
> -            mfn = _mfn(split_mfn);
> +            mfn += (gfn_remainder & ((1 << (i * EPT_TABLE_ORDER)) - 1));
>          }
>  
>          if ( page_order )
> @@ -604,6 +601,41 @@ out:
>      return mfn;
>  }
>  
> +static int
> +ept_get_entry_wait(unsigned long *mfn, int *populated,
> +               struct p2m_domain *p2m, unsigned long gfn, 
> +               p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
> +               unsigned int *page_order)
> +{
> +    int ret = 1;
> +    *mfn = _ept_get_entry(p2m, gfn, t, a, q, page_order);
> +
> +    /* No further action in case of query */
> +    if ( q == p2m_query )
> +        goto done;
> +
> +    /* Populate the page once in case of guest access, then go to sleep */
> +    if ( p2m_is_paging(*t) && current->domain == p2m->domain ) {
> +        if ( *populated == 0 ) {
> +            *populated = 1;
> +            p2m_mem_paging_populate(p2m->domain, gfn);
> +        }
> +        ret = 0;
> +    }
> +done:
> +    return ret;
> +}
> +static mfn_t
> +ept_get_entry(struct p2m_domain *p2m, unsigned long gfn, 
> +               p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
> +               unsigned int *page_order)
> +{
> +    unsigned long mfn;
> +    int populated = 0;
> +    wait_event(p2m->wq, ept_get_entry_wait(&mfn, &populated, p2m, gfn, t, a, q, page_order));
> +    return _mfn(mfn);
> +}
> +
>  /* WARNING: Only caller doesn't care about PoD pages.  So this function will
>   * always return 0 for PoD pages, not populate them.  If that becomes necessary,
>   * pass a p2m_query_t type along to distinguish. */
> diff -r de6860cb9205 -r 9d63ecd3969b xen/arch/x86/mm/p2m.c
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -76,6 +76,7 @@ static void p2m_initialise(struct domain
>      INIT_PAGE_LIST_HEAD(&p2m->pages);
>      INIT_PAGE_LIST_HEAD(&p2m->pod.super);
>      INIT_PAGE_LIST_HEAD(&p2m->pod.single);
> +    init_waitqueue_head(&p2m->wq);
>  
>      p2m->domain = d;
>      p2m->default_access = p2m_access_rwx;
> @@ -1072,6 +1073,8 @@ void p2m_mem_paging_resume(struct domain
>  
>      /* Unpause all vcpus that were paused because the ring was full */
>      wake_up(&d->mem_event->mem_paging.wq);
> +    /* Unpause all vcpus that were paused because the gfn was paged */
> +    wake_up(&p2m->wq);
>  }
>  
>  void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla, 
> diff -r de6860cb9205 -r 9d63ecd3969b xen/include/asm-x86/p2m.h
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -286,6 +286,7 @@ struct p2m_domain {
>          unsigned         reclaim_single; /* Last gpfn of a scan */
>          unsigned         max_guest;    /* gpfn of max guest demand-populate */
>      } pod;
> +    struct waitqueue_head wq;
>  };
>  
>  /* get host p2m table */
> 