
Re: [Xen-devel] [PATCH v10 7/9] libxc: allocate domain memory for vnuma enabled



On Wed, 2014-09-03 at 00:24 -0400, Elena Ufimtseva wrote:
> vNUMA-aware domain memory allocation based on the provided
> vnode-to-pnode map. If this map is not defined, use the
> default allocation; default allocation does not specify
> any physical node when allocating memory.
> Domain creation will fail if at least one node was left
> undefined.
> 
> Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
> ---
>  tools/libxc/xc_dom.h     |   13 ++++++++
>  tools/libxc/xc_dom_x86.c |   76 ++++++++++++++++++++++++++++++++++++++--------
>  2 files changed, 77 insertions(+), 12 deletions(-)
> 
> diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
> index 6ae6a9f..61c2a06 100644
> --- a/tools/libxc/xc_dom.h
> +++ b/tools/libxc/xc_dom.h
> @@ -164,6 +164,16 @@ struct xc_dom_image {
>  
>      /* kernel loader */
>      struct xc_dom_arch *arch_hooks;
> +
> +    /*
> +     * vNUMA topology and memory allocation structure.
> +     * Defines how memory is allocated per physical NUMA
> +     * node, as given by vnode_to_pnode.
> +     */
> +    uint32_t vnodes;
> +    uint64_t *numa_memszs;
> +    unsigned int *vnode_to_pnode;
> +
>      /* allocate up to virt_alloc_end */
>      int (*allocate) (struct xc_dom_image * dom, xen_vaddr_t up_to);
>  };
> @@ -385,6 +395,9 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
>  int arch_setup_meminit(struct xc_dom_image *dom);
>  int arch_setup_bootearly(struct xc_dom_image *dom);
>  int arch_setup_bootlate(struct xc_dom_image *dom);
> +int arch_boot_alloc(struct xc_dom_image *dom);

I don't think this should be public. A static helper within xc_dom_x86.c
would be fine.

Otherwise the question becomes: what should the ARM version of this
function be?
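
Something along these lines (an untested sketch, reusing the patch's
names) would keep it internal to xc_dom_x86.c, drop the prototype from
xc_dom.h, and sidestep the ARM question entirely:

    /* Forward declaration near the top of xc_dom_x86.c; only
     * arch_setup_meminit() needs to call it. */
    static int arch_boot_alloc(struct xc_dom_image *dom);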

> +
> +#define LIBXC_VNUMA_NO_NODE ~((unsigned int)0)
>  
>  /*
>   * Local variables:
> diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
> index bf06fe4..f2b4c98 100644
> --- a/tools/libxc/xc_dom_x86.c
> +++ b/tools/libxc/xc_dom_x86.c
> @@ -759,7 +759,7 @@ static int x86_shadow(xc_interface *xch, domid_t domid)
>  int arch_setup_meminit(struct xc_dom_image *dom)
>  {
>      int rc;
> -    xen_pfn_t pfn, allocsz, i, j, mfn;
> +    xen_pfn_t pfn, i, j, mfn;
>  
>      rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
>      if ( rc )
> @@ -811,25 +811,77 @@ int arch_setup_meminit(struct xc_dom_image *dom)
>          /* setup initial p2m */
>          for ( pfn = 0; pfn < dom->total_pages; pfn++ )
>              dom->p2m_host[pfn] = pfn;
> +
> +
> +        /*
> +         * Any PV domain should have at least one vNUMA node;
> +         * if no config was defined, the caller sets up a
> +         * single default vNUMA node.
> +         */
> +        if ( dom->vnodes == 0 )
> +        {
> +            xc_dom_printf(dom->xch,
> +                          "%s: Cannot construct vNUMA topology with 0 vnodes\n",
> +                          __FUNCTION__);
> +            return -EINVAL;
> +        }
>          
>          /* allocate guest memory */
> -        for ( i = rc = allocsz = 0;
> -              (i < dom->total_pages) && !rc;
> -              i += allocsz )
> -        {
> -            allocsz = dom->total_pages - i;
> -            if ( allocsz > 1024*1024 )
> -                allocsz = 1024*1024;
> -            rc = xc_domain_populate_physmap_exact(
> -                dom->xch, dom->guest_domid, allocsz,
> -                0, 0, &dom->p2m_host[i]);
> -        }
> +        rc = arch_boot_alloc(dom);
> +        if ( rc )
> +            return rc;
>  
>          /* Ensure no unclaimed pages are left unused.
>           * OK to call if hadn't done the earlier claim call. */
>          (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
>                                      0 /* cancels the claim */);
>      }
> +    return rc;
> +}
> +
> +/*
> + * Allocate domain memory, taking into account the
> + * defined vNUMA topology and vnode_to_pnode map.
> + * Any PV guest will have at least one vNUMA node,
> + * with numa_memszs[0] equal to the domain memory size
> + * and the rest of the topology initialized to defaults.
> + */
> +int arch_boot_alloc(struct xc_dom_image *dom)

This makes no reference to dom->total_pages, which the old code used.
What guarantees that the per-node sizes don't sum to more than the
domain's total?
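
Something like the following (untested, reusing the patch's own field
names) at the top of arch_boot_alloc() would catch a mismatch before any
allocation is attempted:

    unsigned long long total = 0;
    unsigned int n;

    /* Per-vnode sizes are in megabytes; the sum of their page
     * counts must match what the domain was built for. */
    for ( n = 0; n < dom->vnodes; n++ )
        total += (dom->numa_memszs[n] << 20) >> PAGE_SHIFT_X86;

    if ( total != dom->total_pages )
    {
        xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
                     "%s: vNUMA page count %llu != total_pages %lu\n",
                     __FUNCTION__, total, dom->total_pages);
        return -EINVAL;
    }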

> +{
> +    int rc = 0;
> +    unsigned int n, memflags;
> +    unsigned long long vnode_pages;
> +    unsigned long long allocsz, node_pfn_base = 0, i;
> +
> +    for ( n = 0; n < dom->vnodes; n++ )
> +    {
> +        memflags = 0;
> +        if ( dom->vnode_to_pnode[n] != LIBXC_VNUMA_NO_NODE )
> +        {
> +            memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[n]);
> +            memflags |= XENMEMF_exact_node_request;
> +        }
> +        /* numa_memszs are in megabytes; compute this node's page count. */
> +        vnode_pages = (dom->numa_memszs[n] << 20) >> PAGE_SHIFT_X86;
> +        for ( i = 0; i < vnode_pages; i += allocsz )
> +        {
> +            allocsz = vnode_pages - i;
> +            if ( allocsz > 1024*1024 )
> +                allocsz = 1024*1024;
> +
> +            rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
> +                                            allocsz, 0, memflags,
> +                                            &dom->p2m_host[node_pfn_base + i]);
> +            if ( rc )
> +            {
> +                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> +                             "%s: Failed allocation of %llu pages for vnode %u on pnode %u out of %lu\n",
> +                             __FUNCTION__, vnode_pages, n, dom->vnode_to_pnode[n], dom->total_pages);
> +                return rc;
> +            }
> +        }
> +        node_pfn_base += i;
> +    }
>  
>      return rc;
>  }


