
Re: [Xen-devel] [PATCH 03 of 10 v2] libxl, libxc: introduce libxl_get_numainfo()



On Fri, 2012-06-15 at 18:04 +0100, Dario Faggioli wrote:
> Make some NUMA node information available to the toolstack. Achieve
> this by means of xc_numainfo(), which exposes memory size and amount
> of free memory of each node, as well as the relative distances of
> each node to all the others.
> 
> For properly exposing distances we need the IDL to support arrays.
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
> 
> Changes from v1:
>  * malloc converted to libxl__zalloc(NOGC, ...).
>  * The patch also accommodates some bits of what was in "libxc,
>    libxl: introduce xc_nodemap_t and libxl_nodemap" which was
>    removed as well, as full support for node maps at libxc
>    level is not needed (yet!).
> 
> diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
> --- a/tools/libxc/xc_misc.c
> +++ b/tools/libxc/xc_misc.c
> @@ -35,6 +35,20 @@ int xc_get_max_cpus(xc_interface *xch)
>      return max_cpus;
>  }
>  
> +int xc_get_max_nodes(xc_interface *xch)
> +{
> +    static int max_nodes = 0;
> +    xc_physinfo_t physinfo;
> +
> +    if ( max_nodes )
> +        return max_nodes;
> +
> +    if ( !xc_physinfo(xch, &physinfo) )
> +        max_nodes = physinfo.max_node_id + 1;
> +
> +    return max_nodes;
> +}
> +
>  int xc_get_cpumap_size(xc_interface *xch)
>  {
>      return (xc_get_max_cpus(xch) + 7) / 8;
> diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> --- a/tools/libxc/xenctrl.h
> +++ b/tools/libxc/xenctrl.h
> @@ -329,6 +329,12 @@ int xc_get_cpumap_size(xc_interface *xch
>  /* allocate a cpumap */
>  xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
>  
> +/*
> + * NODEMAP handling
> + */
> +/* return maximum number of NUMA nodes the hypervisor supports */
> +int xc_get_max_nodes(xc_interface *xch);
> +
>  /*
>   * DOMAIN DEBUGGING FUNCTIONS
>   */
> diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
> --- a/tools/libxl/libxl.c
> +++ b/tools/libxl/libxl.c
> @@ -3223,6 +3223,71 @@ fail:
>      return ret;
>  }
>  
> +libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr)
> +{
[...]

The hypercall buffer stuff all looks good.

> +    if (ret)
> +        *nr = max_nodes;

You could put this before the fail: label. Not that it matters.
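
Purely to illustrate the shape I mean (made-up names, since the body
is elided above -- this is not the actual libxl code):

    #include <stdlib.h>

    /* Sketch: with the assignment before the label, the label covers
     * only the error path and the "if (ret)" guard can go away. */
    static int *make_list(int *nr)
    {
        int count = 4;                  /* stand-in for max_nodes */
        int *ret = calloc(count, sizeof(*ret));

        if (!ret)
            goto fail;

        *nr = count;    /* success path only, before the label */
     fail:
        return ret;     /* NULL on failure, the list otherwise */
    }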

> +    return ret;
> +}
> +
>  const libxl_version_info* libxl_get_version_info(libxl_ctx *ctx)
>  {
>      union {
> diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
> --- a/tools/libxl/libxl.h
> +++ b/tools/libxl/libxl.h
> @@ -532,6 +532,9 @@ int libxl_domain_preserve(libxl_ctx *ctx
>  /* get max. number of cpus supported by hypervisor */
>  int libxl_get_max_cpus(libxl_ctx *ctx);
>  
> +/* get max. number of NUMA nodes supported by hypervisor */
> +int libxl_get_max_nodes(libxl_ctx *ctx);
> +
>  int libxl_domain_rename(libxl_ctx *ctx, uint32_t domid,
>                          const char *old_name, const char *new_name);
>  
> @@ -769,6 +772,13 @@ int libxl_get_physinfo(libxl_ctx *ctx, l
>  #define LIBXL_CPUTOPOLOGY_INVALID_ENTRY (~(uint32_t)0)
>  libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nr);
>  void libxl_cputopology_list_free(libxl_cputopology *, int nr);
> +#define LIBXL_NUMAINFO_INVALID_ENTRY (~(uint32_t)0)
> +libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr);
> +  /* On success, a list of nr libxl_numainfo elements is returned.
> +   * That is from malloc, thus it is up to the caller to invoke
> +   * libxl_cpupoolinfo_list_free() on it.

Don't you mean libxl_numainfo_list_free()?

Also normally we put the comment before the prototype.
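
i.e. something like:

    /* On success, a list of nr libxl_numainfo elements is returned.
     * That is from malloc, thus it is up to the caller to invoke
     * libxl_numainfo_list_free() on it.
     */
    libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr);
    void libxl_numainfo_list_free(libxl_numainfo *, int nr);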

> +   */
> +void libxl_numainfo_list_free(libxl_numainfo *, int nr);
>  libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
>                                         int *nb_vcpu, int *nrcpus);
>  void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr);
> diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
> --- a/tools/libxl/libxl_types.idl
> +++ b/tools/libxl/libxl_types.idl
> @@ -423,6 +423,12 @@ libxl_physinfo = Struct("physinfo", [
>      ("cap_hvm_directio", bool),
>      ], dir=DIR_OUT)
>  
> +libxl_numainfo = Struct("numainfo", [
> +    ("size", uint64),
> +    ("free", uint64),
> +    ("dists", Array(uint32, "num_dists")),

This is an array of distances from this node to each other node? That
could be worth a comment.
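
For instance, assuming dists[j] is the distance from this node to
node j (which is what the comment should confirm), I'd expect a
caller to walk it roughly like this (just a sketch):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libxl.h>

    /* Sketch: dump the full distance matrix. */
    static void dump_distances(libxl_ctx *ctx)
    {
        int nr, i, j;
        libxl_numainfo *info = libxl_get_numainfo(ctx, &nr);

        if (!info)
            return;
        for (i = 0; i < nr; i++)
            for (j = 0; j < info[i].num_dists; j++)
                printf("node %d -> node %d: %"PRIu32"\n",
                       i, j, info[i].dists[j]);
        libxl_numainfo_list_free(info, nr);
    }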


> +    ], dir=DIR_OUT)
> +
>  libxl_cputopology = Struct("cputopology", [
>      ("core", uint32),
>      ("socket", uint32),
> diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
> --- a/tools/libxl/libxl_utils.c
> +++ b/tools/libxl/libxl_utils.c
> @@ -537,6 +537,11 @@ int libxl_get_max_cpus(libxl_ctx *ctx)
>      return xc_get_max_cpus(ctx->xch);
>  }
>  
> +int libxl_get_max_nodes(libxl_ctx *ctx)
> +{
> +    return xc_get_max_nodes(ctx->xch);
> +}

Is this needed externally to libxl or do we expect all callers to use
libxl_get_numainfo? I suppose there is no harm in exporting this either
way.
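
(FWIW, I can imagine an external caller wanting it just to size a
per-node array without fetching the whole numainfo list -- something
hypothetical like:

    #include <stdint.h>
    #include <stdlib.h>
    #include <libxl.h>

    /* Hypothetical caller: allocate one counter per NUMA node. */
    static uint64_t *alloc_per_node_counters(libxl_ctx *ctx)
    {
        int nr_nodes = libxl_get_max_nodes(ctx);

        return nr_nodes > 0 ? calloc(nr_nodes, sizeof(uint64_t))
                            : NULL;
    }

so exporting it seems fine to me.)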

> +
>  int libxl__enum_from_string(const libxl_enum_string_table *t,
>                              const char *s, int *e)
>  {
> @@ -551,6 +556,14 @@ int libxl__enum_from_string(const libxl_
>      return ERROR_FAIL;
>  }
>  
> +void libxl_numainfo_list_free(libxl_numainfo *list, int nr)
> +{
> +    int i;
> +    for (i = 0; i < nr; i++)
> +        libxl_numainfo_dispose(&list[i]);
> +    free(list);
> +}
> +
>  void libxl_cputopology_list_free(libxl_cputopology *list, int nr)
>  {
>      int i;
> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -484,6 +484,7 @@ typedef struct xen_sysctl_topologyinfo x
>  DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
>  
>  /* XEN_SYSCTL_numainfo */
> +#define INVALID_NUMAINFO_ID (~0U)

It feels like there ought to be hunks in the hypervisor which either use
this symbol instead of a hardcoded ~0U or which remove the internal
definition in favour of this one?
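
Something along these lines, say -- hypothetical, I haven't checked
where exactly the hardcoded value lives:

    #include <stdint.h>

    #define INVALID_NUMAINFO_ID (~0U)

    /* Hypothetical helper: return the sentinel for an offline or
     * unknown node instead of open-coding ~0U at each call site. */
    static uint32_t numainfo_entry(int valid, uint32_t val)
    {
        return valid ? val : INVALID_NUMAINFO_ID;
    }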


