
Re: [Xen-devel] [PATCH 03 of 10 [RFC]] libxc, libxl: Introduce xc_nodemap_t and libxl_nodemap



On 11/04/12 14:17, Dario Faggioli wrote:
Exactly as we have xc_cpumap_t, something similar for representing
NUMA nodes (i.e., xc_nodemap_t and nodemap_t) could be useful. The
same applies to libxl_cpumap, which in turn now has its
node-related counterpart, libxl_nodemap.

This is all in preparation for making explicit specification
of the `node affinity` of a guest possible.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>

diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -35,21 +35,49 @@ int xc_get_max_cpus(xc_interface *xch)
      return max_cpus;
  }

+int xc_get_max_nodes(xc_interface *xch)
+{
+    static int max_nodes = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_nodes )
+        return max_nodes;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        max_nodes = physinfo.max_node_id + 1;
+
+    return max_nodes;
+}
+
  int xc_get_cpumap_size(xc_interface *xch)
  {
      return (xc_get_max_cpus(xch) + 7) / 8;
  }

-xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
+int xc_get_nodemap_size(xc_interface *xch)
  {
-    int sz;
+    return (xc_get_max_nodes(xch) + 7) / 8;
+}

-    sz = xc_get_cpumap_size(xch);
+/* XXX See [*] below ... */
+static xc_cpumap_t __xc_map_alloc(xc_interface *xch, int sz)
+{
      if (sz == 0)
          return NULL;
      return calloc(1, sz);
  }

+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
+{
+    return __xc_map_alloc(xch, xc_get_cpumap_size(xch));
+}
+
+xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
+{
+    /* XXX [*] How bad is this? Ideas? */
+    return (xc_nodemap_t) __xc_map_alloc(xch, xc_get_nodemap_size(xch));
+}
+
I don't think it's incredibly terrible if it would buy us something; but I don't really see that it does. Two functions would be a lot more readable, IMHO.

Other than that, looks fine.
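
Just to illustrate, the two-function version would look something
like this (a sketch only; it simply open-codes the calloc() in each
allocator, which also gets rid of the xc_nodemap_t cast):

    xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
    {
        int sz = xc_get_cpumap_size(xch);

        /* sz is 0 if we could not get the number of cpus */
        if (sz == 0)
            return NULL;
        return calloc(1, sz);
    }

    xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
    {
        int sz = xc_get_nodemap_size(xch);

        /* sz is 0 if we could not get the number of nodes */
        if (sz == 0)
            return NULL;
        return calloc(1, sz);
    }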
  int xc_readconsolering(xc_interface *xch,
                         char *buffer,
                         unsigned int *pnr_chars,
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -330,6 +330,20 @@ int xc_get_cpumap_size(xc_interface *xch
  xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);

  /*
+ * NODEMAP handling
+ */
+typedef uint8_t *xc_nodemap_t;
+
+/* return maximum number of NUMA nodes the hypervisor supports */
+int xc_get_max_nodes(xc_interface *xch);
+
+/* return array size for nodemap */
+int xc_get_nodemap_size(xc_interface *xch);
+
+/* allocate a nodemap */
+xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
+
+/*
   * DOMAIN DEBUGGING FUNCTIONS
   */

diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c
+++ b/tools/libxl/libxl_utils.c
@@ -486,11 +486,27 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l
      return libxl_map_alloc(ctx, cpumap, max_cpus);
  }

+int libxl_nodemap_alloc(libxl_ctx *ctx, libxl_nodemap *nodemap)
+{
+    int max_nodes;
+
+    max_nodes = libxl_get_max_nodes(ctx);
+    if (max_nodes == 0)
+        return ERROR_FAIL;
+
+    return libxl_map_alloc(ctx, nodemap, max_nodes);
+}
+
  int libxl_get_max_cpus(libxl_ctx *ctx)
  {
      return xc_get_max_cpus(ctx->xch);
  }

+int libxl_get_max_nodes(libxl_ctx *ctx)
+{
+    return xc_get_max_nodes(ctx->xch);
+}
+
  int libxl__enum_from_string(const libxl_enum_string_table *t,
                              const char *s, int *e)
  {
diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h
+++ b/tools/libxl/libxl_utils.h
@@ -109,6 +109,35 @@ static inline int libxl_cpumap_cpu_valid
  #define libxl_for_each_set_cpu(v, m) for (v = 0; v < (m).size * 8; v++) \
                                               if (libxl_cpumap_test(&(m), v))

+int libxl_nodemap_alloc(libxl_ctx *ctx, libxl_nodemap *nodemap);
+static inline int libxl_nodemap_test(libxl_nodemap *nodemap, int node)
+{
+    return libxl_map_test(nodemap, node);
+}
+static inline void libxl_nodemap_set(libxl_nodemap *nodemap, int node)
+{
+    libxl_map_set(nodemap, node);
+}
+static inline void libxl_nodemap_reset(libxl_nodemap *nodemap, int node)
+{
+    libxl_map_reset(nodemap, node);
+}
+static inline void libxl_nodemap_set_any(libxl_nodemap *nodemap)
+{
+    libxl_map_set_any(nodemap);
+}
+static inline void libxl_nodemap_set_none(libxl_nodemap *nodemap)
+{
+    libxl_map_set_none(nodemap);
+}
+static inline int libxl_nodemap_node_valid(libxl_nodemap *nodemap, int node)
+{
+    return libxl_map_elem_valid(nodemap, node);
+}
+#define libxl_for_each_node(var, map) for (var = 0; var < (map).size * 8; var++)
+#define libxl_for_each_set_node(v, m) for (v = 0; v < (m).size * 8; v++) \
+                                              if (libxl_nodemap_test(&(m), v))
+
  static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
      return (s + 1023) / 1024;
  }
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -116,6 +116,14 @@ int xenctl_cpumap_to_cpumask(cpumask_var
      return err;
  }

+int xenctl_nodemap_to_nodemask(nodemask_t *nodemask,
+                               const struct xenctl_map *xenctl_nodemap)
+{
+    return xenctl_map_to_bitmap(nodes_addr(*nodemask),
+                                xenctl_nodemap,
+                                MAX_NUMNODES);
+}
+
  static inline int is_free_domid(domid_t dom)
  {
      struct domain *d;
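
For reference, a minimal sketch of how a caller might drive the new
libxl interface (purely illustrative; it assumes an initialized
libxl_ctx *ctx, and that the nodemap grows a dispose/free counterpart
like libxl_cpumap has):

    libxl_nodemap nodemap;
    int node;

    /* one bit per NUMA node the hypervisor knows about */
    if (libxl_nodemap_alloc(ctx, &nodemap))
        return ERROR_FAIL;

    libxl_nodemap_set_none(&nodemap);
    libxl_nodemap_set(&nodemap, 0);   /* e.g. restrict a guest to node 0 */

    libxl_for_each_set_node(node, nodemap)
        fprintf(stderr, "node %d is in the map\n", node);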

