
Re: [Xen-devel] [PATCH 06 of 10 v2] libxl: allow for explicitly specifying node-affinity



On 19/12/12 19:07, Dario Faggioli wrote:
By introducing a nodemap in libxl_domain_build_info and
providing the get/set methods to deal with it.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>

I think you'll probably need to add a line like the following:

#define LIBXL_HAVE_NODEAFFINITY 1

That way, people wanting to build against different versions of the library can behave appropriately. But IanC or IanJ would have the final word on that, I think.
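
Something along these lines on the caller's side is what I have in mind (just a sketch; the helper and its name are made up):

#include <libxl.h>

/* Hypothetical caller-side helper: only use the new API if the libxl
 * we are building against advertises it. */
static int maybe_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
                                  libxl_bitmap *nodemap)
{
#ifdef LIBXL_HAVE_NODEAFFINITY
    /* New enough libxl: the node-affinity call is available. */
    return libxl_domain_set_nodeaffinity(ctx, domid, nodemap);
#else
    /* Older libxl: no node-affinity support, so nothing to do. */
    return 0;
#endif
}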

 -George


diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4142,6 +4142,26 @@ int libxl_set_vcpuaffinity_all(libxl_ctx
      return rc;
  }
+int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
+                                  libxl_bitmap *nodemap)
+{
+    if (xc_domain_node_setaffinity(ctx->xch, domid, nodemap->map)) {
+        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting node affinity");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
+int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
+                                  libxl_bitmap *nodemap)
+{
+    if (xc_domain_node_getaffinity(ctx->xch, domid, nodemap->map)) {
+        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting node affinity");
+        return ERROR_FAIL;
+    }
+    return 0;
+}
+
  int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
  {
      GC_INIT(ctx);
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -861,6 +861,10 @@ int libxl_set_vcpuaffinity(libxl_ctx *ct
                             libxl_bitmap *cpumap);
  int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
                                 unsigned int max_vcpus, libxl_bitmap *cpumap);
+int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
+                                  libxl_bitmap *nodemap);
+int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
+                                  libxl_bitmap *nodemap);
  int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap);
  libxl_scheduler libxl_get_scheduler(libxl_ctx *ctx);
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -184,6 +184,12 @@ int libxl__domain_build_info_setdefault(
      libxl_defbool_setdefault(&b_info->numa_placement, true);
 
+    if (!b_info->nodemap.size) {
+        if (libxl_node_bitmap_alloc(CTX, &b_info->nodemap, 0))
+            return ERROR_FAIL;
+        libxl_bitmap_set_any(&b_info->nodemap);
+    }
+
      if (b_info->max_memkb == LIBXL_MEMKB_DEFAULT)
          b_info->max_memkb = 32 * 1024;
      if (b_info->target_memkb == LIBXL_MEMKB_DEFAULT)
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -230,6 +230,7 @@ int libxl__build_pre(libxl__gc *gc, uint
          if (rc)
              return rc;
      }
+    libxl_domain_set_nodeaffinity(ctx, domid, &info->nodemap);
      libxl_set_vcpuaffinity_all(ctx, domid, info->max_vcpus, &info->cpumap);
      xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + LIBXL_MAXMEM_CONSTANT);
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -261,6 +261,7 @@ libxl_domain_build_info = Struct("domain
      ("max_vcpus",       integer),
      ("avail_vcpus",     libxl_bitmap),
      ("cpumap",          libxl_bitmap),
+    ("nodemap",         libxl_bitmap),
      ("numa_placement",  libxl_defbool),
      ("tsc_mode",        libxl_tsc_mode),
      ("max_memkb",       MemKB),


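For completeness, this is roughly how a toolstack caller could drive the new get/set functions once the patch is applied (illustrative only: error handling is trimmed, and the choice of nodes 0 and 1 is arbitrary):

#include <libxl.h>
#include <libxl_utils.h>

static int restrict_domain_to_nodes(libxl_ctx *ctx, uint32_t domid)
{
    libxl_bitmap nodemap;
    int rc;

    /* Size the bitmap to the host's node count (0 = "ask libxl"). */
    rc = libxl_node_bitmap_alloc(ctx, &nodemap, 0);
    if (rc)
        return rc;

    /* Keep the domain's memory on nodes 0 and 1. */
    libxl_bitmap_set(&nodemap, 0);
    libxl_bitmap_set(&nodemap, 1);
    rc = libxl_domain_set_nodeaffinity(ctx, domid, &nodemap);

    /* Read the affinity back to see what the hypervisor recorded. */
    if (!rc)
        rc = libxl_domain_get_nodeaffinity(ctx, domid, &nodemap);

    libxl_bitmap_dispose(&nodemap);
    return rc;
}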
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

