
[Xen-devel] [PATCH 3/4] libxl: report how much memory a domain has on each NUMA node



by calling xc_domain_numainfo(). A new data type, libxl_domain_numainfo,
is introduced. For now it only holds how much memory the domain has
allocated on each NUMA node, but it can be extended in the future to
report more per-domain NUMA related information.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
 tools/libxl/libxl.c         |   30 ++++++++++++++++++++++++++++++
 tools/libxl/libxl.h         |    2 ++
 tools/libxl/libxl_types.idl |    4 ++++
 3 files changed, 36 insertions(+)
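
Just for reference (not part of the patch), a minimal sketch of how a
caller, e.g. a future xl subcommand, might consume the new function. The
print_domain_numainfo() helper below is purely illustrative; it only
assumes, as in the hunk below, that libxl hands back a heap allocated
struct, so the caller disposes the members and then frees the struct
itself:

    #include <stdio.h>
    #include <stdlib.h>
    #include <inttypes.h>
    #include <libxl.h>

    static void print_domain_numainfo(libxl_ctx *ctx, uint32_t domid)
    {
        libxl_domain_numainfo *ni;
        int i;

        ni = libxl_domain_get_numainfo(ctx, domid);
        if (!ni) {
            fprintf(stderr, "cannot get NUMA info for domain %"PRIu32"\n",
                    domid);
            return;
        }

        /* num_memkbs is the number of NUMA nodes, memkbs[i] the amount
         * of memory (in KiB) the domain has allocated on node i. */
        for (i = 0; i < ni->num_memkbs; i++)
            printf("node%d: %"PRIu64" KiB\n", i, ni->memkbs[i]);

        libxl_domain_numainfo_dispose(ni);
        free(ni);
    }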

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 730f6e1..e5a1cb0 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4631,6 +4631,36 @@ int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
     return 0;
 }
 
+libxl_domain_numainfo *libxl_domain_get_numainfo(libxl_ctx *ctx,
+                                                 uint32_t domid)
+{
+    GC_INIT(ctx);
+    int max_nodes;
+    libxl_domain_numainfo *ret = NULL;
+
+    max_nodes = libxl_get_max_nodes(ctx);
+    if (max_nodes < 0) {
+        LOG(ERROR, "Unable to determine the number of NUMA nodes");
+        goto out;
+    }
+
+    ret = libxl__zalloc(NOGC, sizeof(libxl_domain_numainfo));
+    ret->memkbs = libxl__calloc(NOGC, max_nodes, sizeof(uint64_t));
+
+    if (xc_domain_numainfo(ctx->xch, domid, &max_nodes, ret->memkbs)) {
+        LOGE(ERROR, "getting domain NUMA info");
+        libxl_domain_numainfo_dispose(ret);
+        free(ret);
+        ret = NULL;
+        goto out;
+    }
+    ret->num_memkbs = max_nodes;
+
+ out:
+    GC_FREE;
+    return ret;
+}
+
 static int libxl__set_vcpuonline_xenstore(libxl__gc *gc, uint32_t domid,
                                          libxl_bitmap *cpumap)
 {
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 06bbca6..f452752 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -1067,6 +1067,8 @@ int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
                                   libxl_bitmap *nodemap);
 int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
                                   libxl_bitmap *nodemap);
+libxl_domain_numainfo *libxl_domain_get_numainfo(libxl_ctx *ctx,
+                                                 uint32_t domid);
 int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap);
 
 libxl_scheduler libxl_get_scheduler(libxl_ctx *ctx);
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 649ce50..76158d7 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -515,6 +515,10 @@ libxl_vcpuinfo = Struct("vcpuinfo", [
     ("cpumap", libxl_bitmap), # current cpu's affinities
     ], dir=DIR_OUT)
 
+libxl_domain_numainfo = Struct("domain_numainfo", [
+    ("memkbs", Array(uint64, "num_memkbs")),
+    ], dir=DIR_OUT)
+
 libxl_physinfo = Struct("physinfo", [
     ("threads_per_core", uint32),
     ("cores_per_socket", uint32),

