[Xen-devel] [PATCH 03 of 11] libxc, libxl: introduce xc_nodemap_t and libxl_nodemap
As the NUMA node-related counterparts of xc_cpumap_t and libxl_cpumap.
This is in preparation for making it possible to manipulate NUMA nodes
from the toolstack(s).
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -35,11 +35,30 @@ int xc_get_max_cpus(xc_interface *xch)
     return max_cpus;
 }
 
+int xc_get_max_nodes(xc_interface *xch)
+{
+    static int max_nodes = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_nodes )
+        return max_nodes;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        max_nodes = physinfo.max_node_id + 1;
+
+    return max_nodes;
+}
+
 int xc_get_cpumap_size(xc_interface *xch)
 {
     return (xc_get_max_cpus(xch) + 7) / 8;
 }
 
+int xc_get_nodemap_size(xc_interface *xch)
+{
+    return (xc_get_max_nodes(xch) + 7) / 8;
+}
+
 xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
 {
     int sz;
@@ -50,6 +69,16 @@ xc_cpumap_alloc(xc_interface
     return calloc(1, sz);
 }
 
+xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
+{
+    int sz;
+
+    sz = xc_get_nodemap_size(xch);
+    if (sz == 0)
+        return NULL;
+    return calloc(1, sz);
+}
+
 int xc_readconsolering(xc_interface *xch,
                        char *buffer,
                        unsigned int *pnr_chars,
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -330,6 +330,20 @@ int xc_get_cpumap_size(xc_interface *xch
 xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
 
 /*
+ * NODEMAP handling
+ */
+typedef uint8_t *xc_nodemap_t;
+
+/* return maximum number of NUMA nodes the hypervisor supports */
+int xc_get_max_nodes(xc_interface *xch);
+
+/* return array size for nodemap */
+int xc_get_nodemap_size(xc_interface *xch);
+
+/* allocate a nodemap */
+xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
+
+/*
  * DOMAIN DEBUGGING FUNCTIONS
  */
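
For illustration only, not part of the patch: a minimal sketch of how a
toolstack program could drive the nodemap interface declared above. Only the
xc_nodemap_t helpers come from this patch; xc_interface_open() /
xc_interface_close() and the C library calls are the pre-existing API.

/* Illustrative sketch (not part of the patch): query the number of NUMA
 * nodes and allocate/fill an xc_nodemap_t from a toolstack program. */
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    xc_nodemap_t nodemap;

    if ( !xch )
        return 1;

    nodemap = xc_nodemap_alloc(xch);
    if ( !nodemap )
    {
        xc_interface_close(xch);
        return 1;
    }

    /* The map is a plain byte array, one bit per NUMA node, exactly as
     * xc_cpumap_t is one bit per CPU: mark node 0 as an example. */
    nodemap[0] |= 1;

    printf("up to %d NUMA nodes, nodemap is %d byte(s)\n",
           xc_get_max_nodes(xch), xc_get_nodemap_size(xch));

    free(nodemap);
    xc_interface_close(xch);
    return 0;
}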
diff --git a/tools/libxl/gentest.py b/tools/libxl/gentest.py
--- a/tools/libxl/gentest.py
+++ b/tools/libxl/gentest.py
@@ -20,7 +20,7 @@ def randomize_case(s):
 def randomize_enum(e):
     return random.choice([v.name for v in e.values])
 
-handcoded = ["libxl_cpumap", "libxl_key_value_list",
+handcoded = ["libxl_cpumap", "libxl_nodemap", "libxl_key_value_list",
              "libxl_cpuid_policy_list", "libxl_file_reference",
              "libxl_string_list"]
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -294,6 +294,12 @@ static inline void libxl_cpumap_dispose(
     return libxl_map_dispose(cpumap);
 }
 
+typedef struct libxl_map libxl_nodemap;
+static inline void libxl_nodemap_dispose(libxl_nodemap *nodemap)
+{
+    return libxl_map_dispose(nodemap);
+}
+
 typedef struct {
     /*
      * Path is always set if the file reference is valid. However if
@@ -549,6 +555,9 @@ int libxl_domain_preserve(libxl_ctx *ctx
 /* get max. number of cpus supported by hypervisor */
 int libxl_get_max_cpus(libxl_ctx *ctx);
 
+/* get max. number of NUMA nodes supported by hypervisor */
+int libxl_get_max_nodes(libxl_ctx *ctx);
+
 /*
  * Run the configured bootloader for a PV domain and update
  * info->kernel, info->u.pv.ramdisk and info->u.pv.cmdline as
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -11,6 +11,7 @@ libxl_domid = Builtin("domid", json_fn =
 libxl_uuid = Builtin("uuid", passby=PASS_BY_REFERENCE)
 libxl_mac = Builtin("mac", passby=PASS_BY_REFERENCE)
 libxl_cpumap = Builtin("cpumap", dispose_fn="libxl_cpumap_dispose", passby=PASS_BY_REFERENCE)
+libxl_nodemap = Builtin("nodemap", dispose_fn="libxl_nodemap_dispose", passby=PASS_BY_REFERENCE)
 libxl_cpuid_policy_list = Builtin("cpuid_policy_list", dispose_fn="libxl_cpuid_dispose", passby=PASS_BY_REFERENCE)
 libxl_string_list = Builtin("string_list", dispose_fn="libxl_string_list_dispose", passby=PASS_BY_REFERENCE)
diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c
+++ b/tools/libxl/libxl_utils.c
@@ -537,11 +537,27 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l
     return libxl_map_alloc(ctx, cpumap, max_cpus);
 }
 
+int libxl_nodemap_alloc(libxl_ctx *ctx, libxl_nodemap *nodemap)
+{
+    int max_nodes;
+
+    max_nodes = libxl_get_max_nodes(ctx);
+    if (max_nodes == 0)
+        return ERROR_FAIL;
+
+    return libxl_map_alloc(ctx, nodemap, max_nodes);
+}
+
 int libxl_get_max_cpus(libxl_ctx *ctx)
 {
     return xc_get_max_cpus(ctx->xch);
 }
 
+int libxl_get_max_nodes(libxl_ctx *ctx)
+{
+    return xc_get_max_nodes(ctx->xch);
+}
+
 int libxl__enum_from_string(const libxl_enum_string_table *t,
                             const char *s, int *e)
 {
diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h
+++ b/tools/libxl/libxl_utils.h
@@ -108,6 +108,35 @@ static inline int libxl_cpumap_cpu_valid
#define libxl_for_each_set_cpu(v, m) for (v = 0; v < (m).size * 8; v++) \
if (libxl_cpumap_test(&(m), v))
+int libxl_nodemap_alloc(libxl_ctx *ctx, libxl_nodemap *nodemap);
+static inline int libxl_nodemap_test(libxl_nodemap *nodemap, int node)
+{
+ return libxl_map_test(nodemap, node);
+}
+static inline void libxl_nodemap_set(libxl_nodemap *nodemap, int node)
+{
+ libxl_map_set(nodemap, node);
+}
+static inline void libxl_nodemap_reset(libxl_nodemap *nodemap, int node)
+{
+ libxl_map_reset(nodemap, node);
+}
+static inline void libxl_nodemap_set_any(libxl_nodemap *nodemap)
+{
+ libxl_map_set_any(nodemap);
+}
+static inline void libxl_nodemap_set_none(libxl_nodemap *nodemap)
+{
+ libxl_map_set_none(nodemap);
+}
+static inline int libxl_nodemap_node_valid(libxl_nodemap *nodemap, int node)
+{
+ return libxl_map_elem_valid(nodemap, node);
+}
+#define libxl_for_each_node(var, map) for (var = 0; var < (map).size * 8;
var++)
+#define libxl_for_each_set_node(v, m) for (v = 0; v < (m).size * 8; v++) \
+ if (libxl_nodemap_test(&(m), v))
+
static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
return (s + 1023) / 1024;
}
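
For illustration only, not part of the patch: a minimal sketch of how a libxl
client could use the new libxl_nodemap helpers above, assuming the existing
libxl_ctx_alloc() / libxl_ctx_free() interface as of this series.

/* Illustrative sketch (not part of the patch): allocate a libxl_nodemap,
 * set a couple of nodes and walk the set bits with the new iterator. */
#include <stdio.h>
#include <libxl.h>
#include <libxl_utils.h>

int main(void)
{
    libxl_ctx *ctx = NULL;
    libxl_nodemap nodemap;
    int node;

    if (libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0, NULL))
        return 1;

    if (libxl_nodemap_alloc(ctx, &nodemap)) {
        libxl_ctx_free(ctx);
        return 1;
    }

    libxl_nodemap_set(&nodemap, 0);
    if (libxl_nodemap_node_valid(&nodemap, 1))
        libxl_nodemap_set(&nodemap, 1);

    libxl_for_each_set_node(node, nodemap)
        printf("node %d is set\n", node);

    libxl_nodemap_dispose(&nodemap);
    libxl_ctx_free(ctx);
    return 0;
}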
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -116,6 +116,21 @@ int xenctl_map_to_cpumask(cpumask_var_t
     return err;
 }
 
+int nodemask_to_xenctl_map(struct xenctl_map *xenctl_nodemap,
+                           const nodemask_t *nodemask)
+{
+    return bitmap_to_xenctl_map(xenctl_nodemap, nodes_addr(*nodemask),
+                                MAX_NUMNODES);
+}
+
+int xenctl_map_to_nodemask(nodemask_t *nodemask,
+                           const struct xenctl_map *xenctl_nodemap)
+{
+    return xenctl_map_to_bitmap(nodes_addr(*nodemask),
+                                xenctl_nodemap,
+                                MAX_NUMNODES);
+}
+
 static inline int is_free_domid(domid_t dom)
 {
     struct domain *d;
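
For illustration only, not part of the patch: a sketch of how a later domctl
handler might consume a toolstack-provided node map via the conversion helper
added above. The handler name and its wiring are hypothetical; only
xenctl_map_to_nodemask(), nodemask_t and struct xenctl_map come from this
series and the existing hypervisor headers.

/* Illustrative sketch (not part of the patch): a hypothetical handler
 * turning a toolstack-provided xenctl_map into a nodemask_t. */
#include <xen/nodemask.h>
#include <xen/sched.h>
#include <public/domctl.h>

static int sketch_set_node_affinity(struct domain *d,
                                    const struct xenctl_map *xenctl_nodemap)
{
    nodemask_t new_affinity;
    int ret;

    ret = xenctl_map_to_nodemask(&new_affinity, xenctl_nodemap);
    if ( ret )
        return ret;

    /* Later patches in the series would act on new_affinity here,
     * e.g. recording it as the domain's NUMA node affinity. */
    (void)d;
    return 0;
}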