
[Xen-devel] [PATCH v5 06/17] xl: allow for node-wise specification of vcpu pinning



Making it possible to use something like the following:
 * "nodes:0-3": all pCPUs of nodes 0,1,2,3;
 * "nodes:0-3,^node:2": all pCPUS of nodes 0,1,3;
 * "1,nodes:1-2,^6": pCPU 1 plus all pCPUs of nodes 1,2
   but not pCPU 6;
 * ...

In both domain config file and `xl vcpu-pin'.
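
For example (with "guest1" being just a placeholder for an actual
guest name), the domain config file could contain:

  cpus = "1,nodes:1-2,^6"

and the same kind of string can be passed to `xl vcpu-pin', e.g.:

  xl vcpu-pin guest1 0 "nodes:0-3,^node:2"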

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
Changes from v1 (of this very series):
 * actually checking for both "nodes:" and "node:", as
   the docs say;
 * using strcmp() (rather than strncmp()) when matching
   "all", to avoid returning success on any longer string
   that just begins with "all";
 * fixing the handling (well, the rejection, actually) of
   "^all" and "^nodes:all";
 * making some string pointers const.

Changes from v2 (of original series):
 * turned a 'return' into 'goto out', consistent with
   most of the exit patterns;
 * harmonized error handling: now parse_range() returns a
   libxl error code, as requested during review;
 * dealing with "all" moved inside update_cpumap_range().
   It's tricky to move it in parse_range() (as requested
   during review), since we need the cpumap being modified
   handy when dealing with it. However, having it in
   update_cpumap_range() simplifies the code just as much
   as that;
 * explicitly checking for junk after a valid value or range
   in parse_range(), as requested during review;
 * xl exits on a parsing failure, so there is no need to
   reset the cpumap to something sensible in
   vcpupin_parse(), as suggested during review.

Changes from v1 (of original series):
 * code rearranged so that it is simpler to follow and
   understand, as requested during review;
 * improved docs in xl.cfg.pod.5, as requested during
   review;
 * strtoul() now stores its result in an unsigned long,
   and the case where it returns ULONG_MAX is now taken
   into account, as requested during review;
 * stuff like "all,^7" now works, as requested during
   review. Specifying just "^7" does not work either
   before or after this change
 * killed some magic (i.e., `ptr += 5 + (ptr[4] == 's')`)
   by introducing the STR_SKIP_PREFIX() macro, as
   requested during review (see the sketch right below).
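
For reference, here is a minimal, self-contained sketch (not part of
the patch, using a made-up input string) of how the STR_SKIP_PREFIX()
macro introduced below consumes the "^" and "node(s):" prefixes:

  #include <stdio.h>
  #include <string.h>

  #define STR_HAS_PREFIX( a, b )  \
      ( strncmp(a, b, strlen(b)) == 0 )
  #define STR_SKIP_PREFIX( a, b ) \
      ( STR_HAS_PREFIX(a, b) ? ((a) += strlen(b), 1) : 0 )

  int main(void)
  {
      const char *str = "^nodes:1-2";
      int is_not = 0, is_nodes = 0;

      /* A leading "^" means "remove these cpus/nodes from the map" */
      if (STR_SKIP_PREFIX(str, "^"))
          is_not = 1;

      /* Both "node:" and "nodes:" are accepted */
      if (STR_SKIP_PREFIX(str, "node:") || STR_SKIP_PREFIX(str, "nodes:"))
          is_nodes = 1;

      /* Prints: is_not=1 is_nodes=1 rest="1-2" */
      printf("is_not=%d is_nodes=%d rest=\"%s\"\n", is_not, is_nodes, str);
      return 0;
  }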
---
 docs/man/xl.cfg.pod.5    |   20 ++++++
 tools/libxl/xl_cmdimpl.c |  153 +++++++++++++++++++++++++++++++++-------------
 2 files changed, 128 insertions(+), 45 deletions(-)

diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
index 7f107cb..70d6d9f 100644
--- a/docs/man/xl.cfg.pod.5
+++ b/docs/man/xl.cfg.pod.5
@@ -115,7 +115,25 @@ To allow all the vcpus of the guest to run on all the cpus 
on the host.
 
 =item "0-3,5,^1"
 
-To allow all the vcpus of the guest to run on cpus 0,2,3,5.
+To allow all the vcpus of the guest to run on cpus 0,2,3,5. Combining
+this with "all" is possible, meaning "all,^7" results in all the vcpus
+of the guest running on all the cpus on the host except cpu 7.
+
+=item "nodes:0-3,node:^2"
+
+To allow all the vcpus of the guest to run on the cpus from NUMA nodes
+0,1,3 of the host. So, if cpus 0-3 belong to node 0, cpus 4-7 belong
+to node 2 and cpus 8-11 to node 3, the above would mean all the vcpus
+of the guest will run on cpus 0-3,8-11.
+
+Combining this notation with the one above is possible. For instance,
+"1,node:2,^6", means all the vcpus of the guest will run on cpu 1 and
+on all the cpus of NUMA node 2, but not on cpu 6. Following the same
+example as above, that would be cpus 1,4,5,7.
+
+Combining this with "all" is also possible, meaning "all,^nodes:1"
+results in all the vcpus of the guest running on all the cpus on the
+host, except for the cpus belonging to the host NUMA node 1.
 
 =item ["2", "3"] (or [2, 3])
 
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 7fc049f..10ced5a 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -59,6 +59,11 @@
         }                                                               \
     })
 
+#define STR_HAS_PREFIX( a, b )  \
+    ( strncmp(a, b, strlen(b)) == 0 )
+#define STR_SKIP_PREFIX( a, b ) \
+    ( STR_HAS_PREFIX(a, b) ? ((a) += strlen(b), 1) : 0 )
+
 
 int logfile = 2;
 
@@ -562,61 +567,121 @@ static void split_string_into_string_list(const char *str,
     free(s);
 }
 
-static int vcpupin_parse(char *cpu, libxl_bitmap *cpumap)
+static int parse_range(const char *str, unsigned long *a, unsigned long *b)
 {
-    libxl_bitmap exclude_cpumap;
-    uint32_t cpuida, cpuidb;
-    char *endptr, *toka, *tokb, *saveptr = NULL;
-    int i, rc = 0, rmcpu;
+    const char *nstr;
+    char *endptr;
 
-    if (!strcmp(cpu, "all")) {
-        libxl_bitmap_set_any(cpumap);
-        return 0;
+    *a = *b = strtoul(str, &endptr, 10);
+    if (endptr == str || *a == ULONG_MAX)
+        return ERROR_INVAL;
+
+    if (*endptr == '-') {
+        nstr = endptr + 1;
+
+        *b = strtoul(nstr, &endptr, 10);
+        if (endptr == nstr || *b == ULONG_MAX || *b < *a)
+            return ERROR_INVAL;
+    }
+
+    /* Valid value or range so far, but we also don't want junk after that */
+    if (*endptr != '\0')
+        return ERROR_INVAL;
+
+    return 0;
+}
+
+/*
+ * Adds or removes a specific set of cpus (specified in str, either as
+ * single cpus or as entire NUMA nodes) to/from cpumap.
+ */
+static int update_cpumap_range(const char *str, libxl_bitmap *cpumap)
+{
+    unsigned long ida, idb;
+    libxl_bitmap node_cpumap;
+    bool is_not = false, is_nodes = false;
+    int rc = 0;
+
+    libxl_bitmap_init(&node_cpumap);
+
+    rc = libxl_node_bitmap_alloc(ctx, &node_cpumap, 0);
+    if (rc) {
+        fprintf(stderr, "libxl_node_bitmap_alloc failed.\n");
+        goto out;
     }
 
-    if (libxl_cpu_bitmap_alloc(ctx, &exclude_cpumap, 0)) {
-        fprintf(stderr, "Error: Failed to allocate cpumap.\n");
-        return ENOMEM;
+    /* Are we adding or removing cpus/nodes? */
+    if (STR_SKIP_PREFIX(str, "^")) {
+        is_not = true;
     }
 
-    for (toka = strtok_r(cpu, ",", &saveptr); toka;
-         toka = strtok_r(NULL, ",", &saveptr)) {
-        rmcpu = 0;
-        if (*toka == '^') {
-            /* This (These) Cpu(s) will be removed from the map */
-            toka++;
-            rmcpu = 1;
-        }
-        /* Extract a valid (range of) cpu(s) */
-        cpuida = cpuidb = strtoul(toka, &endptr, 10);
-        if (endptr == toka) {
-            fprintf(stderr, "Error: Invalid argument.\n");
-            rc = EINVAL;
-            goto vcpp_out;
-        }
-        if (*endptr == '-') {
-            tokb = endptr + 1;
-            cpuidb = strtoul(tokb, &endptr, 10);
-            if (endptr == tokb || cpuida > cpuidb) {
-                fprintf(stderr, "Error: Invalid argument.\n");
-                rc = EINVAL;
-                goto vcpp_out;
+    /* Are we dealing with cpus or full nodes? */
+    if (STR_SKIP_PREFIX(str, "node:") || STR_SKIP_PREFIX(str, "nodes:")) {
+        is_nodes = true;
+    }
+
+    if (strcmp(str, "all") == 0) {
+        /* We do not accept "^all" or "^nodes:all" */
+        if (is_not) {
+            fprintf(stderr, "Can't combine \"^\" and \"all\".\n");
+            rc = ERROR_INVAL;
+        } else
+            libxl_bitmap_set_any(cpumap);
+        goto out;
+    }
+
+    rc = parse_range(str, &ida, &idb);
+    if (rc) {
+        fprintf(stderr, "Invalid pcpu range: %s.\n", str);
+        goto out;
+    }
+
+    /* Add or remove the specified cpus in the range */
+    while (ida <= idb) {
+        if (is_nodes) {
+            /* Add/Remove all the cpus of a NUMA node */
+            int i;
+
+            rc = libxl_node_to_cpumap(ctx, ida, &node_cpumap);
+            if (rc) {
+                fprintf(stderr, "libxl_node_to_cpumap failed.\n");
+                goto out;
             }
+
+            /* Add/Remove all the cpus in the node cpumap */
+            libxl_for_each_set_bit(i, node_cpumap) {
+                is_not ? libxl_bitmap_reset(cpumap, i) :
+                         libxl_bitmap_set(cpumap, i);
+            }
+        } else {
+            /* Add/Remove this cpu */
+            is_not ? libxl_bitmap_reset(cpumap, ida) :
+                     libxl_bitmap_set(cpumap, ida);
         }
-        while (cpuida <= cpuidb) {
-            rmcpu == 0 ? libxl_bitmap_set(cpumap, cpuida) :
-                         libxl_bitmap_set(&exclude_cpumap, cpuida);
-            cpuida++;
-        }
+        ida++;
     }
 
-    /* Clear all the cpus from the removal list */
-    libxl_for_each_set_bit(i, exclude_cpumap) {
-        libxl_bitmap_reset(cpumap, i);
-    }
+ out:
+    libxl_bitmap_dispose(&node_cpumap);
+    return rc;
+}
 
-vcpp_out:
-    libxl_bitmap_dispose(&exclude_cpumap);
+/*
+ * Takes a string representing a set of cpus (specified either as
+ * single cpus or as entire NUMA nodes) and turns it into the
+ * corresponding libxl_bitmap (in cpumap).
+ */
+static int vcpupin_parse(char *cpu, libxl_bitmap *cpumap)
+{
+    char *ptr, *saveptr = NULL;
+    int rc = 0;
+
+    for (ptr = strtok_r(cpu, ",", &saveptr); ptr;
+         ptr = strtok_r(NULL, ",", &saveptr)) {
+        rc = update_cpumap_range(ptr, cpumap);
+        if (rc)
+            break;
+    }
 
     return rc;
 }

