
[Xen-devel] [PATCH v9 6/9] libxl/xl: deprecate the build_info->cpumap field



Thanks to the previous change ("libxl/xl: push VCPU affinity
pinning down to libxl"), we now have an array of libxl_bitmap-s
that can be used to pass to libxl the (hard) affinity of each
vcpu of the domain. The cpumap field is therefore no longer
necessary: if we want all the vcpus to have the same affinity,
we just put that affinity in every element of the array.

This makes the libxl code simpler and easier to understand and
maintain (there is only one place from which to read the
affinity), and it does not complicate things much on the xl
side, which is why we go for it.

Another benefit is that, by unifying the parsing (at the xl
level) with the place where the information is consumed and the
affinities are actually set (at the libxl level), it becomes
possible to do things like:

  cpus = ["3-4", "2-6"]

meaning we want vcpu 0 to be pinned to pcpus 3 and 4, and vcpu 1
to be pinned to pcpus 2 through 6. Before this change, the list
variant (["xx", "yy"]) supported only single values. (The old
[2, 3] syntax, i.e., without quotes, continues to work, although
it does not allow specifying ranges.)

In summary: although the cpumap field in build_info is still
there, and is still honoured for backward compatibility reasons,
it should not be used any longer. The vcpu_hard_affinity array
is what should be used instead, as sketched below.
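
For example, a libxl application that used to give all the vcpus
the same affinity via cpumap could do something like this (again
just a sketch: error handling is omitted, ctx is assumed to be
an initialized libxl_ctx, and the pcpu numbers are made up):

  int i;

  b_info->num_vcpu_hard_affinity = b_info->max_vcpus;
  b_info->vcpu_hard_affinity =
      malloc(b_info->num_vcpu_hard_affinity * sizeof(libxl_bitmap));

  for (i = 0; i < b_info->num_vcpu_hard_affinity; i++) {
      libxl_cpu_bitmap_alloc(ctx, &b_info->vcpu_hard_affinity[i], 0);
      libxl_bitmap_set_none(&b_info->vcpu_hard_affinity[i]);
      /* pin every vcpu to, e.g., pcpus 0 and 1 */
      libxl_bitmap_set(&b_info->vcpu_hard_affinity[i], 0);
      libxl_bitmap_set(&b_info->vcpu_hard_affinity[i], 1);
  }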

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Changes from v8:
 * don't get rid of b_info->cpumap handling, so old apps
   continue to work, as requested during review;
 * changelog and code comments updated accordingly.
---
 docs/man/xl.cfg.pod.5       |    8 +++---
 tools/libxl/libxl_dom.c     |   10 ++++++-
 tools/libxl/libxl_types.idl |    7 ++++-
 tools/libxl/xl_cmdimpl.c    |   61 +++++++++++++++++--------------------------
 4 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
index c087cbc..af48622 100644
--- a/docs/man/xl.cfg.pod.5
+++ b/docs/man/xl.cfg.pod.5
@@ -143,11 +143,11 @@ Combining this with "all" is also possible, meaning "all,^nodes:1"
 results in all the vcpus of the guest running on all the cpus on the
 host, except for the cpus belonging to the host NUMA node 1.
 
-=item ["2", "3"] (or [2, 3])
+=item ["2", "3-8,^5"]
 
-To ask for specific vcpu mapping. That means (in this example), vcpu #0
-of the guest will run on cpu #2 of the host and vcpu #1 of the guest will
-run on cpu #3 of the host.
+To ask for specific vcpu mapping. That means (in this example), vcpu 0
+of the guest will run on cpu 2 of the host and vcpu 1 of the guest will
+run on cpus 3,4,6,7,8 of the host.
 
 =back
 
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 484ad84..b22b41e 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -250,7 +250,8 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
      * whatever that turns out to be.
      */
     if (libxl_defbool_val(info->numa_placement)) {
-        if (!libxl_bitmap_is_full(&info->cpumap)) {
+        if (!libxl_bitmap_is_full(&info->cpumap) ||
+            info->num_vcpu_hard_affinity) {
             LOG(ERROR, "Can run NUMA placement only if no vcpu "
                        "affinity is specified");
             return ERROR_INVAL;
@@ -261,6 +262,13 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
             return rc;
     }
     libxl_domain_set_nodeaffinity(ctx, domid, &info->nodemap);
+    /*
+     * info->cpumap is DEPRECATED, but we still want old applications
+     * that may be using it to continue working.
+     */
+    if (!libxl_bitmap_is_full(&info->cpumap))
+        LOG(WARN, "cpumap field of libxl_domain_build_info is DEPRECATED. "
+                  "Please, use the vcpu_hard_affinity array instead");
     libxl_set_vcpuaffinity_all(ctx, domid, info->max_vcpus,
                                &info->cpumap, NULL);
 
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 05978d7..0b3e4e9 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -297,7 +297,12 @@ libxl_domain_sched_params = Struct("domain_sched_params",[
 libxl_domain_build_info = Struct("domain_build_info",[
     ("max_vcpus",       integer),
     ("avail_vcpus",     libxl_bitmap),
-    ("cpumap",          libxl_bitmap),
+    ("cpumap",          libxl_bitmap), # DEPRECATED!
+    # The cpumap field above has been deprecated by the introduction of the
+    # vcpu_hard_affinity array. It is not removed and it is still honoured, for
+    # API stability and backward compatibility reasons, but should not be used
+    # any longer. The vcpu_hard_affinity array is what should be used instead,
+    # to set the hard affinity of the various vCPUs.
     ("nodemap",         libxl_bitmap),
     ("vcpu_hard_affinity", Array(libxl_bitmap, "num_vcpu_hard_affinity")),
     ("numa_placement",  libxl_defbool),
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index ac603c8..d9e235e 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -656,14 +656,16 @@ static int update_cpumap_range(const char *str, libxl_bitmap *cpumap)
 static int vcpupin_parse(const char *cpu, libxl_bitmap *cpumap)
 {
     char *ptr, *saveptr = NULL;
+    char *buf = strdup(cpu);
     int rc = 0;
 
-    for (ptr = strtok_r(cpu, ",", &saveptr); ptr;
+    for (ptr = strtok_r(buf, ",", &saveptr); ptr;
          ptr = strtok_r(NULL, ",", &saveptr)) {
         rc = update_cpumap_range(ptr, cpumap);
         if (rc)
             break;
     }
+    free(buf);
 
     return rc;
 }
@@ -821,14 +823,11 @@ static void parse_config_data(const char *config_source,
     if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))
         b_info->max_vcpus = l;
 
-    if (!xlu_cfg_get_list (config, "cpus", &cpus, 0, 1)) {
+    buf = NULL;
+    if (!xlu_cfg_get_list (config, "cpus", &cpus, 0, 1) ||
+        !xlu_cfg_get_string (config, "cpus", &buf, 0)) {
         b_info->num_vcpu_hard_affinity = b_info->max_vcpus;
-        int n_cpus = 0;
-
-        if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) {
-            fprintf(stderr, "Unable to allocate cpumap\n");
-            exit(1);
-        }
+        const char *buf2;
 
         b_info->vcpu_hard_affinity =
             xmalloc(b_info->num_vcpu_hard_affinity * sizeof(libxl_bitmap));
@@ -840,42 +839,30 @@ static void parse_config_data(const char *config_source,
                 fprintf(stderr, "Unable to allocate cpumap for vcpu %d\n", i);
                 exit(1);
             }
-            libxl_bitmap_set_any(&b_info->vcpu_hard_affinity[i]);
+            libxl_bitmap_set_none(&b_info->vcpu_hard_affinity[i]);
         }
 
-        libxl_bitmap_set_none(&b_info->cpumap);
-        while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
-            i = atoi(buf);
-            if (!libxl_bitmap_cpu_valid(&b_info->cpumap, i)) {
-                fprintf(stderr, "cpu %d illegal\n", i);
-                exit(1);
-            }
-            libxl_bitmap_set(&b_info->cpumap, i);
-            if (n_cpus < b_info->max_vcpus) {
-                libxl_bitmap_set_none(&b_info->vcpu_hard_affinity[n_cpus]);
-                libxl_bitmap_set(&b_info->vcpu_hard_affinity[n_cpus], i);
-            }
-            n_cpus++;
+        /*
+         * When buf is !NULL, we've been passed a string, and what we do
+         * is parse it and put the result in all the entries of the vcpu
+         * affinity array. If it's NULL, what we have is a list, and what
+         * we put in each entry of the vcpu affinity array is the result of
+         * the parsing of each element of the list (if there are more
+         * vcpus than elements, the missing ones have their affinity masks
+         * completely full).
+         */
+        for (i = 0; i < b_info->num_vcpu_hard_affinity; i++) {
+            if (buf || ((buf2 = xlu_cfg_get_listitem(cpus, i)) != NULL)) {
+                if (vcpupin_parse(buf ? buf : buf2,
+                                  &b_info->vcpu_hard_affinity[i]))
+                    exit(1);
+            } else
+                libxl_bitmap_set_any(&b_info->vcpu_hard_affinity[i]);
         }
 
         /* We have a list of cpumaps, disable automatic placement */
         libxl_defbool_set(&b_info->numa_placement, false);
     }
-    else if (!xlu_cfg_get_string (config, "cpus", &buf, 0)) {
-        char *buf2 = strdup(buf);
-
-        if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) {
-            fprintf(stderr, "Unable to allocate cpumap\n");
-            exit(1);
-        }
-
-        libxl_bitmap_set_none(&b_info->cpumap);
-        if (vcpupin_parse(buf2, &b_info->cpumap))
-            exit(1);
-        free(buf2);
-
-        libxl_defbool_set(&b_info->numa_placement, false);
-    }
 
     if (!xlu_cfg_get_long (config, "memory", &l, 0)) {
         b_info->max_memkb = l * 1024;

