[Xen-devel] [PATCH v2] xl: fix vcpus to vnode assignment in config file



Right now, if the "vcpus=" list (where the
user specifies which vcpus should be part of a vnode)
has multiple elements, things don't work.
E.g., the following examples both result in failure
to create the guest:

 [ "pnode=0","size=512","vcpus=0,2","vdistances=10,20"  ]
 [ "pnode=0","size=512","vcpus=0-1,4","vdistances=10,20"  ]

The reason is that the parsing code keeps just one
(start, end) pair per vnode, so all elements of the
list but the last one are thrown away. We need either
a multidimensional array or a bitmap to temporarily
store the vcpus of a vnode while parsing the vnuma
config entry.

Let's use the latter, which also happens to make it
easier to copy the outcome of the parsing to its
final destination in b_info, if everything goes well.
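
Below is a minimal, standalone sketch of the idea (toy code for
illustration only: parse_vcpu_spec(), MAX_VCPUS and the plain
uint64_t bitmap are made up here; the patch itself uses xl's
parse_range() and libxl_bitmap): every element of a comma
separated spec like "0-1,4" is accumulated into the bitmap, so
no range is lost:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_VCPUS 64  /* toy limit; libxl sizes its bitmap from max_vcpus */

    /* Parse the "N" and "N-M" elements of a comma separated list
     * into *bitmap, setting one bit per vcpu. */
    static int parse_vcpu_spec(const char *spec, uint64_t *bitmap)
    {
        char buf[128], *tok;
        unsigned long s, e;
        int n;

        if (strlen(spec) >= sizeof(buf))
            return -1;
        strcpy(buf, spec);

        *bitmap = 0;
        for (tok = strtok(buf, ","); tok; tok = strtok(NULL, ",")) {
            n = sscanf(tok, "%lu-%lu", &s, &e);
            if (n < 1)
                return -1;
            if (n == 1)     /* plain "N", not a range */
                e = s;
            if (s > e || e >= MAX_VCPUS)
                return -1;
            for (; s <= e; s++)   /* every vcpu of every element is kept */
                *bitmap |= 1ULL << s;
        }
        return 0;
    }

    int main(void)
    {
        uint64_t map;

        if (parse_vcpu_spec("0-1,4", &map) == 0)
            printf("vcpu bitmap: 0x%" PRIx64 "\n", map);  /* 0x13: vcpus 0, 1, 4 */
        return 0;
    }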

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Changes from v1:
 * fix coding style
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 tools/libxl/xl_cmdimpl.c |   34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 8cbf30e..b41874a 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -1076,9 +1076,7 @@ static void parse_vnuma_config(const XLU_Config *config,
     /* Temporary storage for parsed vcpus information to avoid
      * parsing config twice. This array has num_vnuma elements.
      */
-    struct vcpu_range_parsed {
-        unsigned long start, end;
-    } *vcpu_range_parsed;
+    libxl_bitmap *vcpu_parsed;
 
     libxl_physinfo_init(&physinfo);
     if (libxl_get_physinfo(ctx, &physinfo) != 0) {
@@ -1095,7 +1093,14 @@ static void parse_vnuma_config(const XLU_Config *config,
 
     b_info->num_vnuma_nodes = num_vnuma;
     b_info->vnuma_nodes = xcalloc(num_vnuma, sizeof(libxl_vnode_info));
-    vcpu_range_parsed = xcalloc(num_vnuma, sizeof(*vcpu_range_parsed));
+    vcpu_parsed = xcalloc(num_vnuma, sizeof(libxl_bitmap));
+    for (i = 0; i < num_vnuma; i++) {
+        libxl_bitmap_init(&vcpu_parsed[i]);
+        if (libxl_cpu_bitmap_alloc(ctx, &vcpu_parsed[i], b_info->max_vcpus)) {
+            fprintf(stderr, "libxl_node_bitmap_alloc failed.\n");
+            exit(1);
+        }
+    }
 
     for (i = 0; i < b_info->num_vnuma_nodes; i++) {
         libxl_vnode_info *p = &b_info->vnuma_nodes[i];
@@ -1165,12 +1170,14 @@ static void parse_vnuma_config(const XLU_Config *config,
                     split_string_into_string_list(value, ",", &cpu_spec_list);
                     len = libxl_string_list_length(&cpu_spec_list);
 
-                    for (j = 0; j < len; j++)
+                    for (j = 0; j < len; j++) {
                         parse_range(cpu_spec_list[j], &s, &e);
+                        for (; s <= e; s++) {
+                            libxl_bitmap_set(&vcpu_parsed[i], s);
+                            max_vcpus++;
+                        }
+                    }
 
-                    vcpu_range_parsed[i].start = s;
-                    vcpu_range_parsed[i].end   = e;
-                    max_vcpus += (e - s + 1);
                     libxl_string_list_dispose(&cpu_spec_list);
                 } else if (!strcmp("vdistances", option)) {
                     libxl_string_list vdist;
@@ -1209,17 +1216,12 @@ static void parse_vnuma_config(const XLU_Config *config,
 
     for (i = 0; i < b_info->num_vnuma_nodes; i++) {
         libxl_vnode_info *p = &b_info->vnuma_nodes[i];
-        int cpu;
 
-        libxl_cpu_bitmap_alloc(ctx, &p->vcpus, b_info->max_vcpus);
-        libxl_bitmap_set_none(&p->vcpus);
-        for (cpu = vcpu_range_parsed[i].start;
-             cpu <= vcpu_range_parsed[i].end;
-             cpu++)
-            libxl_bitmap_set(&p->vcpus, cpu);
+        libxl_bitmap_copy_alloc(ctx, &p->vcpus, &vcpu_parsed[i]);
+        libxl_bitmap_dispose(&vcpu_parsed[i]);
     }
 
-    free(vcpu_range_parsed);
+    free(vcpu_parsed);
 }
 
 static void parse_config_data(const char *config_source,

