[Xen-devel] [PATCH 3 of 3 v2] xl: allow for node-wise specification of vcpu pinning
Making it possible to use something like the following:
* "nodes:0-3": all pCPUs of nodes 0,1,2,3;
* "nodes:0-3,^node:2": all pCPUS of nodes 0,1,3;
* "1,nodes:1-2,^6": pCPU 1 plus all pCPUs of nodes 1,2 but not pCPU 6;
* ...
This works both in the domain config file and in `xl vcpu-pin'.
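For instance (illustration only; "mydomain" is a made-up domain name), the
same syntax is accepted in both places:

    # in the domain config file
    cpus = "nodes:0-3,^node:2"

    # at run time, pinning vcpu 0 of domain "mydomain"
    xl vcpu-pin mydomain 0 "1,nodes:1-2,^6"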
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
Changes since v1:
* strstr() replaced with strncmp().
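As a quick illustration (a minimal sketch, not part of the patch; the helper
name is made up): strncmp() only matches the keyword at the very start of a
token, whereas strstr() would also accept it in the middle of something
malformed such as "0-node:3".

    #include <string.h>

    /* Same prefix test the patch uses for "node:"/"nodes:" tokens. */
    static int is_node_token(const char *toka)
    {
        return !strncmp(toka, "node:", 5) || !strncmp(toka, "nodes:", 6);
    }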
diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
--- a/docs/man/xl.cfg.pod.5
+++ b/docs/man/xl.cfg.pod.5
@@ -109,7 +109,7 @@ some cpus on its own (see below). A C<CP
=over 4
-=item "all"
+=item "all" (or "nodes:all")
To allow all the vcpus of the guest to run on all the cpus on the host.
@@ -117,6 +117,14 @@ To allow all the vcpus of the guest to r
To allow all the vcpus of the guest to run on cpus 0,2,3,5.
+=item "nodes:0-3,^node:2"
+
+To allow all the vcpus of the guest to run on the cpus belonging to
+the NUMA nodes 0,1,3 of the host. Notice that it is possible to combine
+this syntax with the one above. For instance, "1,node:2,^6" means that
+all the vcpus of the guest will run on cpu 1 and on all the cpus of
+node 2, but never on cpu 6.
+
=item ["2", "3"] (or [2, 3])
To ask for specific vcpu mapping. That means (in this example), vcpu #0
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -506,31 +506,58 @@ static void split_string_into_string_lis
static int vcpupin_parse(char *cpu, libxl_bitmap *cpumap)
{
- libxl_bitmap exclude_cpumap;
- uint32_t cpuida, cpuidb;
+ libxl_bitmap nodemap, cpu_nodemap;
+ libxl_bitmap exclude_cpumap, exclude_nodemap;
+ uint32_t ida, idb;
char *endptr, *toka, *tokb, *saveptr = NULL;
- int i, rc = 0, rmcpu;
-
- if (!strcmp(cpu, "all")) {
+ int i, rc = 0, isnot, isnode;
+
+ if (!strcmp(cpu, "all") || !strcmp(cpu, "nodes:all")) {
libxl_bitmap_set_any(cpumap);
return 0;
}
- if (libxl_cpu_bitmap_alloc(ctx, &exclude_cpumap, 0)) {
+ libxl_bitmap_init(&cpu_nodemap);
+ libxl_bitmap_init(&nodemap);
+ libxl_bitmap_init(&exclude_nodemap);
+    libxl_bitmap_init(&exclude_cpumap);
+
+ rc = libxl_node_bitmap_alloc(ctx, &cpu_nodemap, 0);
+ if (rc) {
+ fprintf(stderr, "Error: Failed to allocate nodemap.\n");
+ goto vcpp_out;
+ }
+ rc = libxl_node_bitmap_alloc(ctx, &nodemap, 0);
+ if (rc) {
+ fprintf(stderr, "Error: Failed to allocate nodemap.\n");
+ goto vcpp_out;
+ }
+ rc = libxl_node_bitmap_alloc(ctx, &exclude_nodemap, 0);
+ if (rc) {
+ fprintf(stderr, "Error: Failed to allocate nodemap.\n");
+ goto vcpp_out;
+ }
+ rc = libxl_cpu_bitmap_alloc(ctx, &exclude_cpumap, 0);
+ if (rc) {
fprintf(stderr, "Error: Failed to allocate cpumap.\n");
- return ENOMEM;
+ goto vcpp_out;
}
for (toka = strtok_r(cpu, ",", &saveptr); toka;
toka = strtok_r(NULL, ",", &saveptr)) {
- rmcpu = 0;
+ isnot = 0; isnode = 0;
if (*toka == '^') {
- /* This (These) Cpu(s) will be removed from the map */
+ /* This (These) Cpu(s)/Node(s) will be removed from the map */
toka++;
- rmcpu = 1;
- }
- /* Extract a valid (range of) cpu(s) */
- cpuida = cpuidb = strtoul(toka, &endptr, 10);
+ isnot = 1;
+ }
+ /* Check if we're dealing with a full node */
+ if (!strncmp(toka, "node:", 5) || !strncmp(toka, "nodes:", 6)) {
+ toka += 5 + (toka[4] == 's');
+ isnode = 1;
+ }
+ /* Extract a valid (range of) cpu(s) or node(s) */
+ ida = idb = strtoul(toka, &endptr, 10);
if (endptr == toka) {
fprintf(stderr, "Error: Invalid argument.\n");
rc = EINVAL;
@@ -538,27 +565,48 @@ static int vcpupin_parse(char *cpu, libx
}
if (*endptr == '-') {
tokb = endptr + 1;
- cpuidb = strtoul(tokb, &endptr, 10);
- if (endptr == tokb || cpuida > cpuidb) {
+ idb = strtoul(tokb, &endptr, 10);
+ if (endptr == tokb || ida > idb) {
fprintf(stderr, "Error: Invalid argument.\n");
rc = EINVAL;
goto vcpp_out;
}
}
- while (cpuida <= cpuidb) {
- rmcpu == 0 ? libxl_bitmap_set(cpumap, cpuida) :
- libxl_bitmap_set(&exclude_cpumap, cpuida);
- cpuida++;
- }
- }
-
- /* Clear all the cpus from the removal list */
+ while (ida <= idb) {
+ if (!isnode)
+ isnot == 0 ? libxl_bitmap_set(cpumap, ida) :
+ libxl_bitmap_set(&exclude_cpumap, ida);
+ else
+ isnot == 0 ? libxl_bitmap_set(&nodemap, ida) :
+ libxl_bitmap_set(&exclude_nodemap, ida);
+ ida++;
+ }
+ }
+
+ /* Add the cpus that have been specified via "node:" items */
+ rc = libxl_nodemap_to_cpumap(ctx, &nodemap, &cpu_nodemap);
+ if (rc)
+ goto vcpp_out;
+ libxl_for_each_set_bit(i, cpu_nodemap) {
+ libxl_bitmap_set(cpumap, i);
+ }
+
+ /* Clear all the cpus from the removal cpu and node lists */
libxl_for_each_set_bit(i, exclude_cpumap) {
libxl_bitmap_reset(cpumap, i);
}
+ rc = libxl_nodemap_to_cpumap(ctx, &exclude_nodemap, &cpu_nodemap);
+ if (rc)
+ goto vcpp_out;
+ libxl_for_each_set_bit(i, cpu_nodemap) {
+ libxl_bitmap_reset(cpumap, i);
+ }
vcpp_out:
libxl_bitmap_dispose(&exclude_cpumap);
+ libxl_bitmap_dispose(&exclude_nodemap);
+ libxl_bitmap_dispose(&nodemap);
+ libxl_bitmap_dispose(&cpu_nodemap);
return rc;
}
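For readers unfamiliar with the libxl bitmap helpers, the sketch below
(illustration only: a toy 4-node, 16-pCPU topology and plain 64-bit masks
in place of libxl_bitmap and libxl_nodemap_to_cpumap) shows the order of
operations the parser uses: expand the node maps into cpus first, then
clear whatever is on the exclusion lists, so exclusions always win.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy topology: node n owns pCPUs 4n..4n+3 (the real code queries the
     * host topology via libxl_nodemap_to_cpumap() instead). */
    static uint64_t node_to_cpus(uint64_t nodemap)
    {
        uint64_t cpumap = 0;
        int n;

        for (n = 0; n < 4; n++)
            if (nodemap & (1ULL << n))
                cpumap |= 0xfULL << (4 * n);
        return cpumap;
    }

    int main(void)
    {
        /* "1,nodes:1-2,^6": pCPU 1, plus the pCPUs of nodes 1-2, minus pCPU 6 */
        uint64_t cpumap = 1ULL << 1;                  /* explicit pCPUs   */
        uint64_t nodemap = (1ULL << 1) | (1ULL << 2); /* "nodes:1-2"      */
        uint64_t exclude_cpumap = 1ULL << 6;          /* "^6"             */
        uint64_t exclude_nodemap = 0;                 /* no "^node:" here */

        /* Same order as vcpupin_parse(): expand nodes into cpus, then
         * apply the exclusions. */
        cpumap |= node_to_cpus(nodemap);
        cpumap &= ~exclude_cpumap;
        cpumap &= ~node_to_cpus(exclude_nodemap);

        /* prints 0xfb2, i.e. pCPUs 1,4,5,7,8,9,10,11 */
        printf("resulting cpumap: 0x%03llx\n", (unsigned long long)cpumap);
        return 0;
    }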
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel