[Xen-devel] [PATCH 1 of 8 [RFC]] xl: allow for node-wise specification of vcpu pinning
Making it possible to use something like the following:
 * "nodes:0-3": all pCPUs of nodes 0,1,2,3;
 * "nodes:0-3,node:^2": all pCPUs of nodes 0,1,3;
 * "1,nodes:1-2,^6": pCPU 1 plus all pCPUs of nodes 1,2 but not pCPU 6;
 * ...

Both in the domain config file and in `xl vcpu-pin'.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>

diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
--- a/docs/man/xl.cfg.pod.5
+++ b/docs/man/xl.cfg.pod.5
@@ -125,6 +125,26 @@ run on cpu #3 of the host.
 
 =back
 
+A C<CPU-LIST> may also be specified NUMA node-wise as follows:
+
+=over 4
+
+=item "nodes:all"
+
+To allow all the vcpus of the guest to run on all the cpus of all the NUMA
+nodes of the host.
+
+=item "nodes:0-3,node:^2"
+
+To allow all the vcpus of the guest to run on the cpus belonging to
+the NUMA nodes 0,1,3 of the host.
+
+=back
+
+Combining the two is allowed. For instance, "1,node:2,^6" means all the
+vcpus of the guest will run on cpu 1 and on all the cpus of NUMA node 2,
+but not on cpu 6.
+
 If this option is not specified, libxl automatically tries to place the new
 domain on the host's NUMA nodes (provided the host has more than one NUMA
 node) by pinning it to the cpus of those nodes. A heuristic approach is
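[Editorial illustration, not part of the patch: with this syntax, a domain config fragment and an `xl vcpu-pin' invocation could look like the lines below; the domain name "guest" and the IDs are made up for the example.]

    # pcpu 1, plus all pcpus of NUMA node 2, minus pcpu 6
    cpus = "1,node:2,^6"

    # pin vcpu 0 of domain "guest" to the pcpus of NUMA nodes 1 and 2
    xl vcpu-pin guest 0 "nodes:1-2"
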
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -504,61 +504,99 @@ static void split_string_into_string_lis
     free(s);
 }
 
+static int range_parse_bitmap(const char *str, libxl_bitmap *map)
+{
+    char *nstr, *endptr;
+    uint32_t ida, idb;
+
+    ida = idb = strtoul(str, &endptr, 10);
+    if (endptr == str)
+        return EINVAL;
+
+    if (*endptr == '-') {
+        nstr = endptr + 1;
+        idb = strtoul(nstr, &endptr, 10);
+        if (endptr == nstr || ida > idb)
+            return EINVAL;
+    }
+
+    libxl_bitmap_set_none(map);
+    while (ida <= idb) {
+        libxl_bitmap_set(map, ida);
+        ida++;
+    }
+
+    return 0;
+}
+
 static int vcpupin_parse(char *cpu, libxl_bitmap *cpumap)
 {
-    libxl_bitmap exclude_cpumap;
-    uint32_t cpuida, cpuidb;
-    char *endptr, *toka, *tokb, *saveptr = NULL;
-    int i, rc = 0, rmcpu;
-
-    if (!strcmp(cpu, "all")) {
+    libxl_bitmap map, cpu_nodemap, *this_map;
+    char *ptr, *saveptr = NULL;
+    bool isnot, isnode;
+    int i, rc = 0;
+
+    if (!strcmp(cpu, "all") || !strcmp(cpu, "nodes:all")) {
         libxl_bitmap_set_any(cpumap);
         return 0;
     }
 
-    if (libxl_cpu_bitmap_alloc(ctx, &exclude_cpumap, 0)) {
-        fprintf(stderr, "Error: Failed to allocate cpumap.\n");
-        return ENOMEM;
-    }
-
-    for (toka = strtok_r(cpu, ",", &saveptr); toka;
-         toka = strtok_r(NULL, ",", &saveptr)) {
-        rmcpu = 0;
-        if (*toka == '^') {
-            /* This (These) Cpu(s) will be removed from the map */
-            toka++;
-            rmcpu = 1;
-        }
-        /* Extract a valid (range of) cpu(s) */
-        cpuida = cpuidb = strtoul(toka, &endptr, 10);
-        if (endptr == toka) {
+    libxl_bitmap_init(&map);
+    libxl_bitmap_init(&cpu_nodemap);
+
+    rc = libxl_node_bitmap_alloc(ctx, &cpu_nodemap, 0);
+    if (rc) {
+        fprintf(stderr, "libxl_node_bitmap_alloc failed.\n");
+        goto out;
+    }
+    rc = libxl_cpu_bitmap_alloc(ctx, &map, 0);
+    if (rc) {
+        fprintf(stderr, "libxl_cpu_bitmap_alloc failed.\n");
+        goto out;
+    }
+
+    for (ptr = strtok_r(cpu, ",", &saveptr); ptr;
+         ptr = strtok_r(NULL, ",", &saveptr)) {
+        isnot = isnode = false;
+
+        /* Are we dealing with cpus or nodes? */
+        if (!strncmp(ptr, "node:", 5) || !strncmp(ptr, "nodes:", 6)) {
+            isnode = true;
+            ptr += 5 + (ptr[4] == 's');
+        }
+        /* Are we adding or removing cpus/nodes? */
+        if (*ptr == '^') {
+            isnot = true;
+            ptr++;
+        }
+        /* Get in map a bitmap representative of the range */
+        if (range_parse_bitmap(ptr, &map)) {
             fprintf(stderr, "Error: Invalid argument.\n");
             rc = EINVAL;
-            goto vcpp_out;
-        }
-        if (*endptr == '-') {
-            tokb = endptr + 1;
-            cpuidb = strtoul(tokb, &endptr, 10);
-            if (endptr == tokb || cpuida > cpuidb) {
-                fprintf(stderr, "Error: Invalid argument.\n");
-                rc = EINVAL;
-                goto vcpp_out;
+            goto out;
+        }
+
+        /* Add or remove the specified cpus */
+        if (isnode) {
+            rc = libxl_nodemap_to_cpumap(ctx, &map, &cpu_nodemap);
+            if (rc) {
+                fprintf(stderr, "libxl_nodemap_to_cpumap failed.\n");
+                goto out;
             }
-        }
-        while (cpuida <= cpuidb) {
-            rmcpu == 0 ? libxl_bitmap_set(cpumap, cpuida) :
-                         libxl_bitmap_set(&exclude_cpumap, cpuida);
-            cpuida++;
-        }
-    }
-
-    /* Clear all the cpus from the removal list */
-    libxl_for_each_set_bit(i, exclude_cpumap) {
-        libxl_bitmap_reset(cpumap, i);
-    }
-
-vcpp_out:
-    libxl_bitmap_dispose(&exclude_cpumap);
+            this_map = &cpu_nodemap;
+        } else {
+            this_map = &map;
+        }
+
+        libxl_for_each_set_bit(i, *this_map) {
+            isnot ? libxl_bitmap_reset(cpumap, i)
+                  : libxl_bitmap_set(cpumap, i);
+        }
+    }
+
+ out:
+    libxl_bitmap_dispose(&map);
+    libxl_bitmap_dispose(&cpu_nodemap);
     return rc;
 }
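[Editorial aside for readers following the parsing logic: below is a minimal, self-contained C sketch of the same control flow (tokenize on ',', strip an optional "node:"/"nodes:" prefix, then an optional '^' negation, then parse a dash range). It is not the patch itself: a plain uint64_t stands in for libxl_bitmap, and node_to_cpus() is a made-up 4-node, 8-cpu topology used only so the example runs stand-alone.]

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Fake topology for the example: node n owns cpus 2n and 2n+1. */
    static uint64_t node_to_cpus(uint64_t nodemask)
    {
        uint64_t cpumask = 0;
        for (int n = 0; n < 4; n++)
            if (nodemask & (1ULL << n))
                cpumask |= 3ULL << (2 * n);
        return cpumask;
    }

    /* Parse "A" or "A-B" into a mask with bits A..B set; -1 on error. */
    static int parse_range(const char *s, uint64_t *mask)
    {
        char *end;
        unsigned long a, b;

        a = b = strtoul(s, &end, 10);
        if (end == s)
            return -1;
        if (*end == '-') {
            const char *nstr = end + 1;
            b = strtoul(nstr, &end, 10);
            if (end == nstr || a > b)
                return -1;
        }
        *mask = 0;
        for (unsigned long i = a; i <= b && i < 64; i++)
            *mask |= 1ULL << i;
        return 0;
    }

    static int pin_parse(char *spec, uint64_t *cpumap)
    {
        char *ptr, *saveptr = NULL;

        *cpumap = 0;
        for (ptr = strtok_r(spec, ",", &saveptr); ptr;
             ptr = strtok_r(NULL, ",", &saveptr)) {
            bool isnode = false, isnot = false;
            uint64_t mask;

            /* The optional "node:"/"nodes:" prefix is stripped first... */
            if (!strncmp(ptr, "node:", 5) || !strncmp(ptr, "nodes:", 6)) {
                isnode = true;
                ptr += 5 + (ptr[4] == 's');
            }
            /* ...so negation is written "node:^2", not "^node:2". */
            if (*ptr == '^') {
                isnot = true;
                ptr++;
            }
            if (parse_range(ptr, &mask))
                return -1;
            if (isnode)
                mask = node_to_cpus(mask);  /* node ids -> cpu ids */
            if (isnot)
                *cpumap &= ~mask;           /* remove these cpus */
            else
                *cpumap |= mask;            /* add these cpus */
        }
        return 0;
    }

    int main(void)
    {
        char spec[] = "1,nodes:1-2,^6";     /* cpu 1, nodes 1-2, minus cpu 6 */
        uint64_t cpumap;

        if (pin_parse(spec, &cpumap) == 0)
            printf("cpumap = 0x%" PRIx64 "\n", cpumap); /* prints 0x3e */
        return 0;
    }

Note how the order of checks matters: because the prefix is consumed before '^' is tested, negated node specifications take the "node:^N" form, which is why the examples above use that spelling.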