[Xen-changelog] [xen-unstable] libxl: allow setting more than 31 vcpus
# HG changeset patch
# User Yang Zhang <yang.z.zhang@xxxxxxxxx>
# Date 1340902316 -3600
# Node ID 51d2daabd428dd1ab74beeb4a572f542653b1492
# Parent 0671657e0f3ad4f04dfa370ac64c5b38443e16fe
libxl: allow setting more than 31 vcpus
The current implementation uses an integer to record the currently
available vcpus, which only allows the user to specify up to 31 vcpus.
The following patch uses a cpumap instead of an integer, which makes
more sense than before. It also removes the limit on the maximum
number of vcpus.
Signed-off-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
Acked-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Committed-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
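For background: the old representation packed the set of available vcpus into
a single C int, built as "(1 << l) - 1" in xl_cmdimpl.c below. A minimal
standalone sketch (not part of the patch) of that encoding and why it tops
out at 31:

    /* sketch: the pre-patch integer encoding of "available vcpus" */
    #include <stdio.h>

    int main(void)
    {
        long l = 4;                       /* the vcpus= value from the config */
        int cur_vcpus = (1 << l) - 1;     /* bits 0..3 set -> 0xf */

        printf("cur_vcpus = 0x%x\n", cur_vcpus);

        /* 1 << 31 already lands in the sign bit of a 32-bit int, and
         * shifting by 32 or more is undefined behaviour, so this
         * encoding cannot describe more than 31 vcpus.  A byte array
         * such as libxl_cpumap can grow to any number of bits. */
        return 0;
    }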
diff -r 0671657e0f3a -r 51d2daabd428 tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c Thu Jun 28 17:47:13 2012 +0100
+++ b/tools/libxl/libxl_create.c Thu Jun 28 17:51:56 2012 +0100
@@ -22,6 +22,7 @@
 #include <xc_dom.h>
 #include <xenguest.h>
+#include <xen/hvm/hvm_info_table.h>
 
 void libxl_domain_config_init(libxl_domain_config *d_config)
 {
@@ -201,8 +202,12 @@ int libxl__domain_build_info_setdefault(
     if (!b_info->max_vcpus)
         b_info->max_vcpus = 1;
-    if (!b_info->cur_vcpus)
-        b_info->cur_vcpus = 1;
+    if (!b_info->avail_vcpus.size) {
+        if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1))
+            return ERROR_FAIL;
+        libxl_cpumap_set(&b_info->avail_vcpus, 0);
+    } else if (b_info->avail_vcpus.size > HVM_MAX_VCPUS)
+        return ERROR_FAIL;
 
     if (!b_info->cpumap.size) {
         if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0))
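The default above mirrors the old "cur_vcpus = 1": when the caller leaves
avail_vcpus empty, a map large enough for one cpu is allocated and bit 0 is
set, i.e. only vcpu 0 comes up online. A small sketch of the size arithmetic
this relies on (assuming libxl_cpumap_alloc's usual rounding of bits up to
whole bytes):

    /* sketch: a map for n cpus occupies (n + 7) / 8 bytes, so the
     * one-vcpu default is a single byte with only bit 0 (vcpu 0) set */
    static int cpumap_bytes(int n_cpus)
    {
        return (n_cpus + 7) / 8;    /* cpumap_bytes(1) == 1 */
    }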
diff -r 0671657e0f3a -r 51d2daabd428 tools/libxl/libxl_dm.c
--- a/tools/libxl/libxl_dm.c Thu Jun 28 17:47:13 2012 +0100
+++ b/tools/libxl/libxl_dm.c Thu Jun 28 17:51:56 2012 +0100
@@ -160,6 +160,8 @@ static char ** libxl__build_device_model
     }
     if (b_info->type == LIBXL_DOMAIN_TYPE_HVM) {
         int ioemu_vifs = 0;
+        int nr_set_cpus = 0;
+        char *s;
 
         if (b_info->u.hvm.serial) {
             flexarray_vappend(dm_args, "-serial", b_info->u.hvm.serial, NULL);
@@ -200,11 +202,13 @@ static char ** libxl__build_device_model
                               libxl__sprintf(gc, "%d", b_info->max_vcpus),
                               NULL);
         }
-        if (b_info->cur_vcpus) {
-            flexarray_vappend(dm_args, "-vcpu_avail",
-                              libxl__sprintf(gc, "0x%x", b_info->cur_vcpus),
-                              NULL);
-        }
+
+        nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
+        s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus);
+        flexarray_vappend(dm_args, "-vcpu_avail",
+                          libxl__sprintf(gc, "%s", s), NULL);
+        free(s);
+
         for (i = 0; i < num_vifs; i++) {
             if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) {
                 char *smac = libxl__sprintf(gc,
@@ -443,11 +447,14 @@ static char ** libxl__build_device_model
         }
         if (b_info->max_vcpus > 1) {
             flexarray_append(dm_args, "-smp");
-            if (b_info->cur_vcpus)
+            if (b_info->avail_vcpus.size) {
+                int nr_set_cpus = 0;
+                nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
+
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d,maxcpus=%d",
                                                          b_info->max_vcpus,
-                                                         b_info->cur_vcpus));
-            else
+                                                         nr_set_cpus));
+            } else
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d",
                                                          b_info->max_vcpus));
         }
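Taken together, for a guest configured with four available vcpus (a one-byte
map with bits 0-3 set) the device model would now be invoked with roughly
"-vcpu_avail 0x0f" on qemu-xen-traditional and "-smp 4,maxcpus=4" on upstream
qemu; the width of the hex literal simply tracks the byte size of the map, so
large maps need no special casing.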
diff -r 0671657e0f3a -r 51d2daabd428 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c Thu Jun 28 17:47:13 2012 +0100
+++ b/tools/libxl/libxl_dom.c Thu Jun 28 17:51:56 2012 +0100
@@ -199,8 +199,8 @@ int libxl__build_post(libxl__gc *gc, uin
     ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn);
     for (i = 0; i < info->max_vcpus; i++) {
         ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i);
-        ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i)))
-                           ? "offline" : "online";
+        ents[12+(i*2)+1] = libxl_cpumap_test(&info->avail_vcpus, i)
+                           ? "online" : "offline";
     }
 
     hvm_ents = NULL;
@@ -354,7 +354,7 @@ static int hvm_build_set_params(xc_inter
     va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
     va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic);
     va_hvm->nr_vcpus = info->max_vcpus;
-    memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus));
+    memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size);
     for (i = 0, sum = 0; i < va_hvm->length; i++)
         sum += ((uint8_t *) va_hvm)[i];
     va_hvm->checksum -= sum;
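Both hunks rely on the same layout: vcpu i lives in bit (i & 7) of byte i / 8
of the map, which is also what lets the second hunk memcpy the map straight
into the hvm_info_table's vcpu_online bitmap. A minimal sketch of the bit
test, mirroring the libxl_cpumap_test body shown further down in this patch:

    /* sketch: vcpu i is "online" iff bit i of the map is set */
    static int vcpu_available(const unsigned char *map, int i)
    {
        return map[i / 8] & (1 << (i & 7));
    }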
diff -r 0671657e0f3a -r 51d2daabd428 tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl Thu Jun 28 17:47:13 2012 +0100
+++ b/tools/libxl/libxl_types.idl Thu Jun 28 17:51:56 2012 +0100
@@ -237,7 +237,7 @@ libxl_domain_sched_params = Struct("doma
 
 libxl_domain_build_info = Struct("domain_build_info",[
     ("max_vcpus",       integer),
-    ("cur_vcpus",       integer),
+    ("avail_vcpus",     libxl_cpumap),
     ("cpumap",          libxl_cpumap),
     ("tsc_mode",        libxl_tsc_mode),
     ("max_memkb",       MemKB),
diff -r 0671657e0f3a -r 51d2daabd428 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Thu Jun 28 17:47:13 2012 +0100
+++ b/tools/libxl/libxl_utils.c Thu Jun 28 17:51:56 2012 +0100
@@ -511,7 +511,7 @@ void libxl_cpumap_dispose(libxl_cpumap *
     free(map->map);
 }
 
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu)
+int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu)
 {
     if (cpu >= cpumap->size * 8)
         return 0;
@@ -532,6 +532,31 @@ void libxl_cpumap_reset(libxl_cpumap *cp
     cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
 }
 
+int libxl_cpumap_count_set(const libxl_cpumap *cpumap)
+{
+    int i, nr_set_cpus = 0;
+    libxl_for_each_set_cpu(i, *cpumap)
+        nr_set_cpus++;
+
+    return nr_set_cpus;
+}
+
+/* NB. caller is responsible for freeing the memory */
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap)
+{
+    int i = cpumap->size;
+    char *p = libxl__zalloc(NULL, cpumap->size * 2 + 3);
+    char *q = p;
+    strncpy(p, "0x", 2);
+    p += 2;
+    while (--i >= 0) {
+        sprintf(p, "%02x", cpumap->map[i]);
+        p += 2;
+    }
+    *p = '\0';
+    return q;
+}
+
 int libxl_get_max_cpus(libxl_ctx *ctx)
 {
     return xc_get_max_cpus(ctx->xch);
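One detail worth noting in libxl_cpumap_to_hex_string: it walks the map from
the highest-index byte downwards, so the most significant byte follows the
"0x" and the result reads as one ordinary hex number. A standalone sketch
(plain libc, sample two-byte map) of the same formatting:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        unsigned char map[2] = { 0xff, 0x01 };  /* vcpus 0-7 and 8 set */
        int i = sizeof(map);
        char *p = calloc(1, sizeof(map) * 2 + 3);
        char *q = p;

        strcpy(p, "0x");
        p += 2;
        while (--i >= 0) {              /* highest-index byte first */
            sprintf(p, "%02x", map[i]);
            p += 2;
        }
        printf("%s\n", q);              /* prints "0x01ff" */
        free(q);
        return 0;
    }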
diff -r 0671657e0f3a -r 51d2daabd428 tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h Thu Jun 28 17:47:13 2012 +0100
+++ b/tools/libxl/libxl_utils.h Thu Jun 28 17:51:56 2012 +0100
@@ -64,9 +64,11 @@ int libxl_vdev_to_device_disk(libxl_ctx
                               libxl_device_disk *disk);
 int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus);
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
+int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
+int libxl_cpumap_count_set(const libxl_cpumap *cpumap);
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap);
 
 static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap)
 {
     memset(cpumap->map, -1, cpumap->size);
diff -r 0671657e0f3a -r 51d2daabd428 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c Thu Jun 28 17:47:13 2012 +0100
+++ b/tools/libxl/xl_cmdimpl.c Thu Jun 28 17:51:56 2012 +0100
@@ -647,7 +647,14 @@ static void parse_config_data(const char
if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) {
b_info->max_vcpus = l;
- b_info->cur_vcpus = (1 << l) - 1;
+
+ if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) {
+ fprintf(stderr, "Unable to allocate cpumap\n");
+ exit(1);
+ }
+ libxl_cpumap_set_none(&b_info->avail_vcpus);
+ while (l-- > 0)
+ libxl_cpumap_set((&b_info->avail_vcpus), l);
}
if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))