[Xen-devel] [RFC PATCH 4/8] libxl: calculate and set vcpu topology
Before creating vCPUs, assign a unique APIC_ID to each vCPU according to
the number of vCPUs, the number of cores per socket and threads per core,
and the guest's NUMA configuration. Refer to the SDM section "PROGRAMMING
CONSIDERATIONS FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS" for more
information on how software should extract the topology from the APIC_ID.
The APIC_ID consists of three sub-fields: PACKAGE_ID, CORE_ID and SMT_ID.
PACKAGE_ID is the virtual NUMA node ID (if there is no NUMA information,
PACKAGE_ID is always 0); CORE_ID and SMT_ID are numbered from 0.
Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
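For reviewers, a minimal sketch (not part of the patch) of how an APIC_ID is
assembled from the three sub-fields described above. The helper names below
are illustrative only; the shift-width calculation mirrors the fls()-based
logic in libxl__arch_cpu_topology_init() in the libxl_x86.c hunk, which
widens each field to a power of two so that SMT_ID, CORE_ID and PACKAGE_ID
occupy contiguous bit ranges:

#include <stdint.h>

/* Number of bits needed to encode the values 0 .. count - 1. */
static unsigned int field_width(unsigned int count)
{
    unsigned int bits = 0;

    while ((1u << bits) < count)
        bits++;
    return bits;
}

/* Illustrative only: compose an APIC ID from PACKAGE_ID/CORE_ID/SMT_ID. */
static uint32_t make_apic_id(uint32_t package_id, uint32_t core_id,
                             uint32_t smt_id,
                             unsigned int threads_per_core,
                             unsigned int cores_per_socket)
{
    unsigned int core_shift   = field_width(threads_per_core);
    unsigned int socket_shift = core_shift + field_width(cores_per_socket);

    return (package_id << socket_shift) | (core_id << core_shift) | smt_id;
}

With the defaults used below (threads = 2, cores = 128, real_threads = 1) and
no vNUMA nodes, vCPU 5 would get APIC_ID (5 << 1) = 10, matching the tid[]
calculation in the libxl_x86.c hunk.
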
 tools/libxc/include/xenctrl.h |  7 +++++
 tools/libxc/xc_domain.c       | 36 ++++++++++++++++++++++
 tools/libxl/libxl_arch.h      |  4 +++
 tools/libxl/libxl_arm.c       |  6 ++++
 tools/libxl/libxl_dom.c       |  4 +++
 tools/libxl/libxl_types.idl   |  1 +
 tools/libxl/libxl_x86.c       | 70 +++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 128 insertions(+)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 09e1363..e897e5d 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1341,6 +1341,13 @@ int xc_domain_set_memory_map(xc_interface *xch,
 int xc_get_machine_memory_map(xc_interface *xch,
                               struct e820entry entries[],
                               uint32_t max_entries);
+
+int xc_set_cpu_topology(xc_interface *xch,
+                        uint32_t domid,
+                        uint32_t *tid,
+                        uint32_t size,
+                        uint8_t thread_per_core,
+                        uint8_t core_per_socket);
 #endif
 
 int xc_reserved_device_memory_map(xc_interface *xch,
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 3ccd27f..f8bb1eb 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2435,6 +2435,42 @@ int xc_domain_soft_reset(xc_interface *xch,
     domctl.domain = domid;
     return do_domctl(xch, &domctl);
 }
+
+int xc_set_cpu_topology(xc_interface *xch,
+                        uint32_t domid,
+                        uint32_t *tid,
+                        uint32_t size,
+                        uint8_t thread_per_core,
+                        uint8_t core_per_socket)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(tid, sizeof(*tid) * size,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    domctl.cmd = XEN_DOMCTL_set_cpu_topology;
+    domctl.domain = domid;
+
+    if ( xc_hypercall_bounce_pre(xch, tid) )
+    {
+        rc = -1;
+        errno = ENOMEM;
+        goto failed;
+    }
+
+    set_xen_guest_handle(domctl.u.cpu_topology.tid, tid);
+    domctl.u.cpu_topology.size = size;
+    domctl.u.cpu_topology.core_per_socket = core_per_socket;
+    domctl.u.cpu_topology.thread_per_core = thread_per_core;
+    memset(domctl.u.cpu_topology.pad, 0, sizeof(domctl.u.cpu_topology.pad));
+
+    rc = do_domctl(xch, &domctl);
+
+ failed:
+    xc_hypercall_bounce_post(xch, tid);
+
+    return rc;
+}
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index 784ec7f..61d9492 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -78,6 +78,10 @@ int libxl__arch_extra_memory(libxl__gc *gc,
                              const libxl_domain_build_info *info,
                              uint64_t *out);
 
+_hidden
+int libxl__arch_cpu_topology_init(libxl__gc *gc, uint32_t domid,
+                                  libxl_domain_config *d_config);
+
 #if defined(__i386__) || defined(__x86_64__)
 
 #define LAPIC_BASE_ADDRESS 0xfee00000
diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
index de1840b..70c328e 100644
--- a/tools/libxl/libxl_arm.c
+++ b/tools/libxl/libxl_arm.c
@@ -1154,6 +1154,12 @@ void libxl__arch_domain_build_info_acpi_setdefault(
     libxl_defbool_setdefault(&b_info->acpi, false);
 }
 
+int libxl__arch_cpu_topology_init(libxl__gc *gc, uint32_t domid,
+                                  libxl_domain_config *d_config)
+{
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index ef834e6..13e27d3 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -353,6 +353,10 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
     int rc;
     uint64_t size;
 
+    if (libxl__arch_cpu_topology_init(gc, domid, d_config)) {
+        return ERROR_FAIL;
+    }
+
     if (xc_domain_max_vcpus(ctx->xch, domid, info->max_vcpus) != 0) {
         LOG(ERROR, "Couldn't set max vcpu count");
         return ERROR_FAIL;
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 8c80e67..6e0d96a 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -461,6 +461,7 @@ libxl_cpu_topology = Struct("cpu_topology", [
("cores", uint8),
("threads", uint8),
("real_threads", uint8),
+ ("tid", Array(uint32, "tid_size")),
])
libxl_domain_build_info = Struct("domain_build_info",[
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index 5f91fe4..f28af46 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -596,6 +596,76 @@ void libxl__arch_domain_build_info_acpi_setdefault(
     libxl_defbool_setdefault(&b_info->acpi, true);
 }
 
+static inline int fls(unsigned int x)
+{
+    int r;
+
+    asm ( "bsr %1,%0\n\t"
+          "jnz 1f\n\t"
+          "mov $-1,%0\n"
+          "1:" : "=r" (r) : "rm" (x));
+    return r + 1;
+}
+
+int libxl__arch_cpu_topology_init(libxl__gc *gc, uint32_t domid,
+                                  libxl_domain_config *d_config)
+{
+    int i, rc = 0;
+    uint8_t core_shift, socket_shift, real_threads;
+    unsigned int *tid;
+    libxl_domain_build_info *const info = &d_config->b_info;
+
+    if (!info->u.hvm.cpu_topology.cores)
+        info->u.hvm.cpu_topology.cores = 128;
+    if (!info->u.hvm.cpu_topology.threads)
+        info->u.hvm.cpu_topology.threads = 2;
+    if (!info->u.hvm.cpu_topology.real_threads)
+        info->u.hvm.cpu_topology.real_threads = 1;
+
+    if (info->u.hvm.cpu_topology.threads <
+        info->u.hvm.cpu_topology.real_threads)
+    {
+        LOGE(ERROR, "threads cannot be smaller than real threads");
+        return ERROR_FAIL;
+    }
+
+    real_threads = info->u.hvm.cpu_topology.real_threads;
+    tid = libxl__calloc(gc, info->max_vcpus, sizeof(unsigned int));
+    core_shift = fls(info->u.hvm.cpu_topology.threads - 1);
+    socket_shift = core_shift + fls(info->u.hvm.cpu_topology.cores - 1);
+    if (info->num_vnuma_nodes == 0) {
+        for (i = 0; i < info->max_vcpus; i++) {
+            tid[i] = ((i / real_threads) << core_shift) + i % real_threads;
+        }
+    } else {
+        int socket_id;
+
+        for (socket_id = 0; socket_id < info->num_vnuma_nodes; socket_id++) {
+            int j = 0;
+
+            libxl_for_each_set_bit(i, info->vnuma_nodes[socket_id].vcpus) {
+                tid[i] = (socket_id << socket_shift) +
+                         ((j / real_threads) << core_shift) +
+                         (j % real_threads);
+                j++;
+            }
+        }
+    }
+
+    info->u.hvm.cpu_topology.tid = tid;
+    info->u.hvm.cpu_topology.tid_size = info->max_vcpus;
+
+    rc = xc_set_cpu_topology(libxl__gc_owner(gc)->xch, domid, tid,
+                             info->max_vcpus, info->u.hvm.cpu_topology.threads,
+                             info->u.hvm.cpu_topology.cores);
+    if (rc < 0) {
+        LOGE(ERROR, "xc_set_cpu_topology failed");
+        rc = ERROR_FAIL;
+    }
+
+    return rc;
+
+}
 /*
  * Local variables:
  * mode: C
--
1.8.3.1