[Xen-devel] [PATCH 16/19] libxl: build, check and pass vNUMA info to Xen for HVM guest
Libxc has more involvement in building vmemranges in the HVM case. The
building of vmemranges is placed after xc_hvm_build returns, because it
relies on the memory hole information provided by xc_hvm_build.
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
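As an illustration of the "memory hole information" mentioned above, here
is how hole_start and hole_end fall out of a typical layout (a minimal
sketch with hypothetical values, not taken from a real guest; the real
values are filled in by xc_hvm_build in struct xc_hvm_build_args):

    /* Hypothetical layout, for illustration only. */
    uint64_t lowmem_end = 0xf0000000ULL;   /* 3840 MiB of low memory  */
    uint64_t mmio_start = 0xf0000000ULL;   /* MMIO region begins here */
    uint64_t mmio_size  = 0x10000000ULL;   /* 256 MiB of MMIO space   */

    /* The hole starts at whichever comes first: end of low memory or
     * start of MMIO. */
    uint64_t hole_start = lowmem_end < mmio_start ? lowmem_end : mmio_start;
    /* hole_start == 0xf0000000 */

    /* The hole extends at least to the 4GiB boundary, so memory that
     * does not fit below the hole resumes in the highmem region. */
    uint64_t hole_end = (mmio_start + mmio_size) > (1ULL << 32) ?
        (mmio_start + mmio_size) : (1ULL << 32);
    /* hole_end == 0x100000000 */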
 tools/libxl/libxl_create.c   |  9 +++++++
 tools/libxl/libxl_dom.c      | 28 +++++++++++++++++++++
 tools/libxl/libxl_internal.h |  5 ++++
 tools/libxl/libxl_vnuma.c    | 56 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 98 insertions(+)
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index b1ff5ae..1d96a5f 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -843,6 +843,15 @@ static void initiate_domain_create(libxl__egc *egc,
         goto error_out;
     }
 
+    /* Disallow PoD and vNUMA to be enabled at the same time because PoD
+     * pool is not vNUMA-aware yet.
+     */
+    if (pod_enabled && d_config->b_info.num_vnuma_nodes) {
+        ret = ERROR_INVAL;
+        LOG(ERROR, "Cannot enable PoD and vNUMA at the same time");
+        goto error_out;
+    }
+
     ret = libxl__domain_create_info_setdefault(gc, &d_config->c_info);
     if (ret) goto error_out;
 
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 7339bbc..3fe3092 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -884,12 +884,40 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid,
         goto out;
     }
 
+    if (info->num_vnuma_nodes != 0) {
+        int i;
+
+        args.nr_vnodes = info->num_vnuma_nodes;
+        args.vnode_to_pnode = libxl__malloc(gc, sizeof(*args.vnode_to_pnode) *
+                                            args.nr_vnodes);
+        args.vnode_size = libxl__malloc(gc, sizeof(*args.vnode_size) *
+                                        args.nr_vnodes);
+        for (i = 0; i < args.nr_vnodes; i++) {
+            args.vnode_to_pnode[i] = info->vnuma_nodes[i].pnode;
+            args.vnode_size[i] = info->vnuma_nodes[i].mem;
+        }
+
+        /* Consider video RAM as belonging to node 0 */
+        args.vnode_size[0] -= (info->video_memkb >> 10);
+    }
+
     ret = xc_hvm_build(ctx->xch, domid, &args);
     if (ret) {
         LOGEV(ERROR, ret, "hvm building failed");
         goto out;
     }
 
+    if (info->num_vnuma_nodes != 0) {
+        ret = libxl__vnuma_build_vmemrange_hvm(gc, domid, info, state, &args);
+        if (ret) {
+            LOGEV(ERROR, ret, "hvm build vmemranges failed");
+            goto out;
+        }
+        ret = libxl__vnuma_config_check(gc, info, state);
+        if (ret) goto out;
+        ret = set_vnuma_info(gc, domid, info, state);
+        if (ret) goto out;
+    }
+
     ret = hvm_build_set_params(ctx->xch, domid, info, state->store_port,
                                &state->store_mfn, state->console_port,
                                &state->console_mfn, state->store_domid,
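(A note on units in the hunk above, inferred from the conversions visible
in the code: vnuma_nodes[i].mem and args.vnode_size[] are in MiB, while
video_memkb is in KiB, hence the >> 10 when charging video RAM to node 0.
A quick sketch with hypothetical numbers:)

    uint64_t vnode0_size = 2048;       /* info->vnuma_nodes[0].mem, in MiB */
    uint64_t video_memkb = 8192;       /* 8 MiB of video RAM, in KiB       */
    vnode0_size -= video_memkb >> 10;  /* 8192 >> 10 == 8; 2040 MiB left   */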
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index b1b60cb..02e2bce 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -3401,6 +3401,11 @@ int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc,
                                     uint32_t domid,
                                     libxl_domain_build_info *b_info,
                                     libxl__domain_build_state *state);
+int libxl__vnuma_build_vmemrange_hvm(libxl__gc *gc,
+                                     uint32_t domid,
+                                     libxl_domain_build_info *b_info,
+                                     libxl__domain_build_state *state,
+                                     struct xc_hvm_build_args *args);
 
 _hidden int libxl__ms_vm_genid_set(libxl__gc *gc, uint32_t domid,
                                    const libxl_ms_vm_genid *id);
diff --git a/tools/libxl/libxl_vnuma.c b/tools/libxl/libxl_vnuma.c
index 1d50606..5609dce 100644
--- a/tools/libxl/libxl_vnuma.c
+++ b/tools/libxl/libxl_vnuma.c
@@ -163,6 +163,62 @@ int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc,
     return libxl__arch_vnuma_build_vmemrange(gc, domid, b_info, state);
 }
 
+/* Build vmemranges for HVM guest */
+int libxl__vnuma_build_vmemrange_hvm(libxl__gc *gc,
+                                     uint32_t domid,
+                                     libxl_domain_build_info *b_info,
+                                     libxl__domain_build_state *state,
+                                     struct xc_hvm_build_args *args)
+{
+    uint64_t hole_start, hole_end, next;
+    int i, x;
+    vmemrange_t *v;
+
+    /* Derive vmemranges from vnode size and memory hole.
+     *
+     * Guest physical address space layout:
+     * [0, hole_start) [hole_start, hole_end) [hole_end, highmem_end)
+     */
+    hole_start = args->lowmem_end < args->mmio_start ?
+        args->lowmem_end : args->mmio_start;
+    hole_end = (args->mmio_start + args->mmio_size) > (1ULL << 32) ?
+        (args->mmio_start + args->mmio_size) : (1ULL << 32);
+
+    assert(state->vmemranges == NULL);
+
+    next = 0;
+    x = 0;
+    v = NULL;
+    for (i = 0; i < b_info->num_vnuma_nodes; i++) {
+        libxl_vnode_info *p = &b_info->vnuma_nodes[i];
+        uint64_t remaining = (p->mem << 20);
+
+        while (remaining > 0) {
+            uint64_t count = remaining;
+
+            if (next >= hole_start && next < hole_end)
+                next = hole_end;
+            if ((next < hole_start) && (next + remaining >= hole_start))
+                count = hole_start - next;
+
+            v = libxl__realloc(gc, v, sizeof(vmemrange_t) * (x + 1));
+            v[x].start = next;
+            v[x].end = next + count;
+            v[x].flags = 0;
+            v[x].nid = i;
+
+            x++;
+            remaining -= count;
+            next += count;
+        }
+    }
+
+    state->vmemranges = v;
+    state->num_vmemranges = x;
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
--
1.7.10.4
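To see how the loop in libxl__vnuma_build_vmemrange_hvm splits a vnode
that straddles the memory hole, here is a minimal standalone sketch of the
same arithmetic (not libxl code; it assumes a hypothetical guest with two
2GiB vnodes and a hole covering [0xf0000000, 0x100000000)):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical layout: hole at [0xf0000000, 4GiB). */
        uint64_t hole_start = 0xf0000000ULL, hole_end = 1ULL << 32;
        uint64_t vnode_mib[] = { 2048, 2048 };  /* two 2GiB vnodes */
        uint64_t next = 0;
        int i;

        for (i = 0; i < 2; i++) {
            uint64_t remaining = vnode_mib[i] << 20;  /* MiB to bytes */

            while (remaining > 0) {
                uint64_t count = remaining;

                /* Skip over the hole; clamp ranges that would cross
                 * into it, exactly as the loop in the patch does. */
                if (next >= hole_start && next < hole_end)
                    next = hole_end;
                if (next < hole_start && next + remaining >= hole_start)
                    count = hole_start - next;

                printf("vmemrange [%#" PRIx64 ", %#" PRIx64 ") nid %d\n",
                       next, next + count, i);

                remaining -= count;
                next += count;
            }
        }
        /* Prints:
         *   vmemrange [0, 0x80000000) nid 0
         *   vmemrange [0x80000000, 0xf0000000) nid 1
         *   vmemrange [0x100000000, 0x110000000) nid 1
         * i.e. vnode 1 is split in two around the hole, so two vnodes
         * yield three vmemranges.
         */
        return 0;
    }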