[Xen-devel] [PATCH v3 08/19] libxl: functions to build vmemranges for PV guest
Introduce an arch-independent routine to generate one vmemrange per
vnode. Also introduce arch-dependent routines for different
architectures, because part of the process is arch-specific -- ARM
does not yet have NUMA support and E820 is x86 only.
For x86 guests that care about the machine E820 map (i.e. with
e820_host=1), each vnode is further split into several vmemranges to
accommodate memory holes. A few stubs for libxl_arm.c are created.
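For illustration only (not part of the patch): a minimal standalone
sketch of the arch-independent pass, using a simplified stand-in for
xen_vmemrange_t and two made-up 512 MiB vnodes. It lays out one
contiguous vmemrange per virtual node, back to back, as the
libxl_vnuma.c hunk below does.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for xen_vmemrange_t (illustrative only). */
    struct range { uint64_t start, end; unsigned nid; };

    int main(void)
    {
        uint64_t mem_mb[] = { 512, 512 };   /* two hypothetical vnodes */
        struct range v[2];
        uint64_t next = 0;

        /* One contiguous vmemrange per vnode, laid out back to back. */
        for (unsigned i = 0; i < 2; i++) {
            v[i].start = next;
            v[i].end   = next + (mem_mb[i] << 20);  /* MiB to bytes */
            v[i].nid   = i;
            next = v[i].end;
        }

        for (unsigned i = 0; i < 2; i++)
            printf("vmemrange %u: %#llx-%#llx nid %u\n", i,
                   (unsigned long long)v[i].start,
                   (unsigned long long)v[i].end, v[i].nid);
        return 0;
    }

This prints [0, 512M) for nid 0 and [512M, 1G) for nid 1.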
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
Changes in v3:
1. Rewrite commit log.
---
tools/libxl/libxl_arch.h | 6 ++++
tools/libxl/libxl_arm.c | 9 +++++
tools/libxl/libxl_internal.h | 5 +++
tools/libxl/libxl_vnuma.c | 34 +++++++++++++++++++
tools/libxl/libxl_x86.c | 74 ++++++++++++++++++++++++++++++++++++++++++
5 files changed, 128 insertions(+)
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index d3bc136..e249048 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -27,4 +27,10 @@ int libxl__arch_domain_init_hw_description(libxl__gc *gc,
int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
libxl_domain_build_info *info,
struct xc_dom_image *dom);
+
+/* build vNUMA vmemrange with arch specific information */
+int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
+ uint32_t domid,
+ libxl_domain_build_info *b_info,
+ libxl__domain_build_state *state);
#endif
diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
index 65a762b..d3968a7 100644
--- a/tools/libxl/libxl_arm.c
+++ b/tools/libxl/libxl_arm.c
@@ -707,6 +707,15 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
return 0;
}
+int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
+ uint32_t domid,
+ libxl_domain_build_info *info,
+ libxl__domain_build_state *state)
+{
+ /* Don't touch anything. */
+ return 0;
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 39356ba..73d533a 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -3399,6 +3399,11 @@ int libxl__vnuma_config_check(libxl__gc *gc,
const libxl_domain_build_info *b_info,
const libxl__domain_build_state *state);
+int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc,
+ uint32_t domid,
+ libxl_domain_build_info *b_info,
+ libxl__domain_build_state *state);
+
_hidden int libxl__ms_vm_genid_set(libxl__gc *gc, uint32_t domid,
const libxl_ms_vm_genid *id);
diff --git a/tools/libxl/libxl_vnuma.c b/tools/libxl/libxl_vnuma.c
index 439f5ab..a72abe2 100644
--- a/tools/libxl/libxl_vnuma.c
+++ b/tools/libxl/libxl_vnuma.c
@@ -14,6 +14,7 @@
*/
#include "libxl_osdeps.h" /* must come before any other headers */
#include "libxl_internal.h"
+#include "libxl_arch.h"
#include <stdlib.h>
/* Sort vmemranges in ascending order with "start" */
@@ -128,6 +129,39 @@ out:
return rc;
}
+/* Build vmemranges for a PV guest */
+int libxl__vnuma_build_vmemrange_pv(libxl__gc *gc,
+ uint32_t domid,
+ libxl_domain_build_info *b_info,
+ libxl__domain_build_state *state)
+{
+ int i;
+ uint64_t next;
+ xen_vmemrange_t *v = NULL;
+
+ assert(state->vmemranges == NULL);
+
+ /* Generate one vmemrange for each virtual node. */
+ next = 0;
+ for (i = 0; i < b_info->num_vnuma_nodes; i++) {
+ libxl_vnode_info *p = &b_info->vnuma_nodes[i];
+
+ v = libxl__realloc(gc, v, sizeof(*v) * (i+1));
+
+ v[i].start = next;
+ v[i].end = next + (p->mem << 20); /* mem is in MiB */
+ v[i].flags = 0;
+ v[i].nid = i;
+
+ next = v[i].end;
+ }
+
+ state->vmemranges = v;
+ state->num_vmemranges = i;
+
+ return libxl__arch_vnuma_build_vmemrange(gc, domid, b_info, state);
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index e959e37..2018afc 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -338,6 +338,80 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
return 0;
}
+int libxl__arch_vnuma_build_vmemrange(libxl__gc *gc,
+ uint32_t domid,
+ libxl_domain_build_info *b_info,
+ libxl__domain_build_state *state)
+{
+ int i, x, n, rc;
+ uint32_t nr_e820;
+ struct e820entry map[E820MAX];
+ xen_vmemrange_t *v;
+
+ /* Only touch vmemranges if it's a PV guest and e820_host is true */
+ if (!(b_info->type == LIBXL_DOMAIN_TYPE_PV &&
+ libxl_defbool_val(b_info->u.pv.e820_host))) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = e820_host_sanitize(gc, b_info, map, &nr_e820);
+ if (rc) goto out;
+
+ /* Ditch old vmemranges and start from the host E820 map. Note:
+ * the old memory was gc-allocated, so it need not be freed here.
+ */
+ state->vmemranges = NULL;
+ state->num_vmemranges = 0;
+
+ n = 0; /* E820 counter */
+ x = 0;
+ v = NULL;
+ for (i = 0; i < b_info->num_vnuma_nodes; i++) {
+ libxl_vnode_info *p = &b_info->vnuma_nodes[i];
+ uint64_t remaining = (p->mem << 20);
+
+ while (remaining > 0) {
+ if (n >= nr_e820) {
+ rc = ERROR_FAIL;
+ goto out;
+ }
+
+ /* Skip non-RAM regions */
+ if (map[n].type != E820_RAM) {
+ n++;
+ continue;
+ }
+
+ v = libxl__realloc(gc, v, sizeof(xen_vmemrange_t) * (x+1));
+
+ if (map[n].size >= remaining) {
+ v[x].start = map[n].addr;
+ v[x].end = map[n].addr + remaining;
+ map[n].addr += remaining;
+ map[n].size -= remaining;
+ remaining = 0;
+ } else {
+ v[x].start = map[n].addr;
+ v[x].end = map[n].addr + map[n].size;
+ remaining -= map[n].size;
+ n++;
+ }
+
+ v[x].flags = 0;
+ v[x].nid = i;
+ x++;
+ }
+ }
+
+ state->vmemranges = v;
+ state->num_vmemranges = x;
+
+ rc = 0;
+out:
+ return rc;
+}
+
/*
* Local variables:
* mode: C
--
1.7.10.4
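For illustration only (not part of the patch): the libxl_x86.c hunk
above carves each vnode out of successive E820 RAM regions. Below is a
minimal standalone sketch of that splitting, with simplified stand-in
types and a made-up RAM map whose two entries are separated by a hole.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for struct e820entry and xen_vmemrange_t. */
    struct region { uint64_t addr, size; };
    struct range  { uint64_t start, end; unsigned nid; };

    int main(void)
    {
        /* Hypothetical host RAM map: 512 MiB below a hole, more RAM
         * above it. (Non-RAM entries, which the real code skips, are
         * omitted here.) */
        struct region map[] = {
            { 0x00000000ULL,  512ULL << 20 },
            { 0x40000000ULL, 1024ULL << 20 },
        };
        uint64_t remaining = 768ULL << 20;  /* one 768 MiB vnode */
        struct range v[4];
        unsigned n = 0, x = 0;

        /* Carve the vnode out of successive RAM regions. */
        while (remaining > 0 && n < 2) {
            uint64_t chunk = map[n].size < remaining ? map[n].size
                                                     : remaining;
            v[x].start = map[n].addr;
            v[x].end   = map[n].addr + chunk;
            v[x].nid   = 0;
            map[n].addr += chunk;
            map[n].size -= chunk;
            remaining   -= chunk;
            if (map[n].size == 0)
                n++;
            x++;
        }

        /* Prints two ranges: [0, 512M) and [1G, 1G+256M), both nid 0. */
        for (unsigned i = 0; i < x; i++)
            printf("vmemrange %u: %#llx-%#llx nid %u\n", i,
                   (unsigned long long)v[i].start,
                   (unsigned long long)v[i].end, v[i].nid);
        return 0;
    }

The single vnode ends up split across the hole into two vmemranges
with the same nid, which is exactly the effect the commit message
describes for e820_host=1 guests.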