Re: [Xen-devel] [PATCH v5 11/24] libxl: build, check and pass vNUMA info to Xen for PV guest
On 12/02/15 19:44, Wei Liu wrote:
> Transform the user supplied vNUMA configuration into libxl internal
> representations, and finally libxc representations. Check validity of
> the configuration along the line.
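As a rough, purely illustrative example of the transformation: for a guest
with 4 vcpus and 2 vNUMA nodes, with vnode0 placed on pnode0 and vnode1 on
pnode1, vcpus 0-1 on vnode0 and 2-3 on vnode1, and SLIT-style distances of
10 (local) / 20 (remote), the libxc-level arrays built by set_vnuma_info()
below would come out as:

    /* Illustrative values only. */
    unsigned int vnode_to_pnode[2] = { 0, 1 };          /* vnode i -> pnode */
    unsigned int vcpu_to_vnode[4]  = { 0, 0, 1, 1 };    /* vcpu i -> vnode */
    unsigned int vdistance[2 * 2]  = { 10, 20,          /* row 0: from vnode0 */
                                       20, 10 };        /* row 1: from vnode1 */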
>
> Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
> Cc: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
> Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> ---
> Changes in v5:
> 1. Adapt to change of interface (ditching xc_vnuma_info).
>
> Changes in v4:
> 1. Adapt to new interfaces.
>
> Changes in v3:
> 1. Add more commit log.
> ---
> tools/libxl/libxl_dom.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 77 insertions(+)
>
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index 48d661a..1ff0704 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -515,6 +515,51 @@ retry_transaction:
> return 0;
> }
>
> +static int set_vnuma_info(libxl__gc *gc, uint32_t domid,
> + const libxl_domain_build_info *info,
> + const libxl__domain_build_state *state)
> +{
> + int rc = 0;
> + int i, nr_vdistance;
unsigned
> + unsigned int *vcpu_to_vnode, *vnode_to_pnode, *vdistance = NULL;
> +
> + vcpu_to_vnode = libxl__calloc(gc, info->max_vcpus,
> + sizeof(unsigned int));
> + vnode_to_pnode = libxl__calloc(gc, info->num_vnuma_nodes,
> + sizeof(unsigned int));
> +
> + nr_vdistance = info->num_vnuma_nodes * info->num_vnuma_nodes;
> + vdistance = libxl__calloc(gc, nr_vdistance, sizeof(unsigned int));
> +
> + for (i = 0; i < info->num_vnuma_nodes; i++) {
> + libxl_vnode_info *v = &info->vnuma_nodes[i];
> + int bit;
> +
> + /* vnode to pnode mapping */
> + vnode_to_pnode[i] = v->pnode;
> +
> + /* vcpu to vnode mapping */
> + libxl_for_each_set_bit(bit, v->vcpus)
> + vcpu_to_vnode[bit] = i;
> +
> + /* node distances */
> + assert(info->num_vnuma_nodes == v->num_distances);
> + memcpy(vdistance + (i * info->num_vnuma_nodes),
> + v->distances,
> + v->num_distances * sizeof(unsigned int));
> + }
> +
> + if (xc_domain_setvnuma(CTX->xch, domid, info->num_vnuma_nodes,
> + state->num_vmemranges, info->max_vcpus,
> + state->vmemranges, vdistance,
> + vcpu_to_vnode, vnode_to_pnode) < 0) {
> + LOGE(ERROR, "xc_domain_setvnuma failed");
> + rc = ERROR_FAIL;
> + }
> +
> + return rc;
> +}
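The distance table built above is the N x N matrix flattened row-major
(row i is memcpy'd at offset i * N).  Assuming v->distances[j] holds the
distance from vnode i to vnode j, a reader of the flattened table (a
hypothetical helper, not part of the patch) would look like:

    /* Distance from vnode i to vnode j; n == info->num_vnuma_nodes. */
    static unsigned int vdist(const unsigned int *vdistance, unsigned int n,
                              unsigned int i, unsigned int j)
    {
        return vdistance[i * n + j];
    }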
> +
> int libxl__build_pv(libxl__gc *gc, uint32_t domid,
> libxl_domain_build_info *info, libxl__domain_build_state *state)
> {
> @@ -572,6 +617,38 @@ int libxl__build_pv(libxl__gc *gc, uint32_t domid,
> dom->xenstore_domid = state->store_domid;
> dom->claim_enabled = libxl_defbool_val(info->claim_mode);
>
> + if (info->num_vnuma_nodes != 0) {
> + int i;
unsigned
~Andrew
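For clarity, the change being asked for in both places is along these lines
(a sketch only, not part of the submitted patch):

    /* in set_vnuma_info() */
    unsigned int i, nr_vdistance;

    /* in libxl__build_pv(), inside the vNUMA block */
    unsigned int i;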
> +
> + ret = libxl__vnuma_build_vmemrange_pv(gc, domid, info, state);
> + if (ret) {
> + LOGE(ERROR, "cannot build vmemranges");
> + goto out;
> + }
> + ret = libxl__vnuma_config_check(gc, info, state);
> + if (ret) goto out;
> +
> + ret = set_vnuma_info(gc, domid, info, state);
> + if (ret) goto out;
> +
> + dom->nr_vmemranges = state->num_vmemranges;
> + dom->vmemranges = xc_dom_malloc(dom, sizeof(*dom->vmemranges) *
> + dom->nr_vmemranges);
> +
> + for (i = 0; i < dom->nr_vmemranges; i++) {
> + dom->vmemranges[i].start = state->vmemranges[i].start;
> + dom->vmemranges[i].end = state->vmemranges[i].end;
> + dom->vmemranges[i].flags = state->vmemranges[i].flags;
> + dom->vmemranges[i].nid = state->vmemranges[i].nid;
> + }
> +
> + dom->nr_vnodes = info->num_vnuma_nodes;
> + dom->vnode_to_pnode = xc_dom_malloc(dom, sizeof(*dom->vnode_to_pnode) *
> + dom->nr_vnodes);
> + for (i = 0; i < info->num_vnuma_nodes; i++)
> + dom->vnode_to_pnode[i] = info->vnuma_nodes[i].pnode;
> + }
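Continuing the illustrative 2-vnode example from earlier: with 512MiB per
vnode and no holes in the guest address space, the vmemranges copied into
the domain builder here would look roughly like this (type name and values
are assumptions; the field names are those used above):

    xen_vmemrange_t vmemranges[2] = {
        { .start = 0,            .end = 512ULL << 20,  .flags = 0, .nid = 0 },
        { .start = 512ULL << 20, .end = 1024ULL << 20, .flags = 0, .nid = 1 },
    };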
> +
> if ( (ret = xc_dom_boot_xen_init(dom, ctx->xch, domid)) != 0 ) {
> LOGE(ERROR, "xc_dom_boot_xen_init failed");
> goto out;