[Xen-devel] [PATCH 2/7] libxc: Plumb Xen with vNUMA topology for domain.
Per-domain vNUMA topology initialization. domctl hypercall is used to set
vNUMA topology per domU during domain build time.

Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
Changes since RFC v2:
- copy vNUMA topology information in hypercall in one go;
---
 tools/libxc/xc_dom.h    |    9 ++++++++
 tools/libxc/xc_domain.c |   59 +++++++++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xenctrl.h   |    9 ++++++++
 3 files changed, 77 insertions(+)

diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 86e23ee..a271b7c 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -114,6 +114,15 @@ struct xc_dom_image {
     struct xc_dom_phys *phys_pages;
     int realmodearea_log;
 
+    /*
+     * vNUMA topology and memory allocation structure.
+     * Defines the way to allocate memory on per NUMA
+     * physical nodes that is defined by vnode_to_pnode.
+     */
+    uint16_t nr_vnodes;
+    uint64_t *vnuma_memszs;
+    unsigned int *vnode_to_pnode;
+
     /* malloc memory pool */
     struct xc_dom_mem *memblocks;
 
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 2cea6e3..6cab681 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1777,6 +1777,65 @@ int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
     return do_domctl(xch, &domctl);
 }
 
+/* Plumbs Xen with vNUMA topology */
+int xc_domain_setvnodes(xc_interface *xch,
+                        uint32_t domid,
+                        uint16_t nr_vnodes,
+                        uint16_t nr_vcpus,
+                        vnuma_memblk_t *vmemblks,
+                        unsigned int *vdistance,
+                        unsigned int *vcpu_to_vnode,
+                        unsigned int *vnode_to_pnode)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vmemblks, sizeof(*vmemblks) * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
+                             nr_vnodes * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) *
+                             nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( vdistance == NULL || vcpu_to_vnode == NULL ||
+         vmemblks == NULL || vnode_to_pnode == NULL )
+    {
+        PERROR("Incorrect parameters for XEN_DOMCTL_setvnumainfo\n");
+        return -EINVAL;
+    }
+
+    rc = -EINVAL;
+
+    if (xc_hypercall_bounce_pre(xch, vmemblks)      ||
+        xc_hypercall_bounce_pre(xch, vdistance)     ||
+        xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
+        xc_hypercall_bounce_pre(xch, vnode_to_pnode))
+    {
+        PERROR("Could not bounce buffer for xc_domain_setvnodes");
+        return rc;
+    }
+
+    set_xen_guest_handle(domctl.u.vnuma.vnuma_memblks, vmemblks);
+    set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
+    set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
+    set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
+
+    domctl.cmd = XEN_DOMCTL_setvnumainfo;
+    domctl.domain = (domid_t)domid;
+    domctl.u.vnuma.nr_vnodes = nr_vnodes;
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, vmemblks);
+    xc_hypercall_bounce_post(xch, vdistance);
+    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+    xc_hypercall_bounce_post(xch, vnode_to_pnode);
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 8cf3f3b..3dbd035 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1108,6 +1108,15 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
                                uint32_t domid,
                                unsigned long map_limitkb);
 
+int xc_domain_setvnodes(xc_interface *xch,
+                        uint32_t domid,
+                        uint16_t nr_vnodes,
+                        uint16_t nr_vcpus,
+                        vnuma_memblk_t *vmemareas,
+                        unsigned int *vdistance,
+                        unsigned int *vcpu_to_vnode,
+                        unsigned int *vnode_to_pnode);
+
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * PC BIOS standard E820 types and structure.
-- 
1.7.10.4
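[Editor's note] For reference, a minimal caller sketch (not part of the patch) of how a toolstack might invoke the new xc_domain_setvnodes() for a two-vnode, four-vcpu guest. The domid, memory block ranges and distance values are made up for illustration, and the start/end fields of vnuma_memblk_t are assumed from the public-header patch earlier in this series:

/* Illustrative only -- not part of the patch.  Assumes the vnuma_memblk_t
 * start/end layout from the earlier patch in this series, and an
 * already-built guest with 4 vcpus and 2 vnodes of 1GB each. */
#include <string.h>
#include <xenctrl.h>

#define GB (1024ULL * 1024 * 1024)

static int example_set_vnuma(xc_interface *xch, uint32_t domid)
{
    uint16_t nr_vnodes = 2, nr_vcpus = 4;

    vnuma_memblk_t vmemblks[2];
    unsigned int vdistance[2 * 2]  = { 10, 20,      /* distances from vnode 0 */
                                       20, 10 };    /* distances from vnode 1 */
    unsigned int vcpu_to_vnode[4]  = { 0, 0, 1, 1 };/* vcpus 0-1 on vnode 0 */
    unsigned int vnode_to_pnode[2] = { 0, 1 };      /* one pnode per vnode */

    memset(vmemblks, 0, sizeof(vmemblks));
    vmemblks[0].start = 0;          /* [0, 1G) backed by vnode 0 */
    vmemblks[0].end   = 1 * GB;
    vmemblks[1].start = 1 * GB;     /* [1G, 2G) backed by vnode 1 */
    vmemblks[1].end   = 2 * GB;

    /* All four arrays are bounced and handed to Xen in a single
     * XEN_DOMCTL_setvnumainfo hypercall. */
    return xc_domain_setvnodes(xch, domid, nr_vnodes, nr_vcpus,
                               vmemblks, vdistance,
                               vcpu_to_vnode, vnode_to_pnode);
}

The single hypercall per domain is the "copy vNUMA topology information in hypercall in one go" change noted in the changelog above.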