Re: [Xen-devel] [PATCH v5 7/8] libxl/libxc: Move libxl_get_numainfo()'s hypercall buffer management to libxc
On Thu, Mar 19, 2015 at 05:54:03PM -0400, Boris Ostrovsky wrote:
> xc_numainfo() is not expected to be used on a hot path and therefore
> hypercall buffer management can be pushed into libxc. This will simplify
> life for callers.
>
> Also update error logging macros.
>
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
> Changes in v5:
> * Adjust to new interface (as described in changelog for patch 4/8)
>
> tools/libxc/include/xenctrl.h | 4 ++-
> tools/libxc/xc_misc.c | 41 ++++++++++++++++++++++++-----
> tools/libxl/libxl.c | 52 ++++++++++++-------------------------
> tools/python/xen/lowlevel/xc/xc.c | 38 ++++++++++-----------------
> 4 files changed, 68 insertions(+), 67 deletions(-)
>
> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
> index 14d22ce..54540e7 100644
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -1228,6 +1228,7 @@ int xc_send_debug_keys(xc_interface *xch, char *keys);
> typedef xen_sysctl_physinfo_t xc_physinfo_t;
> typedef xen_sysctl_cputopo_t xc_cputopo_t;
> typedef xen_sysctl_numainfo_t xc_numainfo_t;
> +typedef xen_sysctl_meminfo_t xc_meminfo_t;
>
> typedef uint32_t xc_cpu_to_node_t;
> typedef uint32_t xc_cpu_to_socket_t;
> @@ -1239,7 +1240,8 @@ typedef uint32_t xc_node_to_node_dist_t;
> int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
> int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
> xc_cputopo_t *cputopo);
> -int xc_numainfo(xc_interface *xch, xc_numainfo_t *info);
> +int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
> + xc_meminfo_t *meminfo, uint32_t *distance);
>
> int xc_sched_id(xc_interface *xch,
> int *sched_id);
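
As an aside for readers following the interface change: the expected
calling convention is the usual two-phase one (NULL buffers to learn the
node count, then real buffers to fetch the data), which is what the libxl
and python hunks below do. A rough sketch of a caller (not compile-tested;
print_numa_sizes() is just an illustrative name):

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Illustrative caller of the reworked xc_numainfo(). */
    static int print_numa_sizes(xc_interface *xch)
    {
        unsigned i, num_nodes;
        xc_meminfo_t *meminfo = NULL;
        uint32_t *distance = NULL;
        int rc = -1;

        /* First call with NULL buffers: only the node count is returned. */
        if ( xc_numainfo(xch, &num_nodes, NULL, NULL) )
            return -1;

        meminfo = calloc(num_nodes, sizeof(*meminfo));
        distance = calloc(num_nodes * num_nodes, sizeof(*distance));
        if ( !meminfo || !distance )
            goto out;

        /* Second call: libxc now bounces both buffers internally. */
        if ( xc_numainfo(xch, &num_nodes, meminfo, distance) )
            goto out;

        for ( i = 0; i < num_nodes; i++ )
            printf("node %u: %" PRIu64 " total, %" PRIu64 " free\n",
                   i, meminfo[i].memsize, meminfo[i].memfree);
        rc = 0;

     out:
        free(meminfo);
        free(distance);
        return rc;
    }
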
> diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
> index 411128e..607ae61 100644
> --- a/tools/libxc/xc_misc.c
> +++ b/tools/libxc/xc_misc.c
> @@ -209,22 +209,49 @@ out:
> return ret;
> }
>
> -int xc_numainfo(xc_interface *xch,
> - xc_numainfo_t *put_info)
> +int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
> + xc_meminfo_t *meminfo, uint32_t *distance)
> {
> int ret;
> DECLARE_SYSCTL;
> + DECLARE_HYPERCALL_BOUNCE(meminfo, *max_nodes * sizeof(*meminfo),
> + XC_HYPERCALL_BUFFER_BOUNCE_OUT);
> + DECLARE_HYPERCALL_BOUNCE(distance,
> + *max_nodes * *max_nodes * sizeof(*distance),
> + XC_HYPERCALL_BUFFER_BOUNCE_OUT);
>
> - sysctl.cmd = XEN_SYSCTL_numainfo;
> + if (meminfo && distance) {
The syntax in xc_misc.c looks mostly to be of the

    if ( meminfo && distance )
    {

type.
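Applied to the new code, that would presumably read (same code, only the
brace and space placement changed):

    if ( meminfo && distance )
    {
        if ( (ret = xc_hypercall_bounce_pre(xch, meminfo)) )
            goto out;
        if ( (ret = xc_hypercall_bounce_pre(xch, distance)) )
            goto out;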
> + if ((ret = xc_hypercall_bounce_pre(xch, meminfo)))
> + goto out;
> + if ((ret = xc_hypercall_bounce_pre(xch, distance)))
> + goto out;
>
> - memcpy(&sysctl.u.numainfo, put_info, sizeof(*put_info));
> + sysctl.u.numainfo.num_nodes = *max_nodes;
> + set_xen_guest_handle(sysctl.u.numainfo.meminfo, meminfo);
> + set_xen_guest_handle(sysctl.u.numainfo.distance, distance);
> + } else if (meminfo || distance) {
> + errno = EINVAL;
> + return -1;
> + } else {
> + set_xen_guest_handle(sysctl.u.numainfo.meminfo,
> + HYPERCALL_BUFFER_NULL);
> + set_xen_guest_handle(sysctl.u.numainfo.distance,
> + HYPERCALL_BUFFER_NULL);
> + }
>
> + sysctl.cmd = XEN_SYSCTL_numainfo;
> if ((ret = do_sysctl(xch, &sysctl)) != 0)
> - return ret;
> + goto out;
>
> - memcpy(put_info, &sysctl.u.numainfo, sizeof(*put_info));
> + *max_nodes = sysctl.u.numainfo.num_nodes;
>
> - return 0;
> +out:
> + if (meminfo && distance) {
> + xc_hypercall_bounce_post(xch, meminfo);
> + xc_hypercall_bounce_post(xch, distance);
> + }
> +
> + return ret;
> }
>
>
> diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
> index 2b7d19c..ee97a54 100644
> --- a/tools/libxl/libxl.c
> +++ b/tools/libxl/libxl.c
> @@ -5094,62 +5094,44 @@ libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out)
> libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr)
> {
> GC_INIT(ctx);
> - xc_numainfo_t ninfo;
> - DECLARE_HYPERCALL_BUFFER(xen_sysctl_meminfo_t, meminfo);
> - DECLARE_HYPERCALL_BUFFER(uint32_t, distance);
> + xc_meminfo_t *meminfo;
> + uint32_t *distance;
> libxl_numainfo *ret = NULL;
> int i, j;
> + unsigned num_nodes;
>
> - set_xen_guest_handle(ninfo.meminfo, HYPERCALL_BUFFER_NULL);
> - set_xen_guest_handle(ninfo.distance, HYPERCALL_BUFFER_NULL);
> - if ( xc_numainfo(ctx->xch, &ninfo) != 0)
> - {
> - LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of NODES");
> - ret = NULL;
> + if (xc_numainfo(ctx->xch, &num_nodes, NULL, NULL)) {
I think for this one you need

    if ( xc_numa.. )
    {
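i.e., applied to the call quoted above, presumably:

    if ( xc_numainfo(ctx->xch, &num_nodes, NULL, NULL) )
    {
        LOGEV(ERROR, errno, "Unable to determine number of nodes");
        goto out;
    }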
> + LOGEV(ERROR, errno, "Unable to determine number of nodes");
> goto out;
> }
>
> - meminfo = xc_hypercall_buffer_alloc(ctx->xch, meminfo,
> - sizeof(*meminfo) * ninfo.num_nodes);
> - distance = xc_hypercall_buffer_alloc(ctx->xch, distance,
> - sizeof(*distance) *
> - ninfo.num_nodes * ninfo.num_nodes);
> - if ((meminfo == NULL) || (distance == NULL)) {
> - LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM,
> - "Unable to allocate hypercall arguments");
> - goto fail;
> - }
> + meminfo = libxl__zalloc(gc, sizeof(*meminfo) * num_nodes);
> + distance = libxl__zalloc(gc, sizeof(*distance) * num_nodes * num_nodes);
>
> - set_xen_guest_handle(ninfo.meminfo, meminfo);
> - set_xen_guest_handle(ninfo.distance, distance);
> - if (xc_numainfo(ctx->xch, &ninfo) != 0) {
> - LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting numainfo");
> - goto fail;
> + if (xc_numainfo(ctx->xch, &num_nodes, meminfo, distance)) {
> + LOGEV(ERROR, errno, "getting numainfo");
Ditto
> + goto out;
> }
>
> - *nr = ninfo.num_nodes;
> + *nr = num_nodes;
>
> - ret = libxl__zalloc(NOGC, sizeof(libxl_numainfo) * ninfo.num_nodes);
> - for (i = 0; i < ninfo.num_nodes; i++)
> - ret[i].dists = libxl__calloc(NOGC, ninfo.num_nodes, sizeof(*distance));
> + ret = libxl__zalloc(NOGC, sizeof(libxl_numainfo) * num_nodes);
> + for (i = 0; i < num_nodes; i++)
> + ret[i].dists = libxl__calloc(NOGC, num_nodes, sizeof(*distance));
>
> - for (i = 0; i < ninfo.num_nodes; i++) {
> + for (i = 0; i < num_nodes; i++) {
> #define V(val, invalid) (val == invalid) ? \
> LIBXL_NUMAINFO_INVALID_ENTRY : val
> ret[i].size = V(meminfo[i].memsize, XEN_INVALID_MEM_SZ);
> ret[i].free = V(meminfo[i].memfree, XEN_INVALID_MEM_SZ);
> - ret[i].num_dists = ninfo.num_nodes;
> + ret[i].num_dists = num_nodes;
> for (j = 0; j < ret[i].num_dists; j++) {
> - unsigned idx = i * ninfo.num_nodes + j;
> + unsigned idx = i * num_nodes + j;
> ret[i].dists[j] = V(distance[idx], XEN_INVALID_NODE_DIST);
> }
> #undef V
> }
>
> - fail:
> - xc_hypercall_buffer_free(ctx->xch, meminfo);
> - xc_hypercall_buffer_free(ctx->xch, distance);
> -
> out:
> GC_FREE;
> return ret;
> diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
> index 21ba57d..fbd93db 100644
> --- a/tools/python/xen/lowlevel/xc/xc.c
> +++ b/tools/python/xen/lowlevel/xc/xc.c
> @@ -1293,33 +1293,23 @@ out:
>
> static PyObject *pyxc_numainfo(XcObject *self)
> {
> - xc_numainfo_t ninfo = { 0 };
> - unsigned i, j;
> + unsigned i, j, num_nodes;
> uint64_t free_heap;
> PyObject *ret_obj = NULL, *node_to_node_dist_list_obj;
> PyObject *node_to_memsize_obj, *node_to_memfree_obj;
> PyObject *node_to_dma32_mem_obj, *node_to_node_dist_obj;
> - DECLARE_HYPERCALL_BUFFER(xen_sysctl_meminfo_t, meminfo);
> - DECLARE_HYPERCALL_BUFFER(uint32_t, distance);
> + xc_meminfo_t *meminfo = NULL;
> + uint32_t *distance = NULL;
>
> - set_xen_guest_handle(ninfo.meminfo, HYPERCALL_BUFFER_NULL);
> - set_xen_guest_handle(ninfo.distance, HYPERCALL_BUFFER_NULL);
> - if ( xc_numainfo(self->xc_handle, &ninfo) != 0 )
> + if ( xc_numainfo(self->xc_handle, &num_nodes, NULL, NULL) != 0 )
> goto out;
>
> - meminfo = xc_hypercall_buffer_alloc(self->xc_handle, meminfo,
> - sizeof(*meminfo) * ninfo.num_nodes);
> - if ( meminfo == NULL )
> - goto out;
> - distance = xc_hypercall_buffer_alloc(self->xc_handle, distance,
> - sizeof(*distance) *
> - ninfo.num_nodes * ninfo.num_nodes);
> - if ( distance == NULL )
> + meminfo = calloc(num_nodes, sizeof(*meminfo));
> + distance = calloc(num_nodes * num_nodes, sizeof(*distance));
> + if ( (meminfo == NULL) || (distance == NULL) )
> goto out;
>
> - set_xen_guest_handle(ninfo.meminfo, meminfo);
> - set_xen_guest_handle(ninfo.distance, distance);
> - if ( xc_numainfo(self->xc_handle, &ninfo) != 0 )
> + if ( xc_numainfo(self->xc_handle, &num_nodes, meminfo, distance) != 0 )
> goto out;
>
> /* Construct node-to-* lists. */
> @@ -1327,7 +1317,7 @@ static PyObject *pyxc_numainfo(XcObject *self)
> node_to_memfree_obj = PyList_New(0);
> node_to_dma32_mem_obj = PyList_New(0);
> node_to_node_dist_list_obj = PyList_New(0);
> - for ( i = 0; i < ninfo.num_nodes; i++ )
> + for ( i = 0; i < num_nodes; i++ )
> {
> PyObject *pyint;
> unsigned invalid_node;
> @@ -1351,9 +1341,9 @@ static PyObject *pyxc_numainfo(XcObject *self)
> /* Node to Node Distance */
> node_to_node_dist_obj = PyList_New(0);
> invalid_node = (meminfo[i].memsize == XEN_INVALID_MEM_SZ);
> - for ( j = 0; j < ninfo.num_nodes; j++ )
> + for ( j = 0; j < num_nodes; j++ )
> {
> - uint32_t dist = distance[i * ninfo.num_nodes + j];
> + uint32_t dist = distance[i * num_nodes + j];
> if ( invalid_node || (dist == XEN_INVALID_NODE_DIST) )
> {
> PyList_Append(node_to_node_dist_obj, Py_None);
> @@ -1369,7 +1359,7 @@ static PyObject *pyxc_numainfo(XcObject *self)
> Py_DECREF(node_to_node_dist_obj);
> }
>
> - ret_obj = Py_BuildValue("{s:i}", "max_node_index", ninfo.num_nodes + 1);
> + ret_obj = Py_BuildValue("{s:i}", "max_node_index", num_nodes + 1);
>
> PyDict_SetItemString(ret_obj, "node_memsize", node_to_memsize_obj);
> Py_DECREF(node_to_memsize_obj);
> @@ -1385,8 +1375,8 @@ static PyObject *pyxc_numainfo(XcObject *self)
> Py_DECREF(node_to_node_dist_list_obj);
>
> out:
> - xc_hypercall_buffer_free(self->xc_handle, meminfo);
> - xc_hypercall_buffer_free(self->xc_handle, distance);
> + free(meminfo);
> + free(distance);
> return ret_obj ? ret_obj : pyxc_error_to_exception(self->xc_handle);
> }
>
> --
> 1.7.1
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel