[Xen-devel] [PATCH v4 7/9] libxl/libxc: Move libxl_get_cpu_topology()'s hypercall buffer management to libxc
xc_cputopoinfo() is not expected to be used on a hot path and therefore
hypercall buffer management can be pushed into libxc. This will simplify
life for callers.
Also update error reporting macros.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
Changes in v4:
* Update commit message
* Make max_cpus argument in xc_cputopoinfo an unsigned
* Replace error logging macros
 tools/libxc/include/xenctrl.h     |  5 ++-
 tools/libxc/xc_misc.c             | 23 +++++++++++-----
 tools/libxl/libxl.c               | 33 ++++++++----------------
 tools/misc/xenpm.c                | 51 ++++++++++++++---------------------
 tools/python/xen/lowlevel/xc/xc.c | 16 +++--------
 5 files changed, 57 insertions(+), 71 deletions(-)
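[ For context, a minimal sketch of how a caller uses the reworked interface: the bounce-buffer handling now happens inside libxc, so the caller only needs an ordinary allocation. NR_CPUS, the error handling and the output formatting below are illustrative placeholders, not part of this patch. On entry *max_cpus is the capacity of the cputopo array; on return it holds the number of entries filled in. ]

#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

#define NR_CPUS 256   /* hypothetical capacity, analogous to xenpm's MAX_NR_CPU */

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    unsigned max_cpus = NR_CPUS;
    xc_cputopo_t *cputopo = calloc(max_cpus, sizeof(*cputopo));
    unsigned i;

    if ( xch == NULL || cputopo == NULL )
        return 1;

    /* In: capacity of cputopo[].  Out: number of entries filled. */
    if ( xc_cputopoinfo(xch, &max_cpus, cputopo) == 0 )
        for ( i = 0; i < max_cpus; i++ )
            printf("CPU%u: core %u, socket %u, node %u\n", i,
                   cputopo[i].core, cputopo[i].socket, cputopo[i].node);

    free(cputopo);
    xc_interface_close(xch);
    return 0;
}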
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index fef4cca..5550916 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1226,7 +1226,7 @@ int xc_readconsolering(xc_interface *xch,
int xc_send_debug_keys(xc_interface *xch, char *keys);
typedef xen_sysctl_physinfo_t xc_physinfo_t;
-typedef xen_sysctl_cputopoinfo_t xc_cputopoinfo_t;
+typedef xen_sysctl_cputopo_t xc_cputopo_t;
typedef xen_sysctl_numainfo_t xc_numainfo_t;
typedef uint32_t xc_cpu_to_node_t;
@@ -1237,7 +1237,8 @@ typedef uint64_t xc_node_to_memfree_t;
typedef uint32_t xc_node_to_node_dist_t;
int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
-int xc_cputopoinfo(xc_interface *xch, xc_cputopoinfo_t *info);
+int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
+ xc_cputopo_t *cputopo);
int xc_numainfo(xc_interface *xch, xc_numainfo_t *info);
int xc_sched_id(xc_interface *xch,
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index be68291..bc6eed2 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -177,22 +177,31 @@ int xc_physinfo(xc_interface *xch,
return 0;
}
-int xc_cputopoinfo(xc_interface *xch,
- xc_cputopoinfo_t *put_info)
+int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
+ xc_cputopo_t *cputopo)
{
int ret;
DECLARE_SYSCTL;
+ DECLARE_HYPERCALL_BOUNCE(cputopo, *max_cpus * sizeof(*cputopo),
+ XC_HYPERCALL_BUFFER_BOUNCE_OUT);
- sysctl.cmd = XEN_SYSCTL_cputopoinfo;
+ if ((ret = xc_hypercall_bounce_pre(xch, cputopo)))
+ goto out;
- memcpy(&sysctl.u.cputopoinfo, put_info, sizeof(*put_info));
+ sysctl.u.cputopoinfo.num_cpus = *max_cpus;
+ set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, cputopo);
+
+ sysctl.cmd = XEN_SYSCTL_cputopoinfo;
if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
- return ret;
+ goto out;
- memcpy(put_info, &sysctl.u.cputopoinfo, sizeof(*put_info));
+ *max_cpus = sysctl.u.cputopoinfo.num_cpus;
- return 0;
+out:
+ xc_hypercall_bounce_post(xch, cputopo);
+
+ return ret;
}
int xc_numainfo(xc_interface *xch,
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 8a6f979..9d7d9f5 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5036,37 +5036,29 @@ int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo)
libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out)
{
GC_INIT(ctx);
- xc_cputopoinfo_t tinfo;
- DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
+ xc_cputopo_t *cputopo;
libxl_cputopology *ret = NULL;
int i;
int max_cpus;
+ unsigned num_cpus;
max_cpus = libxl_get_max_cpus(ctx);
if (max_cpus < 0)
{
- LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of CPUS");
- ret = NULL;
+ LOG(ERROR, "Unable to determine number of CPUS");
goto out;
}
- cputopo = xc_hypercall_buffer_alloc(ctx->xch, cputopo,
- sizeof(*cputopo) * max_cpus);
- if (cputopo == NULL) {
- LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM,
- "Unable to allocate hypercall arguments");
- goto fail;
- }
- set_xen_guest_handle(tinfo.cputopo, cputopo);
- tinfo.num_cpus = max_cpus;
+ cputopo = libxl__zalloc(gc, sizeof(*cputopo) * max_cpus);
- if (xc_cputopoinfo(ctx->xch, &tinfo) != 0) {
- LIBXL__LOG_ERRNO(ctx, XTL_ERROR, "CPU topology info hypercall failed");
- goto fail;
+ num_cpus = max_cpus;
+ if (xc_cputopoinfo(ctx->xch, &num_cpus, cputopo) != 0) {
+ LOGE(ERROR, "CPU topology info hypercall failed");
+ goto out;
}
- if (tinfo.num_cpus < max_cpus)
- max_cpus = tinfo.num_cpus;
+ if (num_cpus < max_cpus)
+ max_cpus = num_cpus;
ret = libxl__zalloc(NOGC, sizeof(libxl_cputopology) * max_cpus);
@@ -5079,11 +5071,8 @@ libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out)
#undef V
}
- fail:
- xc_hypercall_buffer_free(ctx->xch, cputopo);
+ *nb_cpu_out = max_cpus;
- if (ret)
- *nb_cpu_out = max_cpus;
out:
GC_FREE;
return ret;
diff --git a/tools/misc/xenpm.c b/tools/misc/xenpm.c
index 23d6b63..122bee4 100644
--- a/tools/misc/xenpm.c
+++ b/tools/misc/xenpm.c
@@ -355,12 +355,11 @@ static void signal_int_handler(int signo)
int i, j, k;
struct timeval tv;
int cx_cap = 0, px_cap = 0;
- DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
- xc_cputopoinfo_t info = { 0 };
-
- cputopo = xc_hypercall_buffer_alloc(xc_handle, cputopo,
- sizeof(*cputopo) * MAX_NR_CPU);
+ xc_cputopo_t *cputopo;
+ unsigned max_cpus;
+ max_cpus = MAX_NR_CPU;
+ cputopo = calloc(max_cpus, sizeof(*cputopo));
if ( cputopo == NULL )
{
fprintf(stderr, "failed to allocate hypercall buffers\n");
@@ -445,29 +444,26 @@ static void signal_int_handler(int signo)
printf(" Avg freq\t%d\tKHz\n", avgfreq[i]);
}
- set_xen_guest_handle(info.cputopo, cputopo);
- info.num_cpus = MAX_NR_CPU;
-
- if ( cx_cap && !xc_cputopoinfo(xc_handle, &info) )
+ if ( cx_cap && !xc_cputopoinfo(xc_handle, &max_cpus, cputopo) )
{
uint32_t socket_ids[MAX_NR_CPU];
uint32_t core_ids[MAX_NR_CPU];
uint32_t socket_nr = 0;
uint32_t core_nr = 0;
- if ( info.num_cpus > MAX_NR_CPU )
- info.num_cpus = MAX_NR_CPU;
+ if ( max_cpus > MAX_NR_CPU )
+ max_cpus = MAX_NR_CPU;
/* check validity */
- for ( i = 0; i < info.num_cpus; i++ )
+ for ( i = 0; i < max_cpus; i++ )
{
if ( cputopo[i].core == XEN_INVALID_CORE_ID ||
cputopo[i].socket == XEN_INVALID_SOCKET_ID )
break;
}
- if ( i >= info.num_cpus )
+ if ( i >= max_cpus )
{
/* find socket nr & core nr per socket */
- for ( i = 0; i < info.num_cpus; i++ )
+ for ( i = 0; i < max_cpus; i++ )
{
for ( j = 0; j < socket_nr; j++ )
if ( cputopo[i].socket == socket_ids[j] )
@@ -494,7 +490,7 @@ static void signal_int_handler(int signo)
unsigned int n;
uint64_t res;
- for ( j = 0; j < info.num_cpus; j++ )
+ for ( j = 0; j < max_cpus; j++ )
{
if ( cputopo[j].socket == socket_ids[i] )
break;
@@ -513,7 +509,7 @@ static void signal_int_handler(int signo)
}
for ( k = 0; k < core_nr; k++ )
{
- for ( j = 0; j < info.num_cpus; j++ )
+ for ( j = 0; j < max_cpus; j++ )
{
if ( cputopo[j].socket == socket_ids[i] &&
cputopo[j].core == core_ids[k] )
@@ -551,7 +547,7 @@ static void signal_int_handler(int signo)
free(sum);
free(avgfreq);
out:
- xc_hypercall_buffer_free(xc_handle, cputopo);
+ free(cputopo);
xc_interface_close(xc_handle);
exit(0);
}
@@ -958,22 +954,19 @@ void scaling_governor_func(int argc, char *argv[])
void cpu_topology_func(int argc, char *argv[])
{
- DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
- xc_cputopoinfo_t info = { 0 };
+ xc_cputopo_t *cputopo;
+ unsigned max_cpus;
int i, rc = ENOMEM;
- cputopo = xc_hypercall_buffer_alloc(xc_handle, cputopo,
- sizeof(*cputopo) * MAX_NR_CPU);
+ max_cpus = MAX_NR_CPU;
+ cputopo = calloc(max_cpus, sizeof(*cputopo));
if ( cputopo == NULL )
{
fprintf(stderr, "failed to allocate hypercall buffers\n");
goto out;
}
- set_xen_guest_handle(info.cputopo, cputopo);
- info.num_cpus = MAX_NR_CPU;
-
- if ( xc_cputopoinfo(xc_handle, &info) )
+ if ( xc_cputopoinfo(xc_handle, &max_cpus, cputopo) )
{
rc = errno;
fprintf(stderr, "Cannot get Xen CPU topology (%d - %s)\n",
@@ -981,11 +974,11 @@ void cpu_topology_func(int argc, char *argv[])
goto out;
}
- if ( info.num_cpus > (MAX_NR_CPU) )
- info.num_cpus = MAX_NR_CPU;
+ if ( max_cpus > (MAX_NR_CPU) )
+ max_cpus = MAX_NR_CPU;
printf("CPU\tcore\tsocket\tnode\n");
- for ( i = 0; i < info.num_cpus; i++ )
+ for ( i = 0; i < max_cpus; i++ )
{
if ( cputopo[i].core == XEN_INVALID_CORE_ID )
continue;
@@ -994,7 +987,7 @@ void cpu_topology_func(int argc, char *argv[])
}
rc = 0;
out:
- xc_hypercall_buffer_free(xc_handle, cputopo);
+ free(cputopo);
if ( rc )
exit(rc);
}
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index edbaf60..20c4d39 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1221,25 +1221,19 @@ static PyObject *pyxc_getcpuinfo(XcObject *self, PyObject *args, PyObject *kwds)
static PyObject *pyxc_topologyinfo(XcObject *self)
{
#define MAX_CPUS 256
- xc_cputopoinfo_t tinfo = { 0 };
+ xc_cputopo_t *cputopo;
unsigned i, num_cpus;
PyObject *ret_obj = NULL;
PyObject *cpu_to_core_obj, *cpu_to_socket_obj, *cpu_to_node_obj;
- DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
-
- cputopo = xc_hypercall_buffer_alloc(self->xc_handle, cputopo,
- sizeof(*cputopo) * (MAX_CPUS));
+ num_cpus = MAX_CPUS;
+ cputopo = calloc(num_cpus, sizeof(*cputopo));
if ( cputopo == NULL )
goto out;
- set_xen_guest_handle(tinfo.cputopo, cputopo);
- tinfo.num_cpus = MAX_CPUS;
-
- if ( xc_cputopoinfo(self->xc_handle, &tinfo) != 0 )
+ if ( xc_cputopoinfo(self->xc_handle, &num_cpus, cputopo) != 0 )
goto out;
- num_cpus = tinfo.num_cpus;
if ( num_cpus > MAX_CPUS )
num_cpus = MAX_CPUS;
@@ -1295,7 +1289,7 @@ static PyObject *pyxc_topologyinfo(XcObject *self)
Py_DECREF(cpu_to_node_obj);
out:
- xc_hypercall_buffer_free(self->xc_handle, cputopo);
+ free(cputopo);
return ret_obj ? ret_obj : pyxc_error_to_exception(self->xc_handle);
#undef MAX_CPU_INDEX
}
--
1.7.1