[Xen-devel] [PATCH v4 3/9] sysctl: Make XEN_SYSCTL_topologyinfo sysctl a little more efficient
Instead of copying data for each field in xen_sysctl_topologyinfo
separately, put cpu/socket/node into a single structure and do a single
copy for each processor.
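As an illustrative sketch (mirroring the hunks below, with the per-CPU
field lookups elided), the consolidated record lets the hypervisor do one
guest copy per processor instead of three:

    struct xen_sysctl_cputopo {
        uint32_t core;
        uint32_t socket;
        uint8_t  node;
    };

    for ( i = 0; i < num_cpus; i++ )
    {
        xen_sysctl_cputopo_t cputopo;

        /* ... fill cputopo.core/socket/node for CPU i ... */
        if ( copy_to_guest_offset(ti->cputopo, i, &cputopo, 1) )
        {
            ret = -EFAULT;
            break;
        }
    }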
Do not use max_cpu_index, which is almost always used for calculating the
number of CPUs (thus requiring adding or subtracting one); replace it with
num_cpus.
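For example, the loop idiom throughout the tools changes from index-based
to count-based:

    /* Before: max_cpu_index is the largest valid index. */
    for ( i = 0; i <= info.max_cpu_index; i++ )
        ...

    /* After: num_cpus is a count, so the conventional bound applies. */
    for ( i = 0; i < info.num_cpus; i++ )
        ...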
There is no need to copy the whole op back to the user at the end of the
sysctl; we only need num_cpus.
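Only that one field is written back, as in the sysctl hunk below:

    if ( __copy_field_to_guest(u_sysctl, op, u.cputopoinfo.num_cpus) )
    {
        ret = -EFAULT;
        break;
    }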
Rename xen_sysctl_topologyinfo and XEN_SYSCTL_topologyinfo to reflect the
fact that these are used for CPU topology. A subsequent patch will add
support for a PCI topology sysctl.
Replace INVALID_TOPOLOGY_ID with "XEN_"-prefixed macros, one for each
invalid ID type (core, socket, node).
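Callers then test per-field sentinels, as the xenpm changes below do:

    if ( cputopo[i].core == XEN_INVALID_CORE_ID )
        continue; /* CPU not present, no topology information */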
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
Changes in v4:
* Split v3's patch into two --- one for CPU topology and one for NUMA info
* Replaced max_cpu_index with num_cpus to avoid always adding/subtracting one
* Replaced INVALID_TOPOLOGY_ID with separate macros for core/socket/node
* No buffer allocation in sysctl, copy data cpu-by-cpu
* Check for BAD_APICID/NUMA_NO_NODE in sysctl
 tools/libxc/include/xenctrl.h     |  4 +-
 tools/libxc/xc_misc.c             | 10 ++--
 tools/libxl/libxl.c               | 46 +++++++-----------
 tools/misc/xenpm.c                | 93 +++++++++++++++---------------------
 tools/python/xen/lowlevel/xc/xc.c | 60 ++++++++++--------------
 xen/common/sysctl.c               | 66 +++++++++++++++++---------
 xen/include/public/sysctl.h       | 48 +++++++++++--------
 7 files changed, 160 insertions(+), 167 deletions(-)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 09d819f..fef4cca 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1226,7 +1226,7 @@ int xc_readconsolering(xc_interface *xch,
int xc_send_debug_keys(xc_interface *xch, char *keys);
typedef xen_sysctl_physinfo_t xc_physinfo_t;
-typedef xen_sysctl_topologyinfo_t xc_topologyinfo_t;
+typedef xen_sysctl_cputopoinfo_t xc_cputopoinfo_t;
typedef xen_sysctl_numainfo_t xc_numainfo_t;
typedef uint32_t xc_cpu_to_node_t;
@@ -1237,7 +1237,7 @@ typedef uint64_t xc_node_to_memfree_t;
typedef uint32_t xc_node_to_node_dist_t;
int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
-int xc_topologyinfo(xc_interface *xch, xc_topologyinfo_t *info);
+int xc_cputopoinfo(xc_interface *xch, xc_cputopoinfo_t *info);
int xc_numainfo(xc_interface *xch, xc_numainfo_t *info);
int xc_sched_id(xc_interface *xch,
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index e253a58..be68291 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -177,20 +177,20 @@ int xc_physinfo(xc_interface *xch,
return 0;
}
-int xc_topologyinfo(xc_interface *xch,
- xc_topologyinfo_t *put_info)
+int xc_cputopoinfo(xc_interface *xch,
+ xc_cputopoinfo_t *put_info)
{
int ret;
DECLARE_SYSCTL;
- sysctl.cmd = XEN_SYSCTL_topologyinfo;
+ sysctl.cmd = XEN_SYSCTL_cputopoinfo;
- memcpy(&sysctl.u.topologyinfo, put_info, sizeof(*put_info));
+ memcpy(&sysctl.u.cputopoinfo, put_info, sizeof(*put_info));
if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
return ret;
- memcpy(put_info, &sysctl.u.topologyinfo, sizeof(*put_info));
+ memcpy(put_info, &sysctl.u.cputopoinfo, sizeof(*put_info));
return 0;
}
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index a68f6ef..6660133 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5036,10 +5036,8 @@ int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo)
libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out)
{
GC_INIT(ctx);
- xc_topologyinfo_t tinfo;
- DECLARE_HYPERCALL_BUFFER(xc_cpu_to_core_t, coremap);
- DECLARE_HYPERCALL_BUFFER(xc_cpu_to_socket_t, socketmap);
- DECLARE_HYPERCALL_BUFFER(xc_cpu_to_node_t, nodemap);
+ xc_cputopoinfo_t tinfo;
+ DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
libxl_cputopology *ret = NULL;
int i;
int max_cpus;
@@ -5052,45 +5050,37 @@ libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out)
goto out;
}
- coremap = xc_hypercall_buffer_alloc
- (ctx->xch, coremap, sizeof(*coremap) * max_cpus);
- socketmap = xc_hypercall_buffer_alloc
- (ctx->xch, socketmap, sizeof(*socketmap) * max_cpus);
- nodemap = xc_hypercall_buffer_alloc
- (ctx->xch, nodemap, sizeof(*nodemap) * max_cpus);
- if ((coremap == NULL) || (socketmap == NULL) || (nodemap == NULL)) {
+ cputopo = xc_hypercall_buffer_alloc(ctx->xch, cputopo,
+ sizeof(*cputopo) * max_cpus);
+ if (cputopo == NULL) {
LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM,
"Unable to allocate hypercall arguments");
goto fail;
}
+ set_xen_guest_handle(tinfo.cputopo, cputopo);
+ tinfo.num_cpus = max_cpus;
- set_xen_guest_handle(tinfo.cpu_to_core, coremap);
- set_xen_guest_handle(tinfo.cpu_to_socket, socketmap);
- set_xen_guest_handle(tinfo.cpu_to_node, nodemap);
- tinfo.max_cpu_index = max_cpus - 1;
- if (xc_topologyinfo(ctx->xch, &tinfo) != 0) {
- LIBXL__LOG_ERRNO(ctx, XTL_ERROR, "Topology info hypercall failed");
+ if (xc_cputopoinfo(ctx->xch, &tinfo) != 0) {
+ LIBXL__LOG_ERRNO(ctx, XTL_ERROR, "CPU topology info hypercall failed");
goto fail;
}
- if (tinfo.max_cpu_index < max_cpus - 1)
- max_cpus = tinfo.max_cpu_index + 1;
+ if (tinfo.num_cpus < max_cpus)
+ max_cpus = tinfo.num_cpus;
ret = libxl__zalloc(NOGC, sizeof(libxl_cputopology) * max_cpus);
for (i = 0; i < max_cpus; i++) {
-#define V(map, i) (map[i] == INVALID_TOPOLOGY_ID) ? \
- LIBXL_CPUTOPOLOGY_INVALID_ENTRY : map[i]
- ret[i].core = V(coremap, i);
- ret[i].socket = V(socketmap, i);
- ret[i].node = V(nodemap, i);
+#define V(map, i, invalid) ( cputopo[i].map == invalid) ? \
+ LIBXL_CPUTOPOLOGY_INVALID_ENTRY : cputopo[i].map
+ ret[i].core = V(core, i, XEN_INVALID_CORE_ID);
+ ret[i].socket = V(socket, i, XEN_INVALID_SOCKET_ID);
+ ret[i].node = V(node, i, XEN_INVALID_NODE_ID);
#undef V
}
-fail:
- xc_hypercall_buffer_free(ctx->xch, coremap);
- xc_hypercall_buffer_free(ctx->xch, socketmap);
- xc_hypercall_buffer_free(ctx->xch, nodemap);
+ fail:
+ xc_hypercall_buffer_free(ctx->xch, cputopo);
if (ret)
*nb_cpu_out = max_cpus;
diff --git a/tools/misc/xenpm.c b/tools/misc/xenpm.c
index e43924c..23d6b63 100644
--- a/tools/misc/xenpm.c
+++ b/tools/misc/xenpm.c
@@ -355,16 +355,13 @@ static void signal_int_handler(int signo)
int i, j, k;
struct timeval tv;
int cx_cap = 0, px_cap = 0;
- DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_core);
- DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_socket);
- DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_node);
- xc_topologyinfo_t info = { 0 };
+ DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
+ xc_cputopoinfo_t info = { 0 };
- cpu_to_core = xc_hypercall_buffer_alloc(xc_handle, cpu_to_core, sizeof(*cpu_to_core) * MAX_NR_CPU);
- cpu_to_socket = xc_hypercall_buffer_alloc(xc_handle, cpu_to_socket, sizeof(*cpu_to_socket) * MAX_NR_CPU);
- cpu_to_node = xc_hypercall_buffer_alloc(xc_handle, cpu_to_node, sizeof(*cpu_to_node) * MAX_NR_CPU);
+ cputopo = xc_hypercall_buffer_alloc(xc_handle, cputopo,
+ sizeof(*cputopo) * MAX_NR_CPU);
- if ( cpu_to_core == NULL || cpu_to_socket == NULL || cpu_to_node == NULL )
+ if ( cputopo == NULL )
{
fprintf(stderr, "failed to allocate hypercall buffers\n");
goto out;
@@ -448,47 +445,45 @@ static void signal_int_handler(int signo)
printf(" Avg freq\t%d\tKHz\n", avgfreq[i]);
}
- set_xen_guest_handle(info.cpu_to_core, cpu_to_core);
- set_xen_guest_handle(info.cpu_to_socket, cpu_to_socket);
- set_xen_guest_handle(info.cpu_to_node, cpu_to_node);
- info.max_cpu_index = MAX_NR_CPU - 1;
+ set_xen_guest_handle(info.cputopo, cputopo);
+ info.num_cpus = MAX_NR_CPU;
- if ( cx_cap && !xc_topologyinfo(xc_handle, &info) )
+ if ( cx_cap && !xc_cputopoinfo(xc_handle, &info) )
{
uint32_t socket_ids[MAX_NR_CPU];
uint32_t core_ids[MAX_NR_CPU];
uint32_t socket_nr = 0;
uint32_t core_nr = 0;
- if ( info.max_cpu_index > MAX_NR_CPU - 1 )
- info.max_cpu_index = MAX_NR_CPU - 1;
+ if ( info.num_cpus > MAX_NR_CPU )
+ info.num_cpus = MAX_NR_CPU;
/* check validity */
- for ( i = 0; i <= info.max_cpu_index; i++ )
+ for ( i = 0; i < info.num_cpus; i++ )
{
- if ( cpu_to_core[i] == INVALID_TOPOLOGY_ID ||
- cpu_to_socket[i] == INVALID_TOPOLOGY_ID )
+ if ( cputopo[i].core == XEN_INVALID_CORE_ID ||
+ cputopo[i].socket == XEN_INVALID_SOCKET_ID )
break;
}
- if ( i > info.max_cpu_index )
+ if ( i >= info.num_cpus )
{
/* find socket nr & core nr per socket */
- for ( i = 0; i <= info.max_cpu_index; i++ )
+ for ( i = 0; i < info.num_cpus; i++ )
{
for ( j = 0; j < socket_nr; j++ )
- if ( cpu_to_socket[i] == socket_ids[j] )
+ if ( cputopo[i].socket == socket_ids[j] )
break;
if ( j == socket_nr )
{
- socket_ids[j] = cpu_to_socket[i];
+ socket_ids[j] = cputopo[i].socket;
socket_nr++;
}
for ( j = 0; j < core_nr; j++ )
- if ( cpu_to_core[i] == core_ids[j] )
+ if ( cputopo[i].core == core_ids[j] )
break;
if ( j == core_nr )
{
- core_ids[j] = cpu_to_core[i];
+ core_ids[j] = cputopo[i].core;
core_nr++;
}
}
@@ -499,9 +494,9 @@ static void signal_int_handler(int signo)
unsigned int n;
uint64_t res;
- for ( j = 0; j <= info.max_cpu_index; j++ )
+ for ( j = 0; j < info.num_cpus; j++ )
{
- if ( cpu_to_socket[j] == socket_ids[i] )
+ if ( cputopo[j].socket == socket_ids[i] )
break;
}
printf("\nSocket %d\n", socket_ids[i]);
@@ -518,10 +513,10 @@ static void signal_int_handler(int signo)
}
for ( k = 0; k < core_nr; k++ )
{
- for ( j = 0; j <= info.max_cpu_index; j++ )
+ for ( j = 0; j < info.num_cpus; j++ )
{
- if ( cpu_to_socket[j] == socket_ids[i] &&
- cpu_to_core[j] == core_ids[k] )
+ if ( cputopo[j].socket == socket_ids[i] &&
+ cputopo[j].core == core_ids[k] )
break;
}
printf("\t Core %d CPU %d\n", core_ids[k], j);
@@ -556,9 +551,7 @@ static void signal_int_handler(int signo)
free(sum);
free(avgfreq);
out:
- xc_hypercall_buffer_free(xc_handle, cpu_to_core);
- xc_hypercall_buffer_free(xc_handle, cpu_to_socket);
- xc_hypercall_buffer_free(xc_handle, cpu_to_node);
+ xc_hypercall_buffer_free(xc_handle, cputopo);
xc_interface_close(xc_handle);
exit(0);
}
@@ -965,28 +958,22 @@ void scaling_governor_func(int argc, char *argv[])
void cpu_topology_func(int argc, char *argv[])
{
- DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_core);
- DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_socket);
- DECLARE_HYPERCALL_BUFFER(uint32_t, cpu_to_node);
- xc_topologyinfo_t info = { 0 };
+ DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
+ xc_cputopoinfo_t info = { 0 };
int i, rc = ENOMEM;
- cpu_to_core = xc_hypercall_buffer_alloc(xc_handle, cpu_to_core, sizeof(*cpu_to_core) * MAX_NR_CPU);
- cpu_to_socket = xc_hypercall_buffer_alloc(xc_handle, cpu_to_socket, sizeof(*cpu_to_socket) * MAX_NR_CPU);
- cpu_to_node = xc_hypercall_buffer_alloc(xc_handle, cpu_to_node, sizeof(*cpu_to_node) * MAX_NR_CPU);
-
- if ( cpu_to_core == NULL || cpu_to_socket == NULL || cpu_to_node == NULL )
+ cputopo = xc_hypercall_buffer_alloc(xc_handle, cputopo,
+ sizeof(*cputopo) * MAX_NR_CPU);
+ if ( cputopo == NULL )
{
fprintf(stderr, "failed to allocate hypercall buffers\n");
goto out;
}
- set_xen_guest_handle(info.cpu_to_core, cpu_to_core);
- set_xen_guest_handle(info.cpu_to_socket, cpu_to_socket);
- set_xen_guest_handle(info.cpu_to_node, cpu_to_node);
- info.max_cpu_index = MAX_NR_CPU-1;
+ set_xen_guest_handle(info.cputopo, cputopo);
+ info.num_cpus = MAX_NR_CPU;
- if ( xc_topologyinfo(xc_handle, &info) )
+ if ( xc_cputopoinfo(xc_handle, &info) )
{
rc = errno;
fprintf(stderr, "Cannot get Xen CPU topology (%d - %s)\n",
@@ -994,22 +981,20 @@ void cpu_topology_func(int argc, char *argv[])
goto out;
}
- if ( info.max_cpu_index > (MAX_NR_CPU-1) )
- info.max_cpu_index = MAX_NR_CPU-1;
+ if ( info.num_cpus > (MAX_NR_CPU) )
+ info.num_cpus = MAX_NR_CPU;
printf("CPU\tcore\tsocket\tnode\n");
- for ( i = 0; i <= info.max_cpu_index; i++ )
+ for ( i = 0; i < info.num_cpus; i++ )
{
- if ( cpu_to_core[i] == INVALID_TOPOLOGY_ID )
+ if ( cputopo[i].core == XEN_INVALID_CORE_ID )
continue;
printf("CPU%d\t %d\t %d\t %d\n",
- i, cpu_to_core[i], cpu_to_socket[i], cpu_to_node[i]);
+ i, cputopo[i].core, cputopo[i].socket, cputopo[i].node);
}
rc = 0;
out:
- xc_hypercall_buffer_free(xc_handle, cpu_to_core);
- xc_hypercall_buffer_free(xc_handle, cpu_to_socket);
- xc_hypercall_buffer_free(xc_handle, cpu_to_node);
+ xc_hypercall_buffer_free(xc_handle, cputopo);
if ( rc )
exit(rc);
}
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 2aa0dc7..2fd93e0 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1220,78 +1220,70 @@ static PyObject *pyxc_getcpuinfo(XcObject *self, PyObject *args, PyObject *kwds)
static PyObject *pyxc_topologyinfo(XcObject *self)
{
-#define MAX_CPU_INDEX 255
- xc_topologyinfo_t tinfo = { 0 };
- int i, max_cpu_index;
+#define MAX_CPUS 256
+ xc_cputopoinfo_t tinfo = { 0 };
+ unsigned i, num_cpus;
PyObject *ret_obj = NULL;
PyObject *cpu_to_core_obj, *cpu_to_socket_obj, *cpu_to_node_obj;
- DECLARE_HYPERCALL_BUFFER(xc_cpu_to_core_t, coremap);
- DECLARE_HYPERCALL_BUFFER(xc_cpu_to_socket_t, socketmap);
- DECLARE_HYPERCALL_BUFFER(xc_cpu_to_node_t, nodemap);
- coremap = xc_hypercall_buffer_alloc(self->xc_handle, coremap, sizeof(*coremap) * (MAX_CPU_INDEX+1));
- if ( coremap == NULL )
- goto out;
- socketmap = xc_hypercall_buffer_alloc(self->xc_handle, socketmap, sizeof(*socketmap) * (MAX_CPU_INDEX+1));
- if ( socketmap == NULL )
- goto out;
- nodemap = xc_hypercall_buffer_alloc(self->xc_handle, nodemap, sizeof(*nodemap) * (MAX_CPU_INDEX+1));
- if ( nodemap == NULL )
- goto out;
+ DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo);
+
+ cputopo = xc_hypercall_buffer_alloc(self->xc_handle, cputopo,
+ sizeof(*cputopo) * (MAX_CPUS));
+ if ( cputopo == NULL )
+ goto out;
- set_xen_guest_handle(tinfo.cpu_to_core, coremap);
- set_xen_guest_handle(tinfo.cpu_to_socket, socketmap);
- set_xen_guest_handle(tinfo.cpu_to_node, nodemap);
- tinfo.max_cpu_index = MAX_CPU_INDEX;
+ set_xen_guest_handle(tinfo.cputopo, cputopo);
+ tinfo.num_cpus = MAX_CPUS;
- if ( xc_topologyinfo(self->xc_handle, &tinfo) != 0 )
+ if ( xc_cputopoinfo(self->xc_handle, &tinfo) != 0 )
goto out;
- max_cpu_index = tinfo.max_cpu_index;
- if ( max_cpu_index > MAX_CPU_INDEX )
- max_cpu_index = MAX_CPU_INDEX;
+ num_cpus = tinfo.num_cpus;
+ if ( num_cpus > MAX_CPUS )
+ num_cpus = MAX_CPUS;
/* Construct cpu-to-* lists. */
cpu_to_core_obj = PyList_New(0);
cpu_to_socket_obj = PyList_New(0);
cpu_to_node_obj = PyList_New(0);
- for ( i = 0; i <= max_cpu_index; i++ )
+ for ( i = 0; i < num_cpus; i++ )
{
- if ( coremap[i] == INVALID_TOPOLOGY_ID )
+ if ( cputopo[i].core == XEN_INVALID_CORE_ID )
{
PyList_Append(cpu_to_core_obj, Py_None);
}
else
{
- PyObject *pyint = PyInt_FromLong(coremap[i]);
+ PyObject *pyint = PyInt_FromLong(cputopo[i].core);
PyList_Append(cpu_to_core_obj, pyint);
Py_DECREF(pyint);
}
- if ( socketmap[i] == INVALID_TOPOLOGY_ID )
+ if ( cputopo[i].socket == XEN_INVALID_SOCKET_ID )
{
PyList_Append(cpu_to_socket_obj, Py_None);
}
else
{
- PyObject *pyint = PyInt_FromLong(socketmap[i]);
+ PyObject *pyint = PyInt_FromLong(cputopo[i].socket);
PyList_Append(cpu_to_socket_obj, pyint);
Py_DECREF(pyint);
}
- if ( nodemap[i] == INVALID_TOPOLOGY_ID )
+ if ( cputopo[i].node == XEN_INVALID_NODE_ID )
{
PyList_Append(cpu_to_node_obj, Py_None);
}
else
{
- PyObject *pyint = PyInt_FromLong(nodemap[i]);
+ PyObject *pyint = PyInt_FromLong(cputopo[i].node);
PyList_Append(cpu_to_node_obj, pyint);
Py_DECREF(pyint);
}
}
- ret_obj = Py_BuildValue("{s:i}", "max_cpu_index", max_cpu_index);
+ ret_obj = Py_BuildValue("{s:i}", "max_cpu_index", num_cpus - 1);
PyDict_SetItemString(ret_obj, "cpu_to_core", cpu_to_core_obj);
Py_DECREF(cpu_to_core_obj);
@@ -1303,9 +1295,7 @@ static PyObject *pyxc_topologyinfo(XcObject *self)
Py_DECREF(cpu_to_node_obj);
out:
- xc_hypercall_buffer_free(self->xc_handle, coremap);
- xc_hypercall_buffer_free(self->xc_handle, socketmap);
- xc_hypercall_buffer_free(self->xc_handle, nodemap);
+ xc_hypercall_buffer_free(self->xc_handle, cputopo);
return ret_obj ? ret_obj : pyxc_error_to_exception(self->xc_handle);
-#undef MAX_CPU_INDEX
+#undef MAX_CPUS
}
@@ -1375,7 +1365,7 @@ static PyObject *pyxc_numainfo(XcObject *self)
for ( j = 0; j <= max_node_index; j++ )
{
uint32_t dist = nodes_dist[i*(max_node_index+1) + j];
- if ( dist == INVALID_TOPOLOGY_ID )
+ if ( dist == ~0u )
{
PyList_Append(node_to_node_dist_obj, Py_None);
}
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 0cb6ee1..fe48ee8 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -320,39 +320,61 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
}
break;
- case XEN_SYSCTL_topologyinfo:
+ case XEN_SYSCTL_cputopoinfo:
{
- uint32_t i, max_cpu_index, last_online_cpu;
- xen_sysctl_topologyinfo_t *ti = &op->u.topologyinfo;
+ uint32_t i, num_cpus;
+ xen_sysctl_cputopoinfo_t *ti = &op->u.cputopoinfo;
- last_online_cpu = cpumask_last(&cpu_online_map);
- max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
- ti->max_cpu_index = last_online_cpu;
+ if ( guest_handle_is_null(ti->cputopo) )
+ {
+ ret = -EINVAL;
+ break;
+ }
- for ( i = 0; i <= max_cpu_index; i++ )
+ num_cpus = cpumask_last(&cpu_online_map) + 1;
+ if ( ti->num_cpus != num_cpus )
{
- if ( !guest_handle_is_null(ti->cpu_to_core) )
+ uint32_t array_sz = ti->num_cpus;
+
+ ti->num_cpus = num_cpus;
+ if ( __copy_field_to_guest(u_sysctl, op,
+ u.cputopoinfo.num_cpus) )
{
- uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
- if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
- break;
+ ret = -EFAULT;
+ break;
+ }
+ num_cpus = min_t(uint32_t, array_sz, num_cpus);
+ }
+
+ for ( i = 0; i < num_cpus; i++ )
+ {
+ xen_sysctl_cputopo_t cputopo;
+
+ if ( cpu_present(i) )
+ {
+ cputopo.core = cpu_to_core(i);
+ if ( cputopo.core == BAD_APICID )
+ cputopo.core = XEN_INVALID_CORE_ID;
+ cputopo.socket = cpu_to_socket(i);
+ if ( cputopo.socket == BAD_APICID )
+ cputopo.socket = XEN_INVALID_SOCKET_ID;
+ cputopo.node = cpu_to_node(i);
+ if ( cputopo.node == NUMA_NO_NODE )
+ cputopo.node = XEN_INVALID_NODE_ID;
}
- if ( !guest_handle_is_null(ti->cpu_to_socket) )
+ else
{
- uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
- if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
- break;
+ cputopo.core = XEN_INVALID_CORE_ID;
+ cputopo.socket = XEN_INVALID_SOCKET_ID;
+ cputopo.node = XEN_INVALID_NODE_ID;
}
- if ( !guest_handle_is_null(ti->cpu_to_node) )
+
+ if ( copy_to_guest_offset(ti->cputopo, i, &cputopo, 1) )
{
- uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
- if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
- break;
+ ret = -EFAULT;
+ break;
}
}
-
- ret = ((i <= max_cpu_index) || copy_to_guest(u_sysctl, op, 1))
- ? -EFAULT : 0;
}
break;
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 8552dc6..f20da69 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -462,31 +462,37 @@ struct xen_sysctl_lockprof_op {
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
-/* XEN_SYSCTL_topologyinfo */
-#define INVALID_TOPOLOGY_ID (~0U)
-struct xen_sysctl_topologyinfo {
+/* XEN_SYSCTL_cputopoinfo */
+#define XEN_INVALID_CORE_ID (~0U)
+#define XEN_INVALID_SOCKET_ID (~0U)
+#define XEN_INVALID_NODE_ID ((uint8_t)~0)
+
+struct xen_sysctl_cputopo {
+ uint32_t core;
+ uint32_t socket;
+ uint8_t node;
+};
+typedef struct xen_sysctl_cputopo xen_sysctl_cputopo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopo_t);
+
+struct xen_sysctl_cputopoinfo {
/*
- * IN: maximum addressable entry in the caller-provided arrays.
- * OUT: largest cpu identifier in the system.
- * If OUT is greater than IN then the arrays are truncated!
- * If OUT is leass than IN then the array tails are not written by sysctl.
+ * IN: size of caller-provided cputopo array.
+ * OUT: Number of CPUs in the system.
*/
- uint32_t max_cpu_index;
+ uint32_t num_cpus;
/*
- * If not NULL, these arrays are filled with core/socket/node identifier
- * for each cpu.
- * If a cpu has no core/socket/node information (e.g., cpu not present)
- * then the sentinel value ~0u is written to each array.
- * The number of array elements written by the sysctl is:
- * min(@max_cpu_index_IN,@max_cpu_index_OUT)+1
+ * If not NULL, filled with core/socket/node identifier for each cpu.
+ * If information for a particular entry is not available, it is set to
+ * XEN_INVALID_<xxx>_ID.
+ * The number of array elements for CPU topology written by the sysctl is:
+ * min(@num_cpus_IN,@num_cpus_OUT).
*/
- XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+ XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
};
-typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
+typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
/* XEN_SYSCTL_numainfo */
#define INVALID_NUMAINFO_ID (~0U)
@@ -672,7 +678,7 @@ struct xen_sysctl {
#define XEN_SYSCTL_pm_op 12
#define XEN_SYSCTL_page_offline_op 14
#define XEN_SYSCTL_lockprof_op 15
-#define XEN_SYSCTL_topologyinfo 16
+#define XEN_SYSCTL_cputopoinfo 16
#define XEN_SYSCTL_numainfo 17
#define XEN_SYSCTL_cpupool_op 18
#define XEN_SYSCTL_scheduler_op 19
@@ -683,7 +689,7 @@ struct xen_sysctl {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_tbuf_op tbuf_op;
struct xen_sysctl_physinfo physinfo;
- struct xen_sysctl_topologyinfo topologyinfo;
+ struct xen_sysctl_cputopoinfo cputopoinfo;
struct xen_sysctl_numainfo numainfo;
struct xen_sysctl_sched_id sched_id;
struct xen_sysctl_perfc_op perfc_op;
--
1.7.1