[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 6 of 9] libxl: expose cpu topology as a single list of cpu->{node, core, socket} maps
# HG changeset patch # User Ian Campbell <ian.campbell@xxxxxxxxxx> # Date 1327512175 0 # Node ID e1753f37c9064f1e84fa32dfc37ff0f286e2df1e # Parent d09c5ab835ec6685822c04a140afa1620cca1138 libxl: expose cpu topology as a single list of cpu->{node,core,socket} maps. Rather than the previous tripple list which is more complicated to work with and harder for language bindings. Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx> diff --git a/tools/libxl/gentest.py b/tools/libxl/gentest.py --- a/tools/libxl/gentest.py +++ b/tools/libxl/gentest.py @@ -195,6 +195,7 @@ static void libxl_string_list_rand_init( *p = l; } +#if 0 static void libxl_cpuarray_rand_init(libxl_cpuarray *p) { int i; @@ -209,6 +210,7 @@ static void libxl_cpuarray_rand_init(lib p->array[i] = r; } } +#endif """) for ty in builtins + types: if ty.typename not in handcoded: diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c --- a/tools/libxl/libxl.c +++ b/tools/libxl/libxl.c @@ -2502,57 +2502,68 @@ int libxl_get_physinfo(libxl_ctx *ctx, l return 0; } -int libxl_get_topologyinfo(libxl_ctx *ctx, libxl_topologyinfo *info) +libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nr) { xc_topologyinfo_t tinfo; DECLARE_HYPERCALL_BUFFER(xc_cpu_to_core_t, coremap); DECLARE_HYPERCALL_BUFFER(xc_cpu_to_socket_t, socketmap); DECLARE_HYPERCALL_BUFFER(xc_cpu_to_node_t, nodemap); + libxl_cputopology *ret = NULL; int i; - int rc = 0; - - rc += libxl_cpuarray_alloc(ctx, &info->coremap); - rc += libxl_cpuarray_alloc(ctx, &info->socketmap); - rc += libxl_cpuarray_alloc(ctx, &info->nodemap); - if (rc) + int max_cpus; + + max_cpus = libxl_get_max_cpus(ctx); + if (max_cpus == 0) + { + LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of CPUS"); + return NULL; + } + + coremap = xc_hypercall_buffer_alloc + (ctx->xch, coremap, sizeof(*coremap) * max_cpus); + socketmap = xc_hypercall_buffer_alloc + (ctx->xch, socketmap, sizeof(*socketmap) * max_cpus); + nodemap = xc_hypercall_buffer_alloc + (ctx->xch, nodemap, 
sizeof(*nodemap) * max_cpus; + if ((coremap == NULL) || (socketmap == NULL) || (nodemap == NULL)) { + LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM, + "Unable to allocate hypercall arguments"); goto fail; - - coremap = xc_hypercall_buffer_alloc(ctx->xch, coremap, sizeof(*coremap) * info->coremap.entries); - socketmap = xc_hypercall_buffer_alloc(ctx->xch, socketmap, sizeof(*socketmap) * info->socketmap.entries); - nodemap = xc_hypercall_buffer_alloc(ctx->xch, nodemap, sizeof(*nodemap) * info->nodemap.entries); - if ((coremap == NULL) || (socketmap == NULL) || (nodemap == NULL)) - goto fail; + } set_xen_guest_handle(tinfo.cpu_to_core, coremap); set_xen_guest_handle(tinfo.cpu_to_socket, socketmap); set_xen_guest_handle(tinfo.cpu_to_node, nodemap); - tinfo.max_cpu_index = info->coremap.entries - 1; - if (xc_topologyinfo(ctx->xch, &tinfo) != 0) + tinfo.max_cpu_index = max_cpus - 1; + if (xc_topologyinfo(ctx->xch, &tinfo) != 0) { + LIBXL__LOG_ERRNO(ctx, XTL_ERROR, "Topology info hypercall failed"); goto fail; - - for (i = 0; i <= tinfo.max_cpu_index; i++) { - if (i < info->coremap.entries) - info->coremap.array[i] = (coremap[i] == INVALID_TOPOLOGY_ID) ? - LIBXL_CPUARRAY_INVALID_ENTRY : coremap[i]; - if (i < info->socketmap.entries) - info->socketmap.array[i] = (socketmap[i] == INVALID_TOPOLOGY_ID) ? - LIBXL_CPUARRAY_INVALID_ENTRY : socketmap[i]; - if (i < info->nodemap.entries) - info->nodemap.array[i] = (nodemap[i] == INVALID_TOPOLOGY_ID) ? - LIBXL_CPUARRAY_INVALID_ENTRY : nodemap[i]; } - xc_hypercall_buffer_free(ctx->xch, coremap); - xc_hypercall_buffer_free(ctx->xch, socketmap); - xc_hypercall_buffer_free(ctx->xch, nodemap); - return 0; + ret = malloc(sizeof(libxl_cputopology) * max_cpus); + if (ret == NULL) { + LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM, + "Unable to allocate return value"); + goto fail; + } + + for (i = 0; i < max_cpus; i++) { +#define V(map, i) (map[i] == INVALID_TOPOLOGY_ID) ? 
\ + LIBXL_CPUTOPOLOGY_INVALID_ENTRY : map[i] + ret[i].core = V(coremap, i); + ret[i].socket = V(socketmap, i); + ret[i].node = V(nodemap, i); +#undef V + } fail: xc_hypercall_buffer_free(ctx->xch, coremap); xc_hypercall_buffer_free(ctx->xch, socketmap); xc_hypercall_buffer_free(ctx->xch, nodemap); - libxl_topologyinfo_dispose(info); - return ERROR_FAIL; + + if (ret) + *nr = max_cpus; + return ret; } const libxl_version_info* libxl_get_version_info(libxl_ctx *ctx) @@ -3336,30 +3347,30 @@ int libxl_cpupool_cpuadd(libxl_ctx *ctx, int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus) { int rc = 0; - int cpu; + int cpu, nr; libxl_cpumap freemap; - libxl_topologyinfo topology; + libxl_cputopology *topology; if (libxl_get_freecpus(ctx, &freemap)) { return ERROR_FAIL; } - if (libxl_get_topologyinfo(ctx, &topology)) { + topology = libxl_get_cpu_topology(ctx, &nr); + if (!topology) { rc = ERROR_FAIL; goto out; } *cpus = 0; - for (cpu = 0; cpu < topology.nodemap.entries; cpu++) { - if (libxl_cpumap_test(&freemap, cpu) && - (topology.nodemap.array[cpu] == node) && + for (cpu = 0; cpu < nr; cpu++) { + if (libxl_cpumap_test(&freemap, cpu) && (topology[cpu].node == node) && !libxl_cpupool_cpuadd(ctx, poolid, cpu)) { (*cpus)++; } + libxl_cputopology_dispose(&topology[cpu]); } - libxl_topologyinfo_dispose(&topology); - + free(topology); out: libxl_cpumap_dispose(&freemap); return rc; @@ -3383,8 +3394,8 @@ int libxl_cpupool_cpuremove_node(libxl_c int ret = 0; int n_pools; int p; - int cpu; - libxl_topologyinfo topology; + int cpu, nr_cpus; + libxl_cputopology *topology; libxl_cpupoolinfo *poolinfo; poolinfo = libxl_list_cpupool(ctx, &n_pools); @@ -3392,7 +3403,8 @@ int libxl_cpupool_cpuremove_node(libxl_c return ERROR_NOMEM; } - if (libxl_get_topologyinfo(ctx, &topology)) { + topology = libxl_get_cpu_topology(ctx, &nr_cpus); + if (!topology) { ret = ERROR_FAIL; goto out; } @@ -3400,8 +3412,8 @@ int libxl_cpupool_cpuremove_node(libxl_c *cpus = 0; for (p 
= 0; p < n_pools; p++) { if (poolinfo[p].poolid == poolid) { - for (cpu = 0; cpu < topology.nodemap.entries; cpu++) { - if ((topology.nodemap.array[cpu] == node) && + for (cpu = 0; cpu < nr_cpus; cpu++) { + if ((topology[cpu].node == node) && libxl_cpumap_test(&poolinfo[p].cpumap, cpu) && !libxl_cpupool_cpuremove(ctx, poolid, cpu)) { (*cpus)++; @@ -3410,7 +3422,9 @@ int libxl_cpupool_cpuremove_node(libxl_c } } - libxl_topologyinfo_dispose(&topology); + for (cpu = 0; cpu < nr_cpus; cpu++) + libxl_cputopology_dispose(&topology[cpu]); + free(topology); out: for (p = 0; p < n_pools; p++) { diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h --- a/tools/libxl/libxl.h +++ b/tools/libxl/libxl.h @@ -548,7 +548,8 @@ int libxl_userdata_retrieve(libxl_ctx *c */ int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo); -int libxl_get_topologyinfo(libxl_ctx *ctx, libxl_topologyinfo *info); +#define LIBXL_CPUTOPOLOGY_INVALID_ENTRY ~0 +libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nr); libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid, int *nb_vcpu, int *nrcpus); int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl --- a/tools/libxl/libxl_types.idl +++ b/tools/libxl/libxl_types.idl @@ -377,10 +377,10 @@ libxl_physinfo = Struct("physinfo", [ ("phys_cap", uint32), ], dispose_fn=None, dir=DIR_OUT) -libxl_topologyinfo = Struct("topologyinfo", [ - ("coremap", libxl_cpuarray), # cpu to core map - ("socketmap", libxl_cpuarray), # cpu to socket map - ("nodemap", libxl_cpuarray), # cpu to node map +libxl_cputopology = Struct("cputopology", [ + ("core", uint32), + ("socket", uint32), + ("node", uint32), ]) libxl_sched_credit = Struct("sched_credit", [ diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c --- a/tools/libxl/xl_cmdimpl.c +++ b/tools/libxl/xl_cmdimpl.c @@ -3707,10 +3707,11 @@ static void output_physinfo(void) static void 
output_topologyinfo(void) { - libxl_topologyinfo info; - int i; - - if (libxl_get_topologyinfo(ctx, &info)) { + libxl_cputopology *info; + int i, nr; + + info = libxl_get_cpu_topology(ctx, &nr); + if (info == NULL) { fprintf(stderr, "libxl_get_topologyinfo failed.\n"); return; } @@ -3718,16 +3719,17 @@ static void output_topologyinfo(void) printf("cpu_topology :\n"); printf("cpu: core socket node\n"); - for (i = 0; i < info.coremap.entries; i++) { - if (info.coremap.array[i] != LIBXL_CPUARRAY_INVALID_ENTRY) - printf("%3d: %4d %4d %4d\n", i, info.coremap.array[i], - info.socketmap.array[i], info.nodemap.array[i]); - } + for (i = 0; i < nr; i++) { + if (info[i].core != LIBXL_CPUTOPOLOGY_INVALID_ENTRY) + printf("%3d: %4d %4d %4d\n", i, + info[i].core, info[i].socket, info[i].node); + libxl_cputopology_dispose(&info[i]); + } + + free(info); printf("numa_info : none\n"); - libxl_topologyinfo_dispose(&info); - return; } @@ -5160,7 +5162,7 @@ int main_cpupoolcreate(int argc, char ** libxl_cpumap freemap; libxl_cpumap cpumap; libxl_uuid uuid; - libxl_topologyinfo topology; + libxl_cputopology *topology; while (1) { opt = getopt_long(argc, argv, "hnf:", long_options, &option_index); @@ -5269,16 +5271,18 @@ int main_cpupoolcreate(int argc, char ** return -ERROR_FAIL; } if (!xlu_cfg_get_list(config, "nodes", &nodes, 0, 0)) { + int nr; n_cpus = 0; n_nodes = 0; - if (libxl_get_topologyinfo(ctx, &topology)) { + topology = libxl_get_cpu_topology(ctx, &nr); + if (topology == NULL) { fprintf(stderr, "libxl_get_topologyinfo failed\n"); return -ERROR_FAIL; } while ((buf = xlu_cfg_get_listitem(nodes, n_nodes)) != NULL) { n = atoi(buf); - for (i = 0; i < topology.nodemap.entries; i++) { - if ((topology.nodemap.array[i] == n) && + for (i = 0; i < nr; i++) { + if ((topology[i].node == n) && libxl_cpumap_test(&freemap, i)) { libxl_cpumap_set(&cpumap, i); n_cpus++; @@ -5287,7 +5291,10 @@ int main_cpupoolcreate(int argc, char ** n_nodes++; } - libxl_topologyinfo_dispose(&topology); + for (i 
= 0; i < nr; i++) + libxl_cputopology_dispose(&topology[i]); + + free(topology); if (n_cpus == 0) { fprintf(stderr, "no free cpu found\n"); @@ -5609,11 +5616,12 @@ int main_cpupoolnumasplit(int argc, char int schedid; int n_pools; int node; + int n_cpus; char name[16]; libxl_uuid uuid; libxl_cpumap cpumap; libxl_cpupoolinfo *poolinfo; - libxl_topologyinfo topology; + libxl_cputopology *topology; libxl_dominfo info; if ((opt = def_getopt(argc, argv, "", "cpupool-numa-split", 0)) != -1) @@ -5635,21 +5643,24 @@ int main_cpupoolnumasplit(int argc, char return -ERROR_FAIL; } - if (libxl_get_topologyinfo(ctx, &topology)) { + topology = libxl_get_cpu_topology(ctx, &n_cpus); + if (topology == NULL) { fprintf(stderr, "libxl_get_topologyinfo failed\n"); return -ERROR_FAIL; } if (libxl_cpumap_alloc(ctx, &cpumap)) { fprintf(stderr, "Failed to allocate cpumap\n"); - libxl_topologyinfo_dispose(&topology); + for (c=0; c<n_cpus; c++) + libxl_cputopology_dispose(&topology[c]); + free(topology); return -ERROR_FAIL; } /* Reset Pool-0 to 1st node: first add cpus, then remove cpus to avoid a cpupool without cpus in between */ - node = topology.nodemap.array[0]; + node = topology[0].node; if (libxl_cpupool_cpuadd_node(ctx, 0, node, &n)) { fprintf(stderr, "error on adding cpu to Pool 0\n"); return -ERROR_FAIL; @@ -5663,9 +5674,9 @@ int main_cpupoolnumasplit(int argc, char } n = 0; - for (c = 0; c < topology.nodemap.entries; c++) { - if (topology.nodemap.array[c] == node) { - topology.nodemap.array[c] = LIBXL_CPUARRAY_INVALID_ENTRY; + for (c = 0; c < n_cpus; c++) { + if (topology[c].node == node) { + topology[c].node = LIBXL_CPUTOPOLOGY_INVALID_ENTRY; libxl_cpumap_set(&cpumap, n); n++; } @@ -5690,12 +5701,12 @@ int main_cpupoolnumasplit(int argc, char } memset(cpumap.map, 0, cpumap.size); - for (c = 0; c < topology.nodemap.entries; c++) { - if (topology.nodemap.array[c] == LIBXL_CPUARRAY_INVALID_ENTRY) { + for (c = 0; c < n_cpus; c++) { + if (topology[c].node == 
LIBXL_CPUTOPOLOGY_INVALID_ENTRY) { continue; } - node = topology.nodemap.array[c]; + node = topology[c].node; ret = -libxl_cpupool_cpuremove_node(ctx, 0, node, &n); if (ret) { fprintf(stderr, "error on removing cpu from Pool 0\n"); @@ -5717,15 +5728,17 @@ int main_cpupoolnumasplit(int argc, char goto out; } - for (p = c; p < topology.nodemap.entries; p++) { - if (topology.nodemap.array[p] == node) { - topology.nodemap.array[p] = LIBXL_CPUARRAY_INVALID_ENTRY; + for (p = c; p < n_cpus; p++) { + if (topology[p].node == node) { + topology[p].node = LIBXL_CPUTOPOLOGY_INVALID_ENTRY; } } } out: - libxl_topologyinfo_dispose(&topology); + for (c=0; c<n_cpus; c++) + libxl_cputopology_dispose(&topology[c]); + free(topology); libxl_cpumap_dispose(&cpumap); return ret; diff --git a/tools/ocaml/libs/xl/genwrap.py b/tools/ocaml/libs/xl/genwrap.py --- a/tools/ocaml/libs/xl/genwrap.py +++ b/tools/ocaml/libs/xl/genwrap.py @@ -29,6 +29,8 @@ functions = { # ( name , [type1,type2,.. "device_pci": DEVICE_FUNCTIONS, "physinfo": [ ("get", ["unit", "t"]), ], + "cputopology": [ ("get", ["unit", "t array"]), + ], "sched_credit": [ ("domain_get", ["domid", "t"]), ("domain_set", ["domid", "t", "unit"]), ], @@ -259,7 +261,6 @@ if __name__ == '__main__': "domain_create_info", "domain_build_info", "vcpuinfo", - "topologyinfo", ] for t in blacklist: diff --git a/tools/ocaml/libs/xl/xenlight.ml.in b/tools/ocaml/libs/xl/xenlight.ml.in --- a/tools/ocaml/libs/xl/xenlight.ml.in +++ b/tools/ocaml/libs/xl/xenlight.ml.in @@ -19,17 +19,6 @@ type domid = int (* @@LIBXL_TYPES@@ *) -module Topologyinfo = struct - type t = - { - core : int; - socket : int; - node : int; - } - external get : unit -> t array = "stub_xl_topologyinfo" -end - - external send_trigger : domid -> trigger -> int -> unit = "stub_xl_send_trigger" external send_sysrq : domid -> char -> unit = "stub_xl_send_sysrq" external send_debug_keys : domid -> string -> unit = "stub_xl_send_debug_keys" diff --git a/tools/ocaml/libs/xl/xenlight.mli.in 
b/tools/ocaml/libs/xl/xenlight.mli.in --- a/tools/ocaml/libs/xl/xenlight.mli.in +++ b/tools/ocaml/libs/xl/xenlight.mli.in @@ -19,16 +19,6 @@ type domid = int (* @@LIBXL_TYPES@@ *) -module Topologyinfo : sig - type t = - { - core : int; - socket : int; - node : int; - } - external get : unit -> t array = "stub_xl_topologyinfo" -end - external send_trigger : domid -> trigger -> int -> unit = "stub_xl_send_trigger" external send_sysrq : domid -> char -> unit = "stub_xl_send_sysrq" external send_debug_keys : domid -> string -> unit = "stub_xl_send_debug_keys" diff --git a/tools/ocaml/libs/xl/xenlight_stubs.c b/tools/ocaml/libs/xl/xenlight_stubs.c --- a/tools/ocaml/libs/xl/xenlight_stubs.c +++ b/tools/ocaml/libs/xl/xenlight_stubs.c @@ -210,28 +210,6 @@ static value Val_hwcap(libxl_hwcap *c_va #include "_libxl_types.inc" -static value Val_topologyinfo(libxl_topologyinfo *c_val) -{ - CAMLparam0(); - CAMLlocal3(v, topology, topologyinfo); - int i; - - topologyinfo = caml_alloc_tuple(c_val->coremap.entries); - for (i = 0; i < c_val->coremap.entries; i++) { - v = Val_none; - if (c_val->coremap.array[i] != LIBXL_CPUARRAY_INVALID_ENTRY) { - topology = caml_alloc_tuple(3); - Store_field(topology, 0, Val_int(c_val->coremap.array[i])); - Store_field(topology, 1, Val_int(c_val->socketmap.array[i])); - Store_field(topology, 2, Val_int(c_val->nodemap.array[i])); - v = Val_some(topology); - } - Store_field(topologyinfo, i, v); - } - - CAMLreturn(topologyinfo); -} - value stub_xl_device_disk_add(value info, value domid) { CAMLparam2(info, domid); @@ -462,22 +440,34 @@ value stub_xl_physinfo_get(value unit) CAMLreturn(physinfo); } -value stub_xl_topologyinfo(value unit) +value stub_xl_cputopology_get(value unit) { CAMLparam1(unit); - CAMLlocal1(topologyinfo); - libxl_topologyinfo c_topologyinfo; - int ret; + CAMLlocal2(topology, v); + libxl_cputopology *c_topology; + int i, nr, ret; INIT_STRUCT(); INIT_CTX(); - ret = libxl_get_topologyinfo(ctx, &c_topologyinfo); + + c_topology = 
libxl_get_cpu_topology(ctx, &nr); if (c_topology == NULL) failwith_xl("topologyinfo", &lg); + + topology = caml_alloc_tuple(nr); + for (i = 0; i < nr; i++) { + if (c_topology[i].core != LIBXL_CPUTOPOLOGY_INVALID_ENTRY) + v = Val_some(Val_cputopology(&gc, &lg, &c_topology[i])); + else + v = Val_none; + Store_field(topology, i, v); + libxl_cputopology_dispose(&c_topology[i]); + } + + free(c_topology); + FREE_CTX(); - - topologyinfo = Val_topologyinfo(&c_topologyinfo); - CAMLreturn(topologyinfo); + CAMLreturn(topology); } value stub_xl_sched_credit_domain_get(value domid) _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |