[Xen-devel] [PATCH] get virtualization capabilities
Hi,

Here is a patch to expose the virtualization capabilities to userland through
xm info.

Signed-off-by: Jean Guyader <jean.guyader@xxxxxxxxxxxxx>

--
Jean Guyader

diff -r ed67f68ae2a7 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Mar 27 09:12:09 2008 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Apr 01 14:43:34 2008 +0100
@@ -767,6 +767,8 @@ static PyObject *pyxc_physinfo(XcObject
     uint64_t free_heap;
     PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
     xc_cpu_to_node_t map[MAX_CPU_ID + 1];
+    char virt_cap[128], *p_virt = virt_cap;
+    const char *virt_capsname[] = { "hvm", "iommu" };
 
     set_xen_guest_handle(info.cpu_to_node, map);
     info.max_cpu_id = MAX_CPU_ID;
@@ -784,7 +786,14 @@ static PyObject *pyxc_physinfo(XcObject
     if ( q > cpu_cap )
         *(q-1) = 0;
 
-    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
+    *p_virt = 0;
+    for ( i = 0; i < 2; i++ )
+        if ( (info.virt_cap >> i) & 1 )
+            p_virt += sprintf(p_virt, "%s ", virt_capsname[i]);
+    if ( p_virt != virt_cap )
+        *(p_virt - 1) = 0;
+
+    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s,s:s}",
                             "nr_nodes",         info.nr_nodes,
                             "max_cpu_id",       info.max_cpu_id,
                             "threads_per_core", info.threads_per_core,
@@ -794,7 +803,8 @@ static PyObject *pyxc_physinfo(XcObject
                             "free_memory",      pages_to_kib(info.free_pages),
                             "scrub_memory",     pages_to_kib(info.scrub_pages),
                             "cpu_khz",          info.cpu_khz,
-                            "hw_caps",          cpu_cap);
+                            "hw_caps",          cpu_cap,
+                            "virt_caps",        virt_cap);
 
     max_cpu_id = info.max_cpu_id;
     if ( max_cpu_id > MAX_CPU_ID )
diff -r ed67f68ae2a7 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Thu Mar 27 09:12:09 2008 +0000
+++ b/tools/python/xen/xend/XendNode.py Tue Apr 01 14:43:34 2008 +0100
@@ -92,6 +92,7 @@ class XendNode:
         physinfo = self.physinfo_dict()
         cpu_count = physinfo['nr_cpus']
         cpu_features = physinfo['hw_caps']
+        virt_caps = physinfo['virt_caps']
 
         # If the number of CPUs don't match, we should just reinitialise
         # the CPU UUIDs.
@@ -112,6 +113,7 @@ class XendNode:
                     self.cpus[u].update(
                         { 'host'     : self.uuid,
                           'features' : cpu_features,
+                          'virt_caps': virt_caps,
                           'speed'    : int(float(cpuinfo[number]['cpu MHz'])),
                           'vendor'   : cpuinfo[number]['vendor_id'],
                           'modelname': cpuinfo[number]['model name'],
@@ -605,6 +607,7 @@ class XendNode:
                       'threads_per_core',
                       'cpu_mhz',
                       'hw_caps',
+                      'virt_caps',
                       'total_memory',
                       'free_memory',
                       'node_to_cpu',
diff -r ed67f68ae2a7 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c Thu Mar 27 09:12:09 2008 +0000
+++ b/xen/arch/x86/sysctl.c Tue Apr 01 14:43:34 2008 +0100
@@ -29,6 +29,8 @@
 
 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
 
+extern int hvm_enabled;
+
 long arch_do_sysctl(
     struct xen_sysctl *sysctl, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 {
@@ -59,6 +61,8 @@ long arch_do_sysctl(
         pi->cpu_khz = cpu_khz;
         memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
         memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+        pi->virt_cap = hvm_enabled << XEN_SYSCTL_PHYSINFO_HVM_ENABLED;
+        pi->virt_cap |= iommu_enabled << XEN_SYSCTL_PHYSINFO_IOMMU_ENABLED;
 
         max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
diff -r ed67f68ae2a7 xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h Thu Mar 27 09:12:09 2008 +0000
+++ b/xen/include/public/sysctl.h Tue Apr 01 14:43:34 2008 +0100
@@ -35,6 +35,9 @@
 #include "domctl.h"
 
 #define XEN_SYSCTL_INTERFACE_VERSION 0x00000006
+
+#define XEN_SYSCTL_PHYSINFO_HVM_ENABLED   0
+#define XEN_SYSCTL_PHYSINFO_IOMMU_ENABLED 1
 
 /*
  * Read console content from Xen buffer ring.
@@ -95,6 +98,12 @@ struct xen_sysctl_physinfo {
     uint64_aligned_t free_pages;
     uint64_aligned_t scrub_pages;
     uint32_t hw_cap[8];
+    /*
+     * Flags describing the virtualization capabilities of the host:
+     *   virt_cap bit 0 -> hvm enabled
+     *   virt_cap bit 1 -> iommu enabled
+     */
+    uint32_t virt_cap;
 
     /* IN/OUT variables. */
     /*
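For reference, the decoding done in pyxc_physinfo() above can be sketched in
Python as follows. This is only an illustration of the bit-to-string mapping
introduced by the patch, not part of it; the constant values mirror
XEN_SYSCTL_PHYSINFO_HVM_ENABLED and XEN_SYSCTL_PHYSINFO_IOMMU_ENABLED, and the
helper name decode_virt_cap is made up for the example.

# Illustrative sketch (not part of the patch): mirrors the loop in
# pyxc_physinfo() that turns the virt_cap bit field into the
# space-separated 'virt_caps' string.
XEN_SYSCTL_PHYSINFO_HVM_ENABLED   = 0   # bit 0 -> "hvm"
XEN_SYSCTL_PHYSINFO_IOMMU_ENABLED = 1   # bit 1 -> "iommu"

VIRT_CAP_NAMES = ["hvm", "iommu"]

def decode_virt_cap(virt_cap):
    """Return the capability string for a virt_cap value, e.g. 3 -> "hvm iommu"."""
    return " ".join(name for bit, name in enumerate(VIRT_CAP_NAMES)
                    if (virt_cap >> bit) & 1)

assert decode_virt_cap(0) == ""
assert decode_virt_cap(1) == "hvm"
assert decode_virt_cap(3) == "hvm iommu"

An empty string therefore means that neither HVM nor an IOMMU is enabled on
the host.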
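On the consumer side, a rough sketch of how userland could read the new field
through the patched bindings (assuming the updated xen.lowlevel.xc module is
installed in dom0; the key name 'virt_caps' comes from the Py_BuildValue call
above):

# Minimal sketch, assuming the patched xen.lowlevel.xc bindings are installed.
import xen.lowlevel.xc

physinfo = xen.lowlevel.xc.xc().physinfo()

# 'virt_caps' is the space-separated string built by the patch,
# e.g. "hvm", "hvm iommu", or "" when neither capability is enabled.
caps = physinfo.get('virt_caps', '').split()

print("hvm:   %s" % ('hvm' in caps))
print("iommu: %s" % ('iommu' in caps))

With the XendNode.py hunks applied, the same string should also show up as a
virt_caps line in the output of xm info.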