[Xen-devel] [PATCH RFC] x86/sysctl: Implement XEN_SYSCTL_get_cpuid_policy
Provide a SYSCTL for the toolstack to obtain complete system cpuid policy
information. The CPUID data is serialised as an array of 6x 32-bit integers
per leaf (leaf, subleaf, eax, ebx, ecx, edx), and a mechanism is provided to
query the maximum number of entries Xen might write.
For the XSM side of things, this subop is closely related to
{phys,cputopo,numa}info, so shares the physinfo access vector.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Partly RFC, to get a feel for the serialised format. With a suitably extended
./xen-cpuid, the raw and host policies for one of my testboxes are included
below.
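Each line in the dumps is leaf:subleaf -> eax:ebx:ecx:edx, with a subleaf of
ffffffff denoting XEN_CPUID_NO_SUBLEAF. As a quick illustration of the format
(a standalone sketch, not part of this patch), the three vendor registers of
leaf 0 decode to the usual vendor string on a little-endian host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* ebx, edx, ecx of leaf 0 in the dumps below. */
        const uint32_t vendor[] = { 0x756e6547, 0x49656e69, 0x6c65746e };
        char name[13];

        memcpy(name, vendor, sizeof(vendor));
        name[12] = '\0';
        printf("%s\n", name); /* Prints "GenuineIntel". */

        return 0;
    }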
[root@fusebot ~]# LD_LIBRARY_PATH=/root: ./xen-cpuid -p
Raw policy:
00000000:ffffffff -> 0000000d:756e6547:6c65746e:49656e69
00000001:ffffffff -> 000306c3:00100800:7ffafbff:bfebfbff
00000002:ffffffff -> 76036301:00f0b5ff:00000000:00c10000
00000004:00000000 -> 1c004121:01c0003f:0000003f:00000000
00000004:00000001 -> 1c004122:01c0003f:0000003f:00000000
00000004:00000002 -> 1c004143:01c0003f:000001ff:00000000
00000004:00000003 -> 1c03c163:03c0003f:00001fff:00000006
00000005:ffffffff -> 00000040:00000040:00000003:00042120
00000006:ffffffff -> 00000077:00000002:00000009:00000000
00000007:00000000 -> 00000000:000027ab:00000000:00000000
0000000a:ffffffff -> 07300403:00000000:00000000:00000603
0000000b:ffffffff -> 00000001:00000002:00000100:00000000
0000000d:00000000 -> 00000007:00000340:00000340:00000000
0000000d:00000001 -> 00000001:00000000:00000000:00000000
0000000d:00000002 -> 00000100:00000240:00000000:00000000
80000000:ffffffff -> 80000008:00000000:00000000:00000000
80000001:ffffffff -> 00000000:00000000:00000021:2c100800
80000002:ffffffff -> 65746e49:2952286c:6f655820:2952286e
80000003:ffffffff -> 55504320:2d334520:30343231:20337620
80000004:ffffffff -> 2e332040:48473034:0000007a:00000000
80000006:ffffffff -> 00000000:00000000:01006040:00000000
80000007:ffffffff -> 00000000:00000000:00000000:00000100
80000008:ffffffff -> 00003027:00000000:00000000:00000000
Host policy:
00000000:ffffffff -> 0000000d:756e6547:6c65746e:49656e69
00000001:ffffffff -> 000306c3:00100800:77faf3ff:bfebfbff
00000002:ffffffff -> 76036301:00f0b5ff:00000000:00c10000
00000004:00000000 -> 1c004121:01c0003f:0000003f:00000000
00000004:00000001 -> 1c004122:01c0003f:0000003f:00000000
00000004:00000002 -> 1c004143:01c0003f:000001ff:00000000
00000004:00000003 -> 1c03c163:03c0003f:00001fff:00000006
00000007:00000000 -> 00000000:000027ab:00000000:00000000
0000000a:ffffffff -> 07300403:00000000:00000000:00000603
0000000d:00000000 -> 00000007:00000000:00000340:00000000
0000000d:00000001 -> 00000001:00000000:00000000:00000000
0000000d:00000002 -> 00000100:00000240:00000000:00000000
80000000:ffffffff -> 80000008:00000000:00000000:00000000
80000001:ffffffff -> 00000000:00000000:00000021:2c100800
80000002:ffffffff -> 65746e49:2952286c:6f655820:2952286e
80000003:ffffffff -> 55504320:2d334520:30343231:20337620
80000004:ffffffff -> 2e332040:48473034:0000007a:00000000
80000006:ffffffff -> 00000000:00000000:01006040:00000000
80000007:ffffffff -> 00000000:00000000:00000000:00000100
80000008:ffffffff -> 00003027:00000000:00000000:00000000
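For reference, the two-step calling pattern the new wrapper is designed around
looks roughly as follows. This is an illustrative sketch rather than part of
the patch; dump_policy() is an invented helper, and it assumes the wrapper
passes a NULL buffer through as a NULL guest handle, which is what triggers
the sysctl's "maximum number of leaves" query path:

    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    static void dump_policy(xc_interface *xch, uint32_t index,
                            const char *name)
    {
        uint32_t nr = 0, i;
        xen_cpuid_leaf_t *leaves;

        /* First call: NULL buffer => Xen reports the maximum leaf count. */
        if ( xc_get_system_cpuid_policy(xch, index, &nr, NULL) )
            err(1, "Failed to obtain maximum number of leaves");

        if ( (leaves = calloc(nr, sizeof(*leaves))) == NULL )
            err(1, "calloc");

        /* Second call: fetch the policy; nr becomes the number written. */
        if ( xc_get_system_cpuid_policy(xch, index, &nr, leaves) )
            err(1, "Failed to obtain '%s' policy", name);

        printf("%s policy:\n", name);
        for ( i = 0; i < nr; ++i )
            printf("%08x:%08x -> %08x:%08x:%08x:%08x\n",
                   leaves[i].leaf, leaves[i].subleaf,
                   leaves[i].a, leaves[i].b, leaves[i].c, leaves[i].d);

        free(leaves);
    }

Calling dump_policy() with XEN_SYSCTL_cpuid_policy_raw and then
XEN_SYSCTL_cpuid_policy_host produces output of the shape above.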
---
tools/libxc/include/xenctrl.h | 2 +
tools/libxc/xc_cpuid_x86.c | 27 ++++++++++++
xen/arch/x86/cpuid.c | 88 +++++++++++++++++++++++++++++++++++++
xen/arch/x86/sysctl.c | 36 +++++++++++++++
xen/include/asm-x86/cpuid.h | 17 +++++++
xen/include/public/domctl.h | 8 ++++
xen/include/public/sysctl.h | 26 +++++++++++
xen/xsm/flask/hooks.c | 1 +
xen/xsm/flask/policy/access_vectors | 2 +-
9 files changed, 206 insertions(+), 1 deletion(-)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index bde8313..809699f 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2485,6 +2485,8 @@ int xc_psr_cat_get_l3_info(xc_interface *xch, uint32_t socket,
int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
uint32_t *nr_features, uint32_t *featureset);
+int xc_get_system_cpuid_policy(xc_interface *xch, uint32_t index,
+ uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves);
uint32_t xc_get_cpu_featureset_size(void);
diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index d890935..4a80a85 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -83,6 +83,33 @@ int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
return ret;
}
+int xc_get_system_cpuid_policy(xc_interface *xch, uint32_t index,
+ uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves)
+{
+ DECLARE_SYSCTL;
+ DECLARE_HYPERCALL_BOUNCE(leaves,
+ *nr_leaves * sizeof(*leaves),
+ XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ int ret;
+
+ if ( xc_hypercall_bounce_pre(xch, leaves) )
+ return -1;
+
+ sysctl.cmd = XEN_SYSCTL_get_cpuid_policy;
+ sysctl.u.cpuid_policy.index = index;
+ sysctl.u.cpuid_policy.nr_leaves = *nr_leaves;
+ set_xen_guest_handle(sysctl.u.cpuid_policy.policy, leaves);
+
+ ret = do_sysctl(xch, &sysctl);
+
+ xc_hypercall_bounce_post(xch, leaves);
+
+ if ( !ret )
+ *nr_leaves = sysctl.u.cpuid_policy.nr_leaves;
+
+ return ret;
+}
+
uint32_t xc_get_cpu_featureset_size(void)
{
return FEATURESET_NR_ENTRIES;
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 90f125e..000bf24 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -2,6 +2,7 @@
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/cpuid.h>
+#include <asm/guest_access.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/svm/svm.h>
@@ -599,6 +600,93 @@ int init_domain_cpuid_policy(struct domain *d)
return 0;
}
+/*
+ * Copy a single cpuid_leaf into a guest-provided xen_cpuid_leaf_t buffer,
+ * performing boundary checking against the guest's array size.
+ */
+static int copy_leaf_to_guest(uint32_t leaf, uint32_t subleaf,
+ const struct cpuid_leaf *data,
+ XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) leaves,
+ uint32_t *curr_leaf, uint32_t nr_leaves)
+{
+ const xen_cpuid_leaf_t val =
+ { leaf, subleaf, data->a, data->b, data->c, data->d };
+
+ if ( *curr_leaf == nr_leaves )
+ return -ENOBUFS;
+
+ if ( copy_to_guest_offset(leaves, (*curr_leaf)++, &val, 1) )
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Serialise a cpuid_policy object into a guest-provided array. Writes at
+ * most CPUID_MAX_SERIALISED_LEAVES, but elides leaves which are entirely
+ * empty. Returns -ENOBUFS if the guest array is too short. On success,
+ * nr_leaves_p is updated with the actual number of leaves written.
+ */
+int copy_cpuid_policy_to_guest(const struct cpuid_policy *p,
+ XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) leaves,
+ uint32_t *nr_leaves_p)
+{
+ uint32_t nr_leaves = *nr_leaves_p, curr_leaf = 0, leaf, subleaf;
+
+ if ( nr_leaves == 0 )
+ return -ENOBUFS;
+
+#define COPY_LEAF(l, s, data) \
+ ({ int ret; /* Elide leaves which are fully empty. */ \
+ if ( (*(uint64_t *)(&(data)->a) | \
+ *(uint64_t *)(&(data)->c)) && \
+ (ret = copy_leaf_to_guest( \
+ l, s, data, leaves, &curr_leaf, nr_leaves)) ) \
+ return ret; \
+ })
+
+ /* Basic leaves. */
+ for ( leaf = 0; leaf <= min(p->basic.max_leaf + 0ul,
+ ARRAY_SIZE(p->basic.raw) - 1); ++leaf )
+ {
+ switch ( leaf )
+ {
+ case 4:
+ for ( subleaf = 0; subleaf < ARRAY_SIZE(p->cache.raw); ++subleaf )
+ COPY_LEAF(leaf, subleaf, &p->cache.raw[subleaf]);
+ break;
+
+ case 7:
+ for ( subleaf = 0;
+ subleaf <= min(p->feat.max_subleaf + 0ul,
+ ARRAY_SIZE(p->feat.raw) - 1); ++subleaf )
+ COPY_LEAF(leaf, subleaf, &p->feat.raw[subleaf]);
+ break;
+
+ case XSTATE_CPUID:
+ for ( subleaf = 0;
+ subleaf <= min(63ul,
+ ARRAY_SIZE(p->xstate.raw) - 1); ++subleaf )
+ COPY_LEAF(leaf, subleaf, &p->xstate.raw[subleaf]);
+ break;
+
+ default:
+ COPY_LEAF(leaf, XEN_CPUID_NO_SUBLEAF, &p->basic.raw[leaf]);
+ break;
+ }
+ }
+
+ /* Extended leaves. */
+ for ( leaf = 0; leaf <= min(p->extd.max_leaf & 0xfffful,
+ ARRAY_SIZE(p->extd.raw) - 1); ++leaf )
+ COPY_LEAF(leaf | 0x80000000, XEN_CPUID_NO_SUBLEAF, &p->extd.raw[leaf]);
+
+#undef COPY_LEAF
+
+ *nr_leaves_p = curr_leaf;
+ return 0;
+}
+
void guest_cpuid(const struct vcpu *v, uint32_t leaf,
uint32_t subleaf, struct cpuid_leaf *res)
{
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 7c294be..cf50698 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -250,6 +250,42 @@ long arch_do_sysctl(
break;
}
+ case XEN_SYSCTL_get_cpuid_policy:
+ {
+ static const struct cpuid_policy *const policy_table[] = {
+ [XEN_SYSCTL_cpuid_policy_raw] = &raw_cpuid_policy,
+ [XEN_SYSCTL_cpuid_policy_host] = &host_cpuid_policy,
+ };
+ const struct cpuid_policy *p = NULL;
+
+ /* Request for maximum number of leaves? */
+ if ( guest_handle_is_null(sysctl->u.cpuid_policy.policy) )
+ {
+ sysctl->u.cpuid_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES;
+ if ( __copy_field_to_guest(u_sysctl, sysctl,
+ u.cpuid_policy.nr_leaves) )
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Look up requested policy. */
+ if ( sysctl->u.cpuid_policy.index < ARRAY_SIZE(policy_table) )
+ p = policy_table[sysctl->u.cpuid_policy.index];
+
+ /* Bad policy index? */
+ if ( !p )
+ ret = -EINVAL;
+ else
+ ret = copy_cpuid_policy_to_guest(p, sysctl->u.cpuid_policy.policy,
+ &sysctl->u.cpuid_policy.nr_leaves);
+
+ /* Inform the caller of how many leaves we wrote. */
+ if ( !ret )
+ ret = __copy_field_to_guest(u_sysctl, sysctl,
+ u.cpuid_policy.nr_leaves);
+ break;
+ }
+
default:
ret = -ENOSYS;
break;
diff --git a/xen/include/asm-x86/cpuid.h b/xen/include/asm-x86/cpuid.h
index d2dd841..2fb06c1 100644
--- a/xen/include/asm-x86/cpuid.h
+++ b/xen/include/asm-x86/cpuid.h
@@ -70,6 +70,18 @@ DECLARE_PER_CPU(bool, cpuid_faulting_enabled);
#define CPUID_GUEST_NR_EXTD MAX(CPUID_GUEST_NR_EXTD_INTEL, \
CPUID_GUEST_NR_EXTD_AMD)
+/*
+ * Maximum number of leaves a struct cpuid_policy turns into when serialised
+ * for interaction with the toolstack. (Sum of all leaves in each union, less
+ * the entries in basic which sub-unions hang off of.)
+ */
+#define CPUID_MAX_SERIALISED_LEAVES \
+ (CPUID_GUEST_NR_BASIC + \
+ CPUID_GUEST_NR_FEAT - !!CPUID_GUEST_NR_FEAT + \
+ CPUID_GUEST_NR_CACHE - !!CPUID_GUEST_NR_CACHE + \
+ CPUID_GUEST_NR_XSTATE - !!CPUID_GUEST_NR_XSTATE + \
+ CPUID_GUEST_NR_EXTD)
+
struct cpuid_policy
{
#define DECL_BITFIELD(word) _DECL_BITFIELD(FEATURESET_ ## word)
@@ -265,6 +277,11 @@ void recalculate_cpuid_policy(struct domain *d);
void guest_cpuid(const struct vcpu *v, uint32_t leaf,
uint32_t subleaf, struct cpuid_leaf *res);
+/* Serialise a cpuid policy and copy it to guest context. */
+int copy_cpuid_policy_to_guest(const struct cpuid_policy *policy,
+ XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) leaves,
+ uint32_t *nr_leaves);
+
#endif /* __ASSEMBLY__ */
#endif /* !__X86_CPUID_H__ */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index ff39762..75a9d1d 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -692,6 +692,14 @@ struct xen_domctl_cpuid {
};
typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
+
+#define XEN_CPUID_NO_SUBLEAF 0xffffffffu
+struct xen_cpuid_leaf {
+ uint32_t leaf, subleaf;
+ uint32_t a, b, c, d;
+};
+typedef struct xen_cpuid_leaf xen_cpuid_leaf_t;
+DEFINE_XEN_GUEST_HANDLE(xen_cpuid_leaf_t);
#endif
/*
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index ee76a66..641e397 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -1095,6 +1095,28 @@ struct xen_sysctl_livepatch_op {
typedef struct xen_sysctl_livepatch_op xen_sysctl_livepatch_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_op_t);
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * XEN_SYSCTL_get_cpuid_policy (x86 specific)
+ *
+ * Return information about CPUID policies available on this host.
+ * - Raw: The real cpuid values.
+ * - Host: The values Xen is using, (after command line overrides, etc).
+ */
+struct xen_sysctl_cpuid_policy {
+#define XEN_SYSCTL_cpuid_policy_raw 0
+#define XEN_SYSCTL_cpuid_policy_host 1
+ uint32_t index; /* IN: Which policy to query? */
+ uint32_t nr_leaves; /* IN/OUT: Number of leaves in/written to
+ * 'policy', or the maximum number of leaves if
+ * the guest handle is NULL. NB. All policies
+ * come from the same space, so have the same
+ * maximum length. */
+ XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) policy; /* OUT: */
+};
+typedef struct xen_sysctl_cpuid_policy xen_sysctl_cpuid_policy_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuid_policy_t);
+#endif
+
struct xen_sysctl {
uint32_t cmd;
#define XEN_SYSCTL_readconsole 1
@@ -1123,6 +1145,7 @@ struct xen_sysctl {
#define XEN_SYSCTL_get_cpu_levelling_caps 25
#define XEN_SYSCTL_get_cpu_featureset 26
#define XEN_SYSCTL_livepatch_op 27
+#define XEN_SYSCTL_get_cpuid_policy 28
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
@@ -1151,6 +1174,9 @@ struct xen_sysctl {
struct xen_sysctl_cpu_levelling_caps cpu_levelling_caps;
struct xen_sysctl_cpu_featureset cpu_featureset;
struct xen_sysctl_livepatch_op livepatch;
+#if defined(__i386__) || defined(__x86_64__)
+ struct xen_sysctl_cpuid_policy cpuid_policy;
+#endif
uint8_t pad[128];
} u;
};
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index fd84ac0..07399df 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -801,6 +801,7 @@ static int flask_sysctl(int cmd)
case XEN_SYSCTL_cputopoinfo:
case XEN_SYSCTL_numainfo:
case XEN_SYSCTL_pcitopoinfo:
+ case XEN_SYSCTL_get_cpuid_policy:
return domain_has_xen(current->domain, XEN__PHYSINFO);
case XEN_SYSCTL_psr_cmt_op:
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 1f7eb35..1683cf0 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -28,7 +28,7 @@ class xen
# XENPF_microcode_update
microcode
# XEN_SYSCTL_physinfo, XEN_SYSCTL_cputopoinfo, XEN_SYSCTL_numainfo
-# XEN_SYSCTL_pcitopoinfo
+# XEN_SYSCTL_pcitopoinfo, XEN_SYSCTL_get_cpuid_policy
physinfo
# XENPF_platform_quirk
quirk
--
2.1.4