[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen master] x86/cpu-policy: move CPU policy library code



commit 7326c1725432166cbb7a284df78c373b95be82aa
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Feb 25 12:31:08 2026 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 25 12:31:08 2026 +0100

    x86/cpu-policy: move CPU policy library code
    
    ... to a dedicated subdir of x86's private lib/ sub-tree.
    
    For the CPU policy harnesses and libxenguest use $(lib-y) as set by the
    new Makefile.common, whereas for the emulator harnesses stick to building
    just cpuid.o for the time being.
    
    Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 tools/fuzz/cpu-policy/Makefile               |   7 +-
 tools/fuzz/x86_instruction_emulator/Makefile |   2 +-
 tools/libs/guest/Makefile.common             |   7 +-
 tools/tests/cpu-policy/Makefile              |   7 +-
 tools/tests/x86_emulator/Makefile            |   2 +-
 xen/arch/x86/arch.mk                         |   1 +
 xen/arch/x86/lib/Makefile                    |   2 +
 xen/arch/x86/lib/cpu-policy/Makefile         |   1 +
 xen/arch/x86/lib/cpu-policy/Makefile.common  |   3 +
 xen/arch/x86/lib/cpu-policy/cpuid.c          | 559 +++++++++++++++++++++++++++
 xen/arch/x86/lib/cpu-policy/msr.c            | 130 +++++++
 xen/arch/x86/lib/cpu-policy/policy.c         |  54 +++
 xen/arch/x86/lib/cpu-policy/private.h        |  78 ++++
 xen/lib/Makefile                             |   2 -
 xen/lib/x86/Makefile                         |   3 -
 xen/lib/x86/cpuid.c                          | 559 ---------------------------
 xen/lib/x86/msr.c                            | 130 -------
 xen/lib/x86/policy.c                         |  54 ---
 xen/lib/x86/private.h                        |  78 ----
 19 files changed, 845 insertions(+), 834 deletions(-)

diff --git a/tools/fuzz/cpu-policy/Makefile b/tools/fuzz/cpu-policy/Makefile
index 6e7743e0aa..76ecf20e8d 100644
--- a/tools/fuzz/cpu-policy/Makefile
+++ b/tools/fuzz/cpu-policy/Makefile
@@ -20,9 +20,12 @@ install: all
 CFLAGS += $(CFLAGS_xeninclude) -D__XEN_TOOLS__
 CFLAGS += $(APPEND_CFLAGS) -Og
 
-vpath %.c ../../../xen/lib/x86
+vpath %.c $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy
 
-afl-policy-fuzzer: afl-policy-fuzzer.o msr.o cpuid.o
+lib-y :=
+include $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy/Makefile.common
+
+afl-policy-fuzzer: afl-policy-fuzzer.o $(lib-y)
        $(CC) $(CFLAGS) $^ -o $@
 
 -include $(DEPS_INCLUDE)
diff --git a/tools/fuzz/x86_instruction_emulator/Makefile b/tools/fuzz/x86_instruction_emulator/Makefile
index d104b15f4f..4e4877a203 100644
--- a/tools/fuzz/x86_instruction_emulator/Makefile
+++ b/tools/fuzz/x86_instruction_emulator/Makefile
@@ -21,7 +21,7 @@ vpath %.h ../../tests/x86_emulator
 CFLAGS += -iquote ../../tests/x86_emulator
 
 # Add libx86 to the build
-vpath %.c $(XEN_ROOT)/xen/lib/x86
+vpath %.c $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy
 
 x86_emulate:
        mkdir -p $@
diff --git a/tools/libs/guest/Makefile.common b/tools/libs/guest/Makefile.common
index a026a2f662..b928a4a246 100644
--- a/tools/libs/guest/Makefile.common
+++ b/tools/libs/guest/Makefile.common
@@ -33,9 +33,12 @@ LIBELF_OBJS += libelf-dominfo.o
 OBJS-y += $(LIBELF_OBJS)
 
 ifeq ($(CONFIG_X86),y) # Add libx86 to the build
-vpath %.c ../../../xen/lib/x86
+vpath %.c $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy
 
-OBJS-y                 += cpuid.o msr.o policy.o
+lib-y :=
+include $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy/Makefile.common
+
+OBJS-y                 += $(lib-y)
 endif
 
 # new domain builder
diff --git a/tools/tests/cpu-policy/Makefile b/tools/tests/cpu-policy/Makefile
index 24f87e2eca..d8e4d222f4 100644
--- a/tools/tests/cpu-policy/Makefile
+++ b/tools/tests/cpu-policy/Makefile
@@ -42,11 +42,14 @@ CFLAGS += $(APPEND_CFLAGS)
 
 LDFLAGS += $(APPEND_LDFLAGS)
 
-vpath %.c ../../../xen/lib/x86
+vpath %.c $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy
+
+lib-y :=
+include $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy/Makefile.common
 
 %.o: Makefile
 
-test-cpu-policy: test-cpu-policy.o msr.o cpuid.o policy.o
+test-cpu-policy: test-cpu-policy.o $(lib-y)
        $(CC) $^ -o $@ $(LDFLAGS)
 
 -include $(DEPS_INCLUDE)
diff --git a/tools/tests/x86_emulator/Makefile b/tools/tests/x86_emulator/Makefile
index 376cfe244d..e18725d0c3 100644
--- a/tools/tests/x86_emulator/Makefile
+++ b/tools/tests/x86_emulator/Makefile
@@ -17,7 +17,7 @@ vpath x86_emulate/%.h $(XEN_ROOT)/xen/arch/x86
 HOSTCFLAGS += -iquote $(XEN_ROOT)/xen/arch/x86
 
 # Add libx86 to the build
-vpath %.c $(XEN_ROOT)/xen/lib/x86
+vpath %.c $(XEN_ROOT)/xen/arch/x86/lib/cpu-policy
 
 CFLAGS += $(CFLAGS_xeninclude)
 
diff --git a/xen/arch/x86/arch.mk b/xen/arch/x86/arch.mk
index 37fe65bc13..0b42e6312f 100644
--- a/xen/arch/x86/arch.mk
+++ b/xen/arch/x86/arch.mk
@@ -4,6 +4,7 @@
 export XEN_IMG_OFFSET := 0x200000
 
 ARCH_LIBS-y += arch/x86/lib/lib.a
+ARCH_LIBS-y += arch/x86/lib/cpu-policy/lib.a
 
 CFLAGS += -DXEN_IMG_OFFSET=$(XEN_IMG_OFFSET)
 
diff --git a/xen/arch/x86/lib/Makefile b/xen/arch/x86/lib/Makefile
index b9a65c662a..fe9a271879 100644
--- a/xen/arch/x86/lib/Makefile
+++ b/xen/arch/x86/lib/Makefile
@@ -6,3 +6,5 @@ lib-y += generic-hweightl.o
 lib-y += memcpy.o
 lib-y += memset.o
 lib-y += scrub-page.o
+
+obj-y += cpu-policy/
diff --git a/xen/arch/x86/lib/cpu-policy/Makefile b/xen/arch/x86/lib/cpu-policy/Makefile
new file mode 100644
index 0000000000..b12cf7836d
--- /dev/null
+++ b/xen/arch/x86/lib/cpu-policy/Makefile
@@ -0,0 +1 @@
+include $(srcdir)/Makefile.common
diff --git a/xen/arch/x86/lib/cpu-policy/Makefile.common b/xen/arch/x86/lib/cpu-policy/Makefile.common
new file mode 100644
index 0000000000..35475af780
--- /dev/null
+++ b/xen/arch/x86/lib/cpu-policy/Makefile.common
@@ -0,0 +1,3 @@
+lib-y += cpuid.o
+lib-y += msr.o
+lib-y += policy.o
diff --git a/xen/arch/x86/lib/cpu-policy/cpuid.c b/xen/arch/x86/lib/cpu-policy/cpuid.c
new file mode 100644
index 0000000000..6298d051f2
--- /dev/null
+++ b/xen/arch/x86/lib/cpu-policy/cpuid.c
@@ -0,0 +1,559 @@
+#include "private.h"
+
+#include <xen/lib/x86/cpu-policy.h>
+
+static void zero_leaves(struct cpuid_leaf *l,
+                        unsigned int first, unsigned int last)
+{
+    if ( first <= last )
+        memset(&l[first], 0, sizeof(*l) * (last - first + 1));
+}
+
+unsigned int x86_cpuid_lookup_vendor(uint32_t ebx, uint32_t ecx, uint32_t edx)
+{
+    switch ( ebx )
+    {
+    case X86_VENDOR_INTEL_EBX:
+        if ( ecx == X86_VENDOR_INTEL_ECX &&
+             edx == X86_VENDOR_INTEL_EDX )
+            return X86_VENDOR_INTEL;
+        break;
+
+    case X86_VENDOR_AMD_EBX:
+        if ( ecx == X86_VENDOR_AMD_ECX &&
+             edx == X86_VENDOR_AMD_EDX )
+            return X86_VENDOR_AMD;
+        break;
+
+    case X86_VENDOR_CENTAUR_EBX:
+        if ( ecx == X86_VENDOR_CENTAUR_ECX &&
+             edx == X86_VENDOR_CENTAUR_EDX )
+            return X86_VENDOR_CENTAUR;
+        break;
+
+    case X86_VENDOR_SHANGHAI_EBX:
+        if ( ecx == X86_VENDOR_SHANGHAI_ECX &&
+             edx == X86_VENDOR_SHANGHAI_EDX )
+            return X86_VENDOR_SHANGHAI;
+        break;
+
+    case X86_VENDOR_HYGON_EBX:
+        if ( ecx == X86_VENDOR_HYGON_ECX &&
+             edx == X86_VENDOR_HYGON_EDX )
+            return X86_VENDOR_HYGON;
+        break;
+    }
+
+    return X86_VENDOR_UNKNOWN;
+}
+
+const char *x86_cpuid_vendor_to_str(unsigned int vendor)
+{
+    switch ( vendor )
+    {
+    case X86_VENDOR_INTEL:    return "Intel";
+    case X86_VENDOR_AMD:      return "AMD";
+    case X86_VENDOR_CENTAUR:  return "Centaur";
+    case X86_VENDOR_SHANGHAI: return "Shanghai";
+    case X86_VENDOR_HYGON:    return "Hygon";
+    default:                  return "Unknown";
+    }
+}
+
+void x86_cpu_policy_to_featureset(
+    const struct cpu_policy *p, uint32_t fs[FEATURESET_NR_ENTRIES])
+{
+    fs[FEATURESET_1d]        = p->basic._1d;
+    fs[FEATURESET_1c]        = p->basic._1c;
+    fs[FEATURESET_e1d]       = p->extd.e1d;
+    fs[FEATURESET_e1c]       = p->extd.e1c;
+    fs[FEATURESET_Da1]       = p->xstate.Da1;
+    fs[FEATURESET_7b0]       = p->feat._7b0;
+    fs[FEATURESET_7c0]       = p->feat._7c0;
+    fs[FEATURESET_e7d]       = p->extd.e7d;
+    fs[FEATURESET_e8b]       = p->extd.e8b;
+    fs[FEATURESET_7d0]       = p->feat._7d0;
+    fs[FEATURESET_7a1]       = p->feat._7a1;
+    fs[FEATURESET_e21a]      = p->extd.e21a;
+    fs[FEATURESET_7b1]       = p->feat._7b1;
+    fs[FEATURESET_7d2]       = p->feat._7d2;
+    fs[FEATURESET_7c1]       = p->feat._7c1;
+    fs[FEATURESET_7d1]       = p->feat._7d1;
+    fs[FEATURESET_m10Al]     = p->arch_caps.lo;
+    fs[FEATURESET_m10Ah]     = p->arch_caps.hi;
+    fs[FEATURESET_e21c]      = p->extd.e21c;
+}
+
+void x86_cpu_featureset_to_policy(
+    const uint32_t fs[FEATURESET_NR_ENTRIES], struct cpu_policy *p)
+{
+    p->basic._1d             = fs[FEATURESET_1d];
+    p->basic._1c             = fs[FEATURESET_1c];
+    p->extd.e1d              = fs[FEATURESET_e1d];
+    p->extd.e1c              = fs[FEATURESET_e1c];
+    p->xstate.Da1            = fs[FEATURESET_Da1];
+    p->feat._7b0             = fs[FEATURESET_7b0];
+    p->feat._7c0             = fs[FEATURESET_7c0];
+    p->extd.e7d              = fs[FEATURESET_e7d];
+    p->extd.e8b              = fs[FEATURESET_e8b];
+    p->feat._7d0             = fs[FEATURESET_7d0];
+    p->feat._7a1             = fs[FEATURESET_7a1];
+    p->extd.e21a             = fs[FEATURESET_e21a];
+    p->feat._7b1             = fs[FEATURESET_7b1];
+    p->feat._7d2             = fs[FEATURESET_7d2];
+    p->feat._7c1             = fs[FEATURESET_7c1];
+    p->feat._7d1             = fs[FEATURESET_7d1];
+    p->arch_caps.lo          = fs[FEATURESET_m10Al];
+    p->arch_caps.hi          = fs[FEATURESET_m10Ah];
+    p->extd.e21c             = fs[FEATURESET_e21c];
+}
+
+void x86_cpu_policy_recalc_synth(struct cpu_policy *p)
+{
+    p->x86_vendor = x86_cpuid_lookup_vendor(
+        p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx);
+}
+
+void x86_cpu_policy_fill_native(struct cpu_policy *p)
+{
+    unsigned int i;
+
+    cpuid_leaf(0, &p->basic.raw[0]);
+    for ( i = 1; i <= MIN(p->basic.max_leaf,
+                          ARRAY_SIZE(p->basic.raw) - 1); ++i )
+    {
+        switch ( i )
+        {
+        case 0x4: case 0x7: case 0xb: case 0xd:
+            /* Multi-invocation leaves.  Deferred. */
+            continue;
+        }
+
+        cpuid_leaf(i, &p->basic.raw[i]);
+    }
+
+    if ( p->basic.max_leaf >= 4 )
+    {
+        for ( i = 0; i < ARRAY_SIZE(p->cache.raw); ++i )
+        {
+            union {
+                struct cpuid_leaf l;
+                struct cpuid_cache_leaf c;
+            } u;
+
+            cpuid_count_leaf(4, i, &u.l);
+
+            if ( u.c.type == 0 )
+                break;
+
+            p->cache.subleaf[i] = u.c;
+        }
+
+        /*
+         * The choice of CPUID_GUEST_NR_CACHE is arbitrary.  It is expected
+         * that it will eventually need increasing for future hardware.
+         */
+#ifdef __XEN__
+        if ( i == ARRAY_SIZE(p->cache.raw) )
+            printk(XENLOG_WARNING
+                   "CPUID: Insufficient Leaf 4 space for this hardware\n");
+#endif
+    }
+
+    if ( p->basic.max_leaf >= 7 )
+    {
+        cpuid_count_leaf(7, 0, &p->feat.raw[0]);
+
+        for ( i = 1; i <= MIN(p->feat.max_subleaf,
+                              ARRAY_SIZE(p->feat.raw) - 1); ++i )
+            cpuid_count_leaf(7, i, &p->feat.raw[i]);
+    }
+
+    if ( p->basic.max_leaf >= 0xb )
+    {
+        union {
+            struct cpuid_leaf l;
+            struct cpuid_topo_leaf t;
+        } u;
+
+        for ( i = 0; i < ARRAY_SIZE(p->topo.raw); ++i )
+        {
+            cpuid_count_leaf(0xb, i, &u.l);
+
+            if ( u.t.type == 0 )
+                break;
+
+            p->topo.subleaf[i] = u.t;
+        }
+
+        /*
+         * The choice of CPUID_GUEST_NR_TOPO is per the manual.  It may need
+         * to grow for future hardware.
+         */
+#ifdef __XEN__
+        if ( i == ARRAY_SIZE(p->topo.raw) &&
+             (cpuid_count_leaf(0xb, i, &u.l), u.t.type != 0) )
+            printk(XENLOG_WARNING
+                   "CPUID: Insufficient Leaf 0xb space for this hardware\n");
+#endif
+    }
+
+    if ( p->basic.max_leaf >= 0xd )
+    {
+        uint64_t xstates;
+
+        cpuid_count_leaf(0xd, 0, &p->xstate.raw[0]);
+        cpuid_count_leaf(0xd, 1, &p->xstate.raw[1]);
+
+        xstates = cpu_policy_xstates(p);
+
+        /* This logic will probably need adjusting when XCR0[63] gets used. */
+        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);
+
+        for ( i = 2; i < min_t(unsigned int, 63,
+                               ARRAY_SIZE(p->xstate.raw)); ++i )
+        {
+            if ( xstates & (1ULL << i) )
+                cpuid_count_leaf(0xd, i, &p->xstate.raw[i]);
+        }
+    }
+
+    /* Extended leaves. */
+    cpuid_leaf(0x80000000U, &p->extd.raw[0]);
+    for ( i = 1; i <= MIN(p->extd.max_leaf & 0xffffU,
+                          ARRAY_SIZE(p->extd.raw) - 1); ++i )
+        cpuid_leaf(0x80000000U + i, &p->extd.raw[i]);
+
+    /* Don't report leaves from possible lower level hypervisor, for now. */
+    p->hv_limit = 0;
+    p->hv2_limit = 0;
+
+#ifdef __XEN__
+    /* TODO MSR_PLATFORM_INFO */
+
+    if ( p->feat.arch_caps )
+        rdmsrl(MSR_ARCH_CAPABILITIES, p->arch_caps.raw);
+#endif
+
+    x86_cpu_policy_recalc_synth(p);
+}
+
+void x86_cpu_policy_clear_out_of_range_leaves(struct cpu_policy *p)
+{
+    unsigned int i;
+
+    zero_leaves(p->basic.raw, p->basic.max_leaf + 1,
+                ARRAY_SIZE(p->basic.raw) - 1);
+
+    if ( p->basic.max_leaf < 4 )
+        memset(p->cache.raw, 0, sizeof(p->cache.raw));
+    else
+    {
+        for ( i = 0; (i < ARRAY_SIZE(p->cache.raw) &&
+                      p->cache.subleaf[i].type); ++i )
+            ;
+
+        zero_leaves(p->cache.raw, i, ARRAY_SIZE(p->cache.raw) - 1);
+    }
+
+    if ( p->basic.max_leaf < 7 )
+        memset(p->feat.raw, 0, sizeof(p->feat.raw));
+    else
+        zero_leaves(p->feat.raw, p->feat.max_subleaf + 1,
+                    ARRAY_SIZE(p->feat.raw) - 1);
+
+    if ( p->basic.max_leaf < 0xb )
+        memset(p->topo.raw, 0, sizeof(p->topo.raw));
+    else
+    {
+        for ( i = 0; (i < ARRAY_SIZE(p->topo.raw) &&
+                      p->topo.subleaf[i].type); ++i )
+            ;
+
+        zero_leaves(p->topo.raw, i, ARRAY_SIZE(p->topo.raw) - 1);
+    }
+
+    if ( p->basic.max_leaf < 0xd || !cpu_policy_xstates(p) )
+        memset(p->xstate.raw, 0, sizeof(p->xstate.raw));
+    else
+    {
+        /* This logic will probably need adjusting when XCR0[63] gets used. */
+        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);
+
+        /* First two leaves always valid.  Rest depend on xstates. */
+        i = max(2, 64 - __builtin_clzll(cpu_policy_xstates(p)));
+
+        zero_leaves(p->xstate.raw, i,
+                    ARRAY_SIZE(p->xstate.raw) - 1);
+    }
+
+    zero_leaves(p->extd.raw, (p->extd.max_leaf & 0xffff) + 1,
+                ARRAY_SIZE(p->extd.raw) - 1);
+}
+
+const uint32_t *x86_cpu_policy_lookup_deep_deps(uint32_t feature)
+{
+    static const uint32_t deep_features[] = INIT_DEEP_FEATURES;
+    static const struct {
+        uint32_t feature;
+        uint32_t fs[FEATURESET_NR_ENTRIES];
+    } deep_deps[] = INIT_DEEP_DEPS;
+    unsigned int start = 0, end = ARRAY_SIZE(deep_deps);
+
+    BUILD_BUG_ON(ARRAY_SIZE(deep_deps) != NR_DEEP_DEPS);
+
+    /* Fast early exit. */
+    if ( !test_bit(feature, deep_features) )
+        return NULL;
+
+    /* deep_deps[] is sorted.  Perform a binary search. */
+    while ( start < end )
+    {
+        unsigned int mid = start + ((end - start) / 2);
+
+        if ( deep_deps[mid].feature > feature )
+            end = mid;
+        else if ( deep_deps[mid].feature < feature )
+            start = mid + 1;
+        else
+            return deep_deps[mid].fs;
+    }
+
+    return NULL;
+}
+
+/*
+ * Copy a single cpuid_leaf into a provided xen_cpuid_leaf_t buffer,
+ * performing boundary checking against the buffer size.
+ */
+static int copy_leaf_to_buffer(uint32_t leaf, uint32_t subleaf,
+                               const struct cpuid_leaf *data,
+                               cpuid_leaf_buffer_t leaves,
+                               uint32_t *curr_entry, const uint32_t nr_entries)
+{
+    const xen_cpuid_leaf_t val = {
+        leaf, subleaf, data->a, data->b, data->c, data->d,
+    };
+
+    if ( *curr_entry == nr_entries )
+        return -ENOBUFS;
+
+    if ( copy_to_buffer_offset(leaves, *curr_entry, &val, 1) )
+        return -EFAULT;
+
+    ++*curr_entry;
+
+    return 0;
+}
+
+int x86_cpuid_copy_to_buffer(const struct cpu_policy *p,
+                             cpuid_leaf_buffer_t leaves, uint32_t *nr_entries_p)
+{
+    const uint32_t nr_entries = *nr_entries_p;
+    uint32_t curr_entry = 0, leaf, subleaf;
+
+#define COPY_LEAF(l, s, data)                                       \
+    ({                                                              \
+        int ret;                                                    \
+                                                                    \
+        if ( (ret = copy_leaf_to_buffer(                            \
+                  l, s, data, leaves, &curr_entry, nr_entries)) )   \
+            return ret;                                             \
+    })
+
+    /* Basic leaves. */
+    for ( leaf = 0; leaf <= MIN(p->basic.max_leaf,
+                                ARRAY_SIZE(p->basic.raw) - 1); ++leaf )
+    {
+        switch ( leaf )
+        {
+        case 0x4:
+            for ( subleaf = 0; subleaf < ARRAY_SIZE(p->cache.raw); ++subleaf )
+            {
+                COPY_LEAF(leaf, subleaf, &p->cache.raw[subleaf]);
+
+                if ( p->cache.subleaf[subleaf].type == 0 )
+                    break;
+            }
+            break;
+
+        case 0x7:
+            for ( subleaf = 0;
+                  subleaf <= MIN(p->feat.max_subleaf,
+                                 ARRAY_SIZE(p->feat.raw) - 1); ++subleaf )
+                COPY_LEAF(leaf, subleaf, &p->feat.raw[subleaf]);
+            break;
+
+        case 0xb:
+            for ( subleaf = 0; subleaf < ARRAY_SIZE(p->topo.raw); ++subleaf )
+            {
+                COPY_LEAF(leaf, subleaf, &p->topo.raw[subleaf]);
+
+                if ( p->topo.subleaf[subleaf].type == 0 )
+                    break;
+            }
+            break;
+
+        case 0xd:
+        {
+            uint64_t xstates = cpu_policy_xstates(p);
+
+            COPY_LEAF(leaf, 0, &p->xstate.raw[0]);
+            COPY_LEAF(leaf, 1, &p->xstate.raw[1]);
+
+            for ( xstates >>= 2, subleaf = 2;
+                  xstates && subleaf < ARRAY_SIZE(p->xstate.raw);
+                  xstates >>= 1, ++subleaf )
+                COPY_LEAF(leaf, subleaf, &p->xstate.raw[subleaf]);
+            break;
+        }
+
+        default:
+            COPY_LEAF(leaf, XEN_CPUID_NO_SUBLEAF, &p->basic.raw[leaf]);
+            break;
+        }
+    }
+
+    /* TODO: Port Xen and Viridian leaves to the new CPUID infrastructure. */
+    COPY_LEAF(0x40000000, XEN_CPUID_NO_SUBLEAF,
+              &(struct cpuid_leaf){ p->hv_limit });
+    COPY_LEAF(0x40000100, XEN_CPUID_NO_SUBLEAF,
+              &(struct cpuid_leaf){ p->hv2_limit });
+
+    /* Extended leaves. */
+    for ( leaf = 0; leaf <= MIN(p->extd.max_leaf & 0xffffUL,
+                                ARRAY_SIZE(p->extd.raw) - 1); ++leaf )
+        COPY_LEAF(0x80000000U | leaf, XEN_CPUID_NO_SUBLEAF, &p->extd.raw[leaf]);
+
+#undef COPY_LEAF
+
+    *nr_entries_p = curr_entry;
+
+    return 0;
+}
+
+int x86_cpuid_copy_from_buffer(struct cpu_policy *p,
+                               const cpuid_leaf_buffer_t leaves,
+                               uint32_t nr_entries, uint32_t *err_leaf,
+                               uint32_t *err_subleaf)
+{
+    unsigned int i;
+    xen_cpuid_leaf_t data;
+
+    if ( err_leaf )
+        *err_leaf = -1;
+    if ( err_subleaf )
+        *err_subleaf = -1;
+
+    /*
+     * A well formed caller is expected to pass an array with leaves in order,
+     * and without any repetitions.  However, due to per-vendor differences,
+     * and in the case of upgrade or levelled scenarios, we typically expect
+     * fewer than MAX leaves to be passed.
+     *
+     * Detecting repeated entries is prohibitively complicated, so we don't
+     * bother.  That said, one way or another if more than MAX leaves are
+     * passed, something is wrong.
+     */
+    if ( nr_entries > CPUID_MAX_SERIALISED_LEAVES )
+        return -E2BIG;
+
+    for ( i = 0; i < nr_entries; ++i )
+    {
+        struct cpuid_leaf l;
+
+        if ( copy_from_buffer_offset(&data, leaves, i, 1) )
+            return -EFAULT;
+
+        l = (struct cpuid_leaf){ data.a, data.b, data.c, data.d };
+
+        switch ( data.leaf )
+        {
+        case 0 ... ARRAY_SIZE(p->basic.raw) - 1:
+            switch ( data.leaf )
+            {
+            case 0x4:
+                if ( data.subleaf >= ARRAY_SIZE(p->cache.raw) )
+                    goto out_of_range;
+
+                array_access_nospec(p->cache.raw, data.subleaf) = l;
+                break;
+
+            case 0x7:
+                if ( data.subleaf >= ARRAY_SIZE(p->feat.raw) )
+                    goto out_of_range;
+
+                array_access_nospec(p->feat.raw, data.subleaf) = l;
+                break;
+
+            case 0xb:
+                if ( data.subleaf >= ARRAY_SIZE(p->topo.raw) )
+                    goto out_of_range;
+
+                array_access_nospec(p->topo.raw, data.subleaf) = l;
+                break;
+
+            case 0xd:
+                if ( data.subleaf >= ARRAY_SIZE(p->xstate.raw) )
+                    goto out_of_range;
+
+                array_access_nospec(p->xstate.raw, data.subleaf) = l;
+                break;
+
+            default:
+                if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+                    goto out_of_range;
+
+                array_access_nospec(p->basic.raw, data.leaf) = l;
+                break;
+            }
+            break;
+
+        case 0x40000000:
+            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+                goto out_of_range;
+
+            p->hv_limit = l.a;
+            break;
+
+        case 0x40000100:
+            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+                goto out_of_range;
+
+            p->hv2_limit = l.a;
+            break;
+
+        case 0x80000000U ... 0x80000000U + ARRAY_SIZE(p->extd.raw) - 1:
+            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
+                goto out_of_range;
+
+            array_access_nospec(p->extd.raw, data.leaf & 0xffff) = l;
+            break;
+
+        default:
+            goto out_of_range;
+        }
+    }
+
+    x86_cpu_policy_recalc_synth(p);
+
+    return 0;
+
+ out_of_range:
+    if ( err_leaf )
+        *err_leaf = data.leaf;
+    if ( err_subleaf )
+        *err_subleaf = data.subleaf;
+
+    return -ERANGE;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/lib/cpu-policy/msr.c b/xen/arch/x86/lib/cpu-policy/msr.c
new file mode 100644
index 0000000000..e04b9ca013
--- /dev/null
+++ b/xen/arch/x86/lib/cpu-policy/msr.c
@@ -0,0 +1,130 @@
+#include "private.h"
+
+#include <xen/lib/x86/cpu-policy.h>
+
+/*
+ * Copy a single MSR into the provided msr_entry_buffer_t buffer, performing a
+ * boundary check against the buffer size.
+ */
+static int copy_msr_to_buffer(uint32_t idx, uint64_t val,
+                              msr_entry_buffer_t msrs,
+                              uint32_t *curr_entry, const uint32_t nr_entries)
+{
+    const xen_msr_entry_t ent = { .idx = idx, .val = val };
+
+    if ( *curr_entry == nr_entries )
+        return -ENOBUFS;
+
+    if ( copy_to_buffer_offset(msrs, *curr_entry, &ent, 1) )
+        return -EFAULT;
+
+    ++*curr_entry;
+
+    return 0;
+}
+
+int x86_msr_copy_to_buffer(const struct cpu_policy *p,
+                           msr_entry_buffer_t msrs, uint32_t *nr_entries_p)
+{
+    const uint32_t nr_entries = *nr_entries_p;
+    uint32_t curr_entry = 0;
+
+#define COPY_MSR(idx, val)                                      \
+    ({                                                          \
+        int ret;                                                \
+                                                                \
+        if ( (ret = copy_msr_to_buffer(                         \
+                  idx, val, msrs, &curr_entry, nr_entries)) )   \
+            return ret;                                         \
+    })
+
+    COPY_MSR(MSR_INTEL_PLATFORM_INFO, p->platform_info.raw);
+    COPY_MSR(MSR_ARCH_CAPABILITIES,   p->arch_caps.raw);
+
+#undef COPY_MSR
+
+    *nr_entries_p = curr_entry;
+
+    return 0;
+}
+
+int x86_msr_copy_from_buffer(struct cpu_policy *p,
+                             const msr_entry_buffer_t msrs, uint32_t nr_entries,
+                             uint32_t *err_msr)
+{
+    unsigned int i;
+    xen_msr_entry_t data;
+    int rc;
+
+    if ( err_msr )
+        *err_msr = -1;
+
+    /*
+     * A well formed caller is expected to pass an array with entries in
+     * order, and without any repetitions.  However, due to per-vendor
+     * differences, and in the case of upgrade or levelled scenarios, we
+     * typically expect fewer than MAX entries to be passed.
+     *
+     * Detecting repeated entries is prohibitively complicated, so we don't
+     * bother.  That said, one way or another if more than MAX entries are
+     * passed, something is wrong.
+     */
+    if ( nr_entries > MSR_MAX_SERIALISED_ENTRIES )
+        return -E2BIG;
+
+    for ( i = 0; i < nr_entries; i++ )
+    {
+        if ( copy_from_buffer_offset(&data, msrs, i, 1) )
+            return -EFAULT;
+
+        if ( data.flags ) /* .flags MBZ */
+        {
+            rc = -EINVAL;
+            goto err;
+        }
+
+        switch ( data.idx )
+        {
+            /*
+             * Assign data.val to p->field, checking for truncation if the
+             * backing storage for field is smaller than uint64_t
+             */
+#define ASSIGN(field)                             \
+({                                                \
+    if ( (typeof(p->field))data.val != data.val ) \
+    {                                             \
+        rc = -EOVERFLOW;                          \
+        goto err;                                 \
+    }                                             \
+    p->field = data.val;                          \
+})
+
+        case MSR_INTEL_PLATFORM_INFO: ASSIGN(platform_info.raw); break;
+        case MSR_ARCH_CAPABILITIES:   ASSIGN(arch_caps.raw);     break;
+
+#undef ASSIGN
+
+        default:
+            rc = -ERANGE;
+            goto err;
+        }
+    }
+
+    return 0;
+
+ err:
+    if ( err_msr )
+        *err_msr = data.idx;
+
+    return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/lib/cpu-policy/policy.c b/xen/arch/x86/lib/cpu-policy/policy.c
new file mode 100644
index 0000000000..f033d22785
--- /dev/null
+++ b/xen/arch/x86/lib/cpu-policy/policy.c
@@ -0,0 +1,54 @@
+#include "private.h"
+
+#include <xen/lib/x86/cpu-policy.h>
+
+int x86_cpu_policies_are_compatible(const struct cpu_policy *host,
+                                    const struct cpu_policy *guest,
+                                    struct cpu_policy_errors *err)
+{
+    struct cpu_policy_errors e = INIT_CPU_POLICY_ERRORS;
+    int ret = -EINVAL;
+
+#define NA XEN_CPUID_NO_SUBLEAF
+#define FAIL_CPUID(l, s) \
+    do { e.leaf = (l); e.subleaf = (s); goto out; } while ( 0 )
+#define FAIL_MSR(m) \
+    do { e.msr = (m); goto out; } while ( 0 )
+
+    if ( guest->basic.max_leaf > host->basic.max_leaf )
+        FAIL_CPUID(0, NA);
+
+    if ( guest->feat.max_subleaf > host->feat.max_subleaf )
+        FAIL_CPUID(7, 0);
+
+    if ( guest->extd.max_leaf > host->extd.max_leaf )
+        FAIL_CPUID(0x80000000U, NA);
+
+    /* TODO: Audit more CPUID data. */
+
+    if ( ~host->platform_info.raw & guest->platform_info.raw )
+        FAIL_MSR(MSR_INTEL_PLATFORM_INFO);
+
+#undef FAIL_MSR
+#undef FAIL_CPUID
+#undef NA
+
+    /* Success. */
+    ret = 0;
+
+ out:
+    if ( err )
+        *err = e;
+
+    return ret;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/lib/cpu-policy/private.h b/xen/arch/x86/lib/cpu-policy/private.h
new file mode 100644
index 0000000000..aedd8e4821
--- /dev/null
+++ b/xen/arch/x86/lib/cpu-policy/private.h
@@ -0,0 +1,78 @@
+#ifndef XEN_LIB_X86_PRIVATE_H
+#define XEN_LIB_X86_PRIVATE_H
+
+#ifdef __XEN__
+
+#include <xen/bitops.h>
+#include <xen/guest_access.h>
+#include <xen/kernel.h>
+#include <xen/lib.h>
+#include <xen/nospec.h>
+#include <xen/types.h>
+
+#include <asm/msr.h>
+
+#define copy_to_buffer_offset copy_to_guest_offset
+#define copy_from_buffer_offset copy_from_guest_offset
+
+#else
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <xen/asm/msr-index.h>
+#include <xen/asm/x86-vendors.h>
+
+#include <xen-tools/common-macros.h>
+
+static inline bool test_bit(unsigned int bit, const void *vaddr)
+{
+    const char *addr = vaddr;
+
+    return addr[bit / 8] & (1u << (bit % 8));
+}
+
+#define array_access_nospec(a, i) (a)[(i)]
+
+/* memcpy(), but with copy_to_guest_offset()'s API. */
+#define copy_to_buffer_offset(dst, index, src, nr)      \
+({                                                      \
+    const typeof(*(src)) *src_ = (src);                 \
+    typeof(*(dst)) *dst_ = (dst);                       \
+    typeof(index) index_ = (index);                     \
+    typeof(nr) nr_ = (nr), i_;                          \
+                                                        \
+    for ( i_ = 0; i_ < nr_; i_++ )                      \
+        dst_[index_ + i_] = src_[i_];                   \
+    0;                                                  \
+})
+
+/* memcpy(), but with copy_from_guest_offset()'s API. */
+#define copy_from_buffer_offset(dst, src, index, nr)    \
+({                                                      \
+    const typeof(*(src)) *src_ = (src);                 \
+    typeof(*(dst)) *dst_ = (dst);                       \
+    typeof(index) index_ = (index);                     \
+    typeof(nr) nr_ = (nr), i_;                          \
+                                                        \
+    for ( i_ = 0; i_ < nr_; i_++ )                      \
+        dst_[i_] = src_[index_ + i_];                   \
+    0;                                                  \
+})
+
+#endif /* __XEN__ */
+
+#endif /* XEN_LIB_X86_PRIVATE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/lib/Makefile b/xen/lib/Makefile
index 3b0137902c..dcef1610b2 100644
--- a/xen/lib/Makefile
+++ b/xen/lib/Makefile
@@ -1,5 +1,3 @@
-obj-$(CONFIG_X86) += x86/
-
 lib-y += bsearch.o
 lib-y += ctors.o
 lib-y += ctype.o
diff --git a/xen/lib/x86/Makefile b/xen/lib/x86/Makefile
deleted file mode 100644
index 780ea05db1..0000000000
--- a/xen/lib/x86/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y += cpuid.o
-obj-y += msr.o
-obj-y += policy.o
diff --git a/xen/lib/x86/cpuid.c b/xen/lib/x86/cpuid.c
deleted file mode 100644
index 6298d051f2..0000000000
--- a/xen/lib/x86/cpuid.c
+++ /dev/null
@@ -1,559 +0,0 @@
-#include "private.h"
-
-#include <xen/lib/x86/cpu-policy.h>
-
-static void zero_leaves(struct cpuid_leaf *l,
-                        unsigned int first, unsigned int last)
-{
-    if ( first <= last )
-        memset(&l[first], 0, sizeof(*l) * (last - first + 1));
-}
-
-unsigned int x86_cpuid_lookup_vendor(uint32_t ebx, uint32_t ecx, uint32_t edx)
-{
-    switch ( ebx )
-    {
-    case X86_VENDOR_INTEL_EBX:
-        if ( ecx == X86_VENDOR_INTEL_ECX &&
-             edx == X86_VENDOR_INTEL_EDX )
-            return X86_VENDOR_INTEL;
-        break;
-
-    case X86_VENDOR_AMD_EBX:
-        if ( ecx == X86_VENDOR_AMD_ECX &&
-             edx == X86_VENDOR_AMD_EDX )
-            return X86_VENDOR_AMD;
-        break;
-
-    case X86_VENDOR_CENTAUR_EBX:
-        if ( ecx == X86_VENDOR_CENTAUR_ECX &&
-             edx == X86_VENDOR_CENTAUR_EDX )
-            return X86_VENDOR_CENTAUR;
-        break;
-
-    case X86_VENDOR_SHANGHAI_EBX:
-        if ( ecx == X86_VENDOR_SHANGHAI_ECX &&
-             edx == X86_VENDOR_SHANGHAI_EDX )
-            return X86_VENDOR_SHANGHAI;
-        break;
-
-    case X86_VENDOR_HYGON_EBX:
-        if ( ecx == X86_VENDOR_HYGON_ECX &&
-             edx == X86_VENDOR_HYGON_EDX )
-            return X86_VENDOR_HYGON;
-        break;
-    }
-
-    return X86_VENDOR_UNKNOWN;
-}
-
-const char *x86_cpuid_vendor_to_str(unsigned int vendor)
-{
-    switch ( vendor )
-    {
-    case X86_VENDOR_INTEL:    return "Intel";
-    case X86_VENDOR_AMD:      return "AMD";
-    case X86_VENDOR_CENTAUR:  return "Centaur";
-    case X86_VENDOR_SHANGHAI: return "Shanghai";
-    case X86_VENDOR_HYGON:    return "Hygon";
-    default:                  return "Unknown";
-    }
-}
-
-void x86_cpu_policy_to_featureset(
-    const struct cpu_policy *p, uint32_t fs[FEATURESET_NR_ENTRIES])
-{
-    fs[FEATURESET_1d]        = p->basic._1d;
-    fs[FEATURESET_1c]        = p->basic._1c;
-    fs[FEATURESET_e1d]       = p->extd.e1d;
-    fs[FEATURESET_e1c]       = p->extd.e1c;
-    fs[FEATURESET_Da1]       = p->xstate.Da1;
-    fs[FEATURESET_7b0]       = p->feat._7b0;
-    fs[FEATURESET_7c0]       = p->feat._7c0;
-    fs[FEATURESET_e7d]       = p->extd.e7d;
-    fs[FEATURESET_e8b]       = p->extd.e8b;
-    fs[FEATURESET_7d0]       = p->feat._7d0;
-    fs[FEATURESET_7a1]       = p->feat._7a1;
-    fs[FEATURESET_e21a]      = p->extd.e21a;
-    fs[FEATURESET_7b1]       = p->feat._7b1;
-    fs[FEATURESET_7d2]       = p->feat._7d2;
-    fs[FEATURESET_7c1]       = p->feat._7c1;
-    fs[FEATURESET_7d1]       = p->feat._7d1;
-    fs[FEATURESET_m10Al]     = p->arch_caps.lo;
-    fs[FEATURESET_m10Ah]     = p->arch_caps.hi;
-    fs[FEATURESET_e21c]      = p->extd.e21c;
-}
-
-void x86_cpu_featureset_to_policy(
-    const uint32_t fs[FEATURESET_NR_ENTRIES], struct cpu_policy *p)
-{
-    p->basic._1d             = fs[FEATURESET_1d];
-    p->basic._1c             = fs[FEATURESET_1c];
-    p->extd.e1d              = fs[FEATURESET_e1d];
-    p->extd.e1c              = fs[FEATURESET_e1c];
-    p->xstate.Da1            = fs[FEATURESET_Da1];
-    p->feat._7b0             = fs[FEATURESET_7b0];
-    p->feat._7c0             = fs[FEATURESET_7c0];
-    p->extd.e7d              = fs[FEATURESET_e7d];
-    p->extd.e8b              = fs[FEATURESET_e8b];
-    p->feat._7d0             = fs[FEATURESET_7d0];
-    p->feat._7a1             = fs[FEATURESET_7a1];
-    p->extd.e21a             = fs[FEATURESET_e21a];
-    p->feat._7b1             = fs[FEATURESET_7b1];
-    p->feat._7d2             = fs[FEATURESET_7d2];
-    p->feat._7c1             = fs[FEATURESET_7c1];
-    p->feat._7d1             = fs[FEATURESET_7d1];
-    p->arch_caps.lo          = fs[FEATURESET_m10Al];
-    p->arch_caps.hi          = fs[FEATURESET_m10Ah];
-    p->extd.e21c             = fs[FEATURESET_e21c];
-}
-
-void x86_cpu_policy_recalc_synth(struct cpu_policy *p)
-{
-    p->x86_vendor = x86_cpuid_lookup_vendor(
-        p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx);
-}
-
-void x86_cpu_policy_fill_native(struct cpu_policy *p)
-{
-    unsigned int i;
-
-    cpuid_leaf(0, &p->basic.raw[0]);
-    for ( i = 1; i <= MIN(p->basic.max_leaf,
-                          ARRAY_SIZE(p->basic.raw) - 1); ++i )
-    {
-        switch ( i )
-        {
-        case 0x4: case 0x7: case 0xb: case 0xd:
-            /* Multi-invocation leaves.  Deferred. */
-            continue;
-        }
-
-        cpuid_leaf(i, &p->basic.raw[i]);
-    }
-
-    if ( p->basic.max_leaf >= 4 )
-    {
-        for ( i = 0; i < ARRAY_SIZE(p->cache.raw); ++i )
-        {
-            union {
-                struct cpuid_leaf l;
-                struct cpuid_cache_leaf c;
-            } u;
-
-            cpuid_count_leaf(4, i, &u.l);
-
-            if ( u.c.type == 0 )
-                break;
-
-            p->cache.subleaf[i] = u.c;
-        }
-
-        /*
-         * The choice of CPUID_GUEST_NR_CACHE is arbitrary.  It is expected
-         * that it will eventually need increasing for future hardware.
-         */
-#ifdef __XEN__
-        if ( i == ARRAY_SIZE(p->cache.raw) )
-            printk(XENLOG_WARNING
-                   "CPUID: Insufficient Leaf 4 space for this hardware\n");
-#endif
-    }
-
-    if ( p->basic.max_leaf >= 7 )
-    {
-        cpuid_count_leaf(7, 0, &p->feat.raw[0]);
-
-        for ( i = 1; i <= MIN(p->feat.max_subleaf,
-                              ARRAY_SIZE(p->feat.raw) - 1); ++i )
-            cpuid_count_leaf(7, i, &p->feat.raw[i]);
-    }
-
-    if ( p->basic.max_leaf >= 0xb )
-    {
-        union {
-            struct cpuid_leaf l;
-            struct cpuid_topo_leaf t;
-        } u;
-
-        for ( i = 0; i < ARRAY_SIZE(p->topo.raw); ++i )
-        {
-            cpuid_count_leaf(0xb, i, &u.l);
-
-            if ( u.t.type == 0 )
-                break;
-
-            p->topo.subleaf[i] = u.t;
-        }
-
-        /*
-         * The choice of CPUID_GUEST_NR_TOPO is per the manual.  It may need
-         * to grow for future hardware.
-         */
-#ifdef __XEN__
-        if ( i == ARRAY_SIZE(p->topo.raw) &&
-             (cpuid_count_leaf(0xb, i, &u.l), u.t.type != 0) )
-            printk(XENLOG_WARNING
-                   "CPUID: Insufficient Leaf 0xb space for this hardware\n");
-#endif
-    }
-
-    if ( p->basic.max_leaf >= 0xd )
-    {
-        uint64_t xstates;
-
-        cpuid_count_leaf(0xd, 0, &p->xstate.raw[0]);
-        cpuid_count_leaf(0xd, 1, &p->xstate.raw[1]);
-
-        xstates = cpu_policy_xstates(p);
-
-        /* This logic will probably need adjusting when XCR0[63] gets used. */
-        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);
-
-        for ( i = 2; i < min_t(unsigned int, 63,
-                               ARRAY_SIZE(p->xstate.raw)); ++i )
-        {
-            if ( xstates & (1ULL << i) )
-                cpuid_count_leaf(0xd, i, &p->xstate.raw[i]);
-        }
-    }
-
-    /* Extended leaves. */
-    cpuid_leaf(0x80000000U, &p->extd.raw[0]);
-    for ( i = 1; i <= MIN(p->extd.max_leaf & 0xffffU,
-                          ARRAY_SIZE(p->extd.raw) - 1); ++i )
-        cpuid_leaf(0x80000000U + i, &p->extd.raw[i]);
-
-    /* Don't report leaves from possible lower level hypervisor, for now. */
-    p->hv_limit = 0;
-    p->hv2_limit = 0;
-
-#ifdef __XEN__
-    /* TODO MSR_PLATFORM_INFO */
-
-    if ( p->feat.arch_caps )
-        rdmsrl(MSR_ARCH_CAPABILITIES, p->arch_caps.raw);
-#endif
-
-    x86_cpu_policy_recalc_synth(p);
-}
-
-void x86_cpu_policy_clear_out_of_range_leaves(struct cpu_policy *p)
-{
-    unsigned int i;
-
-    zero_leaves(p->basic.raw, p->basic.max_leaf + 1,
-                ARRAY_SIZE(p->basic.raw) - 1);
-
-    if ( p->basic.max_leaf < 4 )
-        memset(p->cache.raw, 0, sizeof(p->cache.raw));
-    else
-    {
-        for ( i = 0; (i < ARRAY_SIZE(p->cache.raw) &&
-                      p->cache.subleaf[i].type); ++i )
-            ;
-
-        zero_leaves(p->cache.raw, i, ARRAY_SIZE(p->cache.raw) - 1);
-    }
-
-    if ( p->basic.max_leaf < 7 )
-        memset(p->feat.raw, 0, sizeof(p->feat.raw));
-    else
-        zero_leaves(p->feat.raw, p->feat.max_subleaf + 1,
-                    ARRAY_SIZE(p->feat.raw) - 1);
-
-    if ( p->basic.max_leaf < 0xb )
-        memset(p->topo.raw, 0, sizeof(p->topo.raw));
-    else
-    {
-        for ( i = 0; (i < ARRAY_SIZE(p->topo.raw) &&
-                      p->topo.subleaf[i].type); ++i )
-            ;
-
-        zero_leaves(p->topo.raw, i, ARRAY_SIZE(p->topo.raw) - 1);
-    }
-
-    if ( p->basic.max_leaf < 0xd || !cpu_policy_xstates(p) )
-        memset(p->xstate.raw, 0, sizeof(p->xstate.raw));
-    else
-    {
-        /* This logic will probably need adjusting when XCR0[63] gets used. */
-        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);
-
-        /* First two leaves always valid.  Rest depend on xstates. */
-        i = max(2, 64 - __builtin_clzll(cpu_policy_xstates(p)));
-
-        zero_leaves(p->xstate.raw, i,
-                    ARRAY_SIZE(p->xstate.raw) - 1);
-    }
-
-    zero_leaves(p->extd.raw, (p->extd.max_leaf & 0xffff) + 1,
-                ARRAY_SIZE(p->extd.raw) - 1);
-}
-
-const uint32_t *x86_cpu_policy_lookup_deep_deps(uint32_t feature)
-{
-    static const uint32_t deep_features[] = INIT_DEEP_FEATURES;
-    static const struct {
-        uint32_t feature;
-        uint32_t fs[FEATURESET_NR_ENTRIES];
-    } deep_deps[] = INIT_DEEP_DEPS;
-    unsigned int start = 0, end = ARRAY_SIZE(deep_deps);
-
-    BUILD_BUG_ON(ARRAY_SIZE(deep_deps) != NR_DEEP_DEPS);
-
-    /* Fast early exit. */
-    if ( !test_bit(feature, deep_features) )
-        return NULL;
-
-    /* deep_deps[] is sorted.  Perform a binary search. */
-    while ( start < end )
-    {
-        unsigned int mid = start + ((end - start) / 2);
-
-        if ( deep_deps[mid].feature > feature )
-            end = mid;
-        else if ( deep_deps[mid].feature < feature )
-            start = mid + 1;
-        else
-            return deep_deps[mid].fs;
-    }
-
-    return NULL;
-}
-
-/*
- * Copy a single cpuid_leaf into a provided xen_cpuid_leaf_t buffer,
- * performing boundary checking against the buffer size.
- */
-static int copy_leaf_to_buffer(uint32_t leaf, uint32_t subleaf,
-                               const struct cpuid_leaf *data,
-                               cpuid_leaf_buffer_t leaves,
-                               uint32_t *curr_entry, const uint32_t nr_entries)
-{
-    const xen_cpuid_leaf_t val = {
-        leaf, subleaf, data->a, data->b, data->c, data->d,
-    };
-
-    if ( *curr_entry == nr_entries )
-        return -ENOBUFS;
-
-    if ( copy_to_buffer_offset(leaves, *curr_entry, &val, 1) )
-        return -EFAULT;
-
-    ++*curr_entry;
-
-    return 0;
-}
-
-int x86_cpuid_copy_to_buffer(const struct cpu_policy *p,
-                             cpuid_leaf_buffer_t leaves, uint32_t *nr_entries_p)
-{
-    const uint32_t nr_entries = *nr_entries_p;
-    uint32_t curr_entry = 0, leaf, subleaf;
-
-#define COPY_LEAF(l, s, data)                                       \
-    ({                                                              \
-        int ret;                                                    \
-                                                                    \
-        if ( (ret = copy_leaf_to_buffer(                            \
-                  l, s, data, leaves, &curr_entry, nr_entries)) )   \
-            return ret;                                             \
-    })
-
-    /* Basic leaves. */
-    for ( leaf = 0; leaf <= MIN(p->basic.max_leaf,
-                                ARRAY_SIZE(p->basic.raw) - 1); ++leaf )
-    {
-        switch ( leaf )
-        {
-        case 0x4:
-            for ( subleaf = 0; subleaf < ARRAY_SIZE(p->cache.raw); ++subleaf )
-            {
-                COPY_LEAF(leaf, subleaf, &p->cache.raw[subleaf]);
-
-                if ( p->cache.subleaf[subleaf].type == 0 )
-                    break;
-            }
-            break;
-
-        case 0x7:
-            for ( subleaf = 0;
-                  subleaf <= MIN(p->feat.max_subleaf,
-                                 ARRAY_SIZE(p->feat.raw) - 1); ++subleaf )
-                COPY_LEAF(leaf, subleaf, &p->feat.raw[subleaf]);
-            break;
-
-        case 0xb:
-            for ( subleaf = 0; subleaf < ARRAY_SIZE(p->topo.raw); ++subleaf )
-            {
-                COPY_LEAF(leaf, subleaf, &p->topo.raw[subleaf]);
-
-                if ( p->topo.subleaf[subleaf].type == 0 )
-                    break;
-            }
-            break;
-
-        case 0xd:
-        {
-            uint64_t xstates = cpu_policy_xstates(p);
-
-            COPY_LEAF(leaf, 0, &p->xstate.raw[0]);
-            COPY_LEAF(leaf, 1, &p->xstate.raw[1]);
-
-            for ( xstates >>= 2, subleaf = 2;
-                  xstates && subleaf < ARRAY_SIZE(p->xstate.raw);
-                  xstates >>= 1, ++subleaf )
-                COPY_LEAF(leaf, subleaf, &p->xstate.raw[subleaf]);
-            break;
-        }
-
-        default:
-            COPY_LEAF(leaf, XEN_CPUID_NO_SUBLEAF, &p->basic.raw[leaf]);
-            break;
-        }
-    }
-
-    /* TODO: Port Xen and Viridian leaves to the new CPUID infrastructure. */
-    COPY_LEAF(0x40000000, XEN_CPUID_NO_SUBLEAF,
-              &(struct cpuid_leaf){ p->hv_limit });
-    COPY_LEAF(0x40000100, XEN_CPUID_NO_SUBLEAF,
-              &(struct cpuid_leaf){ p->hv2_limit });
-
-    /* Extended leaves. */
-    for ( leaf = 0; leaf <= MIN(p->extd.max_leaf & 0xffffUL,
-                                ARRAY_SIZE(p->extd.raw) - 1); ++leaf )
-        COPY_LEAF(0x80000000U | leaf, XEN_CPUID_NO_SUBLEAF, &p->extd.raw[leaf]);
-
-#undef COPY_LEAF
-
-    *nr_entries_p = curr_entry;
-
-    return 0;
-}
-
-int x86_cpuid_copy_from_buffer(struct cpu_policy *p,
-                               const cpuid_leaf_buffer_t leaves,
-                               uint32_t nr_entries, uint32_t *err_leaf,
-                               uint32_t *err_subleaf)
-{
-    unsigned int i;
-    xen_cpuid_leaf_t data;
-
-    if ( err_leaf )
-        *err_leaf = -1;
-    if ( err_subleaf )
-        *err_subleaf = -1;
-
-    /*
-     * A well formed caller is expected to pass an array with leaves in order,
-     * and without any repetitions.  However, due to per-vendor differences,
-     * and in the case of upgrade or levelled scenarios, we typically expect
-     * fewer than MAX leaves to be passed.
-     *
-     * Detecting repeated entries is prohibitively complicated, so we don't
-     * bother.  That said, one way or another if more than MAX leaves are
-     * passed, something is wrong.
-     */
-    if ( nr_entries > CPUID_MAX_SERIALISED_LEAVES )
-        return -E2BIG;
-
-    for ( i = 0; i < nr_entries; ++i )
-    {
-        struct cpuid_leaf l;
-
-        if ( copy_from_buffer_offset(&data, leaves, i, 1) )
-            return -EFAULT;
-
-        l = (struct cpuid_leaf){ data.a, data.b, data.c, data.d };
-
-        switch ( data.leaf )
-        {
-        case 0 ... ARRAY_SIZE(p->basic.raw) - 1:
-            switch ( data.leaf )
-            {
-            case 0x4:
-                if ( data.subleaf >= ARRAY_SIZE(p->cache.raw) )
-                    goto out_of_range;
-
-                array_access_nospec(p->cache.raw, data.subleaf) = l;
-                break;
-
-            case 0x7:
-                if ( data.subleaf >= ARRAY_SIZE(p->feat.raw) )
-                    goto out_of_range;
-
-                array_access_nospec(p->feat.raw, data.subleaf) = l;
-                break;
-
-            case 0xb:
-                if ( data.subleaf >= ARRAY_SIZE(p->topo.raw) )
-                    goto out_of_range;
-
-                array_access_nospec(p->topo.raw, data.subleaf) = l;
-                break;
-
-            case 0xd:
-                if ( data.subleaf >= ARRAY_SIZE(p->xstate.raw) )
-                    goto out_of_range;
-
-                array_access_nospec(p->xstate.raw, data.subleaf) = l;
-                break;
-
-            default:
-                if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
-                    goto out_of_range;
-
-                array_access_nospec(p->basic.raw, data.leaf) = l;
-                break;
-            }
-            break;
-
-        case 0x40000000:
-            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
-                goto out_of_range;
-
-            p->hv_limit = l.a;
-            break;
-
-        case 0x40000100:
-            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
-                goto out_of_range;
-
-            p->hv2_limit = l.a;
-            break;
-
-        case 0x80000000U ... 0x80000000U + ARRAY_SIZE(p->extd.raw) - 1:
-            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
-                goto out_of_range;
-
-            array_access_nospec(p->extd.raw, data.leaf & 0xffff) = l;
-            break;
-
-        default:
-            goto out_of_range;
-        }
-    }
-
-    x86_cpu_policy_recalc_synth(p);
-
-    return 0;
-
- out_of_range:
-    if ( err_leaf )
-        *err_leaf = data.leaf;
-    if ( err_subleaf )
-        *err_subleaf = data.subleaf;
-
-    return -ERANGE;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/lib/x86/msr.c b/xen/lib/x86/msr.c
deleted file mode 100644
index e04b9ca013..0000000000
--- a/xen/lib/x86/msr.c
+++ /dev/null
@@ -1,130 +0,0 @@
-#include "private.h"
-
-#include <xen/lib/x86/cpu-policy.h>
-
-/*
- * Copy a single MSR into the provided msr_entry_buffer_t buffer, performing a
- * boundary check against the buffer size.
- */
-static int copy_msr_to_buffer(uint32_t idx, uint64_t val,
-                              msr_entry_buffer_t msrs,
-                              uint32_t *curr_entry, const uint32_t nr_entries)
-{
-    const xen_msr_entry_t ent = { .idx = idx, .val = val };
-
-    if ( *curr_entry == nr_entries )
-        return -ENOBUFS;
-
-    if ( copy_to_buffer_offset(msrs, *curr_entry, &ent, 1) )
-        return -EFAULT;
-
-    ++*curr_entry;
-
-    return 0;
-}
-
-int x86_msr_copy_to_buffer(const struct cpu_policy *p,
-                           msr_entry_buffer_t msrs, uint32_t *nr_entries_p)
-{
-    const uint32_t nr_entries = *nr_entries_p;
-    uint32_t curr_entry = 0;
-
-#define COPY_MSR(idx, val)                                      \
-    ({                                                          \
-        int ret;                                                \
-                                                                \
-        if ( (ret = copy_msr_to_buffer(                         \
-                  idx, val, msrs, &curr_entry, nr_entries)) )   \
-            return ret;                                         \
-    })
-
-    COPY_MSR(MSR_INTEL_PLATFORM_INFO, p->platform_info.raw);
-    COPY_MSR(MSR_ARCH_CAPABILITIES,   p->arch_caps.raw);
-
-#undef COPY_MSR
-
-    *nr_entries_p = curr_entry;
-
-    return 0;
-}
-
-int x86_msr_copy_from_buffer(struct cpu_policy *p,
-                             const msr_entry_buffer_t msrs, uint32_t nr_entries,
-                             uint32_t *err_msr)
-{
-    unsigned int i;
-    xen_msr_entry_t data;
-    int rc;
-
-    if ( err_msr )
-        *err_msr = -1;
-
-    /*
-     * A well formed caller is expected to pass an array with entries in
-     * order, and without any repetitions.  However, due to per-vendor
-     * differences, and in the case of upgrade or levelled scenarios, we
-     * typically expect fewer than MAX entries to be passed.
-     *
-     * Detecting repeated entries is prohibitively complicated, so we don't
-     * bother.  That said, one way or another if more than MAX entries are
-     * passed, something is wrong.
-     */
-    if ( nr_entries > MSR_MAX_SERIALISED_ENTRIES )
-        return -E2BIG;
-
-    for ( i = 0; i < nr_entries; i++ )
-    {
-        if ( copy_from_buffer_offset(&data, msrs, i, 1) )
-            return -EFAULT;
-
-        if ( data.flags ) /* .flags MBZ */
-        {
-            rc = -EINVAL;
-            goto err;
-        }
-
-        switch ( data.idx )
-        {
-            /*
-             * Assign data.val to p->field, checking for truncation if the
-             * backing storage for field is smaller than uint64_t
-             */
-#define ASSIGN(field)                             \
-({                                                \
-    if ( (typeof(p->field))data.val != data.val ) \
-    {                                             \
-        rc = -EOVERFLOW;                          \
-        goto err;                                 \
-    }                                             \
-    p->field = data.val;                          \
-})
-
-        case MSR_INTEL_PLATFORM_INFO: ASSIGN(platform_info.raw); break;
-        case MSR_ARCH_CAPABILITIES:   ASSIGN(arch_caps.raw);     break;
-
-#undef ASSIGN
-
-        default:
-            rc = -ERANGE;
-            goto err;
-        }
-    }
-
-    return 0;
-
- err:
-    if ( err_msr )
-        *err_msr = data.idx;
-
-    return rc;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/lib/x86/policy.c b/xen/lib/x86/policy.c
deleted file mode 100644
index f033d22785..0000000000
--- a/xen/lib/x86/policy.c
+++ /dev/null
@@ -1,54 +0,0 @@
-#include "private.h"
-
-#include <xen/lib/x86/cpu-policy.h>
-
-int x86_cpu_policies_are_compatible(const struct cpu_policy *host,
-                                    const struct cpu_policy *guest,
-                                    struct cpu_policy_errors *err)
-{
-    struct cpu_policy_errors e = INIT_CPU_POLICY_ERRORS;
-    int ret = -EINVAL;
-
-#define NA XEN_CPUID_NO_SUBLEAF
-#define FAIL_CPUID(l, s) \
-    do { e.leaf = (l); e.subleaf = (s); goto out; } while ( 0 )
-#define FAIL_MSR(m) \
-    do { e.msr = (m); goto out; } while ( 0 )
-
-    if ( guest->basic.max_leaf > host->basic.max_leaf )
-        FAIL_CPUID(0, NA);
-
-    if ( guest->feat.max_subleaf > host->feat.max_subleaf )
-        FAIL_CPUID(7, 0);
-
-    if ( guest->extd.max_leaf > host->extd.max_leaf )
-        FAIL_CPUID(0x80000000U, NA);
-
-    /* TODO: Audit more CPUID data. */
-
-    if ( ~host->platform_info.raw & guest->platform_info.raw )
-        FAIL_MSR(MSR_INTEL_PLATFORM_INFO);
-
-#undef FAIL_MSR
-#undef FAIL_CPUID
-#undef NA
-
-    /* Success. */
-    ret = 0;
-
- out:
-    if ( err )
-        *err = e;
-
-    return ret;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/lib/x86/private.h b/xen/lib/x86/private.h
deleted file mode 100644
index aedd8e4821..0000000000
--- a/xen/lib/x86/private.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef XEN_LIB_X86_PRIVATE_H
-#define XEN_LIB_X86_PRIVATE_H
-
-#ifdef __XEN__
-
-#include <xen/bitops.h>
-#include <xen/guest_access.h>
-#include <xen/kernel.h>
-#include <xen/lib.h>
-#include <xen/nospec.h>
-#include <xen/types.h>
-
-#include <asm/msr.h>
-
-#define copy_to_buffer_offset copy_to_guest_offset
-#define copy_from_buffer_offset copy_from_guest_offset
-
-#else
-
-#include <errno.h>
-#include <inttypes.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <string.h>
-
-#include <xen/asm/msr-index.h>
-#include <xen/asm/x86-vendors.h>
-
-#include <xen-tools/common-macros.h>
-
-static inline bool test_bit(unsigned int bit, const void *vaddr)
-{
-    const char *addr = vaddr;
-
-    return addr[bit / 8] & (1u << (bit % 8));
-}
-
-#define array_access_nospec(a, i) (a)[(i)]
-
-/* memcpy(), but with copy_to_guest_offset()'s API. */
-#define copy_to_buffer_offset(dst, index, src, nr)      \
-({                                                      \
-    const typeof(*(src)) *src_ = (src);                 \
-    typeof(*(dst)) *dst_ = (dst);                       \
-    typeof(index) index_ = (index);                     \
-    typeof(nr) nr_ = (nr), i_;                          \
-                                                        \
-    for ( i_ = 0; i_ < nr_; i_++ )                      \
-        dst_[index_ + i_] = src_[i_];                   \
-    0;                                                  \
-})
-
-/* memcpy(), but with copy_from_guest_offset()'s API. */
-#define copy_from_buffer_offset(dst, src, index, nr)    \
-({                                                      \
-    const typeof(*(src)) *src_ = (src);                 \
-    typeof(*(dst)) *dst_ = (dst);                       \
-    typeof(index) index_ = (index);                     \
-    typeof(nr) nr_ = (nr), i_;                          \
-                                                        \
-    for ( i_ = 0; i_ < nr_; i_++ )                      \
-        dst_[i_] = src_[index_ + i_];                   \
-    0;                                                  \
-})
-
-#endif /* __XEN__ */
-
-#endif /* XEN_LIB_X86_PRIVATE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
--
generated by git-patchbot for /home/xen/git/xen.git#master



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.