[Xen-changelog] [xen stable-4.10] x86: Common cpuid faulting support
commit b538a13a68b42dbe47832d76299011765bf59e60
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Jan 11 17:48:00 2018 +0000
Commit: Roger Pau Monne <roger.pau@xxxxxxxxxx>
CommitDate: Thu Jan 11 17:51:18 2018 +0000
x86: Common cpuid faulting support
With CPUID Faulting offered to SVM guests, move Xen's faulting code to be
common rather than Intel-specific.
This is necessary for nested Xen (inc. pv-shim mode) to prevent PV guests
from finding the outer HVM Xen leaves via native cpuid.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/cpu/amd.c | 16 +++++---
xen/arch/x86/cpu/common.c | 76 ++++++++++++++++++++++++++++++++++++--
xen/arch/x86/cpu/intel.c | 82 +++++++----------------------------------
xen/include/asm-x86/cpuid.h | 3 --
xen/include/asm-x86/processor.h | 4 +-
5 files changed, 98 insertions(+), 83 deletions(-)
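For context, CPUID faulting is a two-MSR mechanism: bit 31 of the read-only
MSR_INTEL_PLATFORM_INFO (0xce) advertises the feature, and bit 0 of
MSR_INTEL_MISC_FEATURES_ENABLES (0x140) enables it, after which a CPUID
executed at CPL > 0 raises #GP(0) for the hypervisor to intercept and
emulate.  Below is a minimal standalone sketch of the probe/toggle sequence
the patch implements; rdmsr_safe()/wrmsrl() are stand-ins for Xen's helpers,
declared here only so the sketch is self-contained.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_INTEL_PLATFORM_INFO          0x000000ce
    #define  PLATFORM_INFO_CPUID_FAULTING    (1ULL << 31)
    #define MSR_INTEL_MISC_FEATURES_ENABLES  0x00000140
    #define  MISC_FEATURES_CPUID_FAULTING    (1ULL << 0)

    /* Stand-ins for Xen's MSR helpers; rdmsr_safe() returns non-zero on #GP. */
    extern int rdmsr_safe(uint32_t msr, uint64_t *val);
    extern void wrmsrl(uint32_t msr, uint64_t val);

    /* Probe: the feature is advertised via a read-only PLATFORM_INFO bit. */
    static bool cpuid_faulting_available(void)
    {
        uint64_t val;

        return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, &val) &&
               (val & PLATFORM_INFO_CPUID_FAULTING);
    }

    /* Toggle: while the bit is set, CPUID at CPL > 0 raises #GP(0). */
    static void cpuid_faulting_set(bool enable)
    {
        uint64_t val;

        if (rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, &val))
            return;

        if (enable)
            val |= MISC_FEATURES_CPUID_FAULTING;
        else
            val &= ~MISC_FEATURES_CPUID_FAULTING;

        wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
    }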
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 5f36ac75a7..2bff3ee377 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -198,11 +198,12 @@ static void __init noinline probe_masking_msrs(void)
}
/*
- * Context switch levelling state to the next domain. A parameter of NULL is
- * used to context switch to the default host state (by the cpu bringup-code,
- * crash path, etc).
+ * Context switch CPUID masking state to the next domain. Only called if
+ * CPUID Faulting isn't available, but masking MSRs have been detected. A
+ * parameter of NULL is used to context switch to the default host state (by
+ * the cpu bringup-code, crash path, etc).
*/
-static void amd_ctxt_switch_levelling(const struct vcpu *next)
+static void amd_ctxt_switch_masking(const struct vcpu *next)
{
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct domain *nextd = next ? next->domain : NULL;
@@ -263,6 +264,9 @@ static void __init noinline amd_init_levelling(void)
{
const struct cpuidmask *m = NULL;
+ if (probe_cpuid_faulting())
+ return;
+
probe_masking_msrs();
if (*opt_famrev != '\0') {
@@ -352,7 +356,7 @@ static void __init noinline amd_init_levelling(void)
}
if (levelling_caps)
- ctxt_switch_levelling = amd_ctxt_switch_levelling;
+ ctxt_switch_masking = amd_ctxt_switch_masking;
}
/*
@@ -518,7 +522,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c == &boot_cpu_data)
amd_init_levelling();
- amd_ctxt_switch_levelling(NULL);
+ ctxt_switch_levelling(NULL);
}
static void init_amd(struct cpuinfo_x86 *c)
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 6cf362849e..157bae2026 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -113,12 +113,80 @@ static const struct cpu_dev default_cpu = {
};
static const struct cpu_dev *this_cpu = &default_cpu;
-static void default_ctxt_switch_levelling(const struct vcpu *next)
+static DEFINE_PER_CPU(uint64_t, msr_misc_features);
+void (* __read_mostly ctxt_switch_masking)(const struct vcpu *next);
+
+bool __init probe_cpuid_faulting(void)
+{
+ uint64_t val;
+
+ if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val) ||
+ !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
+ rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
+ this_cpu(msr_misc_features)))
+ {
+ setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING);
+ return false;
+ }
+
+ expected_levelling_cap |= LCAP_faulting;
+ levelling_caps |= LCAP_faulting;
+ setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
+
+ return true;
+}
+
+static void set_cpuid_faulting(bool enable)
+{
+ uint64_t *this_misc_features = &this_cpu(msr_misc_features);
+ uint64_t val = *this_misc_features;
+
+ if (!!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable)
+ return;
+
+ val ^= MSR_MISC_FEATURES_CPUID_FAULTING;
+
+ wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
+ *this_misc_features = val;
+}
+
+void ctxt_switch_levelling(const struct vcpu *next)
{
- /* Nop */
+ const struct domain *nextd = next ? next->domain : NULL;
+
+ if (cpu_has_cpuid_faulting) {
+ /*
+ * No need to alter the faulting setting if we are switching
+ * to idle; it won't affect any code running in idle context.
+ */
+ if (nextd && is_idle_domain(nextd))
+ return;
+ /*
+ * We *should* be enabling faulting for the control domain.
+ *
+ * Unfortunately, the domain builder (having only ever been a
+ * PV guest) expects to be able to see host cpuid state in a
+ * native CPUID instruction, to correctly build a CPUID policy
+ * for HVM guests (notably the xstate leaves).
+ *
+ * This logic is fundamentally broken for HVM toolstack
+ * domains, and faulting causes PV guests to behave like HVM
+ * guests from their point of view.
+ *
+ * Future development plans will move responsibility for
+ * generating the maximum full cpuid policy into Xen, at which
+ * point this problem will disappear.
+ */
+ set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
+ (is_pv_domain(nextd) ||
+ next->arch.msr->
+ misc_features_enables.cpuid_faulting));
+ return;
+ }
+
+ if (ctxt_switch_masking)
+ ctxt_switch_masking(next);
}
-void (* __read_mostly ctxt_switch_levelling)(const struct vcpu *next) =
- default_ctxt_switch_levelling;
bool_t opt_cpu_info;
boolean_param("cpuinfo", opt_cpu_info);
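The enable condition in the new common ctxt_switch_levelling() packs several
cases into one expression.  A restatement as a standalone helper may help
readers; this is a sketch for readability only, reusing the domain predicates
and the arch.msr field exactly as named in the diff, not actual Xen code.

    /*
     * Restatement of the faulting decision above.  Returns whether CPUID
     * faulting should be active while 'next' runs (the idle-domain early
     * return in the real function is left implicit here).
     */
    static bool want_cpuid_faulting(const struct vcpu *next)
    {
        const struct domain *d = next ? next->domain : NULL;

        if (!d)                      /* Default host state: faulting off. */
            return false;

        if (is_control_domain(d))    /* Dom0's builder needs native CPUID. */
            return false;

        if (is_pv_domain(d))         /* Other PV guests: always fault. */
            return true;

        /* HVM guests: honour the guest's own MISC_FEATURES_ENABLES bit. */
        return next->arch.msr->misc_features_enables.cpuid_faulting;
    }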
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index ac932e5b38..508e56f5c1 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -17,41 +17,6 @@
#define select_idle_routine(x) ((void)0)
-static bool __init probe_intel_cpuid_faulting(void)
-{
- uint64_t x;
-
- if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
- !(x & MSR_PLATFORM_INFO_CPUID_FAULTING))
- return 0;
-
- expected_levelling_cap |= LCAP_faulting;
- levelling_caps |= LCAP_faulting;
- setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
- return 1;
-}
-
-DEFINE_PER_CPU(bool, cpuid_faulting_enabled);
-
-static void set_cpuid_faulting(bool enable)
-{
- bool *this_enabled = &this_cpu(cpuid_faulting_enabled);
- uint32_t hi, lo;
-
- ASSERT(cpu_has_cpuid_faulting);
-
- if (*this_enabled == enable)
- return;
-
- rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
- lo &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
- if (enable)
- lo |= MSR_MISC_FEATURES_CPUID_FAULTING;
- wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
-
- *this_enabled = enable;
-}
-
/*
* Set caps in expected_levelling_cap, probe a specific masking MSR, and set
* caps in levelling_caps if it is found, or clobber the MSR index if missing.
@@ -147,40 +112,17 @@ static void __init probe_masking_msrs(void)
}
/*
- * Context switch levelling state to the next domain. A parameter of NULL is
- * used to context switch to the default host state (by the cpu bringup-code,
- * crash path, etc).
+ * Context switch CPUID masking state to the next domain. Only called if
+ * CPUID Faulting isn't available, but masking MSRs have been detected. A
+ * parameter of NULL is used to context switch to the default host state (by
+ * the cpu bringup-code, crash path, etc).
*/
-static void intel_ctxt_switch_levelling(const struct vcpu *next)
+static void intel_ctxt_switch_masking(const struct vcpu *next)
{
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct domain *nextd = next ? next->domain : NULL;
- const struct cpuidmasks *masks;
-
- if (cpu_has_cpuid_faulting) {
- /*
- * We *should* be enabling faulting for the control domain.
- *
- * Unfortunately, the domain builder (having only ever been a
- * PV guest) expects to be able to see host cpuid state in a
- * native CPUID instruction, to correctly build a CPUID policy
- * for HVM guests (notably the xstate leaves).
- *
- * This logic is fundamentally broken for HVM toolstack
- * domains, and faulting causes PV guests to behave like HVM
- * guests from their point of view.
- *
- * Future development plans will move responsibility for
- * generating the maximum full cpuid policy into Xen, at which
- * point this problem will disappear.
- */
- set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
- (is_pv_domain(nextd) ||
- next->arch.msr->misc_features_enables.cpuid_faulting));
- return;
- }
-
- masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+ const struct cpuidmasks *masks =
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
if (msr_basic) {
@@ -225,8 +167,10 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
*/
static void __init noinline intel_init_levelling(void)
{
- if (!probe_intel_cpuid_faulting())
- probe_masking_msrs();
+ if (probe_cpuid_faulting())
+ return;
+
+ probe_masking_msrs();
if (msr_basic) {
uint32_t ecx, edx, tmp;
@@ -280,7 +224,7 @@ static void __init noinline intel_init_levelling(void)
}
if (levelling_caps)
- ctxt_switch_levelling = intel_ctxt_switch_levelling;
+ ctxt_switch_masking = intel_ctxt_switch_masking;
}
static void early_init_intel(struct cpuinfo_x86 *c)
@@ -320,7 +264,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c == &boot_cpu_data)
intel_init_levelling();
- intel_ctxt_switch_levelling(NULL);
+ ctxt_switch_levelling(NULL);
}
/*
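On hardware without faulting, levelling falls back to the vendor CPUID-mask
MSRs, which AND against the feature words CPUID would otherwise return; "no
masking" therefore means writing all-ones, which is why cpuidmask_defaults
rather than zero is the fallback.  A sketch of that write follows; the MSR
index parameter stands in for the probed values such as msr_basic in the
patch and is not a real register number.

    /*
     * Sketch of the masking fallback.  mask_msr is a stand-in for a vendor
     * mask MSR index probed at boot; wrmsrl() as in the earlier sketch.
     */
    static void apply_cpuid_mask(uint32_t mask_msr,
                                 const uint64_t *domain_mask,
                                 uint64_t default_mask)
    {
        if (!mask_msr)               /* Probe found no usable mask MSR. */
            return;

        /*
         * The hardware ANDs this value into CPUID output, so the default
         * (all feature bits the host actually has) leaves results intact.
         */
        wrmsrl(mask_msr, domain_mask ? *domain_mask : default_mask);
    }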
diff --git a/xen/include/asm-x86/cpuid.h b/xen/include/asm-x86/cpuid.h
index d2dd841e15..74d6f123e5 100644
--- a/xen/include/asm-x86/cpuid.h
+++ b/xen/include/asm-x86/cpuid.h
@@ -58,9 +58,6 @@ DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks);
/* Default masking MSR values, calculated at boot. */
extern struct cpuidmasks cpuidmask_defaults;
-/* Whether or not cpuid faulting is available for the current domain. */
-DECLARE_PER_CPU(bool, cpuid_faulting_enabled);
-
#define CPUID_GUEST_NR_BASIC (0xdu + 1)
#define CPUID_GUEST_NR_FEAT (0u + 1)
#define CPUID_GUEST_NR_CACHE (5u + 1)
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 41a8d8c32f..c9601b2fb2 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -151,7 +151,9 @@ extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
-extern void (*ctxt_switch_levelling)(const struct vcpu *next);
+extern bool probe_cpuid_faulting(void);
+extern void ctxt_switch_levelling(const struct vcpu *next);
+extern void (*ctxt_switch_masking)(const struct vcpu *next);
extern u64 host_pat;
extern bool_t opt_cpu_info;
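The header change summarises the new shape of the interface:
ctxt_switch_levelling() is now a plain function and the single common entry
point, while the per-vendor hook behind it is the ctxt_switch_masking
pointer.  A caller's-eye sketch, assuming only the declarations above:

    /* Context switch path: one call; common code picks faulting or masking. */
    void example_context_switch(const struct vcpu *next)
    {
        ctxt_switch_levelling(next);     /* 'next' = vCPU about to run. */
    }

    /* CPU bringup and crash paths: NULL restores the default host state. */
    void example_cpu_bringup(void)
    {
        ctxt_switch_levelling(NULL);
    }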
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.10