[Xen-devel] [PATCH] x86: avoid #GP for PV guest MSR accesses
Reasonably recent Linux kernels probe MISC_FEATURES_ENABLES on all CPUs,
leading to ugly (but recovered) #GP fault messages in debug builds on
older systems. We can do better, so introduce synthetic feature flags for
both this MSR and PLATFORM_INFO, allowing the emulation paths to avoid
the rdmsr_safe() probing altogether.

The rdmsr_safe() uses for MISC_ENABLE are left in place as benign - that
MSR exists on all 64-bit capable Intel CPUs (see e.g. early_init_intel()).
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -21,10 +21,19 @@ static bool __init probe_intel_cpuid_fau
 {
 	uint64_t x;
 
-	if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
-	    !(x & MSR_PLATFORM_INFO_CPUID_FAULTING))
+	if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x))
 		return 0;
 
+	setup_force_cpu_cap(X86_FEATURE_MSR_PLATFORM_INFO);
+
+	if (!(x & MSR_PLATFORM_INFO_CPUID_FAULTING)) {
+		if (!rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, x))
+			setup_force_cpu_cap(X86_FEATURE_MSR_MISC_FEATURES);
+		return 0;
+	}
+
+	setup_force_cpu_cap(X86_FEATURE_MSR_MISC_FEATURES);
+
 	expected_levelling_cap |= LCAP_faulting;
 	levelling_caps |= LCAP_faulting;
 	setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -941,8 +941,7 @@ static int read_msr(unsigned int reg, ui
         return X86EMUL_OKAY;
 
     case MSR_INTEL_PLATFORM_INFO:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *val) )
+        if ( !boot_cpu_has(X86_FEATURE_MSR_PLATFORM_INFO) )
             break;
         *val = 0;
         if ( this_cpu(cpuid_faulting_enabled) )
@@ -950,8 +949,7 @@ static int read_msr(unsigned int reg, ui
         return X86EMUL_OKAY;
 
     case MSR_INTEL_MISC_FEATURES_ENABLES:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, *val) )
+        if ( !boot_cpu_has(X86_FEATURE_MSR_MISC_FEATURES) )
             break;
         *val = 0;
         if ( curr->arch.cpuid_faulting )
@@ -1147,15 +1145,13 @@ static int write_msr(unsigned int reg, u
         return X86EMUL_OKAY;
 
     case MSR_INTEL_PLATFORM_INFO:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             val || rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val) )
+        if ( !boot_cpu_has(X86_FEATURE_MSR_PLATFORM_INFO) || val )
             break;
         return X86EMUL_OKAY;
 
     case MSR_INTEL_MISC_FEATURES_ENABLES:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             (val & ~MSR_MISC_FEATURES_CPUID_FAULTING) ||
-             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, temp) )
+        if ( !boot_cpu_has(X86_FEATURE_MSR_MISC_FEATURES) ||
+             (val & ~MSR_MISC_FEATURES_CPUID_FAULTING) )
             break;
         if ( (val & MSR_MISC_FEATURES_CPUID_FAULTING) &&
              !this_cpu(cpuid_faulting_enabled) )
--- a/xen/include/asm-x86/cpufeatures.h
+++ b/xen/include/asm-x86/cpufeatures.h
@@ -22,3 +22,5 @@ XEN_CPUFEATURE(APERFMPERF, (FSCAPIN
 XEN_CPUFEATURE(MFENCE_RDTSC,      (FSCAPINTS+0)*32+ 9) /* MFENCE synchronizes RDTSC */
 XEN_CPUFEATURE(XEN_SMEP,          (FSCAPINTS+0)*32+10) /* SMEP gets used by Xen itself */
 XEN_CPUFEATURE(XEN_SMAP,          (FSCAPINTS+0)*32+11) /* SMAP gets used by Xen itself */
+XEN_CPUFEATURE(MSR_PLATFORM_INFO, (FSCAPINTS+0)*32+12) /* PLATFORM_INFO MSR present */
+XEN_CPUFEATURE(MSR_MISC_FEATURES, (FSCAPINTS+0)*32+13) /* MISC_FEATURES_ENABLES MSR present */
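
For illustration only (not part of the patch): the probe-once-and-cache
pattern described above - take the potentially faulting RDMSR exactly once
at boot, record the result as a synthetic feature bit, and have the guest
MSR emulation test that bit instead of re-reading the MSR - can be sketched
as a small self-contained C program. rdmsr_safe(), setup_force_cpu_cap()
and boot_cpu_has() below are simplified stand-ins for the real Xen
interfaces, not their actual definitions:

/*
 * Illustrative sketch only: probe an MSR once at boot, cache the result
 * as a synthetic feature bit, and have the guest MSR handler test the
 * cached bit instead of re-executing a potentially faulting RDMSR.
 * All helpers here are simplified stand-ins for the real Xen interfaces.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEAT_MSR_PLATFORM_INFO  0   /* synthetic: PLATFORM_INFO MSR present */
#define FEAT_MSR_MISC_FEATURES  1   /* synthetic: MISC_FEATURES_ENABLES present */

static uint32_t synthetic_caps;     /* stand-in for Xen's capability words */

static void setup_force_cpu_cap(unsigned int feat)
{
    synthetic_caps |= 1u << feat;
}

static bool boot_cpu_has(unsigned int feat)
{
    return synthetic_caps & (1u << feat);
}

/*
 * Stand-in for rdmsr_safe(): returns non-zero to simulate a recovered #GP,
 * i.e. an older CPU on which the probed MSR does not exist.
 */
static int rdmsr_safe(uint32_t msr, uint64_t *val)
{
    (void)msr;
    *val = 0;
    return -1;                      /* pretend the MSR is absent */
}

/* Boot-time probe: the (possibly faulting) RDMSR happens exactly once. */
static void probe_msrs(void)
{
    uint64_t x;

    if (rdmsr_safe(0xce, &x) == 0)  /* 0xce: PLATFORM_INFO */
        setup_force_cpu_cap(FEAT_MSR_PLATFORM_INFO);
    if (rdmsr_safe(0x140, &x) == 0) /* 0x140: MISC_FEATURES_ENABLES */
        setup_force_cpu_cap(FEAT_MSR_MISC_FEATURES);
}

/* Guest MSR read path: no RDMSR at all, hence no #GP in the hypervisor. */
static int emulate_read_platform_info(uint64_t *val)
{
    if (!boot_cpu_has(FEAT_MSR_PLATFORM_INFO))
        return -1;                  /* let the guest see the failure instead */
    *val = 0;
    return 0;
}

int main(void)
{
    uint64_t val;

    probe_msrs();
    printf("PLATFORM_INFO read: %s\n",
           emulate_read_platform_info(&val) ? "#GP to guest" : "ok");
    return 0;
}

Run standalone, the sketch prints "#GP to guest" because the stub
rdmsr_safe() simulates an older CPU where the MSR is absent; the point is
that only the boot-time probe ever touches the MSR itself.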