
[Xen-devel] [PATCH v3] x86/intel: Protect set_cpuidmask() against #GP faults



Virtual environments such as Xen HVM containers and VirtualBox do not
necessarily provide support for feature masking MSRs.

As their presence is detected by model numbers alone, and their use is
predicated on command line parameters, use the safe() variants of {wr,rd}msr()
to avoid dying with an early #GP fault.
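
As a minimal sketch of that pattern (illustrative only, not part of the patch:
apply_mask_msr() and its message are hypothetical names; rdmsr_safe() and
wrmsr_safe() are the existing Xen helpers used in the diff below, which report
a faulting access via their return value instead of crashing):

    /* Illustrative sketch: write a possibly-absent masking MSR without
     * risking a fatal #GP.  wrmsr_safe() returns non-zero if the access
     * faults, so the failure can simply be logged and masking skipped. */
    static void apply_mask_msr(unsigned int msr, u32 lo, u32 hi)
    {
        if (wrmsr_safe(msr, ((u64)hi << 32) | lo))
            printk("MSR %#x unavailable, not masking CPUID features\n", msr);
    }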

While playing in this function, make some further improvements.

* Rename the masking MSR definitions for consistency, and name the CPU models
  for the benefit of humans reading the code.
* Correct the CPU selection as specified in the flexmigration document.  All
  steppings of 0x17 and 0x1a are stated to have masking MSRs.
* Provide log messages indicating the masks applied, or lack of masking
  capabilities (see the example output after this list).
* Where CPUID faulting support is available but masking command line options
  have been specified, provide a log message noting that feature masking is
  not available.
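
As an illustration of the new log messages (the mask value is hypothetical,
and this assumes the existing "cpuid_mask_ecx" command line parameter backing
opt_cpuid_mask_ecx), booting with cpuid_mask_ecx=0xffffff7f on a
masking-capable CPU should produce a line along the lines of:

    (XEN) Writing CPUID feature mask ecx:edx -> ffffff7f:ffffffff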

References:
http://www.intel.com/content/www/us/en/virtualization/virtualization-technology-flexmigration-application-note.html
http://software.intel.com/en-us/articles/intel-architecture-and-processor-identification-with-cpuid-model-and-family-numbers

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Yang Zhang <yang.z.zhang@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>

---
v3: Don't call set_cpuidmask() unconditionally
v2: Correct msr_val manipulation for msr_xsave

I have done some further digging regarding model 0x1f, to little avail.  It
would seem that there are two cancelled processors, Auburndale and Havendale,
which might fit the bill and explain 0x1f's absence from the public processor
identification information.
---
 xen/arch/x86/cpu/intel.c        |  152 +++++++++++++++++++++++++--------------
 xen/include/asm-x86/msr-index.h |   13 ++--
 2 files changed, 106 insertions(+), 59 deletions(-)

diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index e178b5c..9868cd5 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -51,68 +51,109 @@ void set_cpuid_faulting(bool_t enable)
  */
 static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
 {
-       u32 eax, edx;
-       const char *extra = "";
+       static unsigned int msr_basic, msr_ext, msr_xsave;
+       static enum { not_parsed, no_mask, set_mask } status;
+       u64 msr_val;
+
+       if (status == no_mask)
+               return;
+
+       if (status == set_mask)
+               goto setmask;
+
+       ASSERT((status == not_parsed) && (c == &boot_cpu_data));
+       status = no_mask;
 
        if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
               opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
-               opt_cpuid_mask_xsave_eax))
+              opt_cpuid_mask_xsave_eax))
                return;
 
-       /* Only family 6 supports this feature  */
-       switch ((c->x86 == 6) * c->x86_model) {
-       case 0x17:
-               if ((c->x86_mask & 0x0f) < 4)
-                       break;
-               /* fall through */
-       case 0x1d:
-               wrmsr(MSR_INTEL_CPUID_FEATURE_MASK,
-                     opt_cpuid_mask_ecx,
-                     opt_cpuid_mask_edx);
-               if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
-                       extra = "extended ";
-               else if (~opt_cpuid_mask_xsave_eax)
-                       extra = "xsave ";
-               else
-                       return;
+       /* Only family 6 supports this feature. */
+       if (c->x86 != 6) {
+               printk("No CPUID feature masking support available\n");
+               return;
+       }
+
+       switch (c->x86_model) {
+       case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
+       case 0x1d: /* Dunnington(MP) */
+               msr_basic = MSR_INTEL_MASK_V1_CPUID1;
                break;
-/* 
- * CPU supports this feature if the processor signature meets the following:
- * (CPUID.(EAX=01h):EAX) > 000106A2h, or
- * (CPUID.(EAX=01h):EAX) == 000106Exh, 0002065xh, 000206Cxh, 000206Exh, or 000206Fxh
- *
- */
-       case 0x1a:
-               if ((c->x86_mask & 0x0f) <= 2)
-                       break;
-               /* fall through */
-       case 0x1e: case 0x1f:
-       case 0x25: case 0x2c: case 0x2e: case 0x2f:
-               wrmsr(MSR_INTEL_CPUID1_FEATURE_MASK,
-                     opt_cpuid_mask_ecx,
-                     opt_cpuid_mask_edx);
-               wrmsr(MSR_INTEL_CPUID80000001_FEATURE_MASK,
-                     opt_cpuid_mask_ext_ecx,
-                     opt_cpuid_mask_ext_edx);
-               if (!~opt_cpuid_mask_xsave_eax)
-                       return;
-               extra = "xsave ";
+
+       case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
+       case 0x1e: /* Clarksfield, Lynnfield, Jasper Forest */
+       case 0x1f: /* Something Nehalem-based - perhaps Auburndale/Havendale? */
+       case 0x25: /* Arrandale, Clarksdale */
+       case 0x2c: /* Gulftown, Westmere-EP */
+       case 0x2e: /* Nehalem-EX(Beckton) */
+       case 0x2f: /* Westmere-EX */
+               msr_basic = MSR_INTEL_MASK_V2_CPUID1;
+               msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
+               break;
+
+       case 0x2a: /* SandyBridge */
+       case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
+               msr_basic = MSR_INTEL_MASK_V3_CPUID1;
+               msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
+               msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
                break;
-       case 0x2a: case 0x2d:
-               wrmsr(MSR_INTEL_CPUID1_FEATURE_MASK_V2,
-                     opt_cpuid_mask_ecx,
-                     opt_cpuid_mask_edx);
-               rdmsr(MSR_INTEL_CPUIDD_01_FEATURE_MASK, eax, edx);
-               wrmsr(MSR_INTEL_CPUIDD_01_FEATURE_MASK,
-                     opt_cpuid_mask_xsave_eax, edx);
-               wrmsr(MSR_INTEL_CPUID80000001_FEATURE_MASK_V2,
-                     opt_cpuid_mask_ext_ecx,
-                     opt_cpuid_mask_ext_edx);
-               return;
        }
 
-       printk(XENLOG_ERR "Cannot set CPU %sfeature mask on CPU#%d\n",
-              extra, smp_processor_id());
+       status = set_mask;
+
+       if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
+               if (msr_basic)
+                       printk("Writing CPUID feature mask ecx:edx -> %08x:%08x\n",
+                              opt_cpuid_mask_ecx, opt_cpuid_mask_edx);
+               else
+                       printk("No CPUID feature mask available\n");
+       }
+       else
+               msr_basic = 0;
+
+       if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
+               if (msr_ext)
+                       printk("Writing CPUID extended feature mask ecx:edx -> %08x:%08x\n",
+                              opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx);
+               else
+                       printk("No CPUID extended feature mask available\n");
+       }
+       else
+               msr_ext = 0;
+
+       if (~opt_cpuid_mask_xsave_eax) {
+               if (msr_xsave)
+                       printk("Writing CPUID xsave feature mask eax -> %08x\n",
+                              opt_cpuid_mask_xsave_eax);
+               else
+                       printk("No CPUID xsave feature mask available\n");
+       }
+       else
+               msr_xsave = 0;
+
+ setmask:
+       if (msr_basic &&
+           wrmsr_safe(msr_basic,
+                      ((u64)opt_cpuid_mask_edx << 32) | opt_cpuid_mask_ecx)){
+               msr_basic = 0;
+               printk("Failed to set CPUID feature mask\n");
+       }
+
+       if (msr_ext &&
+           wrmsr_safe(msr_ext,
+                      ((u64)opt_cpuid_mask_ext_edx << 32) | opt_cpuid_mask_ext_ecx)){
+               msr_ext = 0;
+               printk("Failed to set CPUID extended feature mask\n");
+       }
+
+       if (msr_xsave &&
+           (rdmsr_safe(msr_xsave, msr_val) ||
+            wrmsr_safe(msr_xsave,
+                       (msr_val & (~0ULL << 32)) | opt_cpuid_mask_xsave_eax))){
+               msr_xsave = 0;
+               printk("Failed to set CPUID xsave feature mask\n");
+       }
 }
 
 void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
@@ -222,6 +263,11 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
 
        if (!cpu_has_cpuid_faulting)
                set_cpuidmask(c);
+       else if ((c == &boot_cpu_data) &&
+                (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
+                   opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
+                   opt_cpuid_mask_xsave_eax)))
+               printk("No CPUID feature masking support available\n");
 
        /* Work around errata */
        Intel_errata_workarounds(c);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 70a8201..7247497 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -469,13 +469,14 @@
 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL  0x00000390
 
 /* Intel cpuid spoofing MSRs */
-#define MSR_INTEL_CPUID_FEATURE_MASK   0x00000478
-#define MSR_INTEL_CPUID1_FEATURE_MASK  0x00000130
-#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
+#define MSR_INTEL_MASK_V1_CPUID1        0x00000478
 
-#define MSR_INTEL_CPUID1_FEATURE_MASK_V2        0x00000132
-#define MSR_INTEL_CPUID80000001_FEATURE_MASK_V2 0x00000133
-#define MSR_INTEL_CPUIDD_01_FEATURE_MASK        0x00000134
+#define MSR_INTEL_MASK_V2_CPUID1        0x00000130
+#define MSR_INTEL_MASK_V2_CPUID80000001 0x00000131
+
+#define MSR_INTEL_MASK_V3_CPUID1        0x00000132
+#define MSR_INTEL_MASK_V3_CPUID80000001 0x00000133
+#define MSR_INTEL_MASK_V3_CPUIDD_01     0x00000134
 
 /* Intel cpuid faulting MSRs */
 #define MSR_INTEL_PLATFORM_INFO                0x000000ce
-- 
1.7.10.4

