
[Xen-devel] [PATCH RFC 26/31] xen/x86: Rework AMD masking MSR setup



This patch is best reviewed as its end result rather than as a diff, as it
rewrites almost all of the setup.

On the BSP, cpuid information is used to evaluate the potentially available
set of masking MSRs, which are then unconditionally probed, filling in the
availability information and hardware defaults.
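
In short, the probe performed for each candidate MSR boils down to the
following sketch (probe_one_mask_msr() is an illustrative stand-in for the
patch's __probe_mask_msr(); rdmsr_amd_safe()/wrmsr_amd_safe() are the file's
existing fault-tolerant helpers, returning non-zero on a faulting access):

  static void probe_one_mask_msr(unsigned int msr, uint64_t caps,
                                 uint64_t *hw_default)
  {
          unsigned int lo = 0, hi = 0;

          /* cpuid suggests this MSR should exist... */
          expected_levelling_cap |= caps;

          /* ...and it really does if it can be read and written back. */
          if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&
              (wrmsr_amd_safe(msr, lo, hi) == 0))
                  levelling_caps |= caps;

          /* Record the hardware default for later levelling decisions. */
          *hw_default = ((uint64_t)hi << 32) | lo;
  }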

The command line parameters are then combined with the hardware defaults to
further restrict the Xen default masking level.  Each cpu is then context
switched into the default levelling state.
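
The lazy part of that context switch amounts to the sketch below
(lazy_update_msr() is an illustrative stand-in for one expansion of the
patch's LAZY() macro; wrmsr_amd() is the password-protected write introduced
by this patch):

  static void lazy_update_msr(unsigned int msr, uint64_t *cached,
                              uint64_t want)
  {
          /* Only issue the (expensive) MSR write when the per-cpu cached
           * value differs from the wanted default. */
          if (*cached != want) {
                  wrmsr_amd(msr, want);
                  *cached = want;
          }
  }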

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/amd.c | 250 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 153 insertions(+), 97 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 265fbc0..6b95ab6 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -80,6 +80,13 @@ static inline int wrmsr_amd_safe(unsigned int msr, unsigned int lo,
        return err;
 }
 
+static void wrmsr_amd(unsigned int msr, uint64_t val)
+{
+       asm volatile("wrmsr" ::
+                    "c" (msr), "a" ((uint32_t)val),
+                    "d" (val >> 32), "D" (0x9c5a203a));
+}
+
 static const struct cpuidmask {
        uint16_t fam;
        char rev[2];
@@ -126,126 +133,172 @@ static const struct cpuidmask *__init noinline get_cpuidmask(const char *opt)
 }
 
 /*
- * Mask the features and extended features returned by CPUID.  Parameters are
- * set from the boot line via two methods:
- *
- *   1) Specific processor revision string
- *   2) User-defined masks
- *
- * The processor revision string parameter has precedene.
+ * Sets caps in expected_levelling_cap, probes for the specified mask MSR, and
+ * sets caps in levelling_caps if it is found.  Processors prior to Fam 10h
+ * required a 32-bit password for masking MSRs.  Reads the default value into
+ * msr_val.
  */
-static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
+static void __init __probe_mask_msr(unsigned int msr, uint64_t caps,
+                                    uint64_t *msr_val)
 {
-       static unsigned int feat_ecx, feat_edx;
-       static unsigned int extfeat_ecx, extfeat_edx;
-       static unsigned int l7s0_eax, l7s0_ebx;
-       static unsigned int thermal_ecx;
-       static bool_t skip_feat, skip_extfeat;
-       static bool_t skip_l7s0_eax_ebx, skip_thermal_ecx;
-       static enum { not_parsed, no_mask, set_mask } status;
-       unsigned int eax, ebx, ecx, edx;
-
-       if (status == no_mask)
-               return;
+       unsigned int hi, lo;
+
+       expected_levelling_cap |= caps;
+
+       if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&
+           (wrmsr_amd_safe(msr, lo, hi) == 0))
+               levelling_caps |= caps;
+
+       *msr_val = ((uint64_t)hi << 32) | lo;
+}
 
-       if (status == set_mask)
-               goto setmask;
+/*
+ * Probe for the existence of the expected masking MSRs.  They might easily
+ * not be available if Xen is running virtualised.
+ */
+static void __init noinline probe_masking_msrs(void)
+{
+       const struct cpuinfo_x86 *c = &boot_cpu_data;
 
-       ASSERT((status == not_parsed) && (c == &boot_cpu_data));
-       status = no_mask;
+       /*
+        * First, work out which masking MSRs we should have, based on
+        * revision and cpuid.
+        */
 
        /* Fam11 doesn't support masking at all. */
        if (c->x86 == 0x11)
                return;
 
-       if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
-             opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
-             opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx &
-             opt_cpuid_mask_thermal_ecx)) {
-               feat_ecx = opt_cpuid_mask_ecx;
-               feat_edx = opt_cpuid_mask_edx;
-               extfeat_ecx = opt_cpuid_mask_ext_ecx;
-               extfeat_edx = opt_cpuid_mask_ext_edx;
-               l7s0_eax = opt_cpuid_mask_l7s0_eax;
-               l7s0_ebx = opt_cpuid_mask_l7s0_ebx;
-               thermal_ecx = opt_cpuid_mask_thermal_ecx;
-       } else if (*opt_famrev == '\0') {
+       __probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd,
+                        &cpumask_defaults._1cd);
+       __probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd,
+                        &cpumask_defaults.e1cd);
+
+       if (c->cpuid_level >= 7)
+               __probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0,
+                                &cpumask_defaults._7ab0);
+
+       if (c->x86 == 0x15 && c->cpuid_level >= 6 && cpuid_ecx(6))
+               __probe_mask_msr(MSR_AMD_THRM_FEATURE_MASK, LCAP_6c,
+                                &cpumask_defaults._6c);
+
+       /*
+        * Don't bother warning about a mismatch if virtualised.  These MSRs
+        * are not architectural and almost never virtualised.
+        */
+       if ((expected_levelling_cap == levelling_caps) ||
+           cpu_has_hypervisor)
                return;
-       } else {
-               const struct cpuidmask *m = get_cpuidmask(opt_famrev);
+
+       printk(XENLOG_WARNING "Mismatch between expected (%#x"
+              ") and real (%#x) levelling caps: missing %#x\n",
+              expected_levelling_cap, levelling_caps,
+              (expected_levelling_cap ^ levelling_caps) & levelling_caps);
+       printk(XENLOG_WARNING "Fam %#x, model %#x level %#x\n",
+              c->x86, c->x86_model, c->cpuid_level);
+       printk(XENLOG_WARNING
+              "If not running virtualised, please report a bug\n");
+}
+
+void amd_ctxt_switch_levelling(const struct domain *nextd)
+{
+       struct cpumasks *these_masks = &this_cpu(cpumasks);
+       const struct cpumasks *masks = &cpumask_defaults;
+
+#define LAZY(cap, msr, field)                                          \
+       ({                                                              \
+               if ( ((levelling_caps & cap) == cap) &&                 \
+                    (these_masks->field != masks->field) )             \
+               {                                                       \
+                       wrmsr_amd(msr, masks->field);                   \
+                       these_masks->field = masks->field;              \
+               }                                                       \
+       })
+
+       LAZY(LCAP_1cd,  MSR_K8_FEATURE_MASK,       _1cd);
+       LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK,   e1cd);
+       LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
+       LAZY(LCAP_6c,   MSR_AMD_THRM_FEATURE_MASK, _6c);
+
+#undef LAZY
+}
+
+/*
+ * Mask the features and extended features returned by CPUID.  Parameters are
+ * set from the boot line via two methods:
+ *
+ *   1) Specific processor revision string
+ *   2) User-defined masks
+ *
+ * The processor revision string parameter has precedence.
+ */
+static void __init noinline amd_init_levelling(void)
+{
+       const struct cpuidmask *m = NULL;
+
+       probe_masking_msrs();
+
+       if (*opt_famrev != '\0') {
+               m = get_cpuidmask(opt_famrev);
 
                if (!m) {
                        printk("Invalid processor string: %s\n", opt_famrev);
-                       printk("CPUID will not be masked\n");
-                       return;
                }
-               feat_ecx = m->ecx;
-               feat_edx = m->edx;
-               extfeat_ecx = m->ext_ecx;
-               extfeat_edx = m->ext_edx;
        }
 
-        /* Setting bits in the CPUID mask MSR that are not set in the
-         * unmasked CPUID response can cause those bits to be set in the
-         * masked response.  Avoid that by explicitly masking in software. */
-        feat_ecx &= cpuid_ecx(0x00000001);
-        feat_edx &= cpuid_edx(0x00000001);
-        extfeat_ecx &= cpuid_ecx(0x80000001);
-        extfeat_edx &= cpuid_edx(0x80000001);
+       if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
+               uint32_t ecx, edx, tmp;
 
-       status = set_mask;
-       printk("Writing CPUID feature mask ECX:EDX -> %08Xh:%08Xh\n", 
-              feat_ecx, feat_edx);
-       printk("Writing CPUID extended feature mask ECX:EDX -> %08Xh:%08Xh\n", 
-              extfeat_ecx, extfeat_edx);
+               cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
 
-       if (c->cpuid_level >= 7)
-               cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
-       else
-               ebx = eax = 0;
-       if ((eax | ebx) && ~(l7s0_eax & l7s0_ebx)) {
-               if (l7s0_eax > eax)
-                       l7s0_eax = eax;
-               l7s0_ebx &= ebx;
-               printk("Writing CPUID leaf 7 subleaf 0 feature mask EAX:EBX -> 
%08Xh:%08Xh\n",
-                      l7s0_eax, l7s0_ebx);
-       } else
-               skip_l7s0_eax_ebx = 1;
-
-       /* Only Fam15 has the respective MSR. */
-       ecx = c->x86 == 0x15 && c->cpuid_level >= 6 ? cpuid_ecx(6) : 0;
-       if (ecx && ~thermal_ecx) {
-               thermal_ecx &= ecx;
-               printk("Writing CPUID thermal/power feature mask ECX -> 
%08Xh\n",
-                      thermal_ecx);
-       } else
-               skip_thermal_ecx = 1;
-
- setmask:
-       /* AMD processors prior to family 10h required a 32-bit password */
-       if (!skip_feat &&
-           wrmsr_amd_safe(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx)) {
-               skip_feat = 1;
-               printk("Failed to set CPUID feature mask\n");
+               if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
+                       ecx &= opt_cpuid_mask_ecx;
+                       edx &= opt_cpuid_mask_edx;
+               } else if (m) {
+                       ecx &= m->ecx;
+                       edx &= m->edx;
+               }
+
+               cpumask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
        }
 
-       if (!skip_extfeat &&
-           wrmsr_amd_safe(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx)) {
-               skip_extfeat = 1;
-               printk("Failed to set CPUID extended feature mask\n");
+       if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) {
+               uint32_t ecx, edx, tmp;
+
+               cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
+
+               if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
+                       ecx &= opt_cpuid_mask_ext_ecx;
+                       edx &= opt_cpuid_mask_ext_edx;
+               } else if (m) {
+                       ecx &= m->ext_ecx;
+                       edx &= m->ext_edx;
+               }
+
+               cpumask_defaults.e1cd = ((uint64_t)ecx << 32) | edx;
        }
 
-       if (!skip_l7s0_eax_ebx &&
-           wrmsr_amd_safe(MSR_AMD_L7S0_FEATURE_MASK, l7s0_ebx, l7s0_eax)) {
-               skip_l7s0_eax_ebx = 1;
-               printk("Failed to set CPUID leaf 7 subleaf 0 feature mask\n");
+       if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) {
+               uint32_t eax, ebx, tmp;
+
+               cpuid(0x00000007, &eax, &ebx, &tmp, &tmp);
+
+               if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx)) {
+                       eax &= opt_cpuid_mask_l7s0_eax;
+                       ebx &= opt_cpuid_mask_l7s0_ebx;
+               }
+
+               cpumask_defaults._7ab0 = ((uint64_t)eax << 32) | ebx;
        }
 
-       if (!skip_thermal_ecx &&
-           (rdmsr_amd_safe(MSR_AMD_THRM_FEATURE_MASK, &eax, &edx) ||
-            wrmsr_amd_safe(MSR_AMD_THRM_FEATURE_MASK, thermal_ecx, edx))){
-               skip_thermal_ecx = 1;
-               printk("Failed to set CPUID thermal/power feature mask\n");
+       if ((levelling_caps & LCAP_6c) == LCAP_6c) {
+               uint32_t ecx = cpuid_ecx(6);
+
+               if (~opt_cpuid_mask_thermal_ecx)
+                       ecx &= opt_cpuid_mask_thermal_ecx;
+
+               cpumask_defaults._6c &= (~0ULL << 32);
+               cpumask_defaults._6c |= ecx;
        }
 }
 
@@ -426,7 +479,10 @@ static void __devinit amd_get_topology(struct cpuinfo_x86 *c)
 
 static void __devinit early_init_amd(struct cpuinfo_x86 *c)
 {
-       set_cpuidmask(c);
+       if (c == &boot_cpu_data)
+               amd_init_levelling();
+
+       amd_ctxt_switch_levelling(NULL);
 }
 
 static void __devinit init_amd(struct cpuinfo_x86 *c)
-- 
2.1.4


