Re: [Xen-devel] [PATCH RFC 26/31] xen/x86: Rework AMD masking MSR setup
>>> On 16.12.15 at 22:24, <andrew.cooper3@xxxxxxxxxx> wrote:
> This patch is best reviewed as its end result rather than as a diff, as it
> rewrites almost all of the setup.

This, I think, doesn't belong in the commit message itself.

> @@ -126,126 +133,172 @@ static const struct cpuidmask *__init noinline get_cpuidmask(const char *opt)
>  }
>  
>  /*
> - * Mask the features and extended features returned by CPUID. Parameters are
> - * set from the boot line via two methods:
> - *
> - * 1) Specific processor revision string
> - * 2) User-defined masks
> - *
> - * The processor revision string parameter has precedene.
> + * Sets caps in expected_levelling_cap, probes for the specified mask MSR, and
> + * set caps in levelling_caps if it is found. Processors prior to Fam 10h
> + * required a 32-bit password for masking MSRs. Reads the default value into
> + * msr_val.
>   */
> -static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
> +static void __init __probe_mask_msr(unsigned int msr, uint64_t caps,
> +                                    uint64_t *msr_val)
>  {
> -        static unsigned int feat_ecx, feat_edx;
> -        static unsigned int extfeat_ecx, extfeat_edx;
> -        static unsigned int l7s0_eax, l7s0_ebx;
> -        static unsigned int thermal_ecx;
> -        static bool_t skip_feat, skip_extfeat;
> -        static bool_t skip_l7s0_eax_ebx, skip_thermal_ecx;
> -        static enum { not_parsed, no_mask, set_mask } status;
> -        unsigned int eax, ebx, ecx, edx;
> -
> -        if (status == no_mask)
> -                return;
> +        unsigned int hi, lo;
> +
> +    expected_levelling_cap |= caps;

Mixing indentation styles.

> +
> +        if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&
> +            (wrmsr_amd_safe(msr, lo, hi) == 0))
> +                levelling_caps |= caps;

This logic assumes that faults are possible only because the MSR is not
there at all, not because of the actual value used. Is this a safe
assumption, especially since you don't use the "safe" variants at
runtime anymore?

> +/*
> + * Probe for the existance of the expected masking MSRs. They might easily
> + * not be available if Xen is running virtualised.
> + */
> +static void __init noinline probe_masking_msrs(void)
> +{
> +        const struct cpuinfo_x86 *c = &boot_cpu_data;
>  
> -        ASSERT((status == not_parsed) && (c == &boot_cpu_data));
> -        status = no_mask;
> +        /*
> +         * First, work out which masking MSRs we should have, based on
> +         * revision and cpuid.
> +         */
>  
>          /* Fam11 doesn't support masking at all. */
>          if (c->x86 == 0x11)
>                  return;
>  
> -        if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
> -              opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
> -              opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx &
> -              opt_cpuid_mask_thermal_ecx)) {
> -                feat_ecx = opt_cpuid_mask_ecx;
> -                feat_edx = opt_cpuid_mask_edx;
> -                extfeat_ecx = opt_cpuid_mask_ext_ecx;
> -                extfeat_edx = opt_cpuid_mask_ext_edx;
> -                l7s0_eax = opt_cpuid_mask_l7s0_eax;
> -                l7s0_ebx = opt_cpuid_mask_l7s0_ebx;
> -                thermal_ecx = opt_cpuid_mask_thermal_ecx;
> -        } else if (*opt_famrev == '\0') {
> +        __probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd,
> +                         &cpumask_defaults._1cd);
> +        __probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd,
> +                         &cpumask_defaults.e1cd);
> +
> +        if (c->cpuid_level >= 7)
> +                __probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0,
> +                                 &cpumask_defaults._7ab0);
> +
> +        if (c->x86 == 0x15 && c->cpuid_level >= 6 && cpuid_ecx(6))
> +                __probe_mask_msr(MSR_AMD_THRM_FEATURE_MASK, LCAP_6c,
> +                                 &cpumask_defaults._6c);
> +
> +        /*
> +         * Don't bother warning about a mismatch if virtualised. These MSRs
> +         * are not architectural and almost never virtualised.
> +         */
> +        if ((expected_levelling_cap == levelling_caps) ||
> +            cpu_has_hypervisor)
>                  return;
> -        } else {
> -                const struct cpuidmask *m = get_cpuidmask(opt_famrev);
> +
> +        printk(XENLOG_WARNING "Mismatch between expected (%#x"
> +               ") and real (%#x) levelling caps: missing %#x\n",

Breaking a format string inside parentheses is certainly a little odd.
Also I don't think the "missing" part is really useful, considering that
you already print both values used in its calculation.
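Something along these lines would address both points (just a sketch of
the two suggestions above, keeping everything else in the message as it
is in the patch):

        printk(XENLOG_WARNING
               "Mismatch between expected (%#x) and real (%#x) levelling caps\n",
               expected_levelling_cap, levelling_caps);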
> +               expected_levelling_cap, levelling_caps,
> +               (expected_levelling_cap ^ levelling_caps) & levelling_caps);
> +        printk(XENLOG_WARNING "Fam %#x, model %#x level %#x\n",
> +               c->x86, c->x86_model, c->cpuid_level);
> +        printk(XENLOG_WARNING
> +               "If not running virtualised, please report a bug\n");

Well - you checked for running virtualized, so getting here while
running virtualized would already indicate a bug (albeit not in the
code here)?

> +void amd_ctxt_switch_levelling(const struct domain *nextd)
> +{
> +        struct cpumasks *these_masks = &this_cpu(cpumasks);
> +        const struct cpumasks *masks = &cpumask_defaults;
> +
> +#define LAZY(cap, msr, field)                                          \
> +        ({                                                             \
> +                if ( ((levelling_caps & cap) == cap) &&                \
> +                     (these_masks->field != masks->field) )            \
> +                {                                                      \
> +                        wrmsr_amd(msr, masks->field);                  \
> +                        these_masks->field = masks->field;             \
> +                }                                                      \
> +        })
> +
> +        LAZY(LCAP_1cd,  MSR_K8_FEATURE_MASK,       _1cd);
> +        LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK,   e1cd);
> +        LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
> +        LAZY(LCAP_6c,   MSR_AMD_THRM_FEATURE_MASK, _6c);

So here we already have the first example where fully consistent naming
would allow elimination of a macro parameter.

And then, how is this supposed to work? You only restore defaults, but
never write non-default values. Namely, nextd is an unused function
parameter ...

Also I guess my comment about adding unused code needs repeating here.
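Just to illustrate the naming point: if the LCAP_* constants, the struct
fields and the masking MSR names all shared one suffix (they don't in
the patch as posted - the fields would need to become _1cd, _e1cd,
_7ab0 and _6c, and the MSR_MASK_* names below are made up for the
example), the macro could get by with a single parameter:

        /*
         * LCAP##field and MSR_MASK##field below rely on the hypothetical
         * renaming described above; neither exists in the patch as posted.
         */
#define LAZY(field)                                                     \
        ({                                                              \
                if (((levelling_caps & LCAP##field) == LCAP##field) &&  \
                    (these_masks->field != masks->field)) {             \
                        wrmsr_amd(MSR_MASK##field, masks->field);       \
                        these_masks->field = masks->field;              \
                }                                                       \
        })

        LAZY(_1cd);
        LAZY(_e1cd);
        LAZY(_7ab0);
        LAZY(_6c);

#undef LAZY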
> +/*
> + * Mask the features and extended features returned by CPUID. Parameters are
> + * set from the boot line via two methods:
> + *
> + * 1) Specific processor revision string
> + * 2) User-defined masks
> + *
> + * The processor revision string parameter has precedence.
> + */
> +static void __init noinline amd_init_levelling(void)
> +{
> +        const struct cpuidmask *m = NULL;
> +
> +        probe_masking_msrs();
> +
> +        if (*opt_famrev != '\0') {
> +                m = get_cpuidmask(opt_famrev);
>  
>                  if (!m) {
>                          printk("Invalid processor string: %s\n", opt_famrev);
> -                        printk("CPUID will not be masked\n");
> -                        return;
>                  }

This now leaves pointless braces around.

> -                feat_ecx = m->ecx;
> -                feat_edx = m->edx;
> -                extfeat_ecx = m->ext_ecx;
> -                extfeat_edx = m->ext_edx;
>          }
>  
> -        /* Setting bits in the CPUID mask MSR that are not set in the
> -         * unmasked CPUID response can cause those bits to be set in the
> -         * masked response. Avoid that by explicitly masking in software. */
> -        feat_ecx &= cpuid_ecx(0x00000001);
> -        feat_edx &= cpuid_edx(0x00000001);
> -        extfeat_ecx &= cpuid_ecx(0x80000001);
> -        extfeat_edx &= cpuid_edx(0x80000001);
> +        if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
> +                uint32_t ecx, edx, tmp;
>  
> -        status = set_mask;
> -        printk("Writing CPUID feature mask ECX:EDX -> %08Xh:%08Xh\n",
> -               feat_ecx, feat_edx);
> -        printk("Writing CPUID extended feature mask ECX:EDX -> %08Xh:%08Xh\n",
> -               extfeat_ecx, extfeat_edx);
> +                cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
>  
> -        if (c->cpuid_level >= 7)
> -                cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
> -        else
> -                ebx = eax = 0;
> -        if ((eax | ebx) && ~(l7s0_eax & l7s0_ebx)) {
> -                if (l7s0_eax > eax)
> -                        l7s0_eax = eax;
> -                l7s0_ebx &= ebx;
> -                printk("Writing CPUID leaf 7 subleaf 0 feature mask EAX:EBX -> %08Xh:%08Xh\n",
> -                       l7s0_eax, l7s0_ebx);
> -        } else
> -                skip_l7s0_eax_ebx = 1;
> -
> -        /* Only Fam15 has the respective MSR. */
> -        ecx = c->x86 == 0x15 && c->cpuid_level >= 6 ? cpuid_ecx(6) : 0;
> -        if (ecx && ~thermal_ecx) {
> -                thermal_ecx &= ecx;
> -                printk("Writing CPUID thermal/power feature mask ECX -> %08Xh\n",
> -                       thermal_ecx);
> -        } else
> -                skip_thermal_ecx = 1;
> -
> - setmask:
> -        /* AMD processors prior to family 10h required a 32-bit password */
> -        if (!skip_feat &&
> -            wrmsr_amd_safe(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx)) {
> -                skip_feat = 1;
> -                printk("Failed to set CPUID feature mask\n");
> +                if(~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
> +                        ecx &= opt_cpuid_mask_ecx;
> +                        edx &= opt_cpuid_mask_edx;
> +                } else if ( m ) {

Bad use of Xen coding style.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel