|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 1/6] x86: Remove x86 prefixed names from mcheck code
struct cpuinfo_x86 .x86 => .family .x86_vendor => .vendor .x86_model => .model .x86_mask => .stepping No functional change. This work is part of making Xen safe for Intel family 18/19. Signed-off-by: Kevin Lampis <kevin.lampis@xxxxxxxxxx> --- Changes since v1: - mcheck_init() check for family != 5 instead of 6 and 15 - mce_firstbank() match vfm range - mce_is_broadcast() add vendor check as well as >= INTEL_CORE_YONAH This supersedes the individual posting https://lore.kernel.org/xen-devel/20260302191900.672460-1-kevin.lampis@xxxxxxxxxx/ --- xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 2 +- xen/arch/x86/cpu/mcheck/mcaction.c | 2 +- xen/arch/x86/cpu/mcheck/mce.c | 35 +++++++++++--------------- xen/arch/x86/cpu/mcheck/mce.h | 2 +- xen/arch/x86/cpu/mcheck/mce_amd.c | 16 ++++++------ xen/arch/x86/cpu/mcheck/mce_intel.c | 5 +--- xen/arch/x86/cpu/mcheck/non-fatal.c | 2 +- xen/arch/x86/cpu/mcheck/vmce.c | 8 +++--- 8 files changed, 32 insertions(+), 40 deletions(-) diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c index 7d48c9ab5f..fb52639e13 100644 --- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c +++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c @@ -191,7 +191,7 @@ static void cf_check mce_amd_work_fn(void *data) void __init amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c) { - if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))) + if (!(c->vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))) return; /* Assume we are on K8 or newer AMD or Hygon CPU here */ diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c index bf7a0de965..236424569a 100644 --- a/xen/arch/x86/cpu/mcheck/mcaction.c +++ b/xen/arch/x86/cpu/mcheck/mcaction.c @@ -101,7 +101,7 @@ mc_memerr_dhandler(struct mca_binfo *binfo, * not always precise. In that case, fallback to broadcast. 
*/ global->mc_domid != bank->mc_domid || - (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + (boot_cpu_data.vendor == X86_VENDOR_INTEL && (!(global->mc_gstatus & MCG_STATUS_LMCE) || !(d->vcpu[mc_vcpuid]->arch.vmce.mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN))) ) diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c index 9a91807cfb..901a71157a 100644 --- a/xen/arch/x86/cpu/mcheck/mce.c +++ b/xen/arch/x86/cpu/mcheck/mce.c @@ -23,6 +23,7 @@ #include <asm/apic.h> #include <asm/msr.h> #include <asm/p2m.h> +#include <asm/intel-family.h> #include "mce.h" #include "barrier.h" @@ -334,7 +335,7 @@ mcheck_mca_logout(enum mca_source who, struct mca_banks *bankmask, mca_init_global(mc_flags, mig); /* A hook here to get global extended msrs */ if ( IS_ENABLED(CONFIG_INTEL) && - boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) + boot_cpu_data.vendor == X86_VENDOR_INTEL ) intel_get_extended_msrs(mig, mci); } } @@ -564,8 +565,7 @@ bool mce_available(const struct cpuinfo_x86 *c) */ unsigned int mce_firstbank(struct cpuinfo_x86 *c) { - return c->x86 == 6 && - c->x86_vendor == X86_VENDOR_INTEL && c->x86_model < 0x1a; + return c->vfm >= INTEL_PENTIUM_PRO && c->vfm < INTEL_NEHALEM_EP; } static int show_mca_info(int inited, struct cpuinfo_x86 *c) @@ -596,7 +596,7 @@ static int show_mca_info(int inited, struct cpuinfo_x86 *c) case mcheck_amd_famXX: case mcheck_hygon: printk("%s%s Fam%xh machine check reporting enabled\n", - prefix, type_str[inited], c->x86); + prefix, type_str[inited], c->family); break; case mcheck_none: @@ -766,7 +766,7 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp) else if ( cpu_bank_alloc(cpu) ) panic("Insufficient memory for MCE bank allocations\n"); - switch ( c->x86_vendor ) + switch ( c->vendor ) { #ifdef CONFIG_AMD case X86_VENDOR_AMD: @@ -777,13 +777,8 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp) #ifdef CONFIG_INTEL case X86_VENDOR_INTEL: - switch ( c->x86 ) - { - case 6: - case 15: + if ( c->family != 5 ) inited = intel_mcheck_init(c, 
bsp); - break; - } break; #endif @@ -882,7 +877,7 @@ static void x86_mcinfo_apei_save( memset(&m, 0, sizeof(struct mce)); m.cpu = mc_global->mc_coreid; - m.cpuvendor = xen2linux_vendor(boot_cpu_data.x86_vendor); + m.cpuvendor = xen2linux_vendor(boot_cpu_data.vendor); m.cpuid = cpuid_eax(1); m.socketid = mc_global->mc_socketid; m.apicid = mc_global->mc_apicid; @@ -983,10 +978,10 @@ static void cf_check __maybe_unused do_mc_get_cpu_info(void *v) &xcp->mc_apicid, &xcp->mc_ncores, &xcp->mc_ncores_active, &xcp->mc_nthreads); xcp->mc_cpuid_level = c->cpuid_level; - xcp->mc_family = c->x86; - xcp->mc_vendor = xen2linux_vendor(c->x86_vendor); - xcp->mc_model = c->x86_model; - xcp->mc_step = c->x86_mask; + xcp->mc_family = c->family; + xcp->mc_vendor = xen2linux_vendor(c->vendor); + xcp->mc_model = c->model; + xcp->mc_step = c->stepping; xcp->mc_cache_size = c->x86_cache_size; xcp->mc_cache_alignment = c->x86_cache_alignment; memcpy(xcp->mc_vendorid, c->x86_vendor_id, sizeof xcp->mc_vendorid); @@ -1142,7 +1137,7 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci) if ( IS_MCA_BANKREG(reg, mci->mcinj_cpunr) ) { - if ( c->x86_vendor == X86_VENDOR_AMD ) + if ( c->vendor == X86_VENDOR_AMD ) { /* * On AMD we can set MCi_STATUS_WREN in the @@ -1177,15 +1172,15 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci) case MSR_F10_MC4_MISC1: case MSR_F10_MC4_MISC2: case MSR_F10_MC4_MISC3: - if ( c->x86_vendor != X86_VENDOR_AMD ) + if ( c->vendor != X86_VENDOR_AMD ) reason = "only supported on AMD"; - else if ( c->x86 < 0x10 ) + else if ( c->family < 0x10 ) reason = "only supported on AMD Fam10h+"; break; /* MSRs that the HV will take care of */ case MSR_K8_HWCR: - if ( c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) ) + if ( c->vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) ) reason = "HV will operate HWCR"; else reason = "only supported on AMD or Hygon"; diff --git a/xen/arch/x86/cpu/mcheck/mce.h 
b/xen/arch/x86/cpu/mcheck/mce.h index 920b075355..3b61b12487 100644 --- a/xen/arch/x86/cpu/mcheck/mce.h +++ b/xen/arch/x86/cpu/mcheck/mce.h @@ -137,7 +137,7 @@ void x86_mcinfo_dump(struct mc_info *mi); static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr) { - switch (boot_cpu_data.x86_vendor) { + switch (boot_cpu_data.vendor) { case X86_VENDOR_INTEL: if (msr >= MSR_IA32_MC0_CTL2 && msr < MSR_IA32_MCx_CTL2(v->arch.vmce.mcg_cap & MCG_CAP_COUNT) ) diff --git a/xen/arch/x86/cpu/mcheck/mce_amd.c b/xen/arch/x86/cpu/mcheck/mce_amd.c index 25c29eb3d2..2d17832d9c 100644 --- a/xen/arch/x86/cpu/mcheck/mce_amd.c +++ b/xen/arch/x86/cpu/mcheck/mce_amd.c @@ -160,17 +160,17 @@ mcequirk_lookup_amd_quirkdata(const struct cpuinfo_x86 *c) { unsigned int i; - BUG_ON(c->x86_vendor != X86_VENDOR_AMD); + BUG_ON(c->vendor != X86_VENDOR_AMD); for ( i = 0; i < ARRAY_SIZE(mce_amd_quirks); i++ ) { - if ( c->x86 != mce_amd_quirks[i].cpu_family ) + if ( c->family != mce_amd_quirks[i].cpu_family ) continue; if ( (mce_amd_quirks[i].cpu_model != ANY) && - (mce_amd_quirks[i].cpu_model != c->x86_model) ) + (mce_amd_quirks[i].cpu_model != c->model) ) continue; if ( (mce_amd_quirks[i].cpu_stepping != ANY) && - (mce_amd_quirks[i].cpu_stepping != c->x86_mask) ) + (mce_amd_quirks[i].cpu_stepping != c->stepping) ) continue; return mce_amd_quirks[i].quirk; } @@ -291,13 +291,13 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp) uint32_t i; enum mcequirk_amd_flags quirkflag = 0; - if ( c->x86_vendor != X86_VENDOR_HYGON ) + if ( c->vendor != X86_VENDOR_HYGON ) quirkflag = mcequirk_lookup_amd_quirkdata(c); /* Assume that machine check support is available. * The minimum provided support is at least the K8. */ if ( bsp ) - mce_handler_init(c->x86 == 0xf ? &k8_callbacks : &k10_callbacks); + mce_handler_init(c->family == 0xf ? 
&k8_callbacks : &k10_callbacks); for ( i = 0; i < this_cpu(nr_mce_banks); i++ ) { @@ -311,7 +311,7 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp) } } - if ( c->x86 == 0xf ) + if ( c->family == 0xf ) return mcheck_amd_k8; if ( quirkflag == MCEQUIRK_F10_GART ) @@ -337,6 +337,6 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp) ppin_msr = MSR_AMD_PPIN; } - return c->x86_vendor == X86_VENDOR_HYGON ? + return c->vendor == X86_VENDOR_HYGON ? mcheck_hygon : mcheck_amd_famXX; } diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c index 839a0e5ba9..4d6b7d588e 100644 --- a/xen/arch/x86/cpu/mcheck/mce_intel.c +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c @@ -711,10 +711,7 @@ static bool mce_is_broadcast(struct cpuinfo_x86 *c) * DisplayFamily_DisplayModel encoding of 06H_EH and above, * a MCA signal is broadcast to all logical processors in the system */ - if ( c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 6 && - c->x86_model >= 0xe ) - return true; - return false; + return c->vendor == X86_VENDOR_INTEL && c->vfm >= INTEL_CORE_YONAH; } static bool intel_enable_lmce(void) diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c index a9ee9bb94f..4e7c64abef 100644 --- a/xen/arch/x86/cpu/mcheck/non-fatal.c +++ b/xen/arch/x86/cpu/mcheck/non-fatal.c @@ -23,7 +23,7 @@ static int __init cf_check init_nonfatal_mce_checker(void) /* * Check for non-fatal errors every MCE_RATE s */ - switch (c->x86_vendor) { + switch (c->vendor) { #ifdef CONFIG_AMD case X86_VENDOR_AMD: case X86_VENDOR_HYGON: diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c index 1a7e92506a..84776aeec8 100644 --- a/xen/arch/x86/cpu/mcheck/vmce.c +++ b/xen/arch/x86/cpu/mcheck/vmce.c @@ -45,7 +45,7 @@ void vmce_init_vcpu(struct vcpu *v) int i; /* global MCA MSRs init */ - if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) + if ( boot_cpu_data.vendor == X86_VENDOR_INTEL ) v->arch.vmce.mcg_cap = INTEL_GUEST_MCG_CAP; 
else v->arch.vmce.mcg_cap = AMD_GUEST_MCG_CAP; @@ -63,7 +63,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt) { unsigned long guest_mcg_cap; - if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) + if ( boot_cpu_data.vendor == X86_VENDOR_INTEL ) guest_mcg_cap = INTEL_GUEST_MCG_CAP | MCG_LMCE_P; else guest_mcg_cap = AMD_GUEST_MCG_CAP; @@ -136,7 +136,7 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) break; default: - switch ( boot_cpu_data.x86_vendor ) + switch ( boot_cpu_data.vendor ) { #ifdef CONFIG_INTEL case X86_VENDOR_CENTAUR: @@ -273,7 +273,7 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) break; default: - switch ( boot_cpu_data.x86_vendor ) + switch ( boot_cpu_data.vendor ) { #ifdef CONFIG_INTEL case X86_VENDOR_INTEL: -- 2.51.1
|
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |