[PATCH 10/12] x86/mcheck: Migrate vendor checks to use cpu_vendor()
Not a functional change.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
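Note for reviewers: the switch from "==" to "&" relies on the X86_VENDOR_*
constants being single-bit masks, so testing one vendor with "&" yields the
same truth value as an equality check while also permitting combined checks
such as AMD|HYGON. Below is a minimal, self-contained sketch of that
equivalence (plain C, not Xen code; cpu_vendor() here is a hypothetical
stand-in for the helper introduced earlier in this series, assumed to return
the boot CPU's vendor bit):

/* Sketch only: illustrative values, not the real Xen definitions. */
#include <assert.h>
#include <stdio.h>

#define X86_VENDOR_INTEL (1u << 0)
#define X86_VENDOR_AMD   (1u << 1)
#define X86_VENDOR_HYGON (1u << 2)

static unsigned int fake_boot_vendor;

/* Hypothetical stand-in for cpu_vendor(): reports the boot CPU's vendor bit. */
static unsigned int cpu_vendor(void)
{
    return fake_boot_vendor;
}

int main(void)
{
    unsigned int vendors[] = { X86_VENDOR_INTEL, X86_VENDOR_AMD, X86_VENDOR_HYGON };
    unsigned int i;

    for ( i = 0; i < sizeof(vendors) / sizeof(vendors[0]); i++ )
    {
        fake_boot_vendor = vendors[i];

        /* Equality and single-bit mask tests agree for every vendor value. */
        assert( (cpu_vendor() == X86_VENDOR_AMD) ==
                !!(cpu_vendor() & X86_VENDOR_AMD) );

        /* The mask form additionally allows combined checks, e.g. AMD|HYGON. */
        printf("vendor %#x: AMD or Hygon? %s\n", cpu_vendor(),
               (cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ? "yes" : "no");
    }

    return 0;
}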
xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 2 +-
xen/arch/x86/cpu/mcheck/mcaction.c | 2 +-
xen/arch/x86/cpu/mcheck/mce.c | 17 ++++++++---------
xen/arch/x86/cpu/mcheck/mce_amd.c | 7 +++----
xen/arch/x86/cpu/mcheck/mce_intel.c | 7 +++----
xen/arch/x86/cpu/mcheck/vmce.c | 4 ++--
6 files changed, 18 insertions(+), 21 deletions(-)
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 7d48c9ab5f..ebe13911ba 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -191,7 +191,7 @@ static void cf_check mce_amd_work_fn(void *data)
void __init amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c)
{
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ if (!(cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
return;
/* Assume we are on K8 or newer AMD or Hygon CPU here */
diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
index bf7a0de965..b7fc5056d8 100644
--- a/xen/arch/x86/cpu/mcheck/mcaction.c
+++ b/xen/arch/x86/cpu/mcheck/mcaction.c
@@ -101,7 +101,7 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
* not always precise. In that case, fallback to broadcast.
*/
global->mc_domid != bank->mc_domid ||
- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ ((cpu_vendor() & X86_VENDOR_INTEL) &&
(!(global->mc_gstatus & MCG_STATUS_LMCE) ||
!(d->vcpu[mc_vcpuid]->arch.vmce.mcg_ext_ctl &
MCG_EXT_CTL_LMCE_EN))) )
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 4b295aeeb5..91c5033edb 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -333,8 +333,7 @@ mcheck_mca_logout(enum mca_source who, struct mca_banks *bankmask,
ASSERT(mig);
mca_init_global(mc_flags, mig);
/* A hook here to get global extended msrs */
- if ( IS_ENABLED(CONFIG_INTEL) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( cpu_vendor() & X86_VENDOR_INTEL )
intel_get_extended_msrs(mig, mci);
}
}
@@ -564,8 +563,8 @@ bool mce_available(const struct cpuinfo_x86 *c)
*/
unsigned int mce_firstbank(struct cpuinfo_x86 *c)
{
- return c->x86 == 6 &&
- c->x86_vendor == X86_VENDOR_INTEL && c->x86_model < 0x1a;
+ return (cpu_vendor() & X86_VENDOR_INTEL) &&
+ c->x86 == 6 && c->x86_model < 0x1a;
}
static int show_mca_info(int inited, struct cpuinfo_x86 *c)
@@ -862,7 +861,7 @@ static void x86_mcinfo_apei_save(
memset(&m, 0, sizeof(struct mce));
m.cpu = mc_global->mc_coreid;
- m.cpuvendor = boot_cpu_data.x86_vendor;
+ m.cpuvendor = cpu_vendor();
m.cpuid = cpuid_eax(1);
m.socketid = mc_global->mc_socketid;
m.apicid = mc_global->mc_apicid;
@@ -964,7 +963,7 @@ static void cf_check __maybe_unused do_mc_get_cpu_info(void *v)
&xcp->mc_ncores_active, &xcp->mc_nthreads);
xcp->mc_cpuid_level = c->cpuid_level;
xcp->mc_family = c->x86;
- xcp->mc_vendor = c->x86_vendor;
+ xcp->mc_vendor = cpu_vendor();
xcp->mc_model = c->x86_model;
xcp->mc_step = c->x86_mask;
xcp->mc_cache_size = c->x86_cache_size;
@@ -1122,7 +1121,7 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
if ( IS_MCA_BANKREG(reg, mci->mcinj_cpunr) )
{
- if ( c->x86_vendor == X86_VENDOR_AMD )
+ if ( (cpu_vendor() & X86_VENDOR_AMD) )
{
/*
* On AMD we can set MCi_STATUS_WREN in the
@@ -1157,7 +1156,7 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
case MSR_F10_MC4_MISC1:
case MSR_F10_MC4_MISC2:
case MSR_F10_MC4_MISC3:
- if ( c->x86_vendor != X86_VENDOR_AMD )
+ if ( !(cpu_vendor() & X86_VENDOR_AMD) )
reason = "only supported on AMD";
else if ( c->x86 < 0x10 )
reason = "only supported on AMD Fam10h+";
@@ -1165,7 +1164,7 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
/* MSRs that the HV will take care of */
case MSR_K8_HWCR:
- if ( c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
reason = "HV will operate HWCR";
else
reason = "only supported on AMD or Hygon";
diff --git a/xen/arch/x86/cpu/mcheck/mce_amd.c b/xen/arch/x86/cpu/mcheck/mce_amd.c
index 25c29eb3d2..b4d09fd6c1 100644
--- a/xen/arch/x86/cpu/mcheck/mce_amd.c
+++ b/xen/arch/x86/cpu/mcheck/mce_amd.c
@@ -160,7 +160,7 @@ mcequirk_lookup_amd_quirkdata(const struct cpuinfo_x86 *c)
{
unsigned int i;
- BUG_ON(c->x86_vendor != X86_VENDOR_AMD);
+ BUG_ON(!(cpu_vendor() & X86_VENDOR_AMD));
for ( i = 0; i < ARRAY_SIZE(mce_amd_quirks); i++ )
{
@@ -291,7 +291,7 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp)
uint32_t i;
enum mcequirk_amd_flags quirkflag = 0;
- if ( c->x86_vendor != X86_VENDOR_HYGON )
+ if ( !(cpu_vendor() & X86_VENDOR_HYGON) )
quirkflag = mcequirk_lookup_amd_quirkdata(c);
/* Assume that machine check support is available.
@@ -337,6 +337,5 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp)
ppin_msr = MSR_AMD_PPIN;
}
- return c->x86_vendor == X86_VENDOR_HYGON ?
- mcheck_hygon : mcheck_amd_famXX;
+ return (cpu_vendor() & X86_VENDOR_HYGON) ? mcheck_hygon : mcheck_amd_famXX;
}
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 839a0e5ba9..90dbb60236 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -711,8 +711,8 @@ static bool mce_is_broadcast(struct cpuinfo_x86 *c)
* DisplayFamily_DisplayModel encoding of 06H_EH and above,
* a MCA signal is broadcast to all logical processors in the system
*/
- if ( c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 6 &&
- c->x86_model >= 0xe )
+ if ( (cpu_vendor() & X86_VENDOR_INTEL) &&
+ c->x86 == 6 && c->x86_model >= 0xe )
return true;
return false;
}
@@ -1018,7 +1018,6 @@ int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
- const struct cpu_policy *cp = v->domain->arch.cpu_policy;
unsigned int bank = msr - MSR_IA32_MC0_CTL2;
switch ( msr )
@@ -1036,7 +1035,7 @@ int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
return 1;
}
- if ( !(cp->x86_vendor & X86_VENDOR_INTEL) )
+ if ( !(cpu_vendor() & X86_VENDOR_INTEL) )
return 0;
if ( bank < GUEST_MC_BANK_NUM )
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 0c1bfb691b..393bf8c787 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -45,7 +45,7 @@ void vmce_init_vcpu(struct vcpu *v)
int i;
/* global MCA MSRs init */
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( cpu_vendor() & X86_VENDOR_INTEL )
v->arch.vmce.mcg_cap = INTEL_GUEST_MCG_CAP;
else
v->arch.vmce.mcg_cap = AMD_GUEST_MCG_CAP;
@@ -63,7 +63,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
{
unsigned long guest_mcg_cap;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( cpu_vendor() & X86_VENDOR_INTEL )
guest_mcg_cap = INTEL_GUEST_MCG_CAP | MCG_LMCE_P;
else
guest_mcg_cap = AMD_GUEST_MCG_CAP;
--
2.43.0