[XEN PATCH v2 2/5] x86/intel: move vmce_has_lmce() routine to header
Moving this function out of mce_intel.c makes it possible to disable
the build of Intel MCE code later on, because the function gets called
from common x86 code. Add an internal check for the CONFIG_INTEL
option, as the MCG_LMCE_P bit is currently specific to Intel CPUs.
Also replace the boilerplate code that checks for the MCG_LMCE_P flag
with vmce_has_lmce(), which should improve readability a bit.

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
---
changes in v2:
 - move vmce_has_lmce() to cpu/mcheck/mce.h
 - move IS_ENABLED(CONFIG_INTEL) check inside vmce_has_lmce()
 - changed description
---
 xen/arch/x86/cpu/mcheck/mce.h       | 5 +++++
 xen/arch/x86/cpu/mcheck/mce_intel.c | 4 ----
 xen/arch/x86/cpu/mcheck/vmce.c      | 5 ++---
 xen/arch/x86/include/asm/mce.h      | 1 -
 xen/arch/x86/msr.c                  | 2 ++
 5 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 4806405f96..d6d56aa232 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -170,6 +170,11 @@ static inline int mce_bank_msr(const struct vcpu *v, uint32_t msr)
     return 0;
 }
 
+static inline bool vmce_has_lmce(const struct vcpu *v)
+{
+    return IS_ENABLED(CONFIG_INTEL) && v->arch.vmce.mcg_cap & MCG_LMCE_P;
+}
+
 struct mce_callbacks {
     void (*handler)(const struct cpu_user_regs *regs);
     bool (*check_addr)(uint64_t status, uint64_t misc, int addr_type);
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 3f5199b531..af43281cc6 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -1050,7 +1050,3 @@ int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
     return 1;
 }
 
-bool vmce_has_lmce(const struct vcpu *v)
-{
-    return v->arch.vmce.mcg_cap & MCG_LMCE_P;
-}
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 353d4f19b2..94d1f021e1 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -199,7 +199,7 @@ int vmce_rdmsr(uint32_t msr, uint64_t *val)
          * bits are always set in guest MSR_IA32_FEATURE_CONTROL by Xen, so it
          * does not need to check them here.
          */
-        if ( cur->arch.vmce.mcg_cap & MCG_LMCE_P )
+        if ( vmce_has_lmce(cur) )
         {
             *val = cur->arch.vmce.mcg_ext_ctl;
             mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_EXT_CTL %#"PRIx64"\n",
@@ -324,8 +324,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
         break;
 
     case MSR_IA32_MCG_EXT_CTL:
-        if ( (cur->arch.vmce.mcg_cap & MCG_LMCE_P) &&
-             !(val & ~MCG_EXT_CTL_LMCE_EN) )
+        if ( vmce_has_lmce(cur) && !(val & ~MCG_EXT_CTL_LMCE_EN) )
             cur->arch.vmce.mcg_ext_ctl = val;
         else
             ret = -1;
diff --git a/xen/arch/x86/include/asm/mce.h b/xen/arch/x86/include/asm/mce.h
index 6ce56b5b85..2ec47a71ae 100644
--- a/xen/arch/x86/include/asm/mce.h
+++ b/xen/arch/x86/include/asm/mce.h
@@ -41,7 +41,6 @@ extern void vmce_init_vcpu(struct vcpu *v);
 extern int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt);
 extern int vmce_wrmsr(uint32_t msr, uint64_t val);
 extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
-extern bool vmce_has_lmce(const struct vcpu *v);
 extern int vmce_enable_mca_cap(struct domain *d, uint64_t cap);
 
 DECLARE_PER_CPU(unsigned int, nr_mce_banks);
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 9babd441f9..b0ec96f021 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -24,6 +24,8 @@
 
 #include <public/hvm/params.h>
 
+#include "cpu/mcheck/mce.h"
+
 DEFINE_PER_CPU(uint32_t, tsc_aux);
 
 int init_vcpu_msr_policy(struct vcpu *v)
-- 
2.25.1
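
For context on why the IS_ENABLED(CONFIG_INTEL) check lets this
function live in a shared header: the macro folds to a 0/1 constant at
compile time, so on a build without Intel support the && short-circuits
to a constant false and the MCG_LMCE_P test is discarded as dead code,
meaning common callers never pull in Intel-only symbols. Below is a
minimal standalone sketch of the pattern; the macro, struct and bit
value are simplified stand-ins for illustration, not Xen's real
definitions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for the MCG_CAP local-MCE capability bit. */
    #define MCG_LMCE_P (1ULL << 27)

    /*
     * Xen's IS_ENABLED() expands a Kconfig option to a 0/1 constant;
     * here we fake an enabled option with plain macros.
     */
    #define CONFIG_INTEL 1
    #define IS_ENABLED(option) (option)

    struct vmce_state { uint64_t mcg_cap; };

    static inline bool vmce_has_lmce(const struct vmce_state *v)
    {
        /*
         * With the option off, IS_ENABLED() becomes 0, the whole
         * expression reduces to "return false", and the compiler
         * eliminates the mcg_cap test entirely.
         */
        return IS_ENABLED(CONFIG_INTEL) && (v->mcg_cap & MCG_LMCE_P);
    }

    int main(void)
    {
        struct vmce_state v = { .mcg_cap = MCG_LMCE_P };
        return vmce_has_lmce(&v) ? 0 : 1;
    }

The same effect in the real tree is what allows msr.c to call
vmce_has_lmce() unconditionally while the build of mce_intel.c can be
disabled in a later patch.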