[PATCH 11/12] x86/cpu: Migrate vendor checks to use cpu_vendor()
Not a functional change.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
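For reference: cpu_vendor() is assumed to return the boot CPU's vendor as
one of the single-bit X86_VENDOR_* constants, which is what lets the
equality checks below become mask checks and lets several vendors be
matched in one test. A minimal sketch of the assumed helper (the real
definition, added earlier in this series, may differ in naming and
details):

    static inline unsigned int cpu_vendor(void)
    {
        /* Assumed shape: yields a single X86_VENDOR_* bit for the boot CPU. */
        return boot_cpu_data.vendor;
    }

    /* The pattern this enables: one mask test covering several vendors. */
    if ( cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
        do_vendor_specific_setup();   /* hypothetical call, for illustration */
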
xen/arch/x86/cpu/amd.c | 6 +++---
xen/arch/x86/cpu/common.c | 2 +-
xen/arch/x86/cpu/intel_cacheinfo.c | 5 ++---
xen/arch/x86/cpu/microcode/amd.c | 2 +-
xen/arch/x86/cpu/microcode/core.c | 2 +-
xen/arch/x86/cpu/mtrr/generic.c | 5 ++---
xen/arch/x86/cpu/mwait-idle.c | 5 ++---
xen/arch/x86/cpu/vpmu.c | 3 +--
8 files changed, 13 insertions(+), 17 deletions(-)
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index fc496dc43e..422d81472e 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -327,7 +327,7 @@ int cpu_has_amd_erratum(const struct cpuinfo_x86 *cpu, int osvw_id, ...)
u32 range;
u32 ms;
- if (cpu->x86_vendor != X86_VENDOR_AMD)
+ if (!(cpu_vendor() & X86_VENDOR_AMD))
return 0;
if (osvw_id >= 0 && cpu_has(cpu, X86_FEATURE_OSVW)) {
@@ -418,8 +418,8 @@ static void check_syscfg_dram_mod_en(void)
{
uint64_t syscfg;
- if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0x0f)))
+ if (!((cpu_vendor() & X86_VENDOR_AMD) &&
+ (boot_cpu_data.x86 >= 0x0f)))
return;
rdmsrl(MSR_K8_SYSCFG, syscfg);
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 6f4e723172..9a6fb90fec 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -436,7 +436,7 @@ void __init early_cpu_init(bool verbose)
paddr_bits -= (ebx >> 6) & 0x3f;
}
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ if (!(cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
park_offline_cpus = opt_mce;
initialize_cpu_data(0);
diff --git a/xen/arch/x86/cpu/intel_cacheinfo.c b/xen/arch/x86/cpu/intel_cacheinfo.c
index e88faa7545..f95ed90809 100644
--- a/xen/arch/x86/cpu/intel_cacheinfo.c
+++ b/xen/arch/x86/cpu/intel_cacheinfo.c
@@ -168,9 +168,8 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
* trace cache
*/
- if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1 &&
- c->x86_vendor != X86_VENDOR_SHANGHAI)
- {
+ if (((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) &&
+ (!(cpu_vendor() & X86_VENDOR_SHANGHAI))) {
/* supports eax=2 call */
unsigned int i, j, n, regs[4];
unsigned char *dp = (unsigned char *)regs;
diff --git a/xen/arch/x86/cpu/microcode/amd.c b/xen/arch/x86/cpu/microcode/amd.c
index 90f10ac8be..b4c8264e27 100644
--- a/xen/arch/x86/cpu/microcode/amd.c
+++ b/xen/arch/x86/cpu/microcode/amd.c
@@ -616,7 +616,7 @@ void __init amd_check_entrysign(void)
uint8_t fixed_rev;
if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) ||
- boot_cpu_data.vendor != X86_VENDOR_AMD ||
+ !(cpu_vendor() & X86_VENDOR_AMD) ||
boot_cpu_data.family < 0x17 ||
boot_cpu_data.family > 0x1a )
return;
diff --git a/xen/arch/x86/cpu/microcode/core.c b/xen/arch/x86/cpu/microcode/core.c
index ea0b35c499..ebe4d21026 100644
--- a/xen/arch/x86/cpu/microcode/core.c
+++ b/xen/arch/x86/cpu/microcode/core.c
@@ -216,7 +216,7 @@ static struct patch_with_flags nmi_patch =
/* Returns true if ucode should be loaded on a given cpu */
static bool is_cpu_primary(unsigned int cpu)
{
- if ( boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
/* Load ucode on every logical thread/core */
return true;
diff --git a/xen/arch/x86/cpu/mtrr/generic.c b/xen/arch/x86/cpu/mtrr/generic.c
index c587e9140e..eab72dc103 100644
--- a/xen/arch/x86/cpu/mtrr/generic.c
+++ b/xen/arch/x86/cpu/mtrr/generic.c
@@ -218,9 +218,8 @@ static void __init print_mtrr_state(const char *level)
printk("%s %u disabled\n", level, i);
}
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 >= 0xf) ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ if (((cpu_vendor() & X86_VENDOR_AMD) && boot_cpu_data.x86 >= 0xf) ||
+ (cpu_vendor() & X86_VENDOR_HYGON)) {
uint64_t syscfg, tom2;
rdmsrl(MSR_K8_SYSCFG, syscfg);
diff --git a/xen/arch/x86/cpu/mwait-idle.c b/xen/arch/x86/cpu/mwait-idle.c
index 5962ec1db9..8536bc97f6 100644
--- a/xen/arch/x86/cpu/mwait-idle.c
+++ b/xen/arch/x86/cpu/mwait-idle.c
@@ -1598,7 +1598,7 @@ static int __init mwait_idle_probe(void)
const struct x86_cpu_id *id;
const char *str;
- if (boot_cpu_data.vendor != X86_VENDOR_INTEL)
+ if (!(cpu_vendor() & X86_VENDOR_INTEL))
return -ENODEV;
id = x86_match_cpu(intel_idle_ids);
@@ -1816,8 +1816,7 @@ bool __init mwait_pc10_supported(void)
{
unsigned int ecx, edx, dummy;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
- !cpu_has_monitor ||
+ if (!(cpu_vendor() & X86_VENDOR_INTEL) || !cpu_has_monitor ||
boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return false;
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index b1ad4ebcf6..b79d57d0ec 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -398,7 +398,6 @@ int vpmu_load(struct vcpu *v, bool from_guest)
static int vpmu_arch_initialise(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- uint8_t vendor = current_cpu_data.x86_vendor;
int ret;
BUILD_BUG_ON(sizeof(struct xen_pmu_intel_ctxt) > XENPMU_CTXT_PAD_SZ);
@@ -418,7 +417,7 @@ static int vpmu_arch_initialise(struct vcpu *v)
if ( vpmu_mode != XENPMU_MODE_OFF )
{
printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. "
- "Disabling VPMU\n", vendor);
+ "Disabling VPMU\n", cpu_vendor());
opt_vpmu_enabled = 0;
vpmu_mode = XENPMU_MODE_OFF;
}
--
2.43.0