[PATCH 06/12] x86: Migrate switch vendor checks to cpu_vendor()
In the single-vendor case (with no unknown vendor) every switch
collapses into a branchless straight line through the single
compiled-in case. In the multi-vendor case where some vendors are
disabled, the value range propagation (VRP) pass ensures the branches
for the disabled vendors are eliminated.

It's hard to tell the VRP pass that zero (the unknown vendor) is also
unreachable when CONFIG_UNKNOWN_CPU_VENDORS is disabled, so default
cases tend to remain.

Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
The removal of the ifdefs is proof of VRP doing the work of DCE.
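
As an illustration of the mechanism, a minimal sketch of what such a
helper can look like (hypothetical and simplified, not necessarily the
implementation introduced earlier in this series; it only relies on the
bitmask vendor encoding and Kconfig symbols visible in this patch):

    static inline unsigned int cpu_vendor(void)
    {
        unsigned int mask = 0;

    #ifdef CONFIG_INTEL
        mask |= X86_VENDOR_INTEL;
    #endif
    #ifdef CONFIG_AMD
        mask |= X86_VENDOR_AMD | X86_VENDOR_HYGON;
    #endif

        /*
         * X86_VENDOR_* are single-bit values, so masking narrows the
         * set of values VRP has to consider: cases for vendors that
         * are compiled out become provably dead.  Zero (the unknown
         * vendor) remains representable after the mask, which is why
         * default cases tend to survive.
         */
        return boot_cpu_data.x86_vendor & mask;
    }
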
---
xen/arch/x86/acpi/cpufreq/cpufreq.c | 12 +++---------
xen/arch/x86/alternative.c | 2 +-
xen/arch/x86/cpu-policy.c | 6 +++---
xen/arch/x86/cpu/mcheck/mce.c | 6 +-----
xen/arch/x86/cpu/mcheck/mce.h | 2 +-
xen/arch/x86/cpu/mcheck/non-fatal.c | 6 +-----
xen/arch/x86/cpu/mcheck/vmce.c | 12 ++----------
xen/arch/x86/cpu/vpmu.c | 6 +-----
xen/arch/x86/domain.c | 4 ++--
xen/arch/x86/guest/xen/xen.c | 2 +-
xen/arch/x86/nmi.c | 4 ++--
xen/arch/x86/traps-setup.c | 2 +-
12 files changed, 19 insertions(+), 45 deletions(-)
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 5740c0d438..4ee03ecc65 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -133,7 +133,7 @@ static int __init cf_check cpufreq_driver_init(void)
ret = -ENOENT;
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
for ( i = 0; i < cpufreq_xen_cnt; i++ )
@@ -141,12 +141,10 @@ static int __init cf_check cpufreq_driver_init(void)
switch ( cpufreq_xen_opts[i] )
{
case CPUFREQ_xen:
- ret = IS_ENABLED(CONFIG_INTEL) ?
- acpi_cpufreq_register() : -ENODEV;
+ ret = acpi_cpufreq_register();
break;
case CPUFREQ_hwp:
- ret = IS_ENABLED(CONFIG_INTEL) ?
- hwp_register_driver() : -ENODEV;
+ ret = hwp_register_driver();
break;
case CPUFREQ_none:
ret = 0;
@@ -165,7 +163,6 @@ static int __init cf_check cpufreq_driver_init(void)
case X86_VENDOR_AMD:
case X86_VENDOR_HYGON:
-#ifdef CONFIG_AMD
for ( i = 0; i < cpufreq_xen_cnt; i++ )
{
switch ( cpufreq_xen_opts[i] )
@@ -191,9 +188,6 @@ static int __init cf_check cpufreq_driver_init(void)
if ( !ret || ret == -EBUSY )
break;
}
-#else
- ret = -ENODEV;
-#endif /* CONFIG_AMD */
break;
default:
diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c
index 9f844241bc..c723417807 100644
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -89,7 +89,7 @@ static bool init_or_livepatch_read_mostly
toolchain_nops_are_ideal;
static void __init arch_init_ideal_nops(void)
{
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
/*
diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index 5273fe0ae4..62aff61d8c 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -279,7 +279,7 @@ static void recalculate_misc(struct cpu_policy *p)
p->extd.raw[0x8].d = 0;
- switch ( p->x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
p->basic.l2_nr_queries = 1; /* Fixed to 1 query. */
@@ -420,7 +420,7 @@ static void __init guest_common_default_leaves(struct cpu_policy *p)
static void __init guest_common_max_feature_adjustments(uint32_t *fs)
{
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
/*
@@ -514,7 +514,7 @@ static void __init guest_common_max_feature_adjustments(uint32_t *fs)
static void __init guest_common_default_feature_adjustments(uint32_t *fs)
{
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
/*
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 9277781bff..4b295aeeb5 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -766,16 +766,13 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp)
else if ( cpu_bank_alloc(cpu) )
panic("Insufficient memory for MCE bank allocations\n");
- switch ( c->x86_vendor )
+ switch ( cpu_vendor() )
{
-#ifdef CONFIG_AMD
case X86_VENDOR_AMD:
case X86_VENDOR_HYGON:
inited = amd_mcheck_init(c, bsp);
break;
-#endif
-#ifdef CONFIG_INTEL
case X86_VENDOR_INTEL:
switch ( c->x86 )
{
@@ -785,7 +782,6 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp)
break;
}
break;
-#endif
default:
break;
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 920b075355..4828c74d47 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -137,7 +137,7 @@ void x86_mcinfo_dump(struct mc_info *mi);
static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
{
- switch (boot_cpu_data.x86_vendor) {
+ switch (cpu_vendor()) {
case X86_VENDOR_INTEL:
if (msr >= MSR_IA32_MC0_CTL2 &&
msr < MSR_IA32_MCx_CTL2(v->arch.vmce.mcg_cap & MCG_CAP_COUNT) )
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index a9ee9bb94f..3a0399c121 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -23,20 +23,16 @@ static int __init cf_check init_nonfatal_mce_checker(void)
/*
* Check for non-fatal errors every MCE_RATE s
*/
- switch (c->x86_vendor) {
-#ifdef CONFIG_AMD
+ switch (cpu_vendor()) {
case X86_VENDOR_AMD:
case X86_VENDOR_HYGON:
/* Assume we are on K8 or newer AMD or Hygon CPU here */
amd_nonfatal_mcheck_init(c);
break;
-#endif
-#ifdef CONFIG_INTEL
case X86_VENDOR_INTEL:
intel_nonfatal_mcheck_init(c);
break;
-#endif
default:
/* unhandled vendor isn't really an error */
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 1a7e92506a..0c1bfb691b 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -136,22 +136,18 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
break;
default:
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
-#ifdef CONFIG_INTEL
case X86_VENDOR_CENTAUR:
case X86_VENDOR_SHANGHAI:
case X86_VENDOR_INTEL:
ret = vmce_intel_rdmsr(v, msr, val);
break;
-#endif
-#ifdef CONFIG_AMD
case X86_VENDOR_AMD:
case X86_VENDOR_HYGON:
ret = vmce_amd_rdmsr(v, msr, val);
break;
-#endif
default:
ret = 0;
@@ -273,20 +269,16 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
break;
default:
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
-#ifdef CONFIG_INTEL
case X86_VENDOR_INTEL:
ret = vmce_intel_wrmsr(v, msr, val);
break;
-#endif
-#ifdef CONFIG_AMD
case X86_VENDOR_AMD:
case X86_VENDOR_HYGON:
ret = vmce_amd_wrmsr(v, msr, val);
break;
-#endif
default:
ret = 0;
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index c28192ea26..b1ad4ebcf6 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -815,7 +815,7 @@ static struct notifier_block cpu_nfb = {
static int __init cf_check vpmu_init(void)
{
- int vendor = current_cpu_data.x86_vendor;
+ int vendor = cpu_vendor();
const struct arch_vpmu_ops *ops = NULL;
if ( !opt_vpmu_enabled )
@@ -832,7 +832,6 @@ static int __init cf_check vpmu_init(void)
switch ( vendor )
{
-#ifdef CONFIG_AMD
case X86_VENDOR_AMD:
ops = amd_vpmu_init();
break;
@@ -840,13 +839,10 @@ static int __init cf_check vpmu_init(void)
case X86_VENDOR_HYGON:
ops = hygon_vpmu_init();
break;
-#endif
-#ifdef CONFIG_INTEL
case X86_VENDOR_INTEL:
ops = core2_vpmu_init();
break;
-#endif
default:
printk(XENLOG_WARNING "VPMU: Unknown CPU vendor: %d. "
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8b2f33f1a0..10a558e515 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -319,7 +319,7 @@ void domain_cpu_policy_changed(struct domain *d)
if ( cpu_has_htt )
edx |= cpufeat_mask(X86_FEATURE_HTT);
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
/*
@@ -427,7 +427,7 @@ void domain_cpu_policy_changed(struct domain *d)
if ( !(p->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
edx &= ~CPUID_COMMON_1D_FEATURES;
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
mask &= ((uint64_t)edx << 32) | ecx;
diff --git a/xen/arch/x86/guest/xen/xen.c b/xen/arch/x86/guest/xen/xen.c
index ec558bcbdb..590a4bad82 100644
--- a/xen/arch/x86/guest/xen/xen.c
+++ b/xen/arch/x86/guest/xen/xen.c
@@ -63,7 +63,7 @@ void asmlinkage __init early_hypercall_setup(void)
x86_cpuid_vendor_to_str(boot_cpu_data.x86_vendor));
}
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index a0c9194ff0..a3ba59ce0b 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -216,7 +216,7 @@ void disable_lapic_nmi_watchdog(void)
{
if (nmi_active <= 0)
return;
- switch (boot_cpu_data.x86_vendor) {
+ switch (cpu_vendor()) {
case X86_VENDOR_AMD:
wrmsrns(MSR_K7_EVNTSEL0, 0);
break;
@@ -387,7 +387,7 @@ void setup_apic_nmi_watchdog(void)
if ( nmi_watchdog == NMI_NONE )
return;
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_AMD:
setup_k7_watchdog();
diff --git a/xen/arch/x86/traps-setup.c b/xen/arch/x86/traps-setup.c
index d77be8f839..7d7554a7ba 100644
--- a/xen/arch/x86/traps-setup.c
+++ b/xen/arch/x86/traps-setup.c
@@ -243,7 +243,7 @@ static void __init init_ler(void)
* Intel Pentium 4 is the only known CPU to not use the architectural MSR
* indices.
*/
- switch ( boot_cpu_data.x86_vendor )
+ switch ( cpu_vendor() )
{
case X86_VENDOR_INTEL:
if ( boot_cpu_data.x86 == 0xf )
--
2.43.0