[PATCH 08/12] x86/acpi: Migrate vendor checks to cpu_vendor()
Replace open-coded comparisons of ->x86_vendor against individual
X86_VENDOR_* values with mask-based cpu_vendor() checks. Not a
functional change.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
xen/arch/x86/acpi/cpu_idle.c | 16 +++++++---------
xen/arch/x86/acpi/cpufreq/acpi.c | 2 +-
xen/arch/x86/acpi/cpufreq/cpufreq.c | 3 +--
3 files changed, 9 insertions(+), 12 deletions(-)
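For reviewers skimming the conversion: the mask-based checks rely on
each X86_VENDOR_* constant being a distinct bit, with cpu_vendor()
returning the (single) vendor bit of the CPU, so one bitwise AND can
test membership in a whole set of vendors at once. A minimal
standalone sketch of that pattern, using stand-in definitions rather
than the real Xen ones:

    #include <stdio.h>

    /* Stand-in constants: assumes each vendor occupies its own bit,
     * which is what makes the mask checks in this patch work. */
    #define X86_VENDOR_INTEL (1U << 0)
    #define X86_VENDOR_AMD   (1U << 1)
    #define X86_VENDOR_HYGON (1U << 2)

    /* Stand-in for Xen's cpu_vendor(); pretend we booted on AMD. */
    static unsigned int cpu_vendor(void)
    {
        return X86_VENDOR_AMD;
    }

    int main(void)
    {
        unsigned int v = cpu_vendor();

        /* Old style: one equality test per vendor. */
        int old_style = (v == X86_VENDOR_AMD) || (v == X86_VENDOR_HYGON);

        /* New style: one mask test covers the whole vendor set. */
        int new_style = !!(cpu_vendor() &
                           (X86_VENDOR_AMD | X86_VENDOR_HYGON));

        printf("old=%d new=%d\n", old_style, new_style); /* old=1 new=1 */
        return 0;
    }

Either form yields the same result for a single vendor; the mask form
simply scales to vendor sets such as AMD|HYGON without chained
equality tests.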
diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 0b3d0631dd..0a49b2730b 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -178,7 +178,7 @@ static void cf_check do_get_hw_residencies(void *arg)
struct cpuinfo_x86 *c = &current_cpu_data;
struct hw_residencies *hw_res = arg;
- if ( c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6 )
+ if ( !(cpu_vendor() & X86_VENDOR_INTEL) || c->x86 != 6 )
return;
switch ( c->x86_model )
@@ -915,8 +915,7 @@ void cf_check acpi_dead_idle(void)
mwait(cx->address, 0);
}
}
- else if ( (current_cpu_data.x86_vendor &
- (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+ else if ( (cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
cx->entry_method == ACPI_CSTATE_EM_SYSIO )
{
/* Intel prefers not to use SYSIO */
@@ -1042,8 +1041,8 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
flags->bm_check = 0;
if ( num_online_cpus() == 1 )
flags->bm_check = 1;
- else if ( (c->x86_vendor == X86_VENDOR_INTEL) ||
- ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 0x15)) )
+ else if ( (cpu_vendor() & X86_VENDOR_INTEL) ||
+ ((cpu_vendor() & X86_VENDOR_AMD) && c->x86 == 0x15) )
{
/*
* Today all MP CPUs that support C3 share cache.
@@ -1059,7 +1058,7 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
* is not required while entering C3 type state on
* P4, Core and beyond CPUs
*/
- if ( c->x86_vendor == X86_VENDOR_INTEL &&
+ if ( (cpu_vendor() & X86_VENDOR_INTEL) &&
(c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
flags->bm_control = 0;
}
@@ -1421,7 +1420,7 @@ static void amd_cpuidle_init(struct acpi_processor_power *power)
case 0x1a:
case 0x19:
case 0x18:
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_HYGON )
+ if ( !(cpu_vendor() & X86_VENDOR_HYGON) )
{
default:
vendor_override = -1;
@@ -1648,8 +1647,7 @@ static int cf_check cpu_callback(
break;
case CPU_ONLINE:
- if ( (boot_cpu_data.x86_vendor &
- (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+ if ( (cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
processor_powers[cpu] )
amd_cpuidle_init(processor_powers[cpu]);
break;
diff --git a/xen/arch/x86/acpi/cpufreq/acpi.c b/xen/arch/x86/acpi/cpufreq/acpi.c
index d0ca660db1..a5e5c223b3 100644
--- a/xen/arch/x86/acpi/cpufreq/acpi.c
+++ b/xen/arch/x86/acpi/cpufreq/acpi.c
@@ -454,7 +454,7 @@ static int cf_check acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* Check for APERF/MPERF support in hardware
* also check for boost support */
- if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6)
+ if ((cpu_vendor() & X86_VENDOR_INTEL) && c->cpuid_level >= 6)
on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
/*
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 4ee03ecc65..d731a21644 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -246,8 +246,7 @@ __initcall(cpufreq_driver_late_init);
int cpufreq_cpu_init(unsigned int cpu)
{
/* Currently we only handle Intel, AMD and Hygon processor */
- if ( boot_cpu_data.x86_vendor &
- (X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( cpu_vendor() & (X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
return cpufreq_add_cpu(cpu);
return -EOPNOTSUPP;
--
2.43.0