|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 2/6] x86: Remove x86 prefixed names from acpi code
struct cpuinfo_x86
.x86 => .family
.x86_vendor => .vendor
.x86_model => .model
.x86_mask => .stepping
No functional change.
This work is part of making Xen safe for Intel family 18/19.
Signed-off-by: Kevin Lampis <kevin.lampis@xxxxxxxxxx>
---
Inside do_get_hw_residencies() there is an explicit check for
`c->family != 6`. Do we need to extend this check to cover
family 18/19 as well?
---
xen/arch/x86/acpi/cpu_idle.c | 20 ++++++++++----------
xen/arch/x86/acpi/cpufreq/acpi.c | 2 +-
xen/arch/x86/acpi/cpufreq/cpufreq.c | 4 ++--
xen/arch/x86/acpi/cpufreq/powernow.c | 4 ++--
4 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 0b3d0631dd..1d2ef776fe 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -178,10 +178,11 @@ static void cf_check do_get_hw_residencies(void *arg)
struct cpuinfo_x86 *c = &current_cpu_data;
struct hw_residencies *hw_res = arg;
- if ( c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6 )
+ /* XXX Does this need to be extended to include fam 18/19? */
+ if ( c->vendor != X86_VENDOR_INTEL || c->family != 6 )
return;
- switch ( c->x86_model )
+ switch ( c->model )
{
/* 4th generation Intel Core (Haswell) */
case 0x45:
@@ -915,7 +916,7 @@ void cf_check acpi_dead_idle(void)
mwait(cx->address, 0);
}
}
- else if ( (current_cpu_data.x86_vendor &
+ else if ( (current_cpu_data.vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
cx->entry_method == ACPI_CSTATE_EM_SYSIO )
{
@@ -1042,8 +1043,8 @@ static void acpi_processor_power_init_bm_check(struct
acpi_processor_flags *flag
flags->bm_check = 0;
if ( num_online_cpus() == 1 )
flags->bm_check = 1;
- else if ( (c->x86_vendor == X86_VENDOR_INTEL) ||
- ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 0x15)) )
+ else if ( (c->vendor == X86_VENDOR_INTEL) ||
+ ((c->vendor == X86_VENDOR_AMD) && (c->family == 0x15)) )
{
/*
* Today all MP CPUs that support C3 share cache.
@@ -1059,8 +1060,7 @@ static void acpi_processor_power_init_bm_check(struct
acpi_processor_flags *flag
* is not required while entering C3 type state on
* P4, Core and beyond CPUs
*/
- if ( c->x86_vendor == X86_VENDOR_INTEL &&
- (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
+ if ( c->vendor == X86_VENDOR_INTEL && c->vfm >= INTEL_CORE_YONAH )
flags->bm_control = 0;
}
@@ -1416,12 +1416,12 @@ static void amd_cpuidle_init(struct
acpi_processor_power *power)
if ( vendor_override < 0 )
return;
- switch ( c->x86 )
+ switch ( c->family )
{
case 0x1a:
case 0x19:
case 0x18:
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_HYGON )
+ if ( boot_cpu_data.vendor != X86_VENDOR_HYGON )
{
default:
vendor_override = -1;
@@ -1648,7 +1648,7 @@ static int cf_check cpu_callback(
break;
case CPU_ONLINE:
- if ( (boot_cpu_data.x86_vendor &
+ if ( (boot_cpu_data.vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
processor_powers[cpu] )
amd_cpuidle_init(processor_powers[cpu]);
diff --git a/xen/arch/x86/acpi/cpufreq/acpi.c b/xen/arch/x86/acpi/cpufreq/acpi.c
index d0ca660db1..de67f1aee2 100644
--- a/xen/arch/x86/acpi/cpufreq/acpi.c
+++ b/xen/arch/x86/acpi/cpufreq/acpi.c
@@ -454,7 +454,7 @@ static int cf_check acpi_cpufreq_cpu_init(struct
cpufreq_policy *policy)
/* Check for APERF/MPERF support in hardware
* also check for boost support */
- if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6)
+ if (c->vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6)
on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
/*
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c
b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 5740c0d438..9ef62b3538 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -133,7 +133,7 @@ static int __init cf_check cpufreq_driver_init(void)
ret = -ENOENT;
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
case X86_VENDOR_INTEL:
for ( i = 0; i < cpufreq_xen_cnt; i++ )
@@ -252,7 +252,7 @@ __initcall(cpufreq_driver_late_init);
int cpufreq_cpu_init(unsigned int cpu)
{
/* Currently we only handle Intel, AMD and Hygon processor */
- if ( boot_cpu_data.x86_vendor &
+ if ( boot_cpu_data.vendor &
(X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
return cpufreq_add_cpu(cpu);
diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c
b/xen/arch/x86/acpi/cpufreq/powernow.c
index beab6cac36..55166eac72 100644
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -143,7 +143,7 @@ static void amd_fixup_frequency(struct xen_processor_px *px)
int index = px->control & 0x00000007;
const struct cpuinfo_x86 *c = &current_cpu_data;
- if ((c->x86 != 0x10 || c->x86_model >= 10) && c->x86 != 0x11)
+ if ((c->family != 0x10 || c->model >= 10) && c->family != 0x11)
return;
val = rdmsr(MSR_PSTATE_DEF_BASE + index);
@@ -157,7 +157,7 @@ static void amd_fixup_frequency(struct xen_processor_px *px)
fid = val & 0x3f;
did = (val >> 6) & 7;
- if (c->x86 == 0x10)
+ if (c->family == 0x10)
px->core_frequency = (100 * (fid + 16)) >> did;
else
px->core_frequency = (100 * (fid + 8)) >> did;
--
2.51.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |