
[PATCH 08/12] x86/acpi: Migrate vendor checks to cpu_vendor()


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
  • Date: Fri, 6 Feb 2026 17:15:31 +0100
  • Cc: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Jason Andryuk <jason.andryuk@xxxxxxx>
  • Delivery-date: Fri, 06 Feb 2026 16:16:21 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Replace open-coded ->x86_vendor comparisons in the x86 ACPI code with the
cpu_vendor() helper. Not a functional change.
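
The conversions rely on the X86_VENDOR_* constants being single-bit masks,
so one cpu_vendor() & ... test can cover several vendors at once. Below is a
standalone sketch of the pattern; the bit values, the cpu_vendor() body and
the boot_vendor variable are illustrative assumptions, not the actual Xen
definitions.

/*
 * Standalone illustration of mask-based vendor checks. The values and
 * helpers here are stand-ins, not the real Xen code.
 */
#include <stdbool.h>
#include <stdio.h>

#define X86_VENDOR_INTEL (1u << 0)
#define X86_VENDOR_AMD   (1u << 1)
#define X86_VENDOR_HYGON (1u << 2)

/* Pretend the boot CPU was identified as AMD at startup. */
static unsigned int boot_vendor = X86_VENDOR_AMD;

/* Assumed semantics: return the boot CPU's vendor as a single-bit mask. */
static unsigned int cpu_vendor(void)
{
    return boot_vendor;
}

int main(void)
{
    /* Old style: one equality comparison per vendor. */
    bool old_check = boot_vendor == X86_VENDOR_AMD ||
                     boot_vendor == X86_VENDOR_HYGON;

    /* New style: a single mask test matches either vendor. */
    bool new_check = cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON);

    printf("old=%d new=%d\n", old_check, new_check);
    return 0;
}

Both checks print 1 for an AMD boot CPU; negated checks become
!(cpu_vendor() & X86_VENDOR_INTEL), as in the hunks below.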

Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
 xen/arch/x86/acpi/cpu_idle.c        | 16 +++++++---------
 xen/arch/x86/acpi/cpufreq/acpi.c    |  2 +-
 xen/arch/x86/acpi/cpufreq/cpufreq.c |  3 +--
 3 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 0b3d0631dd..0a49b2730b 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -178,7 +178,7 @@ static void cf_check do_get_hw_residencies(void *arg)
     struct cpuinfo_x86 *c = &current_cpu_data;
     struct hw_residencies *hw_res = arg;
 
-    if ( c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6 )
+    if ( !(cpu_vendor() & X86_VENDOR_INTEL) || c->x86 != 6 )
         return;
 
     switch ( c->x86_model )
@@ -915,8 +915,7 @@ void cf_check acpi_dead_idle(void)
             mwait(cx->address, 0);
         }
     }
-    else if ( (current_cpu_data.x86_vendor &
-               (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+    else if ( cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON) &&
               cx->entry_method == ACPI_CSTATE_EM_SYSIO )
     {
         /* Intel prefers not to use SYSIO */
@@ -1042,8 +1041,8 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
     flags->bm_check = 0;
     if ( num_online_cpus() == 1 )
         flags->bm_check = 1;
-    else if ( (c->x86_vendor == X86_VENDOR_INTEL) ||
-              ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 0x15)) )
+    else if ( (cpu_vendor() & X86_VENDOR_INTEL) ||
+              ((cpu_vendor() & X86_VENDOR_AMD) && c->x86 == 0x15) )
     {
         /*
          * Today all MP CPUs that support C3 share cache.
@@ -1059,7 +1058,7 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
      * is not required while entering C3 type state on
      * P4, Core and beyond CPUs
      */
-    if ( c->x86_vendor == X86_VENDOR_INTEL &&
+    if ( (cpu_vendor() & X86_VENDOR_INTEL) &&
         (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
             flags->bm_control = 0;
 }
@@ -1421,7 +1420,7 @@ static void amd_cpuidle_init(struct acpi_processor_power *power)
     case 0x1a:
     case 0x19:
     case 0x18:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_HYGON )
+        if ( !(cpu_vendor() & X86_VENDOR_HYGON) )
         {
     default:
             vendor_override = -1;
@@ -1648,8 +1647,7 @@ static int cf_check cpu_callback(
         break;
 
     case CPU_ONLINE:
-        if ( (boot_cpu_data.x86_vendor &
-              (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+        if ( cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON) &&
              processor_powers[cpu] )
             amd_cpuidle_init(processor_powers[cpu]);
         break;
diff --git a/xen/arch/x86/acpi/cpufreq/acpi.c b/xen/arch/x86/acpi/cpufreq/acpi.c
index d0ca660db1..a5e5c223b3 100644
--- a/xen/arch/x86/acpi/cpufreq/acpi.c
+++ b/xen/arch/x86/acpi/cpufreq/acpi.c
@@ -454,7 +454,7 @@ static int cf_check acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
     /* Check for APERF/MPERF support in hardware
      * also check for boost support */
-    if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6)
+    if ((cpu_vendor() & X86_VENDOR_INTEL) && c->cpuid_level >= 6)
         on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
 
     /*
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 4ee03ecc65..d731a21644 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -246,8 +246,7 @@ __initcall(cpufreq_driver_late_init);
 int cpufreq_cpu_init(unsigned int cpu)
 {
     /* Currently we only handle Intel, AMD and Hygon processor */
-    if ( boot_cpu_data.x86_vendor &
-         (X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+    if ( cpu_vendor() & (X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
         return cpufreq_add_cpu(cpu);
 
     return -EOPNOTSUPP;
-- 
2.43.0