
[PATCH v6 06/19] xen/cpufreq: make _PSD info common


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Penny Zheng <Penny.Zheng@xxxxxxx>
  • Date: Fri, 11 Jul 2025 11:50:53 +0800
  • Cc: <ray.huang@xxxxxxx>, Penny Zheng <Penny.Zheng@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>
  • Delivery-date: Fri, 11 Jul 2025 03:51:46 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

_PSD info, consisting of "shared_type" and "struct xen_psd_package", will not
only be provided by the Px-specific "struct xen_processor_performance", but
also by CPPC data.

Two new helper functions are introduced to deal with _PSD; they will later be
re-used to handle the same data coming from CPPC.
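
For illustration only, a minimal sketch of how the new helper could later grow
a CPPC branch; the XEN_CPPC_INIT flag and the cppc_data field below are
assumed, hypothetical names and are not introduced by this patch:

    static int get_psd_info(unsigned int cpu, uint32_t *shared_type,
                            const struct xen_psd_package **domain_info)
    {
        int ret = 0;

        switch ( processor_pminfo[cpu]->init )
        {
        case XEN_PX_INIT:
            *shared_type = processor_pminfo[cpu]->perf.shared_type;
            *domain_info = &processor_pminfo[cpu]->perf.domain_info;
            break;

        /* Hypothetical follow-up: CPPC data carries the same _PSD fields. */
        case XEN_CPPC_INIT:
            *shared_type = processor_pminfo[cpu]->cppc_data.shared_type;
            *domain_info = &processor_pminfo[cpu]->cppc_data.domain_info;
            break;

        default:
            ret = -EINVAL;
            break;
        }

        return ret;
    }
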
While at it, the following style corrections are applied:
- add spaces immediately inside the parentheses of if()
- remove redundant parentheses
- no need to put the closing parenthesis of printk() on a separate line

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v3 -> v4:
- new commit
---
v4 -> v5:
- have check_psd_pminfo() take "uint32_t shared_type" as its parameter
- replace the unnecessary "uint32_t init" parameter with processor_pminfo[cpu]->init
- replace structure copy with const pointer delivery through
  "const struct xen_psd_package **"
- blank line between non-fall-through switch-case blocks
- remove unnecessary "define XEN_CPUPERF_SHARED_TYPE_xxx" movement
---
v5 -> v6:
- remove redundant local variable "domain_info_ptr"
- change check_psd_pminfo() to return bool
- start the comment with a capital letter
- reword title and commit message
---
 xen/drivers/cpufreq/cpufreq.c | 100 ++++++++++++++++++++++++----------
 1 file changed, 71 insertions(+), 29 deletions(-)

diff --git a/xen/drivers/cpufreq/cpufreq.c b/xen/drivers/cpufreq/cpufreq.c
index 887bc5953d..e387b8a0d9 100644
--- a/xen/drivers/cpufreq/cpufreq.c
+++ b/xen/drivers/cpufreq/cpufreq.c
@@ -226,9 +226,29 @@ int cpufreq_limit_change(unsigned int cpu)
     return __cpufreq_set_policy(data, &policy);
 }
 
-int cpufreq_add_cpu(unsigned int cpu)
+static int get_psd_info(unsigned int cpu, uint32_t *shared_type,
+                        const struct xen_psd_package **domain_info)
 {
     int ret = 0;
+
+    switch ( processor_pminfo[cpu]->init )
+    {
+    case XEN_PX_INIT:
+        *shared_type = processor_pminfo[cpu]->perf.shared_type;
+        *domain_info = &processor_pminfo[cpu]->perf.domain_info;
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+int cpufreq_add_cpu(unsigned int cpu)
+{
+    int ret;
     unsigned int firstcpu;
     unsigned int dom, domexist = 0;
     unsigned int hw_all = 0;
@@ -236,14 +256,13 @@ int cpufreq_add_cpu(unsigned int cpu)
     struct cpufreq_dom *cpufreq_dom = NULL;
     struct cpufreq_policy new_policy;
     struct cpufreq_policy *policy;
-    struct processor_performance *perf;
+    const struct xen_psd_package *domain_info;
+    uint32_t shared_type;
 
     /* to protect the case when Px was not controlled by xen */
     if ( !processor_pminfo[cpu] || !cpu_online(cpu) )
         return -EINVAL;
 
-    perf = &processor_pminfo[cpu]->perf;
-
     if ( !(processor_pminfo[cpu]->init & XEN_PX_INIT) )
         return -EINVAL;
 
@@ -253,10 +272,14 @@ int cpufreq_add_cpu(unsigned int cpu)
     if (per_cpu(cpufreq_cpu_policy, cpu))
         return 0;
 
-    if (perf->shared_type == CPUFREQ_SHARED_TYPE_HW)
+    ret = get_psd_info(cpu, &shared_type, &domain_info);
+    if ( ret )
+        return ret;
+
+    if ( shared_type == CPUFREQ_SHARED_TYPE_HW )
         hw_all = 1;
 
-    dom = perf->domain_info.domain;
+    dom = domain_info->domain;
 
     list_for_each(pos, &cpufreq_dom_list_head) {
         cpufreq_dom = list_entry(pos, struct cpufreq_dom, node);
@@ -279,21 +302,27 @@ int cpufreq_add_cpu(unsigned int cpu)
         cpufreq_dom->dom = dom;
         list_add(&cpufreq_dom->node, &cpufreq_dom_list_head);
     } else {
+        uint32_t firstcpu_shared_type;
+        const struct xen_psd_package *firstcpu_domain_info;
+
         /* domain sanity check under whatever coordination type */
         firstcpu = cpumask_first(cpufreq_dom->map);
-        if ((perf->domain_info.coord_type !=
-            processor_pminfo[firstcpu]->perf.domain_info.coord_type) ||
-            (perf->domain_info.num_processors !=
-            processor_pminfo[firstcpu]->perf.domain_info.num_processors)) {
-
+        ret = get_psd_info(firstcpu, &firstcpu_shared_type,
+                           &firstcpu_domain_info);
+        if ( ret )
+            return ret;
+
+        if ( domain_info->coord_type != firstcpu_domain_info->coord_type ||
+             domain_info->num_processors !=
+             firstcpu_domain_info->num_processors )
+        {
             printk(KERN_WARNING "cpufreq fail to add CPU%d:"
                    "incorrect _PSD(%"PRIu64":%"PRIu64"), "
                    "expect(%"PRIu64"/%"PRIu64")\n",
-                   cpu, perf->domain_info.coord_type,
-                   perf->domain_info.num_processors,
-                   processor_pminfo[firstcpu]->perf.domain_info.coord_type,
-                   processor_pminfo[firstcpu]->perf.domain_info.num_processors
-                );
+                   cpu, domain_info->coord_type,
+                   domain_info->num_processors,
+                   firstcpu_domain_info->coord_type,
+                   firstcpu_domain_info->num_processors);
             return -EINVAL;
         }
     }
@@ -339,8 +368,9 @@ int cpufreq_add_cpu(unsigned int cpu)
     if (ret)
         goto err1;
 
-    if (hw_all || (cpumask_weight(cpufreq_dom->map) ==
-                   perf->domain_info.num_processors)) {
+    if ( hw_all || cpumask_weight(cpufreq_dom->map) ==
+                   domain_info->num_processors )
+    {
         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
 
         /*
@@ -395,29 +425,33 @@ err0:
 
 int cpufreq_del_cpu(unsigned int cpu)
 {
+    int ret;
     unsigned int dom, domexist = 0;
     unsigned int hw_all = 0;
     struct list_head *pos;
     struct cpufreq_dom *cpufreq_dom = NULL;
     struct cpufreq_policy *policy;
-    struct processor_performance *perf;
+    uint32_t shared_type;
+    const struct xen_psd_package *domain_info;
 
     /* to protect the case when Px was not controlled by xen */
     if ( !processor_pminfo[cpu] || !cpu_online(cpu) )
         return -EINVAL;
 
-    perf = &processor_pminfo[cpu]->perf;
-
     if ( !(processor_pminfo[cpu]->init & XEN_PX_INIT) )
         return -EINVAL;
 
     if (!per_cpu(cpufreq_cpu_policy, cpu))
         return 0;
 
-    if (perf->shared_type == CPUFREQ_SHARED_TYPE_HW)
+    ret = get_psd_info(cpu, &shared_type, &domain_info);
+    if ( ret )
+        return ret;
+
+    if ( shared_type == CPUFREQ_SHARED_TYPE_HW )
         hw_all = 1;
 
-    dom = perf->domain_info.domain;
+    dom = domain_info->domain;
     policy = per_cpu(cpufreq_cpu_policy, cpu);
 
     list_for_each(pos, &cpufreq_dom_list_head) {
@@ -433,8 +467,8 @@ int cpufreq_del_cpu(unsigned int cpu)
 
     /* for HW_ALL, stop gov for each core of the _PSD domain */
     /* for SW_ALL & SW_ANY, stop gov for the 1st core of the _PSD domain */
-    if (hw_all || (cpumask_weight(cpufreq_dom->map) ==
-                   perf->domain_info.num_processors))
+    if ( hw_all || cpumask_weight(cpufreq_dom->map) ==
+                   domain_info->num_processors )
         __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 
     cpufreq_statistic_exit(cpu);
@@ -499,6 +533,17 @@ static void print_PPC(unsigned int platform_limit)
     printk("\t_PPC: %d\n", platform_limit);
 }
 
+static bool check_psd_pminfo(uint32_t shared_type)
+{
+    /* Check domain coordination */
+    if ( shared_type != CPUFREQ_SHARED_TYPE_ALL &&
+         shared_type != CPUFREQ_SHARED_TYPE_ANY &&
+         shared_type != CPUFREQ_SHARED_TYPE_HW )
+        return false;
+
+    return true;
+}
+
 int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *perf)
 {
     int ret = 0, cpu;
@@ -581,10 +626,7 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *perf)
 
     if ( perf->flags & XEN_PX_PSD )
     {
-        /* check domain coordination */
-        if ( perf->shared_type != CPUFREQ_SHARED_TYPE_ALL &&
-             perf->shared_type != CPUFREQ_SHARED_TYPE_ANY &&
-             perf->shared_type != CPUFREQ_SHARED_TYPE_HW )
+        if ( !check_psd_pminfo(perf->shared_type) )
         {
             ret = -EINVAL;
             goto out;
-- 
2.34.1
