[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v4 02/15] xen/cpufreq: extract _PSD info from "struct xen_processor_performance"


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Penny Zheng <Penny.Zheng@xxxxxxx>
  • Date: Mon, 14 Apr 2025 15:40:43 +0800
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass (sender ip is 165.204.84.17) smtp.rcpttodomain=lists.xenproject.org smtp.mailfrom=amd.com; dmarc=pass (p=quarantine sp=quarantine pct=100) action=none header.from=amd.com; dkim=none (message not signed); arc=none (0)
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=ArA15AsxDcV53pi24NZHesBDb6FJeO7+vzxGj359L/E=; b=PxU4mkUMjTz8GNvckSzuBjH7sTG4uYUkLNHo+X6TDKK+DuPpdPnrg4r8aUHhNj10/VjDny6KfwGyvSXauRXWeNtcotTcHHdcKHnZXc0gA7+f8lCgfi5EXiN0JzJvhARFGrf00PuI+HH6EB9Yr9m0vHlSIGpxvP4y6kX9+Q2Fa52bvU7rn1zWakAeC2eOi3KJy6KQH6rgKnEBCw72MsoxSgSWFtBSjNsDv0I0Bd7b4qbb0QdydKAovm7rvaI6qreoEmOFYZUgfbBGDVGzVL//VARLAGwcdba7GuHeO3OEPT7COq15LRi6MahfPibG9EZF4SzqVZDrPj5sx0GV0hWYRw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=qBFTu1gird7mucKYLZlyFSMj/3xWTaeTDuhq4Lh79E8fOiYrJ5Z0kXD0NgT6fpx1MDd6Tyt8MGwpkiNMQYmQM5a9ihRg0jZf/OAjrBqI3JE5Cy8V3TWtXsR0cNlG0JgQmThl3GkHz8iMlJD++4PxK8Vs5D+46vn2jAitYdHgW19YKiPTcqpT1UDanB9S/MmVrh2YTkj+K9wmT5/mMW1tR+20WDqeKNzRTltQNnbsvx5EJCCNTJ2I7LNbN2/R6yEFe2Ks7RiC93Rwi4Y6M/oxUDoVs9nneqHafMDcCPdp5VYOm+EpN0jatFV2BV2Yu6PLzrMdXaskU/PKVrH8ABd9UA==
  • Cc: <ray.huang@xxxxxxx>, Penny Zheng <Penny.Zheng@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, "Anthony PERARD" <anthony.perard@xxxxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Julien Grall <julien@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>
  • Delivery-date: Mon, 14 Apr 2025 07:41:40 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Since we need to re-use _PSD info, containing "shared_type" and
"struct xen_psd_package", for CPPC mode, we move all
"#define XEN_CPUPERF_SHARED_TYPE_xxx" up as common values, and introduce
a new helper check_psd_pminfo() to wrap _PSD info check.

In cpufreq_add/del_cpu(), a new helper get_psd_info() is introduced to
extract "shared_type" and "struct xen_psd_package" from
"struct xen_processor_performance", and a few indentation get fixed at
the same time.

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v3 -> v4:
- new commit
---
 xen/drivers/cpufreq/cpufreq.c | 107 ++++++++++++++++++++++++----------
 xen/include/public/platform.h |  10 ++--
 2 files changed, 82 insertions(+), 35 deletions(-)

diff --git a/xen/drivers/cpufreq/cpufreq.c b/xen/drivers/cpufreq/cpufreq.c
index b01ed8e294..b020ccbcf7 100644
--- a/xen/drivers/cpufreq/cpufreq.c
+++ b/xen/drivers/cpufreq/cpufreq.c
@@ -191,9 +191,31 @@ int cpufreq_limit_change(unsigned int cpu)
     return __cpufreq_set_policy(data, &policy);
 }
 
-int cpufreq_add_cpu(unsigned int cpu)
+static int get_psd_info(uint32_t init, unsigned int cpu,
+                        uint32_t *shared_type,
+                        struct xen_psd_package *domain_info)
 {
     int ret = 0;
+
+    switch ( init )
+    {
+    case XEN_PX_INIT:
+        if ( shared_type )
+            *shared_type = processor_pminfo[cpu]->perf.shared_type;
+        if ( domain_info )
+            *domain_info = processor_pminfo[cpu]->perf.domain_info;
+        break;
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+int cpufreq_add_cpu(unsigned int cpu)
+{
+    int ret;
     unsigned int firstcpu;
     unsigned int dom, domexist = 0;
     unsigned int hw_all = 0;
@@ -201,14 +223,13 @@ int cpufreq_add_cpu(unsigned int cpu)
     struct cpufreq_dom *cpufreq_dom = NULL;
     struct cpufreq_policy new_policy;
     struct cpufreq_policy *policy;
-    struct processor_performance *perf;
+    struct xen_psd_package domain_info;
+    uint32_t shared_type;
 
     /* to protect the case when Px was not controlled by xen */
     if ( !processor_pminfo[cpu] || !cpu_online(cpu) )
         return -EINVAL;
 
-    perf = &processor_pminfo[cpu]->perf;
-
     if ( !(processor_pminfo[cpu]->init & XEN_PX_INIT) )
         return -EINVAL;
 
@@ -218,10 +239,15 @@ int cpufreq_add_cpu(unsigned int cpu)
     if (per_cpu(cpufreq_cpu_policy, cpu))
         return 0;
 
-    if (perf->shared_type == CPUFREQ_SHARED_TYPE_HW)
+    ret = get_psd_info(processor_pminfo[cpu]->init, cpu,
+                       &shared_type, &domain_info);
+    if ( ret )
+        return ret;
+
+    if ( shared_type == CPUFREQ_SHARED_TYPE_HW )
         hw_all = 1;
 
-    dom = perf->domain_info.domain;
+    dom = domain_info.domain;
 
     list_for_each(pos, &cpufreq_dom_list_head) {
         cpufreq_dom = list_entry(pos, struct cpufreq_dom, node);
@@ -244,20 +270,27 @@ int cpufreq_add_cpu(unsigned int cpu)
         cpufreq_dom->dom = dom;
         list_add(&cpufreq_dom->node, &cpufreq_dom_list_head);
     } else {
+        uint32_t firstcpu_shared_type;
+        struct xen_psd_package firstcpu_domain_info;
+
         /* domain sanity check under whatever coordination type */
         firstcpu = cpumask_first(cpufreq_dom->map);
-        if ((perf->domain_info.coord_type !=
-            processor_pminfo[firstcpu]->perf.domain_info.coord_type) ||
-            (perf->domain_info.num_processors !=
-            processor_pminfo[firstcpu]->perf.domain_info.num_processors)) {
-
+        ret = get_psd_info(processor_pminfo[firstcpu]->init, firstcpu,
+                           &firstcpu_shared_type, &firstcpu_domain_info);
+        if ( ret )
+            return ret;
+
+        if ( (domain_info.coord_type != firstcpu_domain_info.coord_type) ||
+             (domain_info.num_processors !=
+              firstcpu_domain_info.num_processors) )
+        {
             printk(KERN_WARNING "cpufreq fail to add CPU%d:"
                    "incorrect _PSD(%"PRIu64":%"PRIu64"), "
                    "expect(%"PRIu64"/%"PRIu64")\n",
-                   cpu, perf->domain_info.coord_type,
-                   perf->domain_info.num_processors,
-                   processor_pminfo[firstcpu]->perf.domain_info.coord_type,
-                   processor_pminfo[firstcpu]->perf.domain_info.num_processors
+                   cpu, domain_info.coord_type,
+                   domain_info.num_processors,
+                   firstcpu_domain_info.coord_type,
+                   firstcpu_domain_info.num_processors
                 );
             return -EINVAL;
         }
@@ -304,8 +337,9 @@ int cpufreq_add_cpu(unsigned int cpu)
     if (ret)
         goto err1;
 
-    if (hw_all || (cpumask_weight(cpufreq_dom->map) ==
-                   perf->domain_info.num_processors)) {
+    if ( hw_all || (cpumask_weight(cpufreq_dom->map) ==
+                    domain_info.num_processors) )
+    {
         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
         policy->governor = NULL;
 
@@ -354,29 +388,34 @@ err0:
 
 int cpufreq_del_cpu(unsigned int cpu)
 {
+    int ret;
     unsigned int dom, domexist = 0;
     unsigned int hw_all = 0;
     struct list_head *pos;
     struct cpufreq_dom *cpufreq_dom = NULL;
     struct cpufreq_policy *policy;
-    struct processor_performance *perf;
+    uint32_t shared_type;
+    struct xen_psd_package domain_info;
 
     /* to protect the case when Px was not controlled by xen */
     if ( !processor_pminfo[cpu] || !cpu_online(cpu) )
         return -EINVAL;
 
-    perf = &processor_pminfo[cpu]->perf;
-
     if ( !(processor_pminfo[cpu]->init & XEN_PX_INIT) )
         return -EINVAL;
 
     if (!per_cpu(cpufreq_cpu_policy, cpu))
         return 0;
 
-    if (perf->shared_type == CPUFREQ_SHARED_TYPE_HW)
+    ret = get_psd_info(processor_pminfo[cpu]->init, cpu,
+                       &shared_type, &domain_info);
+    if ( ret )
+        return ret;
+
+    if ( shared_type == CPUFREQ_SHARED_TYPE_HW )
         hw_all = 1;
 
-    dom = perf->domain_info.domain;
+    dom = domain_info.domain;
     policy = per_cpu(cpufreq_cpu_policy, cpu);
 
     list_for_each(pos, &cpufreq_dom_list_head) {
@@ -392,8 +431,8 @@ int cpufreq_del_cpu(unsigned int cpu)
 
     /* for HW_ALL, stop gov for each core of the _PSD domain */
     /* for SW_ALL & SW_ANY, stop gov for the 1st core of the _PSD domain */
-    if (hw_all || (cpumask_weight(cpufreq_dom->map) ==
-                   perf->domain_info.num_processors))
+    if ( hw_all || (cpumask_weight(cpufreq_dom->map) ==
+                    domain_info.num_processors) )
         __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 
     cpufreq_statistic_exit(cpu);
@@ -458,6 +497,17 @@ static void print_PPC(unsigned int platform_limit)
     printk("\t_PPC: %d\n", platform_limit);
 }
 
+static int check_psd_pminfo(const struct xen_processor_performance *perf)
+{
+    /* check domain coordination */
+    if ( perf->shared_type != CPUFREQ_SHARED_TYPE_ALL &&
+         perf->shared_type != CPUFREQ_SHARED_TYPE_ANY &&
+         perf->shared_type != CPUFREQ_SHARED_TYPE_HW )
+        return -EINVAL;
+
+    return 0;
+}
+
 int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *perf)
 {
     int ret = 0, cpu;
@@ -539,14 +589,9 @@ int set_px_pminfo(uint32_t acpi_id, struct xen_processor_performance *perf)
 
     if ( perf->flags & XEN_PX_PSD )
     {
-        /* check domain coordination */
-        if ( perf->shared_type != CPUFREQ_SHARED_TYPE_ALL &&
-             perf->shared_type != CPUFREQ_SHARED_TYPE_ANY &&
-             perf->shared_type != CPUFREQ_SHARED_TYPE_HW )
-        {
-            ret = -EINVAL;
+        ret = check_psd_pminfo(perf);
+        if ( ret )
             goto out;
-        }
 
         pxpt->shared_type = perf->shared_type;
         memcpy(&pxpt->domain_info, &perf->domain_info,
diff --git a/xen/include/public/platform.h b/xen/include/public/platform.h
index 2725b8d104..67cf5eeabd 100644
--- a/xen/include/public/platform.h
+++ b/xen/include/public/platform.h
@@ -440,6 +440,11 @@ struct xen_psd_package {
     uint64_t num_processors;
 };
 
+/* Coordination type value */
+#define XEN_CPUPERF_SHARED_TYPE_HW   1 /* HW does needed coordination */
+#define XEN_CPUPERF_SHARED_TYPE_ALL  2 /* All dependent CPUs should set freq */
+#define XEN_CPUPERF_SHARED_TYPE_ANY  3 /* Freq can be set from any dependent CPU */
+
 struct xen_processor_performance {
     uint32_t flags;     /* flag for Px sub info type */
     uint32_t platform_limit;  /* Platform limitation on freq usage */
@@ -449,10 +454,7 @@ struct xen_processor_performance {
     XEN_GUEST_HANDLE(xen_processor_px_t) states;
     struct xen_psd_package domain_info;
     /* Coordination type of this processor */
-#define XEN_CPUPERF_SHARED_TYPE_HW   1 /* HW does needed coordination */
-#define XEN_CPUPERF_SHARED_TYPE_ALL  2 /* All dependent CPUs should set freq */
-#define XEN_CPUPERF_SHARED_TYPE_ANY  3 /* Freq can be set from any dependent CPU */
-    uint32_t shared_type;
+    uint32_t shared_type; /* XEN_CPUPERF_SHARED_TYPE_xxx */
 };
 typedef struct xen_processor_performance xen_processor_performance_t;
 DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t);
-- 
2.34.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.