
[PATCH v3 14/15] xen/xenpm: Adapt cpu frequency monitor in xenpm


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Penny Zheng <Penny.Zheng@xxxxxxx>
  • Date: Thu, 6 Mar 2025 16:39:48 +0800
  • Cc: <ray.huang@xxxxxxx>, Penny Zheng <Penny.Zheng@xxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Juergen Gross <jgross@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>
  • Delivery-date: Thu, 06 Mar 2025 08:58:08 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Make `xenpm get-cpufreq-para/set-cpufreq-para` available in CPPC mode.
In `xenpm get-cpufreq-para <cpuid>`, the parameter scaling_available_frequencies
only has a meaningful value when the cpufreq driver runs in legacy P-state
mode, so loosen the "has_num" condition to bypass the
scaling_available_frequencies check in CPPC mode.
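
For illustration only, not part of this patch: a minimal sketch of a
tools-side caller of xc_get_cpufreq_para() that tolerates freq_num == 0,
as happens when the cpufreq driver runs in CPPC mode and exposes no
P-state table. The helper name query_cpufreq_para() and the
retry-on-EAGAIN flow are assumptions made for the sketch.

/*
 * Hedged sketch: query cpufreq parameters, allocating only the arrays
 * whose counts the hypervisor actually reports.  Assumes *p was
 * zero-initialised by the caller before the first call.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <xenctrl.h>

static int query_cpufreq_para(xc_interface *xch, int cpuid,
                              struct xc_get_cpufreq_para *p)
{
    for ( ; ; )
    {
        int ret = xc_get_cpufreq_para(xch, cpuid, p);

        if ( !ret || errno != EAGAIN )
            return ret;

        /* First pass only reported the array sizes; allocate and retry. */
        free(p->affected_cpus);
        free(p->scaling_available_frequencies);
        free(p->scaling_available_governors);
        p->affected_cpus = malloc(p->cpu_num * sizeof(uint32_t));
        /* In CPPC mode freq_num may be 0: no frequency table to fetch. */
        p->scaling_available_frequencies =
            p->freq_num ? malloc(p->freq_num * sizeof(uint32_t)) : NULL;
        p->scaling_available_governors =
            p->gov_num ? malloc(p->gov_num * CPUFREQ_NAME_LEN) : NULL;
        if ( !p->affected_cpus ||
             (p->freq_num && !p->scaling_available_frequencies) ||
             (p->gov_num && !p->scaling_available_governors) )
        {
            errno = ENOMEM;
            return -1;
        }
    }
}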

Also, in `xenpm start`, the monitoring of average frequency shall not depend
on the existence of legacy P-states.
Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v2 -> v3:
- new commit
---
 tools/libs/ctrl/xc_pm.c   | 12 +++++++-----
 tools/misc/xenpm.c        |  5 +++--
 xen/drivers/acpi/pmstat.c | 30 +++++++++++++++++-------------
 3 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/tools/libs/ctrl/xc_pm.c b/tools/libs/ctrl/xc_pm.c
index b27b45c3dc..d843b79d6d 100644
--- a/tools/libs/ctrl/xc_pm.c
+++ b/tools/libs/ctrl/xc_pm.c
@@ -214,13 +214,12 @@ int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
                         user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
 
     bool has_num = user_para->cpu_num &&
-                     user_para->freq_num &&
                      user_para->gov_num;
 
     if ( has_num )
     {
         if ( (!user_para->affected_cpus)                    ||
-             (!user_para->scaling_available_frequencies)    ||
+             (user_para->freq_num && !user_para->scaling_available_frequencies)    ||
              (user_para->gov_num && !user_para->scaling_available_governors) )
         {
             errno = EINVAL;
@@ -228,14 +227,16 @@ int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
         }
         if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
             goto unlock_1;
-        if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
+        if ( user_para->freq_num &&
+             xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
             goto unlock_2;
         if ( user_para->gov_num &&
              xc_hypercall_bounce_pre(xch, scaling_available_governors) )
             goto unlock_3;
 
         set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
-        set_xen_guest_handle(sys_para->scaling_available_frequencies, scaling_available_frequencies);
+        if ( user_para->freq_num )
+            set_xen_guest_handle(sys_para->scaling_available_frequencies, scaling_available_frequencies);
         if ( user_para->gov_num )
             set_xen_guest_handle(sys_para->scaling_available_governors,
                                  scaling_available_governors);
@@ -301,7 +302,8 @@ unlock_4:
     if ( user_para->gov_num )
         xc_hypercall_bounce_post(xch, scaling_available_governors);
 unlock_3:
-    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
+    if ( user_para->freq_num )
+        xc_hypercall_bounce_post(xch, scaling_available_frequencies);
 unlock_2:
     xc_hypercall_bounce_post(xch, affected_cpus);
 unlock_1:
diff --git a/tools/misc/xenpm.c b/tools/misc/xenpm.c
index a7aeaea35e..a521800504 100644
--- a/tools/misc/xenpm.c
+++ b/tools/misc/xenpm.c
@@ -539,7 +539,7 @@ static void signal_int_handler(int signo)
                         res / 1000000UL, 100UL * res / (double)sum_px[i]);
             }
         }
-        if ( px_cap && avgfreq[i] )
+        if ( avgfreq[i] )
             printf("  Avg freq\t%d\tKHz\n", avgfreq[i]);
     }
 
@@ -926,7 +926,8 @@ static int show_cpufreq_para_by_cpuid(xc_interface *xc_handle, int cpuid)
             ret = -ENOMEM;
             goto out;
         }
-        if (!(p_cpufreq->scaling_available_frequencies =
+        if (p_cpufreq->freq_num &&
+            !(p_cpufreq->scaling_available_frequencies =
               malloc(p_cpufreq->freq_num * sizeof(uint32_t))))
         {
             fprintf(stderr,
diff --git a/xen/drivers/acpi/pmstat.c b/xen/drivers/acpi/pmstat.c
index c8e00766a6..7f432be761 100644
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -202,7 +202,7 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op)
     pmpt = processor_pminfo[op->cpuid];
     policy = per_cpu(cpufreq_cpu_policy, op->cpuid);
 
-    if ( !pmpt || !pmpt->perf.states ||
+    if ( !pmpt || ((pmpt->init & XEN_PX_INIT) && !pmpt->perf.states) ||
          !policy || !policy->governor )
         return -EINVAL;
 
@@ -229,17 +229,20 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op)
     if ( ret )
         return ret;
 
-    if ( !(scaling_available_frequencies =
-           xzalloc_array(uint32_t, op->u.get_para.freq_num)) )
-        return -ENOMEM;
-    for ( i = 0; i < op->u.get_para.freq_num; i++ )
-        scaling_available_frequencies[i] =
-                        pmpt->perf.states[i].core_frequency * 1000;
-    ret = copy_to_guest(op->u.get_para.scaling_available_frequencies,
-                   scaling_available_frequencies, op->u.get_para.freq_num);
-    xfree(scaling_available_frequencies);
-    if ( ret )
-        return ret;
+    if ( op->u.get_para.freq_num )
+    {
+        if ( !(scaling_available_frequencies =
+               xzalloc_array(uint32_t, op->u.get_para.freq_num)) )
+            return -ENOMEM;
+        for ( i = 0; i < op->u.get_para.freq_num; i++ )
+            scaling_available_frequencies[i] =
+                            pmpt->perf.states[i].core_frequency * 1000;
+        ret = copy_to_guest(op->u.get_para.scaling_available_frequencies,
+                    scaling_available_frequencies, op->u.get_para.freq_num);
+        xfree(scaling_available_frequencies);
+        if ( ret )
+            return ret;
+    }
 
     op->u.get_para.cpuinfo_cur_freq =
         cpufreq_driver.get ? alternative_call(cpufreq_driver.get, op->cpuid)
@@ -465,7 +468,8 @@ int do_pm_op(struct xen_sysctl_pm_op *op)
     switch ( op->cmd & PM_PARA_CATEGORY_MASK )
     {
     case CPUFREQ_PARA:
-        if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
+        if ( !(xen_processor_pmbits & (XEN_PROCESSOR_PM_PX |
+                                       XEN_PROCESSOR_PM_CPPC)) )
             return -ENODEV;
         if ( !pmpt || !(pmpt->init & (XEN_PX_INIT | XEN_CPPC_INIT)) )
             return -EINVAL;
-- 
2.34.1
