
[PATCH v1 11/11] xen/cpufreq: Adapt SET/GET_CPUFREQ_CPPC xen_sysctl_pm_op for amd-pstate driver


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Penny Zheng <Penny.Zheng@xxxxxxx>
  • Date: Tue, 3 Dec 2024 16:35:35 +0800
  • Cc: <stefano.stabellini@xxxxxxx>, <Ray.Huang@xxxxxxx>, <Xenia.Ragiadakou@xxxxxxx>, <Jason.Andryuk@xxxxxxx>, <penny.zheng@xxxxxxx>, Penny Zheng <Penny.Zheng@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, "Andrew Cooper" <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Tue, 03 Dec 2024 08:38:21 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Introduce helpers set_amd_cppc_para() and get_amd_cppc_para() to set and
get CPPC-related parameters for the amd-pstate/amd-pstate-epp drivers,
wiring them up to the SET/GET_CPUFREQ_CPPC xen_sysctl_pm_op sub-ops.
While there, skip filling governor-related information in
get_cpufreq_para() when the driver runs in hardware-autonomous mode
(HWP or amd-pstate-epp).

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
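A quick caller-side illustration of the contract enforced by
set_amd_cppc_para() (hypothetical toolstack snippet, not part of this
patch; the sysctl plumbing is elided):

    /*
     * Request the BALANCE preset, but cap the maximum performance level.
     * set_amd_cppc_para() rejects any non-zero field whose
     * XEN_SYSCTL_CPPC_SET_* bit is clear, rejects SET_DESIRED combined
     * with a preset, and rejects SET_ACT_WINDOW altogether.
     */
    struct xen_set_cppc_para set_cppc = {
        .set_params = XEN_SYSCTL_CPPC_SET_PRESET_BALANCE |
                      XEN_SYSCTL_CPPC_SET_MAXIMUM,
        .maximum    = 128,   /* overrides the preset's highest_perf */
    };

Routed through the SET_CPUFREQ_CPPC sub-op, the above resolves to
amd_pstate_write_request(cpu, lowest_perf, 0 /* des_perf */, 128,
CPPC_ENERGY_PERF_BALANCE).
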
 xen/arch/x86/acpi/cpufreq/amd-pstate.c | 120 ++++++++++++++++++++++++++
 xen/drivers/acpi/pmstat.c              |  24 ++++-
 xen/include/acpi/cpufreq/cpufreq.h     |   5 ++
 3 files changed, 145 insertions(+), 4 deletions(-)
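
For testing, the existing xenpm plumbing from the earlier HWP work
should exercise both paths (sub-command spellings to the best of my
knowledge; please double-check against your xenpm):

    # Read back the limits reported by get_amd_cppc_para():
    xenpm get-cpufreq-para 0
    # Exercise set_amd_cppc_para() with a preset plus an override:
    xenpm set-cpufreq-cppc 0 balance maximum:128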

diff --git a/xen/arch/x86/acpi/cpufreq/amd-pstate.c b/xen/arch/x86/acpi/cpufreq/amd-pstate.c
index 40ecd04259..2b1f6c7731 100644
--- a/xen/arch/x86/acpi/cpufreq/amd-pstate.c
+++ b/xen/arch/x86/acpi/cpufreq/amd-pstate.c
@@ -29,6 +29,8 @@
 
 static bool __ro_after_init opt_cpufreq_active = false;
 
+static bool __ro_after_init amd_pstate_in_use;
+
 struct amd_pstate_drv_data
 {
     struct xen_processor_cppc *cppc_data;
@@ -489,6 +491,118 @@ static int cf_check amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
     return amd_pstate_epp_update_limit(policy);
 }
 
+int get_amd_cppc_para(unsigned int cpu,
+                      struct xen_cppc_para *cppc_para)
+{
+    struct amd_pstate_drv_data *data = per_cpu(amd_pstate_drv_data, cpu);
+
+    if ( data == NULL )
+        return -ENODATA;
+
+    cppc_para->features         = 0;
+    cppc_para->lowest           = data->hw.lowest_perf;
+    cppc_para->lowest_nonlinear = data->hw.lowest_nonlinear_perf;
+    cppc_para->nominal          = data->hw.nominal_perf;
+    cppc_para->highest          = data->hw.highest_perf;
+    cppc_para->minimum          = data->req.min_perf;
+    cppc_para->maximum          = data->req.max_perf;
+    cppc_para->desired          = data->req.des_perf;
+    cppc_para->energy_perf      = data->req.epp;
+
+    return 0;
+}
+
+int set_amd_cppc_para(struct cpufreq_policy *policy,
+                      struct xen_set_cppc_para *set_cppc)
+{
+    unsigned int cpu = policy->cpu;
+    struct amd_pstate_drv_data *data = per_cpu(amd_pstate_drv_data, cpu);
+    uint8_t max_perf, min_perf, des_perf;
+    int epp = -1;
+
+    if ( data == NULL )
+        return -ENOENT;
+
+    /* Validate all parameters - Disallow reserved bits. */
+    if ( set_cppc->minimum > 255 || set_cppc->maximum > 255 ||
+         set_cppc->desired > 255 || set_cppc->energy_perf > 255 )
+        return -EINVAL;
+
+    /* Only allow values if params bit is set. */
+    if ( (!(set_cppc->set_params & XEN_SYSCTL_CPPC_SET_DESIRED) &&
+          set_cppc->desired) ||
+         (!(set_cppc->set_params & XEN_SYSCTL_CPPC_SET_MINIMUM) &&
+          set_cppc->minimum) ||
+         (!(set_cppc->set_params & XEN_SYSCTL_CPPC_SET_MAXIMUM) &&
+          set_cppc->maximum) ||
+         (!(set_cppc->set_params & XEN_SYSCTL_CPPC_SET_ENERGY_PERF) &&
+          set_cppc->energy_perf) )
+        return -EINVAL;
+
+    /* Activity window not supported */
+    if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_ACT_WINDOW )
+        return -EINVAL;
+
+    /* Return if there is nothing to do. */
+    if ( set_cppc->set_params == 0 )
+        return 0;
+
+    /* Apply presets */
+    switch ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_PRESET_MASK )
+    {
+    case XEN_SYSCTL_CPPC_SET_PRESET_POWERSAVE:
+        if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_DESIRED )
+            return -EINVAL;
+        min_perf = data->hw.lowest_perf;
+        max_perf = data->hw.highest_perf;
+        epp = CPPC_ENERGY_PERF_MAX_POWERSAVE;
+        des_perf = 0;
+        break;
+
+    case XEN_SYSCTL_CPPC_SET_PRESET_PERFORMANCE:
+        if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_DESIRED )
+            return -EINVAL;
+        min_perf = data->hw.highest_perf;
+        max_perf = data->hw.highest_perf;
+        epp = CPPC_ENERGY_PERF_MAX_PERFORMANCE;
+        des_perf = 0;
+        break;
+
+    case XEN_SYSCTL_CPPC_SET_PRESET_BALANCE:
+        if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_DESIRED )
+            return -EINVAL;
+        min_perf = data->hw.lowest_perf;
+        max_perf = data->hw.highest_perf;
+        epp = CPPC_ENERGY_PERF_BALANCE;
+        des_perf = 0;
+        break;
+
+    case XEN_SYSCTL_CPPC_SET_PRESET_NONE:
+        min_perf = data->hw.lowest_nonlinear_perf;
+        max_perf = data->hw.highest_perf;
+        des_perf = 0; /* Autonomous selection unless overridden below. */
+        break;
+
+    default:
+        return -EINVAL;
+    }
+
+    /* Further customize presets if needed */
+    if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_MINIMUM )
+        min_perf = set_cppc->minimum;
+
+    if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_MAXIMUM )
+        max_perf = set_cppc->maximum;
+
+    if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_ENERGY_PERF )
+        epp = set_cppc->energy_perf;
+
+    if ( set_cppc->set_params & XEN_SYSCTL_CPPC_SET_DESIRED )
+        des_perf = set_cppc->desired;
+
+    return amd_pstate_write_request(cpu, min_perf, des_perf, max_perf, epp);
+}
+
 static const struct cpufreq_driver __initconstrel amd_pstate_cpufreq_driver =
 {
     .name   = XEN_AMD_PSTATE_DRIVER_NAME,
@@ -507,11 +621,17 @@ static const struct cpufreq_driver __initconstrel amd_pstate_epp_driver =
     .exit       = amd_pstate_cpufreq_cpu_exit,
 };
 
+bool amd_pstate_active(void)
+{
+    return amd_pstate_in_use;
+}
+
 int __init amd_pstate_register_driver(void)
 {
     if ( !cpu_has_cppc )
         return -ENODEV;
 
+    amd_pstate_in_use = true;
     if ( !opt_cpufreq_active )
         return cpufreq_register_driver(&amd_pstate_cpufreq_driver);
     else
diff --git a/xen/drivers/acpi/pmstat.c b/xen/drivers/acpi/pmstat.c
index df309e27b4..63b4019c16 100644
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -198,6 +198,7 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op)
     char     *scaling_available_governors;
     struct list_head *pos;
     uint32_t cpu, i, j = 0;
+    bool hw_auto = false;
 
     pmpt = processor_pminfo[op->cpuid];
     policy = per_cpu(cpufreq_cpu_policy, op->cpuid);
@@ -258,7 +259,19 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op)
          !strncmp(op->u.get_para.scaling_driver, XEN_HWP_DRIVER_NAME,
                   CPUFREQ_NAME_LEN) )
         ret = get_hwp_para(policy->cpu, &op->u.get_para.u.cppc_para);
-    else
+    else if ( !strncmp(op->u.get_para.scaling_driver, XEN_AMD_PSTATE_DRIVER_NAME,
+                       CPUFREQ_NAME_LEN) ||
+              !strncmp(op->u.get_para.scaling_driver, XEN_AMD_PSTATE_EPP_DRIVER_NAME,
+                       CPUFREQ_NAME_LEN) )
+        ret = get_amd_cppc_para(policy->cpu, &op->u.get_para.u.cppc_para);
+
+    if ( !strncmp(op->u.get_para.scaling_driver, XEN_HWP_DRIVER_NAME,
+                 CPUFREQ_NAME_LEN) ||
+         !strncmp(op->u.get_para.scaling_driver, XEN_AMD_PSTATE_EPP_DRIVER_NAME,
+                 CPUFREQ_NAME_LEN) )
+        hw_auto = true;
+
+    if ( !hw_auto )
     {
         if ( !(scaling_available_governors =
                xzalloc_array(char, gov_num * CPUFREQ_NAME_LEN)) )
@@ -414,10 +427,13 @@ static int set_cpufreq_cppc(struct xen_sysctl_pm_op *op)
     if ( !policy || !policy->governor )
         return -ENOENT;
 
-    if ( !hwp_active() )
-        return -EOPNOTSUPP;
+    if ( hwp_active() )
+        return set_hwp_para(policy, &op->u.set_cppc);
+
+    if ( amd_pstate_active() )
+        return set_amd_cppc_para(policy, &op->u.set_cppc);
 
-    return set_hwp_para(policy, &op->u.set_cppc);
+    return -EOPNOTSUPP;
 }
 
 int do_pm_op(struct xen_sysctl_pm_op *op)
diff --git a/xen/include/acpi/cpufreq/cpufreq.h b/xen/include/acpi/cpufreq/cpufreq.h
index d2a74d8315..bbc783cb1b 100644
--- a/xen/include/acpi/cpufreq/cpufreq.h
+++ b/xen/include/acpi/cpufreq/cpufreq.h
@@ -286,6 +286,11 @@ int acpi_cpufreq_register(void);
 
 int amd_pstate_cmdline_parse(const char *s, const char *e);
 int amd_pstate_register_driver(void);
+bool amd_pstate_active(void);
+int get_amd_cppc_para(unsigned int cpu,
+                      struct xen_cppc_para *cppc_para);
+int set_amd_cppc_para(struct cpufreq_policy *policy,
+                      struct xen_set_cppc_para *set_cppc);
 
 extern uint16_t dmi_max_speed_mhz;
 
-- 
2.34.1