
[PATCH v2 4/4] xen/cppc: introduce cppc data upload sub-hypercall


  • To: Juergen Gross <jgross@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>
  • From: Penny Zheng <Penny.Zheng@xxxxxxx>
  • Date: Thu, 5 Dec 2024 13:42:52 +0800
  • Cc: Ray Huang <Ray.Huang@xxxxxxx>, Xenia Ragiadakou <Xenia.Ragiadakou@xxxxxxx>, Jason Andryuk <jason.andryuk@xxxxxxx>, "Penny Zheng" <Penny.Zheng@xxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <linux-kernel@xxxxxxxxxxxxxxx>
  • Delivery-date: Thu, 05 Dec 2024 05:43:36 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Since Xen is incapable of parsing the dynamic ACPI tables itself, this commit
introduces a new sub-hypercall, XEN_PM_CPPC, through which Dom0 delivers the
CPPC performance capability data to the hypervisor.

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
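For reference, here is a minimal sketch of how the new XEN_PM_CPPC sub-op is
meant to be invoked from Dom0; it condenses what push_cppc_to_hypervisor() in
this patch does. The helper name upload_cppc_caps() is hypothetical and the
cppc_perf_caps values are assumed to have been obtained beforehand; all other
identifiers come from this patch and the existing Xen interface headers.

    #include <acpi/cppc_acpi.h>
    #include <xen/interface/platform.h>
    #include <asm/xen/hypercall.h>

    /* Hypothetical helper: hand one CPU's _CPC-derived capability data to Xen. */
    static int upload_cppc_caps(u32 acpi_id, const struct cppc_perf_caps *caps)
    {
            struct xen_platform_op op = {
                    .cmd               = XENPF_set_processor_pminfo,
                    .interface_version = XENPF_INTERFACE_VERSION,
                    .u.set_pminfo.id   = acpi_id,
                    .u.set_pminfo.type = XEN_PM_CPPC,
            };

            /* Copy the CPPC performance caps into the hypercall payload. */
            op.u.set_pminfo.cppc_data.highest_perf          = caps->highest_perf;
            op.u.set_pminfo.cppc_data.nominal_perf          = caps->nominal_perf;
            op.u.set_pminfo.cppc_data.lowest_perf           = caps->lowest_perf;
            op.u.set_pminfo.cppc_data.lowest_nonlinear_perf = caps->lowest_nonlinear_perf;
            op.u.set_pminfo.cppc_data.lowest_freq           = caps->lowest_freq;
            op.u.set_pminfo.cppc_data.nominal_freq          = caps->nominal_freq;

            /* Returns 0 on success, or a negative errno from the hypervisor. */
            return HYPERVISOR_platform_op(&op);
    }
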
 drivers/acpi/cppc_acpi.c         |  1 +
 drivers/xen/xen-acpi-processor.c | 89 +++++++++++++++++++++++++++++++-
 include/acpi/processor.h         |  1 +
 include/xen/interface/platform.h | 11 ++++
 4 files changed, 101 insertions(+), 1 deletion(-)

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3a436591da07..3570a52a5dbd 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -860,6 +860,7 @@ static int acpi_cppc_processor_parse(struct acpi_processor *pr, struct cpc_desc
                cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
        }
 
+       pr->flags.has_cpc = 1;
        pr_debug("Parsed _CPC entry for CPU: %d\n", pr->acpi_id);
        kfree(output.pointer);
        return 0;
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index e9f38f171240..8a39e46c1ebc 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -25,6 +25,7 @@
 #include <xen/xen.h>
 #include <xen/interface/platform.h>
 #include <asm/xen/hypercall.h>
+#include <acpi/cppc_acpi.h>
 
 static int no_hypercall;
 MODULE_PARM_DESC(off, "Inhibit the hypercall.");
@@ -45,8 +46,12 @@ static unsigned long *acpi_ids_done;
 static unsigned long *acpi_id_present;
 /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
 static unsigned long *acpi_id_cst_present;
+/* And if there is an _CPC entry for the ACPI IDs */
+static unsigned long *acpi_id_cpc_present;
 /* Which ACPI P-State dependencies for a enumerated processor */
 static struct acpi_psd_package *acpi_psd;
+/* ACPI CPPC structures for an enumerated processor */
+static struct cppc_perf_caps *acpi_cppc_data;
 
 static bool pr_initialized;
 
@@ -208,6 +213,44 @@ static int xen_copy_pct_data(struct acpi_pct_register *pct,
        dst_pct->address = pct->address;
        return 0;
 }
+static int push_cppc_to_hypervisor(struct acpi_processor *_pr)
+{
+       int ret = 0;
+       struct xen_platform_op op = {
+               .cmd            = XENPF_set_processor_pminfo,
+               .interface_version  = XENPF_INTERFACE_VERSION,
+               .u.set_pminfo.id    = _pr->acpi_id,
+               .u.set_pminfo.type  = XEN_PM_CPPC,
+       };
+       struct cppc_perf_caps *cppc_perf = acpi_cppc_data + _pr->acpi_id;
+
+       op.u.set_pminfo.cppc_data.highest_perf = cppc_perf->highest_perf;
+       op.u.set_pminfo.cppc_data.lowest_perf = cppc_perf->lowest_perf;
+       op.u.set_pminfo.cppc_data.nominal_perf = cppc_perf->nominal_perf;
+       op.u.set_pminfo.cppc_data.lowest_nonlinear_perf = cppc_perf->lowest_nonlinear_perf;
+       op.u.set_pminfo.cppc_data.lowest_freq = cppc_perf->lowest_freq;
+       op.u.set_pminfo.cppc_data.nominal_freq = cppc_perf->nominal_freq;
+
+       if (!no_hypercall)
+               ret = HYPERVISOR_platform_op(&op);
+
+       if (!ret) {
+               pr_debug("ACPI CPU%u - CPPC uploaded.\n", _pr->acpi_id);
+               pr_debug("     highest_perf: %d\n", cppc_perf->highest_perf);
+               pr_debug("     lowest_perf: %d\n", cppc_perf->lowest_perf);
+               pr_debug("     lowest_nonlinear_perf: %d\n", cppc_perf->lowest_nonlinear_perf);
+               pr_debug("     nominal_perf: %d\n", cppc_perf->nominal_perf);
+               pr_debug("     lowest_freq: %d MHz\n", cppc_perf->lowest_freq);
+               pr_debug("     nominal_freq: %d MHz\n", cppc_perf->nominal_freq);
+       } else if ((ret != -EINVAL) && (ret != -ENOSYS))
+               /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
+                * table is referencing a non-existing CPU - which can happen
+                * with broken ACPI tables. */
+               pr_warn("(_CPC): Hypervisor error (%d) for ACPI CPU%u\n",
+                       ret, _pr->acpi_id);
+
+       return ret;
+}
 static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
 {
        int ret = 0;
@@ -284,6 +327,9 @@ static int upload_pm_data(struct acpi_processor *_pr)
        if (_pr->flags.power)
                err = push_cxx_to_hypervisor(_pr);
 
+       if (_pr->flags.has_cpc)
+               err |= push_cppc_to_hypervisor(_pr);
+
        if (_pr->performance && _pr->performance->states)
                err |= push_pxx_to_hypervisor(_pr);
 
@@ -488,6 +534,7 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        struct acpi_buffer cst_buf = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_buffer cpc_buf = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_io_address pblk = 0;
 
        status = acpi_get_type(handle, &acpi_type);
@@ -567,11 +614,20 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
        /* .. and it has a C-state */
        __set_bit(acpi_id, acpi_id_cst_present);
 
+       status = acpi_evaluate_object(handle, "_CPC", NULL, &cpc_buf);
+       if (ACPI_FAILURE(status)) {
+               return AE_OK;
+       }
+       kfree(cpc_buf.pointer);
+
+       /* .. and it has a _CPC entry */
+       __set_bit(acpi_id, acpi_id_cpc_present);
+
        return AE_OK;
 }
 static int check_acpi_ids(struct acpi_processor *pr_backup)
 {
-       if (acpi_id_present && acpi_id_cst_present)
+       if (acpi_id_present && acpi_id_cst_present && acpi_id_cpc_present)
                /* OK, done this once .. skip to uploading */
                goto upload;
 
@@ -588,11 +644,19 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
                return -ENOMEM;
        }
 
+       acpi_id_cpc_present = bitmap_zalloc(nr_acpi_bits, GFP_KERNEL);
+       if (!acpi_id_cpc_present) {
+               bitmap_free(acpi_id_present);
+               bitmap_free(acpi_id_cst_present);
+               return -ENOMEM;
+       }
+
        acpi_psd = kcalloc(nr_acpi_bits, sizeof(struct acpi_psd_package),
                           GFP_KERNEL);
        if (!acpi_psd) {
                bitmap_free(acpi_id_present);
                bitmap_free(acpi_id_cst_present);
+               bitmap_free(acpi_id_cpc_present);
                return -ENOMEM;
        }
 
@@ -608,6 +672,12 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
                        pr_backup->acpi_id = i;
                        /* Mask out C-states if there are no _CST or PBLK */
                        pr_backup->flags.power = test_bit(i, acpi_id_cst_present);
+                       /* Mask out the flag if there is no _CPC */
+                       pr_backup->flags.has_cpc = test_bit(i, acpi_id_cpc_present);
+                       if (pr_backup->flags.has_cpc) {
+                               if (xen_processor_get_perf_caps(pr_backup, acpi_cppc_data + i))
+                                       return -EINVAL;
+                       }
                        /* num_entries is non-zero if we evaluated _PSD */
                        if (acpi_psd[i].num_entries) {
                                memcpy(&pr_backup->performance->domain_info,
@@ -726,6 +796,15 @@ static int __init xen_acpi_processor_init(void)
                bitmap_free(acpi_ids_done);
                return -ENOMEM;
        }
+
+       acpi_cppc_data = kcalloc(nr_acpi_bits, sizeof(struct cppc_perf_caps),
+                               GFP_KERNEL);
+       if (!acpi_cppc_data) {
+               pr_debug("Memory allocation error for acpi_cppc_data\n");
+               rc = -ENOMEM;
+               goto err1_out;
+       }
+
        for_each_possible_cpu(i) {
                if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
@@ -751,6 +830,11 @@ static int __init xen_acpi_processor_init(void)
                rc = acpi_processor_get_performance_info(pr);
                if (rc)
                        goto err_out;
+
+               pr->flags.pcc_unsupported = true;
+               rc = xen_processor_get_perf_caps(pr, acpi_cppc_data + i);
+               if (rc)
+                       goto err_out;
        }
 
        rc = xen_upload_processor_pm_data();
@@ -766,6 +850,8 @@ static int __init xen_acpi_processor_init(void)
 
 err_out:
        /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+       kfree(acpi_cppc_data);
+err1_out:
        free_acpi_perf_data();
        bitmap_free(acpi_ids_done);
        return rc;
@@ -779,6 +865,7 @@ static void __exit xen_acpi_processor_exit(void)
        bitmap_free(acpi_id_present);
        bitmap_free(acpi_id_cst_present);
        kfree(acpi_psd);
+       kfree(acpi_cppc_data);
        for_each_possible_cpu(i)
                acpi_processor_unregister_performance(i);
 
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 18499cc11366..66492f5d68a8 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -214,6 +214,7 @@ struct acpi_processor_flags {
        u8 bm_control:1;
        u8 bm_check:1;
        u8 has_cst:1;
+       u8 has_cpc:1;
        u8 pcc_unsupported:1;
        u8 has_lpi:1;
        u8 power_setup_done:1;
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 79a443c65ea9..e11bb9443dc0 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -319,6 +319,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_getidletime_t);
 #define XEN_PM_PX   1
 #define XEN_PM_TX   2
 #define XEN_PM_PDC  3
+#define XEN_PM_CPPC 4
 /* Px sub info type */
 #define XEN_PX_PCT   1
 #define XEN_PX_PSS   2
@@ -384,6 +385,15 @@ struct xen_processor_px {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_processor_px);
 
+struct xen_processor_cppc {
+       uint32_t highest_perf;
+       uint32_t nominal_perf;
+       uint32_t lowest_perf;
+       uint32_t lowest_nonlinear_perf;
+       uint32_t lowest_freq;
+       uint32_t nominal_freq;
+};
+
 struct xen_psd_package {
        uint64_t num_entries;
        uint64_t revision;
@@ -412,6 +422,7 @@ struct xenpf_set_processor_pminfo {
                struct xen_processor_power          power;/* Cx: _CST/_CSD */
                struct xen_processor_performance    perf; /* Px: _PPC/_PCT/_PSS/_PSD */
                GUEST_HANDLE(uint32_t)              pdc;
+               struct xen_processor_cppc           cppc_data; /* _CPC */
        };
 };
 DEFINE_GUEST_HANDLE_STRUCT(xenpf_set_processor_pminfo);
-- 
2.34.1




 

