
[RFC 23/38] x86/hyperlaunch: add domu acpi construction


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: "Daniel P. Smith" <dpsmith@xxxxxxxxxxxxxxxxxxxx>
  • Date: Sat, 19 Apr 2025 18:08:05 -0400
  • Cc: "Daniel P. Smith" <dpsmith@xxxxxxxxxxxxxxxxxxxx>, jason.andryuk@xxxxxxx, stefano.stabellini@xxxxxxx, agarciav@xxxxxxx, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Sat, 19 Apr 2025 22:21:26 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Introduce hvm_setup_acpi(), which constructs the ACPI tables for a general
HVM domU guest: an RSDP, an XSDT, and a MADT carrying one Local APIC entry
per vCPU. The tables are built in a scratch buffer and copied into the
guest's E820 ACPI region, stealing a range of guest RAM below 4GB when no
such region is present, and the RSDP address is written into the start info.

Signed-off-by: Daniel P. Smith <dpsmith@xxxxxxxxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/dom_build.c | 213 ++++++++++++++++++++++++++++++++++-
 1 file changed, 212 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/dom_build.c b/xen/arch/x86/hvm/dom_build.c
index f997f293f329..c482d5c2d974 100644
--- a/xen/arch/x86/hvm/dom_build.c
+++ b/xen/arch/x86/hvm/dom_build.c
@@ -408,6 +408,214 @@ int __init hvm_add_mem_range(
     return 0;
 }
 
+static int __init hvm_setup_acpi_madt(
+    struct domain *d, struct acpi_table_madt *madt)
+{
+    struct acpi_table_header *table;
+    struct acpi_madt_local_apic *lapic;
+    acpi_status status;
+    unsigned long size = hvm_size_acpi_madt(d);
+    unsigned int i;
+
+    /* Copy the native MADT table header. */
+    status = acpi_get_table(ACPI_SIG_MADT, 0, &table);
+    if ( !ACPI_SUCCESS(status) )
+    {
+        printk("Failed to get MADT ACPI table, aborting.\n");
+        return -EINVAL;
+    }
+    madt->header = *table;
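+    /* Expose the local APIC at its architectural default base address. */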
+    madt->address = APIC_DEFAULT_PHYS_BASE;
+    /*
+     * NB: this is currently set to 4, which is the revision in the ACPI
+     * spec 6.1. Sadly ACPICA doesn't provide revision numbers for the
+     * tables described in the headers.
+     */
+    madt->header.revision = min_t(unsigned char, table->revision, 4);
+
+    lapic = (void *)(madt + 1);
+
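+    /*
+     * Emit one Local APIC entry per vCPU.  The APIC IDs used here are
+     * assumed to match Xen's vLAPIC ID assignment of 2 * vcpu_id.
+     */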
+    for ( i = 0; i < d->max_vcpus; i++ )
+    {
+        lapic->header.type = ACPI_MADT_TYPE_LOCAL_APIC;
+        lapic->header.length = sizeof(*lapic);
+        lapic->id = i * 2;
+        lapic->processor_id = i;
+        lapic->lapic_flags = ACPI_MADT_ENABLED;
+
+        lapic++;
+    }
+
+    madt->header.length = size;
+    /*
+     * Calling acpi_tb_checksum here is a layering violation, but
+     * introducing a wrapper for such simple usage seems overkill.  Note
+     * that subtracting the byte sum of the table (which still contains the
+     * stale native checksum) yields a checksum that makes the table sum to
+     * zero.
+     */
+    madt->header.checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, madt), size);
+
+    return 0;
+}
+
+static int __init hvm_setup_acpi_xsdt(
+    struct domain *d, struct acpi_table_xsdt *xsdt, paddr_t madt_addr)
+{
+    struct acpi_table_header *table;
+    struct acpi_table_rsdp *rsdp;
+    unsigned long size = hvm_size_acpi_xsdt(d);
+    paddr_t xsdt_paddr;
+
+    /*
+     * Restore the original DMAR table signature.  Since the DMAR is
+     * filtered out of the new XSDT presented to the guest, it is no longer
+     * necessary to keep its signature zapped.
+     */
+    acpi_dmar_reinstate();
+
+    /* Copy the native XSDT table header. */
+    rsdp = acpi_os_map_memory(acpi_os_get_root_pointer(), sizeof(*rsdp));
+    if ( !rsdp )
+    {
+        printk("Unable to map RSDP\n");
+        return -EINVAL;
+    }
+    xsdt_paddr = rsdp->xsdt_physical_address;
+    acpi_os_unmap_memory(rsdp, sizeof(*rsdp));
+    table = acpi_os_map_memory(xsdt_paddr, sizeof(*table));
+    if ( !table )
+    {
+        printk("Unable to map XSDT\n");
+        return -EINVAL;
+    }
+    xsdt->header = *table;
+    acpi_os_unmap_memory(table, sizeof(*table));
+
+    /* Add the custom MADT. */
+    xsdt->table_offset_entry[0] = madt_addr;
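+    /*
+     * Note: the MADT is the only entry in this XSDT; no native tables are
+     * passed through to the domU.
+     */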
+
+    xsdt->header.revision = 1;
+    xsdt->header.length = size;
+    /*
+     * Calling acpi_tb_checksum here is a layering violation, but
+     * introducing a wrapper for such simple usage seems overkill.
+     */
+    xsdt->header.checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, xsdt), size);
+
+    return 0;
+}
+
+static int __init hvm_alloc_acpi_region(
+    struct domain *d, void **region, unsigned long size, paddr_t *addr)
+{
+    unsigned int i;
+
+    *addr = 0;
+
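+    /*
+     * Use the first E820_ACPI region if it is large enough; otherwise fall
+     * back to stealing a range of guest RAM below 4GB.
+     */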
+    for ( i = 0; i < d->arch.nr_e820; i++ )
+    {
+        if ( d->arch.e820[i].type == E820_ACPI )
+        {
+            if ( d->arch.e820[i].size < size )
+                break;
+
+            *addr = d->arch.e820[i].addr;
+            break;
+        }
+    }
+
+    /* The e820 setup did not allocate ACPI region, steal one instead. */
+    if ( *addr == 0 )
+    {
+        if ( hvm_steal_ram(d, size, 0, GB(4), addr) )
+        {
+            printk("Unable to allocate guest RAM for RSDP\n");
+            return -ENOMEM;
+        }
+        if ( hvm_add_mem_range(d, *addr, *addr + size, E820_ACPI) )
+        {
+            printk("Unable to add RSDP region to memory map\n");
+            return -EFAULT;
+        }
+    }
+
+    *region = xzalloc_bytes(size);
+    if ( !*region )
+        return -ENOMEM;
+
+    return 0;
+}
+
+static int __init hvm_setup_acpi(struct domain *d, paddr_t start_info)
+{
+    paddr_t rsdp_paddr, xsdt_paddr, madt_paddr;
+    struct acpi_table_rsdp *rsdp;
+    unsigned long size = hvm_size_acpi_region(d);
+    void *table;
+    int rc;
+
+    rc = hvm_alloc_acpi_region(d, &table, size, &rsdp_paddr);
+    if ( rc < 0 )
+        return rc;
+
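+    /*
+     * The region is laid out as RSDP, then XSDT, then MADT.  It is built in
+     * a local buffer and copied into guest memory at rsdp_paddr as a whole.
+     */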
+    /* RSDP */
+    rsdp = table;
+    xsdt_paddr = rsdp_paddr + sizeof(struct acpi_table_rsdp);
+
+    *rsdp = (struct acpi_table_rsdp){
+        .signature = ACPI_SIG_RSDP,
+        .revision = 2,
+        .length = sizeof(struct acpi_table_rsdp),
+        .oem_id = "XenHL\0", /* Xen Hyperlaunch */
+        .xsdt_physical_address = xsdt_paddr,
+    };
+
+    rsdp->checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, rsdp),
+                                       ACPI_RSDP_REV0_SIZE);
+    rsdp->extended_checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, rsdp),
+                                                sizeof(*rsdp));
+
+    /* XSDT */
+    table += sizeof(struct acpi_table_rsdp);
+    madt_paddr = xsdt_paddr + hvm_size_acpi_xsdt(d);
+
+    rc = hvm_setup_acpi_xsdt(d, table, madt_paddr);
+    if ( rc )
+    {
+        printk("Unable to construct XSDT\n");
+        goto out;
+    }
+
+    /* MADT */
+    table += hvm_size_acpi_xsdt(d);
+    rc = hvm_setup_acpi_madt(d, table);
+    if ( rc )
+    {
+        printk("Unable to construct MADT\n");
+        goto out;
+    }
+
+    /* Copy ACPI region into guest memory. */
+    rc = hvm_copy_to_guest_phys(rsdp_paddr, rsdp, size, d->vcpu[0]);
+    if ( rc )
+    {
+        printk("Unable to copy RSDP into guest memory\n");
+        goto out;
+    }
+
+    /* Copy RSDP address to start_info. */
+    rc = hvm_copy_to_guest_phys(
+        start_info + offsetof(struct hvm_start_info, rsdp_paddr), &rsdp_paddr,
+        sizeof(((struct hvm_start_info *) 0)->rsdp_paddr), d->vcpu[0]);
+    if ( rc )
+        printk("Unable to copy RSDP address to start info\n");
+
+ out:
+    xfree(rsdp);
+
+    return rc;
+}
+
 static bool __init check_load_address(
     const struct domain *d, const struct elf_binary *elf)
 {
@@ -757,7 +965,10 @@ int __init dom_construct_pvh(struct boot_domain *bd)
         return rc;
     }
 
-    rc = dom0_pvh_setup_acpi(bd->d, start_info);
+    if ( is_control_domain(bd->d) || is_hardware_domain(bd->d) )
+        rc = dom0_pvh_setup_acpi(bd->d, start_info);
+    else
+        rc = hvm_setup_acpi(bd->d, start_info);
     if ( rc )
     {
         printk("Failed to setup Dom0 ACPI tables: %d\n", rc);
-- 
2.30.2