[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC 19/38] x86/hyperlaunch: add domu memory map construction


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: "Daniel P. Smith" <dpsmith@xxxxxxxxxxxxxxxxxxxx>
  • Date: Sat, 19 Apr 2025 18:08:01 -0400
  • Arc-authentication-results: i=1; mx.zohomail.com; dkim=pass header.i=apertussolutions.com; spf=pass smtp.mailfrom=dpsmith@xxxxxxxxxxxxxxxxxxxx; dmarc=pass header.from=<dpsmith@xxxxxxxxxxxxxxxxxxxx>
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zohomail.com; s=zohoarc; t=1745100540; h=Content-Transfer-Encoding:Cc:Cc:Date:Date:From:From:In-Reply-To:MIME-Version:Message-ID:References:Subject:Subject:To:To:Message-Id:Reply-To; bh=2DObGwETdwh7JUH3NaImfvU+NN7DFUDrg2HJnLwWc6A=; b=HAgOY8rb0aTpjt9jpcc8JTsXxQdP/tXtGgTf14h4A35Y1BD6tm0EP1lKs24DCbLIqL332Iazdt7b8V3ylm/MnhPfLg+1QK7kjNspHIISCIIzllaKSIIdY8aPcIxmRrJZc0B1FqBf7DQdyjmrOTmDPDiKWiD5Iby6gtbS0scO/KQ=
  • Arc-seal: i=1; a=rsa-sha256; t=1745100540; cv=none; d=zohomail.com; s=zohoarc; b=bCgIUk8XLrSRb2+emrpZXS/xMRZZzP/JIQEx1NDclO9uqk/xo1tzfOwUe5TWhyTHKmAUnTQxyNgwAnCptS/HRCwT6CVUODWnuJa8pIhv+1dRMx1Kd0N7jYeueGQWPMhOQHX/peSUAytuXsMQnIYK9ksVQxWCrW4G6nfgKuj+gwY=
  • Cc: "Daniel P. Smith" <dpsmith@xxxxxxxxxxxxxxxxxxxx>, jason.andryuk@xxxxxxx, stefano.stabellini@xxxxxxx, agarciav@xxxxxxx, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Sat, 19 Apr 2025 22:21:27 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Introduce hvm_setup_e820() that will build the e820 memory map for a general
domU. To populate the ACPI entry, ACPI table size helpers are introduced. A
conditional is added to the domain builder to select between calling
hvm_setup_e820() and dom0_pvh_setup_e820() depending on whether it is building
dom0 or a domU.

Signed-off-by: Daniel P. Smith <dpsmith@xxxxxxxxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/dom_build.c | 149 ++++++++++++++++++++++++++++++++++-
 1 file changed, 148 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/dom_build.c b/xen/arch/x86/hvm/dom_build.c
index 078e42447b0a..658d3d170e64 100644
--- a/xen/arch/x86/hvm/dom_build.c
+++ b/xen/arch/x86/hvm/dom_build.c
@@ -16,6 +16,7 @@
 
 #include <acpi/actables.h>
 
+#include <public/hvm/e820.h>
 #include <public/hvm/hvm_vcpu.h>
 
 #include <asm/bootinfo.h>
@@ -43,12 +44,158 @@ static void __hwdom_init pvh_setup_mmcfg(struct domain *d)
     }
 }
 
+/*
+ * Bytes needed for the domain's MADT: the fixed table header plus one
+ * local APIC entry per vCPU.
+ */
+static unsigned long __init hvm_size_acpi_madt(struct domain *d)
+{
+    unsigned long size = sizeof(struct acpi_table_madt);
+
+    size += sizeof(struct acpi_madt_local_apic) * d->max_vcpus;
+
+    return size;
+}
+
+/*
+ * Bytes needed for the domain's XSDT: the table header plus one 64-bit
+ * table pointer per table linked from it (currently just the MADT).
+ */
+static unsigned long __init hvm_size_acpi_xsdt(struct domain *d)
+{
+    unsigned long size = sizeof(struct acpi_table_xsdt);
+    /* Only adding the MADT table to the XSDT. */
+    unsigned int num_tables = 1;
+
+    /*
+     * No need to add or subtract anything because struct acpi_table_xsdt
+     * includes one array slot already.
+     *
+     * NOTE(review): the comment above and the arithmetic below disagree —
+     * since the struct already contains one table_offset_entry slot, adding
+     * num_tables further slots over-counts by one entry (8 bytes).  Harmless
+     * for sizing, but confirm which was intended.
+     */
+    size += num_tables * sizeof(uint64_t);
+
+    return size;
+}
+
+/*
+ * Total bytes for the domain's ACPI region (RSDP + XSDT + MADT),
+ * rounded up to a whole number of pages.
+ */
+static unsigned long __init hvm_size_acpi_region(struct domain *d)
+{
+    unsigned long size = sizeof(struct acpi_table_rsdp);
+
+    size += hvm_size_acpi_xsdt(d);
+    size += hvm_size_acpi_madt(d);
+
+    return ROUNDUP(size, PAGE_SIZE);
+}
+
+/*
+ * From xenguest lib.  These are guest frame numbers (PFNs), not physical
+ * addresses: the special region occupies the NR_SPECIAL_PAGES frames
+ * immediately below END_SPECIAL_REGION.
+ */
+#define END_SPECIAL_REGION   0xff000U
+#define NR_SPECIAL_PAGES     8
+#define START_SPECIAL_REGION (END_SPECIAL_REGION - NR_SPECIAL_PAGES)
+
+/* Indices of the individual special pages within the region. */
+#define SPECIALPAGE_PAGING   0
+#define SPECIALPAGE_ACCESS   1
+#define SPECIALPAGE_SHARING  2
+#define SPECIALPAGE_BUFIOREQ 3
+#define SPECIALPAGE_XENSTORE 4
+#define SPECIALPAGE_IOREQ    5
+#define SPECIALPAGE_IDENT_PT 6
+#define SPECIALPAGE_CONSOLE  7
+/* Map a special-page index to its guest PFN. */
+#define special_pfn(x)       (START_SPECIAL_REGION + (x))
+
+/*
+ * Allocation scheme, derived from xenlight/xenguest:
+ *
+ *                                  |  <4G MMIO Hole  |
+ * [ Low Mem ][ RDM Mem ][ >1M Mem ][ ACPI ][ Special ][ High Mem ]
+ *
+ * Construct d->arch.e820 for a domU being given nr_pages pages of RAM.
+ * Runs during boot-time domain construction; panics on any failure.
+ */
+static void __init hvm_setup_e820(struct domain *d, unsigned long nr_pages)
+{
+    const uint32_t lowmem_reserved_base = 0x9e000;
+    /*
+     * Reserved device-memory hole below 1MB (VGA/BIOS): 0xa0000-0x100000,
+     * matching the xenguest layout.  NOTE(review): was 0x60, which would
+     * reserve only 96 bytes — presumably a typo for 0x60000; confirm.
+     */
+    const uint32_t rdm_base = 0xa0000, rdm_size = 0x60000;
+    unsigned long low_pages, ext_pages, mmio_pages, acpi_pages, high_pages = 0;
+    unsigned long max_ext_pages =
+        (HVM_BELOW_4G_MMIO_START - MB(1)) >> PAGE_SHIFT;
+    unsigned long page_count = 0;
+    unsigned int nr = 0, e820_entries = 5;
+
+    /* low pages: below 1MB */
+    low_pages = lowmem_reserved_base >> PAGE_SHIFT;
+
+    acpi_pages = hvm_size_acpi_region(d) >> PAGE_SHIFT;
+    mmio_pages = acpi_pages + NR_SPECIAL_PAGES;
+
+    /*
+     * The allocation must cover at least the low region plus the ACPI and
+     * special pages, otherwise the (unsigned) subtraction below would wrap.
+     */
+    if ( nr_pages < low_pages + mmio_pages )
+        panic("Insufficient memory for HVM/PVH domain (%pd)\n", d);
+
+    /* ext pages: from 1MB to the start of the MMIO hole */
+    ext_pages = nr_pages - (low_pages + mmio_pages);
+    if ( ext_pages > max_ext_pages )
+        ext_pages = max_ext_pages;
+
+    /* high pages: above 4GB */
+    if ( nr_pages > (low_pages + mmio_pages + ext_pages) )
+        high_pages = nr_pages - (low_pages + mmio_pages + ext_pages);
+
+    /* If we should have a highmem range, add one more e820 entry */
+    if ( high_pages )
+        e820_entries++;
+
+    ASSERT(e820_entries < E820MAX);
+
+    d->arch.e820 = xzalloc_array(struct e820entry, e820_entries);
+    if ( !d->arch.e820 )
+        panic("Unable to allocate memory for boot domain e820 map\n");
+
+    /* usable: Low memory */
+    d->arch.e820[nr].addr = 0x000000;
+    d->arch.e820[nr].size = low_pages << PAGE_SHIFT;
+    d->arch.e820[nr].type = E820_RAM;
+    page_count += d->arch.e820[nr].size >> PAGE_SHIFT;
+    nr++;
+
+    /* reserved: lowmem reserved device memory (not counted as RAM) */
+    d->arch.e820[nr].addr = rdm_base;
+    d->arch.e820[nr].size = rdm_size;
+    d->arch.e820[nr].type = E820_RESERVED;
+    nr++;
+
+    /* usable: extended memory from 1MB */
+    d->arch.e820[nr].addr = 0x100000;
+    d->arch.e820[nr].size = ext_pages << PAGE_SHIFT;
+    d->arch.e820[nr].type = E820_RAM;
+    page_count += d->arch.e820[nr].size >> PAGE_SHIFT;
+    nr++;
+
+    /* reserved: ACPI entry, ACPI_INFO_PHYSICAL_ADDRESS */
+    d->arch.e820[nr].addr = 0xFC000000;
+    d->arch.e820[nr].size = acpi_pages << PAGE_SHIFT;
+    d->arch.e820[nr].type = E820_ACPI;
+    page_count += d->arch.e820[nr].size >> PAGE_SHIFT;
+    nr++;
+
+    /* reserved: HVM special pages, X86_HVM_END_SPECIAL_REGION */
+    d->arch.e820[nr].addr = START_SPECIAL_REGION << PAGE_SHIFT;
+    d->arch.e820[nr].size = NR_SPECIAL_PAGES << PAGE_SHIFT;
+    d->arch.e820[nr].type = E820_RESERVED;
+    page_count += d->arch.e820[nr].size >> PAGE_SHIFT;
+    nr++;
+
+    /* usable: highmem */
+    if ( high_pages )
+    {
+        d->arch.e820[nr].addr = 0x100000000;
+        d->arch.e820[nr].size = high_pages << PAGE_SHIFT;
+        d->arch.e820[nr].type = E820_RAM;
+        page_count += d->arch.e820[nr].size >> PAGE_SHIFT;
+        nr++;
+    }
+
+    d->arch.nr_e820 = nr;
+
+    /* Every page handed to the domain must appear in exactly one entry. */
+    ASSERT(nr == e820_entries);
+    ASSERT(nr_pages == page_count);
+}
+
 static void __init pvh_init_p2m(struct boot_domain *bd)
 {
     unsigned long nr_pages = dom_compute_nr_pages(bd, NULL);
     bool preempted;
 
-    dom0_pvh_setup_e820(bd->d, nr_pages);
+    if ( bd->capabilities & (BUILD_CAPS_CONTROL | BUILD_CAPS_HARDWARE) )
+        dom0_pvh_setup_e820(bd->d, nr_pages);
+    else
+        hvm_setup_e820(bd->d, nr_pages);
+
     do {
         preempted = false;
         paging_set_allocation(bd->d, dom_paging_pages(bd, nr_pages),
-- 
2.30.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.