|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 2/2] xen/x86/pvh: copy ACPI tables to Dom0 instead of mapping
From: Stefano Stabellini <stefano.stabellini@xxxxxxx>
Mapping the ACPI tables to Dom0 PVH 1:1 leads to memory corruptions of
the tables in the guest. Instead, copy the tables to Dom0.
This is a workaround.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxx>
---
As mentioned in the cover letter, this is an RFC workaround as I don't
know the cause of the underlying problem. I do know that this patch
solves what would be otherwise a hang at boot when Dom0 PVH attempts to
parse ACPI tables.
---
xen/arch/x86/hvm/dom0_build.c | 107 +++++++++-------------------------
1 file changed, 27 insertions(+), 80 deletions(-)
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 5fde769863..a6037fc6ed 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -73,32 +73,6 @@ static void __init print_order_stats(const struct domain *d)
printk("order %2u allocations: %u\n", i, order_stats[i]);
}
-static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
- unsigned long nr_pages, const bool map)
-{
- int rc;
-
- for ( ; ; )
- {
- rc = map ? map_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn))
- : unmap_mmio_regions(d, _gfn(pfn), nr_pages, _mfn(pfn));
- if ( rc == 0 )
- break;
- if ( rc < 0 )
- {
- printk(XENLOG_WARNING
- "Failed to identity %smap [%#lx,%#lx) for d%d: %d\n",
- map ? "" : "un", pfn, pfn + nr_pages, d->domain_id, rc);
- break;
- }
- nr_pages -= rc;
- pfn += rc;
- process_pending_softirqs();
- }
-
- return rc;
-}
-
/* Populate a HVM memory range using the biggest possible order. */
static int __init pvh_populate_memory_range(struct domain *d,
unsigned long start,
@@ -967,6 +941,8 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr,
unsigned long size = sizeof(*xsdt);
unsigned int i, j, num_tables = 0;
int rc;
+ struct acpi_table_fadt fadt;
+ unsigned long fadt_addr = 0, dsdt_addr = 0, facs_addr = 0, fadt_size = 0;
struct acpi_table_header header = {
.signature = "XSDT",
.length = sizeof(struct acpi_table_header),
@@ -1013,10 +989,33 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr,
/* Copy the addresses of the rest of the allowed tables. */
for( i = 0, j = 1; i < acpi_gbl_root_table_list.count; i++ )
{
+ void *table;
+
+ pvh_steal_ram(d, tables[i].length, 0, GB(4), addr);
+ table = acpi_os_map_memory(tables[i].address, tables[i].length);
+ hvm_copy_to_guest_phys(*addr, table, tables[i].length, d->vcpu[0]);
+ pvh_add_mem_range(d, *addr, *addr + tables[i].length, E820_ACPI);
+
+ if ( !strncmp(tables[i].signature.ascii, ACPI_SIG_FADT, ACPI_NAME_SIZE) )
+ {
+ memcpy(&fadt, table, tables[i].length);
+ fadt_addr = *addr;
+ fadt_size = tables[i].length;
+ }
+ else if ( !strncmp(tables[i].signature.ascii, ACPI_SIG_DSDT, ACPI_NAME_SIZE) )
+ dsdt_addr = *addr;
+ else if ( !strncmp(tables[i].signature.ascii, ACPI_SIG_FACS, ACPI_NAME_SIZE) )
+ facs_addr = *addr;
+
if ( pvh_acpi_xsdt_table_allowed(tables[i].signature.ascii,
- tables[i].address, tables[i].length) )
- xsdt->table_offset_entry[j++] = tables[i].address;
+ tables[i].address, tables[i].length) )
+ xsdt->table_offset_entry[j++] = *addr;
+
+ acpi_os_unmap_memory(table, tables[i].length);
}
+ fadt.dsdt = dsdt_addr;
+ fadt.facs = facs_addr;
+ hvm_copy_to_guest_phys(fadt_addr, &fadt, fadt_size, d->vcpu[0]);
xsdt->header.revision = 1;
xsdt->header.length = size;
@@ -1055,9 +1054,7 @@ static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr,
static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info)
{
- unsigned long pfn, nr_pages;
paddr_t madt_paddr, xsdt_paddr, rsdp_paddr;
- unsigned int i;
int rc;
struct acpi_table_rsdp *native_rsdp, rsdp = {
.signature = ACPI_SIG_RSDP,
@@ -1065,56 +1062,6 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info)
.length = sizeof(rsdp),
};
-
- /* Scan top-level tables and add their regions to the guest memory map. */
- for( i = 0; i < acpi_gbl_root_table_list.count; i++ )
- {
- const char *sig = acpi_gbl_root_table_list.tables[i].signature.ascii;
- unsigned long addr = acpi_gbl_root_table_list.tables[i].address;
- unsigned long size = acpi_gbl_root_table_list.tables[i].length;
-
- /*
- * Make sure the original MADT is also mapped, so that Dom0 can
- * properly access the data returned by _MAT methods in case it's
- * re-using MADT memory.
- */
- if ( strncmp(sig, ACPI_SIG_MADT, ACPI_NAME_SIZE)
- ? pvh_acpi_table_allowed(sig, addr, size)
- : !acpi_memory_banned(addr, size) )
- pvh_add_mem_range(d, addr, addr + size, E820_ACPI);
- }
-
- /* Identity map ACPI e820 regions. */
- for ( i = 0; i < d->arch.nr_e820; i++ )
- {
- if ( d->arch.e820[i].type != E820_ACPI &&
- d->arch.e820[i].type != E820_NVS )
- continue;
-
- pfn = PFN_DOWN(d->arch.e820[i].addr);
- nr_pages = PFN_UP((d->arch.e820[i].addr & ~PAGE_MASK) +
- d->arch.e820[i].size);
-
- /* Memory below 1MB has been dealt with by pvh_populate_p2m(). */
- if ( pfn < PFN_DOWN(MB(1)) )
- {
- if ( pfn + nr_pages <= PFN_DOWN(MB(1)) )
- continue;
-
- /* This shouldn't happen, but is easy to deal with. */
- nr_pages -= PFN_DOWN(MB(1)) - pfn;
- pfn = PFN_DOWN(MB(1));
- }
-
- rc = modify_identity_mmio(d, pfn, nr_pages, true);
- if ( rc )
- {
- printk("Failed to map ACPI region [%#lx, %#lx) into Dom0 memory map\n",
- pfn, pfn + nr_pages);
- return rc;
- }
- }
-
rc = pvh_setup_acpi_madt(d, &madt_paddr);
if ( rc )
return rc;
--
2.25.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |