
[Xen-devel] [v7][RFC][PATCH 05/13] hvmloader/mmio: reconcile guest mmio with reserved device memory



We need to make sure that MMIO allocations don't overlap any RDM
(reserved device memory). Here we simply skip all reserved device
memory ranges when allocating in the MMIO space.
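
For reference only (not part of this patch), the conflict test used below is
the standard half-open interval overlap check, and "skipping" an RDM region
simply means restarting the BAR allocation at the end of the conflicting
range. A minimal standalone sketch with hypothetical names and values:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Two half-open ranges [a, a + a_sz) and [b, b + b_sz) overlap unless
     * one of them ends at or before the other begins.
     */
    static int ranges_overlap(uint64_t a, uint64_t a_sz,
                              uint64_t b, uint64_t b_sz)
    {
        return !(a + a_sz <= b || b + b_sz <= a);
    }

    int main(void)
    {
        /* Hypothetical RDM region: 64 KiB at 0xfe000000. */
        uint64_t rdm_start = 0xfe000000, rdm_size = 0x10000;
        /* Candidate BAR placement: 1 MiB at 0xfe008000 -- conflicts. */
        uint64_t base = 0xfe008000, bar_sz = 0x100000;

        if ( ranges_overlap(rdm_start, rdm_size, base, bar_sz) )
            base = rdm_start + rdm_size;   /* skip past the RDM and retry */

        printf("BAR base moved to %#llx\n", (unsigned long long)base);
        return 0;
    }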

Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
---
 tools/firmware/hvmloader/pci.c | 68 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 67 insertions(+), 1 deletion(-)

diff --git a/tools/firmware/hvmloader/pci.c b/tools/firmware/hvmloader/pci.c
index 3712988..e26481c 100644
--- a/tools/firmware/hvmloader/pci.c
+++ b/tools/firmware/hvmloader/pci.c
@@ -37,6 +37,44 @@ uint64_t pci_hi_mem_start = 0, pci_hi_mem_end = 0;
 enum virtual_vga virtual_vga = VGA_none;
 unsigned long igd_opregion_pgbase = 0;
 
+unsigned int need_skip_rmrr = 0;
+
+/*
+ * Check whether the specified memory range overlaps the given MMIO range.
+ * Returns 1 if they overlap, else returns 0.
+ */
+static int check_mmio_hole_confliction(uint64_t start, uint64_t memsize,
+                           uint64_t mmio_start, uint64_t mmio_size)
+{
+    if ( start + memsize <= mmio_start || start >= mmio_start + mmio_size )
+        return 0;
+    else
+        return 1;
+}
+
+static int check_reserved_device_memory_map(uint64_t mmio_base,
+                                            uint64_t mmio_max)
+{
+    int i;
+    uint64_t rdm_start, rdm_end;
+    int nr_entries = -1;
+
+    nr_entries = hvm_get_reserved_device_memory_map();
+
+    for ( i = 0; i < nr_entries; i++ )
+    {
+        rdm_start = rdm_map[i].start_pfn << PAGE_SHIFT;
+        rdm_end = rdm_start + (rdm_map[i].nr_pages << PAGE_SHIFT);
+        if ( check_mmio_hole_confliction(rdm_start, (rdm_end - rdm_start),
+                                         mmio_base, mmio_max - mmio_base) )
+        {
+            need_skip_rmrr++;
+        }
+    }
+
+    return nr_entries;
+}
+
 void pci_setup(void)
 {
     uint8_t is_64bar, using_64bar, bar64_relocate = 0;
@@ -58,7 +96,9 @@ void pci_setup(void)
         uint32_t bar_reg;
         uint64_t bar_sz;
     } *bars = (struct bars *)scratch_start;
-    unsigned int i, nr_bars = 0;
+    unsigned int i, j, nr_bars = 0;
+    int nr_entries = 0;
+    uint64_t rdm_start, rdm_end;
 
     const char *s;
     /*
@@ -309,6 +349,14 @@ void pci_setup(void)
     io_resource.base = 0xc000;
     io_resource.max = 0x10000;
 
+    /* Check low mmio range. */
+    nr_entries = check_reserved_device_memory_map(mem_resource.base,
+                                                  mem_resource.max);
+    /* Check high mmio range. */
+    if ( nr_entries > 0 )
+        nr_entries = check_reserved_device_memory_map(high_mem_resource.base,
+                                                      high_mem_resource.max);
+
     /* Assign iomem and ioport resources in descending order of size. */
     for ( i = 0; i < nr_bars; i++ )
     {
@@ -363,11 +411,29 @@ void pci_setup(void)
             bar_data &= ~PCI_BASE_ADDRESS_IO_MASK;
         }
 
+ reallocate_mmio:
         base = (resource->base  + bar_sz - 1) & ~(uint64_t)(bar_sz - 1);
         bar_data |= (uint32_t)base;
         bar_data_upper = (uint32_t)(base >> 32);
         base += bar_sz;
 
+        if ( need_skip_rmrr )
+        {
+            for ( j = 0; j < nr_entries; j++ )
+            {
+                rdm_start = rdm_map[j].start_pfn << PAGE_SHIFT;
+                rdm_end = rdm_start + (rdm_map[j].nr_pages << PAGE_SHIFT);
+                if ( check_mmio_hole_confliction(rdm_start,
+                                                 (rdm_end - rdm_start),
+                                                 base, bar_sz) )
+                {
+                    resource->base = rdm_end;
+                    need_skip_rmrr--;
+                    goto reallocate_mmio;
+                }
+            }
+        }
+
         if ( (base < resource->base) || (base > resource->max) )
         {
             printf("pci dev %02x:%x bar %02x size "PRIllx": no space for "
-- 
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
