
Re: [Xen-devel] [v5][PATCH 03/10] xen:x86: define a new hypercall to get RMRR mappings



On 2014/8/26 20:02, Andrew Cooper wrote:
On 26/08/14 12:02, Tiejun Chen wrote:
We need this new hypercall to get the RMRR mappings for a VM.

Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
---
  xen/arch/x86/mm.c           | 49 +++++++++++++++++++++++++++++++++++++++++++++
  xen/include/public/memory.h | 39 +++++++++++++++++++++++++++++++++++-
  2 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d23cb3f..f54294c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -123,6 +123,7 @@
  #include <asm/setup.h>
  #include <asm/fixmap.h>
  #include <asm/pci.h>
+#include <asm/acpi.h>

  /* Mapping of the fixmap space needed early. */
  l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
@@ -4842,6 +4843,54 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
          return rc;
      }

+    case XENMEM_reserved_device_memory_map:
+    {
+        struct xen_mem_reserved_device_memory_map map;
+        XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
+        XEN_GUEST_HANDLE_PARAM(xen_mem_reserved_device_memory_t) buffer_param;
+        unsigned int i = 0;
+        static struct xen_mem_reserved_device_memory rmrr_map;

This absolutely must not be static, as I have indicated twice before.

Sorry, this is a typo. It looks like I forgot to remove 'static' here; I just remembered this needs to work without any memory allocation.

Concurrent hypercalls will corrupt it.  Furthermore, its scope can be
reduced to inside the list_for_each() loop.
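i.e. something roughly like this (untested sketch, reusing the same
fields the patch already computes):

    list_for_each_entry( rmrr, &acpi_rmrr_units, list )
    {
        /* Local to each iteration: no state shared between
         * concurrent hypercalls, and the narrowest possible scope. */
        struct xen_mem_reserved_device_memory rmrr_map = {
            .start_pfn = rmrr->base_address >> PAGE_SHIFT,
            .nr_pages = PAGE_ALIGN(rmrr->end_address -
                                   rmrr->base_address) / PAGE_SIZE,
        };

        if ( copy_to_guest_offset(buffer, i, &rmrr_map, 1) )
            return -EFAULT;
        i++;
    }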

+        struct acpi_rmrr_unit *rmrr;
+
+        if ( copy_from_guest(&map, arg, 1) )
+            return -EFAULT;
+
+        if ( !acpi_rmrr_unit_entries )
+                return -ENOENT;

The indentation here is wrong.  It would make sense to move this above
the copy_from_guest(), to avoid the copy in the case that there are no
rmrr ranges.


Okay.

+
+        if ( map.nr_entries < acpi_rmrr_unit_entries )
+        {
+            map.nr_entries = acpi_rmrr_unit_entries;
+            if ( copy_to_guest(arg, &map, 1) )
+                return -EFAULT;
+            return -ENOBUFS;
+        }
+
+        map.nr_entries = acpi_rmrr_unit_entries;

Move this assignment below, and use i, which is the actual number of
entries written.

Okay, so what about this?

@@ -4842,6 +4843,55 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return rc;
     }

+    case XENMEM_reserved_device_memory_map:
+    {
+        struct xen_mem_reserved_device_memory_map map;
+        XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
+        XEN_GUEST_HANDLE_PARAM(xen_mem_reserved_device_memory_t) buffer_param;
+        unsigned int i = 0;
+        struct xen_mem_reserved_device_memory rmrr_map;
+        struct acpi_rmrr_unit *rmrr;
+
+        if ( !acpi_rmrr_unit_entries )
+            return -ENOENT;
+
+        if ( copy_from_guest(&map, arg, 1) )
+            return -EFAULT;
+
+        if ( map.nr_entries < acpi_rmrr_unit_entries )
+        {
+            map.nr_entries = acpi_rmrr_unit_entries;
+            if ( copy_to_guest(arg, &map, 1) )
+                return -EFAULT;
+            return -ENOBUFS;
+        }
+
+        buffer_param = guest_handle_cast(map.buffer,
+                                         xen_mem_reserved_device_memory_t);
+        buffer = guest_handle_from_param(buffer_param,
+                                         xen_mem_reserved_device_memory_t);
+        if ( !guest_handle_okay(buffer, map.nr_entries) )
+            return -EFAULT;
+
+        list_for_each_entry( rmrr, &acpi_rmrr_units, list )
+        {
+            rmrr_map.start_pfn = rmrr->base_address >> PAGE_SHIFT;
+            rmrr_map.nr_pages = PAGE_ALIGN(rmrr->end_address -
+                                           rmrr->base_address) /
+                                           PAGE_SIZE;
+            if ( copy_to_guest_offset(buffer, i, &rmrr_map, 1) )
+                return -EFAULT;
+            i++;
+        }
+
+        map.nr_entries = i;
+
+        if ( copy_to_guest(arg, &map, 1) )
+            return -EFAULT;
+
+        return 0;
+    }
+
     default:
         return subarch_memory_op(cmd, arg);
     }

Thanks
Tiejun


~Andrew

+        buffer_param = guest_handle_cast(map.buffer,
+                                         xen_mem_reserved_device_memory_t);
+        buffer = guest_handle_from_param(buffer_param,
+                                         xen_mem_reserved_device_memory_t);
+        if ( !guest_handle_okay(buffer, map.nr_entries) )
+            return -EFAULT;
+
+        list_for_each_entry( rmrr, &acpi_rmrr_units, list )
+        {
+            rmrr_map.start_pfn = rmrr->base_address >> PAGE_SHIFT;
+            rmrr_map.nr_pages = PAGE_ALIGN(rmrr->end_address -
+                                           rmrr->base_address) /
+                                           PAGE_SIZE;
+            if ( copy_to_guest_offset(buffer, i, &rmrr_map, 1) )
+                return -EFAULT;
+            i++;
+        }
+
+        if ( copy_to_guest(arg, &map, 1) )
+                return -EFAULT;
+
+        return 0;
+    }
+
      default:
          return subarch_memory_op(cmd, arg);
      }
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 2c57aa0..47ce8b2 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -523,7 +523,44 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);

  #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

-/* Next available subop number is 26 */
+/*
+ * For legacy reasons, some devices must be configured with special memory
+ * regions to function correctly.  The guest must avoid using any of these
+ * regions.
+ *
+ * Currently we just have RMRRs (Reserved Memory Region Reporting
+ * structures), so this returns the RMRR memory map as it was when
+ * the domain was started.
+ */
+#define XENMEM_reserved_device_memory_map   26
+struct xen_mem_reserved_device_memory {
+    /* Start PFN of the reserved device memory region. */
+    xen_pfn_t start_pfn;
+    /* Number of pages in the region. */
+    xen_ulong_t nr_pages;
+};
+typedef struct xen_mem_reserved_device_memory xen_mem_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t);
+
+struct xen_mem_reserved_device_memory_map {
+    /*
+     * On call the number of entries which can be stored in buffer. On
+     * return the number of entries which have been stored in
+     * buffer.
+     */
+    unsigned int nr_entries;
+
+    /*
+     * Entries in the buffer are in the same format as
+     * xen_mem_reserved_device_memory.
+     */
+    XEN_GUEST_HANDLE(void) buffer;
+};
+typedef struct xen_mem_reserved_device_memory_map xen_mem_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_map_t);
+
+/* Next available subop number is 27 */

  #endif /* __XEN_PUBLIC_MEMORY_H__ */
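As an aside, the nr_entries handshake described above implies the usual
two-call negotiation for variable-sized maps. A hypothetical caller-side
sketch (the hypercall wrapper and handle plumbing are illustrative, not
part of this patch):

    struct xen_mem_reserved_device_memory *entries;
    struct xen_mem_reserved_device_memory_map map = { .nr_entries = 0 };
    int rc;

    set_xen_guest_handle(map.buffer, NULL);

    /* Probe: with no buffer the hypervisor reports the required
     * nr_entries and fails with -ENOBUFS (or -ENOENT when the
     * platform has no RMRRs at all). */
    rc = HYPERVISOR_memory_op(XENMEM_reserved_device_memory_map, &map);
    if ( rc == -ENOBUFS )
    {
        entries = calloc(map.nr_entries, sizeof(*entries));
        set_xen_guest_handle(map.buffer, entries);

        /* Retry with a large enough buffer; on success, map.nr_entries
         * holds the number of entries actually written. */
        rc = HYPERVISOR_memory_op(XENMEM_reserved_device_memory_map, &map);
    }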



