
[PATCH v4 4/5] xen/memory, tools: Avoid hardcoding GUEST_MAGIC_BASE in init-dom0less


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Henry Wang <xin.wang2@xxxxxxx>
  • Date: Tue, 9 Apr 2024 12:53:56 +0800
  • Cc: Henry Wang <xin.wang2@xxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Juergen Gross <jgross@xxxxxxxx>, Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
  • Delivery-date: Tue, 09 Apr 2024 04:54:24 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Currently the guest magic pages region in the init-dom0less application
is hardcoded to GUEST_MAGIC_BASE, which leads to failures for 1:1
direct-mapped Dom0less DomUs, where the magic pages region cannot be
assumed to start at GUEST_MAGIC_BASE.

Instead of hardcoding the guest magic pages region, use the
XEN_DOMCTL_get_mem_map domctl to retrieve the start address of the
guest magic pages region. Add a new sub-op,
XENMEM_populate_physmap_heap_alloc, and the MEMF_force_heap_alloc flag
to force populate_physmap() to allocate pages from the domheap instead
of using 1:1 or statically allocated pages to map the magic pages.
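
For illustration, a minimal sketch of the intended tools-side flow
(condensed from the alloc_xs_page() change below; error handling and
the surrounding init-dom0less plumbing are elided):

    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;
    struct xen_mem_region regions[XEN_MAX_MEM_REGIONS] = {0};
    xen_pfn_t magic_base_pfn = 0, xs_pfn;
    unsigned int i;

    /* Ask Xen where this domain's magic pages region actually lives. */
    if ( xc_get_domain_mem_map(xch, domid, regions, &nr_regions) < 0 )
        return -1;

    for ( i = 0; i < nr_regions; i++ )
        if ( regions[i].type == XEN_MEM_REGION_MAGIC )
            magic_base_pfn = regions[i].start >> XC_PAGE_SHIFT;

    /* Populate the xenstore page from the domheap, never 1:1/static. */
    xs_pfn = magic_base_pfn + XENSTORE_PFN_OFFSET;
    if ( xc_domain_populate_physmap_heap_exact(xch, domid, 1, 0, 0,
                                               &xs_pfn) < 0 )
        return -1;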

Reported-by: Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
Signed-off-by: Henry Wang <xin.wang2@xxxxxxx>
---
v4:
- Use the new subop.
- Add assert to check the flag isn't already set when coming back from
  construct_memop_from_reservation().
- Use &= to clear the flag instead of clear_bit().
- Move the alias to xen/common/memory.c
v3:
- Don't ignore the error from xc_get_domain_mem_map().
- Re-purpose _MEMF_no_refcount as _MEMF_force_heap_alloc to avoid
  introducing a new, single-use flag (see the sketch after this
  changelog).
- Reject use of the newly added flag by the other reservation sub-ops.
- Replace all the GUEST_MAGIC_BASE usages.
v2:
- New patch
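
For reference, a condensed sketch of the hypervisor-side flag handling
added by this patch (simplified from the populate_physmap() and
do_memory_op() hunks below):

    /* Alias: only populate_physmap() may interpret the flag this way. */
    #define _MEMF_force_heap_alloc _MEMF_no_refcount
    #define  MEMF_force_heap_alloc (1U<<_MEMF_force_heap_alloc)

    /* do_memory_op(): only the new sub-op may set the flag. */
    ASSERT(!(args.memflags & MEMF_force_heap_alloc));
    if ( op == XENMEM_populate_physmap_heap_alloc )
        args.memflags |= MEMF_force_heap_alloc;

    /*
     * populate_physmap(): the flag skips the 1:1 and static-memory
     * paths, and is cleared again before alloc_domheap_pages() could
     * misinterpret it as the original MEMF_no_refcount.
     */
    if ( unlikely(a->memflags & MEMF_force_heap_alloc) )
        a->memflags &= ~MEMF_force_heap_alloc;
    page = alloc_domheap_pages(d, a->extent_order, a->memflags);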
---
 tools/helpers/init-dom0less.c | 35 +++++++++++-----
 tools/include/xenctrl.h       |  7 ++++
 tools/libs/ctrl/xc_domain.c   | 79 ++++++++++++++++++++++++++---------
 xen/common/memory.c           | 30 +++++++++++--
 xen/include/public/memory.h   |  3 +-
 5 files changed, 120 insertions(+), 34 deletions(-)

diff --git a/tools/helpers/init-dom0less.c b/tools/helpers/init-dom0less.c
index fee93459c4..dccab7b29b 100644
--- a/tools/helpers/init-dom0less.c
+++ b/tools/helpers/init-dom0less.c
@@ -19,24 +19,42 @@
 #define XENSTORE_PFN_OFFSET 1
 #define STR_MAX_LENGTH 128
 
+static xen_pfn_t xs_page_base;
+static xen_pfn_t xs_page_p2m;
+
 static int alloc_xs_page(struct xc_interface_core *xch,
                          libxl_dominfo *info,
                          uint64_t *xenstore_pfn)
 {
-    int rc;
-    const xen_pfn_t base = GUEST_MAGIC_BASE >> XC_PAGE_SHIFT;
-    xen_pfn_t p2m = (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+    int rc, i;
+    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;
+    struct xen_mem_region mem_regions[XEN_MAX_MEM_REGIONS] = {0};
+
+    rc = xc_get_domain_mem_map(xch, info->domid, mem_regions, &nr_regions);
+    if (rc < 0)
+        return rc;
+
+    for ( i = 0; i < nr_regions; i++ )
+    {
+        if ( mem_regions[i].type == XEN_MEM_REGION_MAGIC )
+        {
+            xs_page_base = mem_regions[i].start >> XC_PAGE_SHIFT;
+            xs_page_p2m = (mem_regions[i].start >> XC_PAGE_SHIFT) +
+                          XENSTORE_PFN_OFFSET;
+        }
+    }
 
     rc = xc_domain_setmaxmem(xch, info->domid,
                              info->max_memkb + (XC_PAGE_SIZE/1024));
     if (rc < 0)
         return rc;
 
-    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0, 0, &p2m);
+    rc = xc_domain_populate_physmap_heap_exact(xch, info->domid, 1, 0, 0,
+                                               &xs_page_p2m);
     if (rc < 0)
         return rc;
 
-    *xenstore_pfn = base + XENSTORE_PFN_OFFSET;
+    *xenstore_pfn = xs_page_base + XENSTORE_PFN_OFFSET;
     rc = xc_clear_domain_page(xch, info->domid, *xenstore_pfn);
     if (rc < 0)
         return rc;
@@ -145,8 +163,7 @@ static int create_xenstore(struct xs_handle *xsh,
     rc = snprintf(target_memkb_str, STR_MAX_LENGTH, "%"PRIu64, info->current_memkb);
     if (rc < 0 || rc >= STR_MAX_LENGTH)
         return rc;
-    rc = snprintf(ring_ref_str, STR_MAX_LENGTH, "%lld",
-                  (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET);
+    rc = snprintf(ring_ref_str, STR_MAX_LENGTH, "%"PRIu_xen_pfn, xs_page_p2m);
     if (rc < 0 || rc >= STR_MAX_LENGTH)
         return rc;
     rc = snprintf(xenstore_port_str, STR_MAX_LENGTH, "%u", xenstore_port);
@@ -282,9 +299,7 @@ static int init_domain(struct xs_handle *xsh,
     if (rc)
         err(1, "writing to xenstore");
 
-    rc = xs_introduce_domain(xsh, info->domid,
-            (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET,
-            xenstore_evtchn);
+    rc = xs_introduce_domain(xsh, info->domid, xs_page_p2m, xenstore_evtchn);
     if (!rc)
         err(1, "xs_introduce_domain");
     return 0;
diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
index b25e9772a2..c1a601813a 100644
--- a/tools/include/xenctrl.h
+++ b/tools/include/xenctrl.h
@@ -1342,6 +1342,13 @@ int xc_domain_populate_physmap(xc_interface *xch,
                                unsigned int mem_flags,
                                xen_pfn_t *extent_start);
 
+int xc_domain_populate_physmap_heap_exact(xc_interface *xch,
+                                          uint32_t domid,
+                                          unsigned long nr_extents,
+                                          unsigned int extent_order,
+                                          unsigned int mem_flags,
+                                          xen_pfn_t *extent_start);
+
 int xc_domain_populate_physmap_exact(xc_interface *xch,
                                      uint32_t domid,
                                      unsigned long nr_extents,
diff --git a/tools/libs/ctrl/xc_domain.c b/tools/libs/ctrl/xc_domain.c
index 4dba55d01d..82c1554613 100644
--- a/tools/libs/ctrl/xc_domain.c
+++ b/tools/libs/ctrl/xc_domain.c
@@ -916,6 +916,36 @@ int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
     return rc;
 }
 
+static int xc_populate_physmap_cmd(xc_interface *xch,
+                                   unsigned int cmd,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   unsigned int mem_flags,
+                                   xen_pfn_t *extent_start)
+{
+    int err;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    struct xen_memory_reservation reservation = {
+        .nr_extents   = nr_extents,
+        .extent_order = extent_order,
+        .mem_flags    = mem_flags,
+        .domid        = domid
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
+    {
+        PERROR("Could not bounce memory for XENMEM_populate_physmap 
hypercall");
+        return -1;
+    }
+    set_xen_guest_handle(reservation.extent_start, extent_start);
+
+    err = xc_memory_op(xch, cmd, &reservation, sizeof(reservation));
+
+    xc_hypercall_bounce_post(xch, extent_start);
+    return err;
+}
+
 int xc_domain_increase_reservation(xc_interface *xch,
                                    uint32_t domid,
                                    unsigned long nr_extents,
@@ -1135,26 +1165,9 @@ int xc_domain_populate_physmap(xc_interface *xch,
                                unsigned int mem_flags,
                                xen_pfn_t *extent_start)
 {
-    int err;
-    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    struct xen_memory_reservation reservation = {
-        .nr_extents   = nr_extents,
-        .extent_order = extent_order,
-        .mem_flags    = mem_flags,
-        .domid        = domid
-    };
-
-    if ( xc_hypercall_bounce_pre(xch, extent_start) )
-    {
-        PERROR("Could not bounce memory for XENMEM_populate_physmap 
hypercall");
-        return -1;
-    }
-    set_xen_guest_handle(reservation.extent_start, extent_start);
-
-    err = xc_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
-
-    xc_hypercall_bounce_post(xch, extent_start);
-    return err;
+    return xc_populate_physmap_cmd(xch, XENMEM_populate_physmap, domid,
+                                   nr_extents, extent_order, mem_flags,
+                                   extent_start);
 }
 
 int xc_domain_populate_physmap_exact(xc_interface *xch,
@@ -1182,6 +1195,32 @@ int xc_domain_populate_physmap_exact(xc_interface *xch,
     return err;
 }
 
+int xc_domain_populate_physmap_heap_exact(xc_interface *xch,
+                                          uint32_t domid,
+                                          unsigned long nr_extents,
+                                          unsigned int extent_order,
+                                          unsigned int mem_flags,
+                                          xen_pfn_t *extent_start)
+{
+    int err;
+
+    err = xc_populate_physmap_cmd(xch, XENMEM_populate_physmap_heap_alloc,
+                                  domid, nr_extents, extent_order, mem_flags,
+                                  extent_start);
+    if ( err == nr_extents )
+        return 0;
+
+    if ( err >= 0 )
+    {
+        DPRINTF("Failed allocation for dom %d: %ld extents of order %d\n",
+                domid, nr_extents, extent_order);
+        errno = EBUSY;
+        err = -1;
+    }
+
+    return err;
+}
+
 int xc_domain_memory_exchange_pages(xc_interface *xch,
                                     uint32_t domid,
                                     unsigned long nr_in_extents,
diff --git a/xen/common/memory.c b/xen/common/memory.c
index b4593f5f45..a4733869c2 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -155,6 +155,14 @@ static void increase_reservation(struct memop_args *a)
     a->nr_done = i;
 }
 
+/*
+ * Alias of _MEMF_no_refcount to avoid introducing a new, single-use flag.
+ * Use it with populate_physmap() only, where it forces a non-1:1
+ * allocation from the domheap.
+ */
+#define _MEMF_force_heap_alloc _MEMF_no_refcount
+#define  MEMF_force_heap_alloc (1U<<_MEMF_force_heap_alloc)
+
 static void populate_physmap(struct memop_args *a)
 {
     struct page_info *page;
@@ -219,7 +227,8 @@ static void populate_physmap(struct memop_args *a)
         }
         else
         {
-            if ( is_domain_direct_mapped(d) )
+            if ( is_domain_direct_mapped(d) &&
+                 !(a->memflags & MEMF_force_heap_alloc) )
             {
                 mfn = _mfn(gpfn);
 
@@ -246,7 +255,8 @@ static void populate_physmap(struct memop_args *a)
 
                 mfn = _mfn(gpfn);
             }
-            else if ( is_domain_using_staticmem(d) )
+            else if ( is_domain_using_staticmem(d) &&
+                      !(a->memflags & MEMF_force_heap_alloc) )
             {
                 /*
                  * No easy way to guarantee the retrieved pages are contiguous,
@@ -271,6 +281,14 @@ static void populate_physmap(struct memop_args *a)
             }
             else
             {
+                /*
+                 * Avoid passing MEMF_force_heap_alloc down to
+                 * alloc_domheap_pages() where the meaning would be the
+                 * original MEMF_no_refcount.
+                 */
+                if ( unlikely(a->memflags & MEMF_force_heap_alloc) )
+                    a->memflags &= ~MEMF_force_heap_alloc;
+
                 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
 
                 if ( unlikely(!page) )
@@ -1404,6 +1422,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     {
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
+    case XENMEM_populate_physmap_heap_alloc:
     case XENMEM_populate_physmap:
         if ( copy_from_guest(&reservation, arg, 1) )
             return start_extent;
@@ -1433,6 +1452,11 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
              && (reservation.mem_flags & XENMEMF_populate_on_demand) )
             args.memflags |= MEMF_populate_on_demand;
 
+        /* Assert flag is not set from construct_memop_from_reservation(). */
+        ASSERT(!(args.memflags & MEMF_force_heap_alloc));
+        if ( op == XENMEM_populate_physmap_heap_alloc )
+            args.memflags |= MEMF_force_heap_alloc;
+
         if ( xsm_memory_adjust_reservation(XSM_TARGET, curr_d, d) )
         {
             rcu_unlock_domain(d);
@@ -1453,7 +1477,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         case XENMEM_decrease_reservation:
             decrease_reservation(&args);
             break;
-        default: /* XENMEM_populate_physmap */
+        default: /* XENMEM_populate_{physmap, physmap_heap_alloc} */
             populate_physmap(&args);
             break;
         }
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 5e545ae9a4..5e79992671 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -21,6 +21,7 @@
 #define XENMEM_increase_reservation 0
 #define XENMEM_decrease_reservation 1
 #define XENMEM_populate_physmap     6
+#define XENMEM_populate_physmap_heap_alloc 29
 
 #if __XEN_INTERFACE_VERSION__ >= 0x00030209
 /*
@@ -731,7 +732,7 @@ struct xen_vnuma_topology_info {
 typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
 DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
 
-/* Next available subop number is 29 */
+/* Next available subop number is 30 */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
 
-- 
2.34.1