
[PATCH v2 5/5] xen/memory, tools: Make init-dom0less consume XEN_DOMCTL_get_mem_map


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Henry Wang <xin.wang2@xxxxxxx>
  • Date: Fri, 8 Mar 2024 09:54:35 +0800
  • Cc: Henry Wang <xin.wang2@xxxxxxx>, Wei Liu <wl@xxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
  • Delivery-date: Fri, 08 Mar 2024 01:55:11 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Previous commits enable the toolstack to get the domain memory map,
so instead of hardcoding the guest magic pages region, use the
XEN_DOMCTL_get_mem_map domctl to retrieve the start address of the
guest magic pages region. Add the (XEN)MEMF_force_heap_alloc memory
flags to force populate_physmap() to allocate pages from the domheap
instead of using 1:1 or statically allocated pages to map the magic
pages.

Reported-by: Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
Signed-off-by: Henry Wang <xin.wang2@xxxxxxx>
---
v2:
- New patch
---
 tools/helpers/init-dom0less.c | 28 ++++++++++++++++++++++++----
 xen/common/memory.c           | 10 ++++++++--
 xen/include/public/memory.h   |  5 +++++
 xen/include/xen/mm.h          |  2 ++
 4 files changed, 39 insertions(+), 6 deletions(-)
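
Not part of the patch, as a reviewer aid: below is a minimal sketch of how
a toolstack client is expected to consume the new interfaces. The
xc_get_domain_mem_map() wrapper and the GUEST_MEM_REGION_MAGIC type come
from the earlier patches in this series; the helper itself and its error
handling are illustrative only.

#include <xenctrl.h>

/*
 * Illustrative helper: locate the guest magic pages region of @domid via
 * xc_get_domain_mem_map() and return its base PFN. This mirrors the logic
 * added to alloc_xs_page() below.
 */
static int find_magic_base(xc_interface *xch, uint32_t domid, xen_pfn_t *base)
{
    struct xen_mem_region mem_regions[XEN_MAX_MEM_REGIONS] = {0};
    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;
    uint32_t i;
    int rc = xc_get_domain_mem_map(xch, domid, mem_regions, &nr_regions);

    if ( rc < 0 )
        return rc;

    for ( i = 0; i < nr_regions; i++ )
    {
        if ( mem_regions[i].type == GUEST_MEM_REGION_MAGIC )
        {
            *base = mem_regions[i].start >> XC_PAGE_SHIFT;
            return 0;
        }
    }

    return -1; /* no magic pages region reported */
}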

diff --git a/tools/helpers/init-dom0less.c b/tools/helpers/init-dom0less.c
index fee93459c4..92c612f6da 100644
--- a/tools/helpers/init-dom0less.c
+++ b/tools/helpers/init-dom0less.c
@@ -23,16 +23,36 @@ static int alloc_xs_page(struct xc_interface_core *xch,
                          libxl_dominfo *info,
                          uint64_t *xenstore_pfn)
 {
-    int rc;
-    const xen_pfn_t base = GUEST_MAGIC_BASE >> XC_PAGE_SHIFT;
-    xen_pfn_t p2m = (GUEST_MAGIC_BASE >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+    int rc, i;
+    xen_pfn_t base = ((xen_pfn_t)-1);
+    xen_pfn_t p2m = ((xen_pfn_t)-1);
+    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;
+    struct xen_mem_region mem_regions[XEN_MAX_MEM_REGIONS] = {0};
+
+    rc = xc_get_domain_mem_map(xch, info->domid, mem_regions, &nr_regions);
+    if (rc < 0)
+        return rc;
+
+    for ( i = 0; i < nr_regions; i++ )
+    {
+        if ( mem_regions[i].type == GUEST_MEM_REGION_MAGIC )
+        {
+            base = mem_regions[i].start >> XC_PAGE_SHIFT;
+            p2m = (mem_regions[i].start >> XC_PAGE_SHIFT) + XENSTORE_PFN_OFFSET;
+        }
+    }
+
+    /* Bail out if Xen reported no magic pages region for this domain. */
+    if ( base == ((xen_pfn_t)-1) )
+        return -1;
 
     rc = xc_domain_setmaxmem(xch, info->domid,
                              info->max_memkb + (XC_PAGE_SIZE/1024));
     if (rc < 0)
         return rc;
 
-    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0, 0, &p2m);
+    rc = xc_domain_populate_physmap_exact(xch, info->domid, 1, 0,
+                                          XENMEMF_force_heap_alloc, &p2m);
     if (rc < 0)
         return rc;
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index b3b05c2ec0..18b6c16aed 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -219,7 +219,8 @@ static void populate_physmap(struct memop_args *a)
         }
         else
         {
-            if ( is_domain_direct_mapped(d) )
+            if ( is_domain_direct_mapped(d) &&
+                 !(a->memflags & MEMF_force_heap_alloc) )
             {
                 mfn = _mfn(gpfn);
 
@@ -246,7 +247,8 @@ static void populate_physmap(struct memop_args *a)
 
                 mfn = _mfn(gpfn);
             }
-            else if ( is_domain_using_staticmem(d) )
+            else if ( is_domain_using_staticmem(d) &&
+                      !(a->memflags & MEMF_force_heap_alloc) )
             {
                 /*
                  * No easy way to guarantee the retrieved pages are contiguous,
@@ -1433,6 +1435,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
              && (reservation.mem_flags & XENMEMF_populate_on_demand) )
             args.memflags |= MEMF_populate_on_demand;
 
+        if ( op == XENMEM_populate_physmap
+             && (reservation.mem_flags & XENMEMF_force_heap_alloc) )
+            args.memflags |= MEMF_force_heap_alloc;
+
         if ( xsm_memory_adjust_reservation(XSM_TARGET, curr_d, d) )
         {
             rcu_unlock_domain(d);
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 5e545ae9a4..2a1bfa5bfa 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -41,6 +41,11 @@
 #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
 /* Flag to indicate the node specified is virtual node */
 #define XENMEMF_vnode  (1<<18)
+/*
+ * Flag to force XENMEM_populate_physmap to allocate pages from the domheap
+ * instead of using 1:1 direct-mapped or statically allocated pages.
+ */
+#define XENMEMF_force_heap_alloc  (1<<19)
 #endif
 
 struct xen_memory_reservation {
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index bb29b352ec..a4554f730d 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -205,6 +205,8 @@ struct npfec {
 #define  MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
 #define _MEMF_no_scrub    8
 #define  MEMF_no_scrub    (1U<<_MEMF_no_scrub)
+#define _MEMF_force_heap_alloc 9
+#define  MEMF_force_heap_alloc (1U<<_MEMF_force_heap_alloc)
 #define _MEMF_node        16
 #define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define  MEMF_node(n)     ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
-- 
2.34.1
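
Also not part of the patch: a hedged sketch of what the populate call
amounts to at the hypercall ABI level, to make the new flag's position in
the public interface explicit. The reservation layout follows
xen/include/public/memory.h; issue_memory_op() stands in for whatever
hypercall wrapper the caller has available and is hypothetical.

/*
 * Illustrative only: a XENMEM_populate_physmap request for one page at
 * @gpfn with the new public flag (bit 19 of mem_flags) set. In
 * do_memory_op() the hypervisor translates this into the internal
 * MEMF_force_heap_alloc (bit 9), as in the xen/common/memory.c hunk above;
 * the bit positions deliberately differ, which is why the flag is
 * translated explicitly rather than passed through.
 */
struct xen_memory_reservation res = {
    .nr_extents   = 1,
    .extent_order = 0,
    .mem_flags    = XENMEMF_force_heap_alloc,
    .domid        = domid,
};

set_xen_guest_handle(res.extent_start, &gpfn);
rc = issue_memory_op(XENMEM_populate_physmap, &res, sizeof(res));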