[Xen-devel] [RFC PATCH v3 03/22] Reserve live update memory regions

From: David Woodhouse <dwmw@xxxxxxxxxxxx>

The live update handover requires that a region of memory be reserved
for the new Xen to use in its boot allocator. The original Xen may use
that memory but not for any pages which are mapped to domains, or which
would need to be preserved across the live update for any other reason.

The same constraints apply to initmem pages freed from the Xen image,
since the new Xen will be loaded into the same physical location as the
previous Xen.

There is separate ongoing work which will make the xenheap meet this
requirement by eliminating share_xen_page_with_guest(). In the meantime,
just don't add those pages to the heap at all in the live update case.

Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
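A minimal sketch (not part of the patch) of how code later in the series
might test a range against the reserved live update region.
lu_bootmem_start is the variable this patch already consults in
init_lu_reserved_pages(); the lu_bootmem_size variable and the
lu_reserved_range() helper are assumptions made here purely for
illustration.

    #include <xen/types.h>

    /* Assumed names: set when the live update region is reserved at boot. */
    extern paddr_t lu_bootmem_start, lu_bootmem_size;

    /*
     * True if [ps, pe) overlaps the reserved live update region, i.e. pages
     * in that range must not be mapped to a domain or otherwise need to be
     * preserved across the handover to the new Xen.
     */
    static inline bool lu_reserved_range(paddr_t ps, paddr_t pe)
    {
        if ( !lu_bootmem_start )
            return false;

        return ps < lu_bootmem_start + lu_bootmem_size &&
               pe > lu_bootmem_start;
    }
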
 xen/arch/x86/setup.c    | 12 ++++++++++-
 xen/common/page_alloc.c | 45 +++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/mm.h    |  2 ++
 3 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 63f06d4856..dba8c3f0a1 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -611,7 +611,7 @@ static void noinline init_done(void)
     }
 
     destroy_xen_mappings(start, end);
-    init_xenheap_pages(__pa(start), __pa(end));
+    init_lu_reserved_pages(__pa(start), __pa(end));
     printk("Freed %lukB init memory\n", (end - start) >> 10);
 
     startup_cpu_idle_loop();
@@ -1577,6 +1577,16 @@ void __init noreturn __start_xen(unsigned long mbi_p)
         unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
         uint64_t mask = PAGE_SIZE - 1;
 
+        /*
+         * Pages in the reserved LU region must not be used for anything which
+         * will need to persist across a live update. There is ongoing work to
+         * eliminate or limit the use of share_xen_page_with_guest() and get
+         * to a point where we can actually honour that promise, but for now
+         * just *don't* add those pages to the heap. Clear the boot allocator
+         * out completely, before adding the non-reserved ranges.
+         */
+        clear_boot_allocator();
+
         for ( i = 0; i < boot_e820.nr_map; i++ )
         {
             uint64_t s, e;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 919a270587..a74bf02559 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1879,6 +1879,51 @@ void __init end_boot_allocator(void)
     printk("\n");
 }
 
+/*
+ * Called when live update is supported. The memory ranges currently
+ * still free in the boot allocator must be added to the reserved
+ * heap, distinct from the xenheap in that pages from it MUST NOT be
+ * used for anything which will be mapped to a domain or otherwise
+ * need to survive a live update.
+ */
+void __init clear_boot_allocator(void)
+{
+    unsigned int i;
+
+    /* Add at least one range on node zero first, if we can. */
+    for ( i = 0; i < nr_bootmem_regions; i++ )
+    {
+        struct bootmem_region *r = &bootmem_region_list[i];
+        if ( (r->s < r->e) &&
+             (phys_to_nid(pfn_to_paddr(r->s)) == cpu_to_node(0)) )
+        {
+            init_lu_reserved_pages(r->s << PAGE_SHIFT, r->e << PAGE_SHIFT);
+            r->e = r->s;
+            break;
+        }
+    }
+    for ( i = nr_bootmem_regions; i-- > 0; )
+    {
+        struct bootmem_region *r = &bootmem_region_list[i];
+        if ( r->s < r->e )
+            init_lu_reserved_pages(r->s << PAGE_SHIFT, r->e << PAGE_SHIFT);
+    }
+    nr_bootmem_regions = 0;
+}
+
+void init_lu_reserved_pages(paddr_t ps, paddr_t pe)
+{
+    if ( !lu_bootmem_start )
+        init_xenheap_pages(ps, pe);
+
+    /*
+     * Ongoing work elsewhere will eliminate share_xen_page_with_guest() so
+     * that the normal xenheap meets the live update requirement: nothing
+     * allocated from it is mapped to a guest or must survive a live update.
+     * Until then, we simply don't use these pages after boot.
+     */
+}
+
 static void __init smp_scrub_heap_pages(void *data)
 {
     unsigned long mfn, start, end;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index d0d095d9c7..d120d84d23 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -158,8 +158,10 @@ struct domain *__must_check page_get_owner_and_reference(struct page_info *);
 void init_boot_pages(paddr_t ps, paddr_t pe);
 mfn_t alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
 void end_boot_allocator(void);
+void clear_boot_allocator(void);
 
 /* Xen suballocator. These functions are interrupt-safe. */
+void init_lu_reserved_pages(paddr_t ps, paddr_t pe);
 void init_xenheap_pages(paddr_t ps, paddr_t pe);
 void xenheap_max_mfn(unsigned long mfn);
 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
-- 
2.21.0

