
Re: [Xen-devel] [PATCH 1/1] xc_domain_restore: Allow QEMU to increase memory



This patch also fixes a triple fault when guests are running under COLO mode:
(XEN) d0v1 Over-allocation for domain 1: 524545 > 524544
(XEN) memory.c:155:d0v1 Could not allocate order=0 extent: id=1 memflags=0 (181 of 235)
(XEN) d1v1 Triple fault - invoking HVM shutdown action 1
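
For reference, the check the patch adds boils down to the following (a condensed, illustrative sketch of the hunks below; ensure_headroom is not a name from the patch, and error reporting is trimmed):

#include <xenctrl.h>

#define QEMU_SPARE_PAGES 16   /* slack left for hvmloader, as in the patch */

/* Return 0 once the domain has room for nr_mfns more pages (keeping the
 * spare slack), raising maxmem first if necessary; -1 on failure. */
static int ensure_headroom(xc_interface *xch, uint32_t dom, unsigned int nr_mfns)
{
    xc_domaininfo_t info;
    unsigned long free_pages;

    if (xc_domain_getinfolist(xch, dom, 1, &info) != 1 || info.domain != dom)
        return -1;

    free_pages = info.max_pages - info.tot_pages;
    free_pages = free_pages > QEMU_SPARE_PAGES ? free_pages - QEMU_SPARE_PAGES : 0;

    if (free_pages >= nr_mfns)
        return 0;                        /* enough headroom already */

    /* xc_domain_setmaxmem() takes KiB, hence the (XC_PAGE_SHIFT - 10) shift. */
    return xc_domain_setmaxmem(xch, dom,
                               (info.max_pages + nr_mfns - free_pages)
                               << (XC_PAGE_SHIFT - 10));
}

Without such a bump, xc_domain_populate_physmap_exact() asks for more pages than max_pages allows, which is exactly the "Over-allocation" message above.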

On 04/14/2015 12:09 AM, Don Slutz wrote:
If QEMU has called xc_domain_setmaxmem to add more memory for
option ROMs, domain restore needs to also increase the memory.

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
To see the hvmloader issue:

   xl cre -p e1000x8.xfg
   xl save e1000x8 e1000x8.save
   xl restore e1000x8.save

With e1000x8.xfg:
-------------------------------------------------------------------------------
builder = "hvm"
device_model_args_hvm = [
  "-monitor",
  "pty",
  "-boot",
  "menu=on",
]
device_model_version = "qemu-xen"
disk = [
  "/dev/etherd/e500.1,,xvda",
  "/dev/etherd/e500.2,,xvdb",
  "/dev/etherd/e500.3,,xvde",
  "/local-isos/iso/centos/x86_64/CentOS-6.3-x86_64-bin-DVD1.iso,,hdc,cdrom",
  "/local-isos/iso/centos/x86_64/CentOS-6.3-x86_64-bin-DVD2.iso,,hdd,cdrom",
]
maxmem = "8192"
memory = "8192"
name = "e1000x8"
serial = "pty"
tsc_mode = "native_paravirt"
uuid = "e5000105-3d83-c962-07ae-2bc46c3644a0"
videoram = "16"
vif = [
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:a0",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:aa",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:b4",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:be",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:c8",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:d2",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:dc",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:e6",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:f0",
  "model=e1000,bridge=xenbr0,mac=00:0c:29:86:44:fa",
]
viridian = 0
xen_platform_pci = 1
mmio_hole = 2048
vcpus = 1
maxvcpus = 6
on_poweroff = "preserve"
on_reboot = "preserve"
on_watchdog = "preserve"
on_crash = "preserve"
-------------------------------------------------------------------------------
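
The rationale above assumes QEMU has already grown the domain's maxmem for option-ROM space via xc_domain_setmaxmem. Roughly, that amounts to converting the extra pages to KiB and raising the limit, e.g. (an illustrative sketch, not the actual QEMU code; grow_maxmem and extra_pages are made-up names):

#include <xenctrl.h>

/* Illustrative only: raise the domain's memory cap by extra_pages pages.
 * xc_domain_setmaxmem() expects KiB; one page is 1 << (XC_PAGE_SHIFT - 10) KiB. */
static int grow_maxmem(xc_interface *xch, uint32_t domid,
                       uint64_t cur_max_kb, unsigned long extra_pages)
{
    uint64_t new_max_kb = cur_max_kb +
        ((uint64_t)extra_pages << (XC_PAGE_SHIFT - 10));

    return xc_domain_setmaxmem(xch, domid, new_max_kb);
}

The restore path then has to tolerate max_pages being larger than the configured memory, which is what the diff below does.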


  tools/libxc/xc_domain_restore.c | 75 +++++++++++++++++++++++++++++++++++++++--
  1 file changed, 73 insertions(+), 2 deletions(-)

diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index 2ab9f46..28b4fa6 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -47,6 +47,13 @@
  #include <xen/hvm/ioreq.h>
  #include <xen/hvm/params.h>

+/* Leave some slack so that hvmloader does not complain about lack of
+ * memory at boot time ("Could not allocate order=0 extent").
+ * Once hvmloader is modified to cope with that situation without
+ * printing warning messages, QEMU_SPARE_PAGES can be removed.
+ */
+#define QEMU_SPARE_PAGES 16
+
  struct restore_ctx {
      unsigned long max_mfn; /* max mfn of the current host machine */
      unsigned long hvirt_start; /* virtual starting address of the hypervisor */
@@ -209,12 +216,44 @@ static int uncanonicalize_pagetable(
          if (!ctx->hvm && ctx->superpages)
              rc = alloc_superpage_mfns(xch, dom, ctx, nr_mfns);
          else
+        {
+            xc_domaininfo_t info;
+            unsigned long free_pages;
+
+            if ((xc_domain_getinfolist(xch, dom, 1, &info) != 1) ||
+                (info.domain != dom)) {
+                ERROR("Failed xc_domain_getinfolist for batch 
(uncanonicalize_pagetable)\n");
+                errno = ENOMEM;
+                return 0;
+            }
+            free_pages = info.max_pages - info.tot_pages;
+            if (free_pages > QEMU_SPARE_PAGES) {
+                free_pages -= QEMU_SPARE_PAGES;
+            } else {
+                free_pages = 0;
+            }
+            if (free_pages < nr_mfns)
+            {
+                DPRINTF("Adjust memory for batch (uncanonicalize_pagetable): 
free_pages=%lu nr_mfns=%d max_pages=%lu tot_pages=%lu max_mfn=%lu\n",
+                        free_pages, nr_mfns, (unsigned long)info.max_pages,
+                        (unsigned long)info.tot_pages, ctx->max_mfn);
+                if (xc_domain_setmaxmem(xch, dom,
+                                        ((info.max_pages + nr_mfns - free_pages)
+                                         << (XC_PAGE_SHIFT - 10))) < 0)
+                {
+                    ERROR("Failed xc_domain_setmaxmem for batch 
(uncanonicalize_pagetable)\n");
+                    errno = ENOMEM;
+                    return 0;
+                }
+            }
              rc = xc_domain_populate_physmap_exact(xch, dom, nr_mfns, 0, 0,
                                                    ctx->p2m_batch);
+        }

          if (rc)
          {
-            ERROR("Failed to allocate memory for batch.!\n");
+            ERROR("Failed to allocate memory for batch. rc=%d nr_mfns=%d!\n",
+                  rc, nr_mfns);
              errno = ENOMEM;
              return 0;
          }
@@ -1241,12 +1280,44 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
          if (!ctx->hvm && ctx->superpages)
              rc = alloc_superpage_mfns(xch, dom, ctx, nr_mfns);
          else
+        {
+            xc_domaininfo_t info;
+            unsigned long free_pages;
+
+            if ((xc_domain_getinfolist(xch, dom, 1, &info) != 1) ||
+                (info.domain != dom)) {
+                ERROR("Failed xc_domain_getinfolist for apply_batch\n");
+                errno = ENOMEM;
+                return -1;
+            }
+            free_pages = info.max_pages - info.tot_pages;
+            if (free_pages > QEMU_SPARE_PAGES) {
+                free_pages -= QEMU_SPARE_PAGES;
+            } else {
+                free_pages = 0;
+            }
+            if (free_pages < nr_mfns)
+            {
+                DPRINTF("Adjust memory for apply_batch: free_pages=%lu nr_mfns=%d 
max_pages=%lu tot_pages=%lu max_mfn=%lu\n",
+                        free_pages, nr_mfns, (unsigned long)info.max_pages,
+                        (unsigned long)info.tot_pages, ctx->max_mfn);
+                if (xc_domain_setmaxmem(xch, dom,
+                                        ((info.max_pages + nr_mfns - free_pages)
+                                         << (XC_PAGE_SHIFT - 10))) < 0)
+                {
+                    ERROR("Failed xc_domain_setmaxmem for apply_batch\n");
+                    errno = ENOMEM;
+                    return -1;
+                }
+            }
              rc = xc_domain_populate_physmap_exact(xch, dom, nr_mfns, 0, 0,
                                                    ctx->p2m_batch);
+        }

          if (rc)
          {
-            ERROR("Failed to allocate memory for batch.!\n");
+            ERROR("Failed to allocate memory for apply_batch. rc=%d nr_mfns=%d 
max_mfn=%lu!\n",
+                  rc, nr_mfns, ctx->max_mfn);
              errno = ENOMEM;
              return -1;
          }


--
Thanks,
Yang.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

