
[Xen-devel] [Patch] Xen/migration: Add HVM resume support when migration fails



Xen/migration: Add HVM resume support when migration fails

Currently, when HVM guest migration fails, the guest cannot be resumed on
the sender side; in that case the guest ends up running on neither the
sender nor the target machine.

This patch adds HVM guest resume support. A draft test shows the guest
runs fine again on the sender machine when HVM guest migration fails.

Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
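
For reference, the libxc entry point exercised here is xc_domain_resume();
with fast=0 it takes the slow path in tools/libxc/xc_resume.c that this
patch changes. Below is a minimal sketch of how a sender-side toolstack
might call it after a failed migration (the helper name and the error
handling are hypothetical, not part of the patch):

    #include <stdio.h>
    #include <xenctrl.h>

    /* Hypothetical helper: resume a domain on the sender after the
     * migration stream has failed.  fast=0 selects the slow path in
     * xc_resume.c, which with this patch also handles HVM guests. */
    static int resume_after_failed_migration(xc_interface *xch,
                                             uint32_t domid)
    {
        int rc = xc_domain_resume(xch, domid, 0 /* !fast: slow path */);

        if ( rc )
            fprintf(stderr, "could not resume domain %u: %d\n", domid, rc);
        return rc;
    }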

diff -r 0f47a60f9e6d tools/libxc/xc_resume.c
--- a/tools/libxc/xc_resume.c   Thu Jul 26 22:52:10 2012 +0800
+++ b/tools/libxc/xc_resume.c   Sat Jul 28 06:35:47 2012 +0800
@@ -140,87 +140,84 @@
     /*
      * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
      */
+    if ( !info.hvm )
+    {
 #if defined(__i386__) || defined(__x86_64__)
-    if ( info.hvm )
-    {
-        ERROR("Cannot resume uncooperative HVM guests");
-        return rc;
-    }
+        dinfo->guest_width = pv_guest_width(xch, domid);
+        if ( dinfo->guest_width != sizeof(long) )
+        {
+            ERROR("Cannot resume uncooperative cross-address-size guests");
+            return rc;
+        }
 
-    dinfo->guest_width = pv_guest_width(xch, domid);
-    if ( dinfo->guest_width != sizeof(long) )
-    {
-        ERROR("Cannot resume uncooperative cross-address-size guests");
-        return rc;
-    }
+        /* Map the shared info frame */
+        shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
+                                      PROT_READ, info.shared_info_frame);
+        if ( shinfo == NULL )
+        {
+            ERROR("Couldn't map shared info");
+            goto out;
+        }
 
-    /* Map the shared info frame */
-    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
-                                  PROT_READ, info.shared_info_frame);
-    if ( shinfo == NULL )
-    {
-        ERROR("Couldn't map shared info");
-        goto out;
-    }
+        dinfo->p2m_size = shinfo->arch.max_pfn;
 
-    dinfo->p2m_size = shinfo->arch.max_pfn;
+        p2m_frame_list_list =
+            xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
+                                 shinfo->arch.pfn_to_mfn_frame_list_list);
+        if ( p2m_frame_list_list == NULL )
+        {
+            ERROR("Couldn't map p2m_frame_list_list");
+            goto out;
+        }
 
-    p2m_frame_list_list =
-        xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
-                             shinfo->arch.pfn_to_mfn_frame_list_list);
-    if ( p2m_frame_list_list == NULL )
-    {
-        ERROR("Couldn't map p2m_frame_list_list");
-        goto out;
-    }
+        p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
+                                              p2m_frame_list_list,
+                                              P2M_FLL_ENTRIES);
+        if ( p2m_frame_list == NULL )
+        {
+            ERROR("Couldn't map p2m_frame_list");
+            goto out;
+        }
 
-    p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
-                                          p2m_frame_list_list,
-                                          P2M_FLL_ENTRIES);
-    if ( p2m_frame_list == NULL )
-    {
-        ERROR("Couldn't map p2m_frame_list");
-        goto out;
-    }
+        /* Map all the frames of the pfn->mfn table. For migrate to succeed,
+           the guest must not change which frames are used for this purpose.
+           (It's not clear why it would want to change them, and we'll be OK
+           from a safety POV anyhow.) */
+        p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
+                                   p2m_frame_list,
+                                   P2M_FL_ENTRIES);
+        if ( p2m == NULL )
+        {
+            ERROR("Couldn't map p2m table");
+            goto out;
+        }
 
-    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
-       the guest must not change which frames are used for this purpose.
-       (its not clear why it would want to change them, and we'll be OK
-       from a safety POV anyhow. */
-    p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
-                               p2m_frame_list,
-                               P2M_FL_ENTRIES);
-    if ( p2m == NULL )
-    {
-        ERROR("Couldn't map p2m table");
-        goto out;
-    }
+        if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
+        {
+            ERROR("Could not get vcpu context");
+            goto out;
+        }
 
-    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
-    {
-        ERROR("Could not get vcpu context");
-        goto out;
-    }
+        mfn = GET_FIELD(&ctxt, user_regs.edx);
 
-    mfn = GET_FIELD(&ctxt, user_regs.edx);
+        start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
+                                          PROT_READ | PROT_WRITE, mfn);
+        if ( start_info == NULL )
+        {
+            ERROR("Couldn't map start_info");
+            goto out;
+        }
 
-    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
-                                      PROT_READ | PROT_WRITE, mfn);
-    if ( start_info == NULL )
-    {
-        ERROR("Couldn't map start_info");
-        goto out;
-    }
+        start_info->store_mfn        = p2m[start_info->store_mfn];
+        start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];
 
-    start_info->store_mfn        = p2m[start_info->store_mfn];
-    start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];
-
-    munmap(start_info, PAGE_SIZE);
+        munmap(start_info, PAGE_SIZE);
 #endif /* defined(__i386__) || defined(__x86_64__) */
 
-    /* Reset all secondary CPU states. */
-    for ( i = 1; i <= info.max_vcpu_id; i++ )
-        xc_vcpu_setcontext(xch, domid, i, NULL);
+        /* Reset all secondary CPU states. */
+        for ( i = 1; i <= info.max_vcpu_id; i++ )
+            xc_vcpu_setcontext(xch, domid, i, NULL);
+    }
 
     /* Ready to resume domain execution now. */
     domctl.cmd = XEN_DOMCTL_resumedomain;
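
For both PV and HVM guests the function then issues the resume domctl; the
HVM check itself comes from xc_domain_getinfo(). A short sketch of that
lookup (the helper is illustrative, not from the patch; the .hvm field is
what drives the new !info.hvm guard above):

    #include <xenctrl.h>

    /* Illustrative helper: query whether a domain is HVM, as the resume
     * path does before deciding to skip the PV-only MFN rewriting. */
    static int domain_is_hvm(xc_interface *xch, uint32_t domid)
    {
        xc_dominfo_t info;

        if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
             info.domid != domid )
            return -1;  /* domain lookup failed */

        return info.hvm ? 1 : 0;
    }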

Attachment: hvm_resume_when_migrate_fail.patch
Description: hvm_resume_when_migrate_fail.patch
