
[Xen-devel] [PATCH for-4.5 v5 11/16] Add live migration of VMware's hyper-call RPC



The VMware hyper-call state is included in live migration and
save/restore.  Because the maximum size of the VMware guestinfo data
is large, the data is compressed (packed, with unused entries omitted)
into the save record by vmport_save_domain_ctxt and expanded again by
vmport_load_domain_ctxt.
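
For reference, each guestinfo entry is serialized into packed_data as
a small length-prefixed record, and unused entries are skipped.  A
minimal sketch of the per-entry packing (illustrative only, not part
of the patch; pack_guestinfo() is a hypothetical helper):

    #include <stdint.h>
    #include <string.h>

    /* Pack one non-jumbo guestinfo entry; jumbo entries differ only
     * in carrying a 2-byte val_len.  Returns the next free byte in
     * packed_data. */
    static char *pack_guestinfo(char *p, uint8_t key_len, const char *key,
                                uint8_t val_len, const char *val)
    {
        *p++ = (char)key_len;      /* 1-byte key length */
        *p++ = (char)val_len;      /* 1-byte value length */
        memcpy(p, key, key_len);   /* raw key bytes, no NUL terminator */
        p += key_len;
        memcpy(p, val, val_len);   /* raw value bytes, no NUL terminator */
        p += val_len;
        return p;
    }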

Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
v5:
      You ASSERTed that vg->key_len is 1 so you may not need the 'if'.
        That is an ASSERT(sizeof(...)), not just an ASSERT -- not changed.
      Use real errno, not -1.
        Fixed.
      No ASSERT in vmport_load_domain_ctxt
        Added.
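
      For clarity on the first item above, an illustrative comparison
      (not code from the patch):

        /* What the code asserts: the field is one byte wide, so it
         * can be stored into packed_data as a single char. */
        ASSERT(sizeof(vg->key_len) == 1);
        /* What the review comment read it as: an assertion on the
         * field's value, which would instead be: */
        ASSERT(vg->key_len == 1);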

 xen/arch/x86/hvm/vmware/vmport_rpc.c   | 309 ++++++++++++++++++++++++++++++++-
 xen/include/public/arch-x86/hvm/save.h |  39 ++++-
 2 files changed, 346 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/vmware/vmport_rpc.c b/xen/arch/x86/hvm/vmware/vmport_rpc.c
index ed779b4..f795928 100644
--- a/xen/arch/x86/hvm/vmware/vmport_rpc.c
+++ b/xen/arch/x86/hvm/vmware/vmport_rpc.c
@@ -39,7 +39,8 @@
 #include <asm/hvm/vmport.h>
 #include <asm/hvm/trace.h>
 
-#include "public/arch-x86/hvm/vmporttype.h"
+#include <public/hvm/save.h>
+#include <public/arch-x86/hvm/save.h>
 
 #include "backdoor_def.h"
 #include "guest_msg_def.h"
@@ -1262,6 +1263,312 @@ void vmport_rpc_deinit(struct domain *d)
     xfree(vs);
 }
 
+/* save and restore functions */
+
+static int vmport_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_vmport_context *ctxt;
+    struct hvm_save_descriptor *desc;
+    struct hvm_domain *hd = &d->arch.hvm_domain;
+    struct vmport_state *vs = hd->vmport_data;
+    char *p;
+    unsigned int guestinfo_size = 0;
+    unsigned int used_guestinfo = 0;
+    unsigned int used_guestinfo_jumbo = 0;
+    unsigned int chans_size;
+    unsigned int i;
+
+    /* Customized handling for entry since our entry is of variable length */
+    desc = (struct hvm_save_descriptor *)&h->data[h->cur];
+    if ( _hvm_init_entry(h, HVM_SAVE_CODE(VMPORT), 0,
+                         HVM_SAVE_LENGTH(VMPORT)) )
+        return 1;
+    ctxt = (struct hvm_vmport_context *)&h->data[h->cur];
+
+    spin_lock(&hd->vmport_lock);
+
+    ctxt->version = VMPORT_SAVE_VERSION;
+    ctxt->ping_time = vs->ping_time;
+    ctxt->open_cookie = vs->open_cookie;
+    ctxt->used_guestinfo = vs->used_guestinfo;
+    ctxt->used_guestinfo_jumbo = vs->used_guestinfo_jumbo;
+
+    p = ctxt->u.packed.packed_data;
+
+    for ( i = 0; i < VMPORT_MAX_CHANS; i++ )
+    {
+        unsigned int j;
+        unsigned int buf_max;
+
+        ctxt->u.packed.chan_ctl[i].chan = vs->chans[i].ctl;
+        buf_max = vs->chans[i].ctl.send_len;
+        if ( buf_max > VMPORT_MAX_SEND_BUF * 4 )
+            buf_max = VMPORT_MAX_SEND_BUF * 4;
+        memcpy(p, vs->chans[i].send_buf, buf_max);
+        p += buf_max;
+        for ( j = 0; j < VMPORT_MAX_BKTS; j++ )
+        {
+            ctxt->u.packed.chan_ctl[i].recv[j] = vs->chans[i].recv_bkt[j].ctl;
+            buf_max = vs->chans[i].recv_bkt[j].ctl.recv_len;
+            if ( buf_max > VMPORT_MAX_RECV_BUF * 4 )
+                buf_max = VMPORT_MAX_RECV_BUF * 4;
+            memcpy(p, vs->chans[i].recv_bkt[j].recv_buf, buf_max);
+            p += buf_max;
+        }
+        ctxt->u.packed.chan_ctl[i].jumbo = vs->chans[i].jumbo_recv_bkt.ctl;
+        buf_max = vs->chans[i].jumbo_recv_bkt.ctl.recv_len;
+        if ( buf_max > VMPORT_MAX_RECV_JUMBO_BUF * 4 )
+            buf_max = VMPORT_MAX_RECV_JUMBO_BUF * 4;
+        memcpy(p, vs->chans[i].jumbo_recv_bkt.recv_buf, buf_max);
+        p += buf_max;
+    }
+
+    chans_size = p - ctxt->u.packed.packed_data;
+
+    for ( i = 0; i < ctxt->used_guestinfo; i++ )
+    {
+        vmport_guestinfo_t *vg = vs->guestinfo[i];
+
+        if ( vg && vg->key_len )
+        {
+            guestinfo_size += sizeof(vg->key_len) + sizeof(vg->val_len) +
+                vg->key_len + vg->val_len;
+            used_guestinfo++;
+            ASSERT(sizeof(vg->key_len) == 1);
+            *p++ = (char) vg->key_len;
+            ASSERT(sizeof(vg->val_len) == 1);
+            *p++ = (char) vg->val_len;
+            if ( vg->key_len )
+            {
+                memcpy(p, vg->key_data, vg->key_len);
+                p += vg->key_len;
+                if ( vg->val_len )
+                {
+                    memcpy(p, vg->val_data, vg->val_len);
+                    p += vg->val_len;
+                }
+            }
+        }
+    }
+    ctxt->used_guestinfo = used_guestinfo;
+
+    for ( i = 0; i < ctxt->used_guestinfo_jumbo; i++ )
+    {
+        vmport_guestinfo_jumbo_t *vgj =
+            vs->guestinfo_jumbo[i];
+        if ( vgj && vgj->key_len )
+        {
+            guestinfo_size += sizeof(vgj->key_len) + sizeof(vgj->val_len) +
+                vgj->key_len + vgj->val_len;
+            used_guestinfo_jumbo++;
+            ASSERT(sizeof(vgj->key_len) == 1);
+            *p++ = (char) vgj->key_len;
+            /* This is so migration does not fail */
+            ASSERT(sizeof(vgj->val_len) == 2);
+            memcpy(p, &vgj->val_len, sizeof(vgj->val_len));
+            p += sizeof(vgj->val_len);
+            if ( vgj->key_len )
+            {
+                memcpy(p, vgj->key_data, vgj->key_len);
+                p += vgj->key_len;
+                if ( vgj->val_len )
+                {
+                    memcpy(p, vgj->val_data, vgj->val_len);
+                    p += vgj->val_len;
+                }
+            }
+        }
+    }
+    ctxt->used_guestinfo_jumbo = used_guestinfo_jumbo;
+
+    ctxt->used_guestsize = guestinfo_size;
+
+    spin_unlock(&hd->vmport_lock);
+
+#ifndef NDEBUG
+    gdprintk(XENLOG_WARNING, "chans_size=%u guestinfo_size=%u, used=%ld\n",
+             chans_size, guestinfo_size,
+             (long)(p - ctxt->u.packed.packed_data));
+#endif
+    ASSERT(p - ctxt->u.packed.packed_data == chans_size + guestinfo_size);
+    ASSERT(desc->length >= p - (char *)ctxt);
+    desc->length = p - (char *)ctxt; /* Fixup length to be right */
+    h->cur += desc->length; /* Do _hvm_write_entry */
+    ASSERT(guestinfo_size < desc->length);
+
+    return 0;
+}
+
+static int vmport_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_vmport_context *ctxt;
+    struct hvm_save_descriptor *desc;
+    struct hvm_domain *hd = &d->arch.hvm_domain;
+    struct vmport_state *vs = hd->vmport_data;
+    unsigned int i;
+    uint8_t key_len;
+    uint16_t val_len;
+    char *p;
+    vmport_guestinfo_t *vg;
+    vmport_guestinfo_jumbo_t *vgj;
+    unsigned int loop_cnt;
+    unsigned int guestinfo_size;
+    unsigned int used_guestinfo;
+    unsigned int used_guestinfo_jumbo;
+
+    if ( !vs )
+        return -ENOMEM;
+
+    /* Customized checking for entry since our entry is of variable length */
+    desc = (struct hvm_save_descriptor *)&h->data[h->cur];
+    if ( sizeof(*desc) > h->size - h->cur )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d restore: not enough data left to read descriptor"
+               "for type %lu\n", d->domain_id,
+               HVM_SAVE_CODE(VMPORT));
+        return -E2BIG;
+    }
+    if ( desc->length + sizeof(*desc) > h->size - h->cur )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d restore: not enough data left to read %u bytes "
+               "for type %lu\n", d->domain_id, desc->length,
+               HVM_SAVE_CODE(VMPORT));
+        return -EFBIG;
+    }
+    if ( HVM_SAVE_CODE(VMPORT) != desc->typecode ||
+         (desc->length > HVM_SAVE_LENGTH(VMPORT)) )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d restore mismatch: expected type %lu with max length %lu, 
"
+               "saw type %u length %u\n", d->domain_id, HVM_SAVE_CODE(VMPORT),
+               HVM_SAVE_LENGTH(VMPORT),
+               desc->typecode, desc->length);
+        return -ESRCH;
+    }
+    h->cur += sizeof(*desc);
+    /* Checking finished */
+
+    ctxt = (struct hvm_vmport_context *)&h->data[h->cur];
+    h->cur += desc->length;
+
+    if ( ctxt->version != VMPORT_SAVE_VERSION )
+        return -EINVAL;
+
+    spin_lock(&hd->vmport_lock);
+
+    vs->ping_time = ctxt->ping_time;
+    vs->open_cookie = ctxt->open_cookie;
+    vs->used_guestinfo = ctxt->used_guestinfo;
+    vs->used_guestinfo_jumbo = ctxt->used_guestinfo_jumbo;
+    guestinfo_size = ctxt->used_guestsize;
+    used_guestinfo = ctxt->used_guestinfo;
+    used_guestinfo_jumbo = ctxt->used_guestinfo_jumbo;
+
+    p = ctxt->u.packed.packed_data;
+
+    for ( i = 0; i < VMPORT_MAX_CHANS; i++ )
+    {
+        unsigned int j;
+
+        vs->chans[i].ctl = ctxt->u.packed.chan_ctl[i].chan;
+        memcpy(vs->chans[i].send_buf, p, vs->chans[i].ctl.send_len);
+        p += vs->chans[i].ctl.send_len;
+        for ( j = 0; j < VMPORT_MAX_BKTS; j++ )
+        {
+            vs->chans[i].recv_bkt[j].ctl = ctxt->u.packed.chan_ctl[i].recv[j];
+            memcpy(vs->chans[i].recv_bkt[j].recv_buf, p,
+                   vs->chans[i].recv_bkt[j].ctl.recv_len);
+            p += vs->chans[i].recv_bkt[j].ctl.recv_len;
+        }
+        vs->chans[i].jumbo_recv_bkt.ctl = ctxt->u.packed.chan_ctl[i].jumbo;
+        memcpy(vs->chans[i].jumbo_recv_bkt.recv_buf, p,
+               vs->chans[i].jumbo_recv_bkt.ctl.recv_len);
+        p += vs->chans[i].jumbo_recv_bkt.ctl.recv_len;
+    }
+
+    /* keep at least 10 total and 5 empty entries */
+    loop_cnt = (vs->used_guestinfo + 5) > 10 ?
+        (vs->used_guestinfo + 5) : 10;
+    for ( i = 0; i < loop_cnt; i++ )
+    {
+        if ( !vs->guestinfo[i] )
+        {
+            vs->guestinfo[i] = xzalloc(vmport_guestinfo_t);
+        }
+        if ( i < vs->used_guestinfo
+             && guestinfo_size > 0 )
+        {
+            ASSERT(sizeof(vg->key_len) == 1);
+            key_len = (uint8_t)*p++;
+            ASSERT(sizeof(vg->val_len) == 1);
+            val_len = (uint8_t)*p++;
+            guestinfo_size -= 2;
+            if ( guestinfo_size >= key_len + val_len )
+            {
+                vg = vs->guestinfo[i];
+                if ( key_len )
+                {
+                    vg->key_len = key_len;
+                    vg->val_len = val_len;
+                    memcpy(vg->key_data, p, key_len);
+                    p += key_len;
+                    memcpy(vg->val_data, p, val_len);
+                    p += val_len;
+                    guestinfo_size -= key_len + val_len;
+                }
+            }
+        }
+    }
+    vs->used_guestinfo = loop_cnt;
+
+    /* keep at least 2 total and 1 empty entries */
+    loop_cnt = (vs->used_guestinfo_jumbo + 1) > 2 ?
+        (vs->used_guestinfo_jumbo + 1) : 2;
+    for ( i = 0; i < loop_cnt; i++ )
+    {
+        if ( !vs->guestinfo_jumbo[i] )
+        {
+            vs->guestinfo_jumbo[i] = xzalloc(vmport_guestinfo_jumbo_t);
+        }
+        if ( i < vs->used_guestinfo_jumbo
+             && guestinfo_size > 0 )
+        {
+            ASSERT(sizeof(vgj->key_len) == 1);
+            key_len = (uint8_t)*p++;
+            /* This is so migration does not fail */
+            ASSERT(sizeof(vgj->val_len) == 2);
+            memcpy(&val_len, p, 2);
+            p += 2;
+            guestinfo_size -= 3;
+            if ( guestinfo_size >= key_len + val_len )
+            {
+                vgj = vs->guestinfo_jumbo[i];
+                if ( key_len )
+                {
+                    vgj->key_len = key_len;
+                    vgj->val_len = val_len;
+                    memcpy(vgj->key_data, p, key_len);
+                    p += key_len;
+                    memcpy(vgj->val_data, p, val_len);
+                    p += val_len;
+                    guestinfo_size -= key_len + val_len;
+                }
+            }
+        }
+    }
+    vs->used_guestinfo_jumbo = loop_cnt;
+
+    spin_unlock(&hd->vmport_lock);
+
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(VMPORT, vmport_save_domain_ctxt,
+                          vmport_load_domain_ctxt, 1, HVMSR_PER_DOM);
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 16d85a3..7cdcb8f 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -26,6 +26,8 @@
 #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
 #define __XEN_PUBLIC_HVM_SAVE_X86_H__
 
+#include "vmporttype.h"
+
 /* 
  * Save/restore header: general info about the save file. 
  */
@@ -610,9 +612,44 @@ struct hvm_msr {
 
 #define CPU_MSR_CODE  20
 
+/*
+ * VMware context.
+ */
+struct hvm_vmport_context {
+    uint32_t version;
+    uint32_t used_guestsize;
+    uint64_t ping_time;
+    uint32_t open_cookie;
+    uint32_t used_guestinfo;
+    uint32_t used_guestinfo_jumbo;
+    union {
+        struct {
+            vmport_channel_t chans[VMPORT_MAX_CHANS];
+            vmport_guestinfo_t guestinfo[VMPORT_MAX_NUM_KEY];
+            vmport_guestinfo_jumbo_t guestinfo_jumbo[VMPORT_MAX_NUM_JUMBO_KEY];
+        } full;
+        struct {
+            struct {
+                vmport_channel_control_t chan;
+                vmport_bucket_control_t recv[VMPORT_MAX_BKTS];
+                vmport_bucket_control_t jumbo;
+            } chan_ctl[VMPORT_MAX_CHANS];
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+            char packed_data[];
+#elif defined(__GNUC__)
+            char packed_data[0];
+#else
+            char packed_data[1 /* Variable length */];
+#endif
+        } packed;
+    } u;
+};
+
+DECLARE_HVM_SAVE_TYPE(VMPORT, 21, struct hvm_vmport_context);
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 20
+#define HVM_SAVE_CODE_MAX 21
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
-- 
1.8.4

