
[Xen-devel] [RFC PATCH 3/3] xen/hvm-save: Adjust calling of multi-instance save handlers.



Alter the calling logic so that hvm_save and hvm_save_one are responsible
for calling the save handlers with the appropriate instance IDs, and for
ensuring the validity of the instance parameter.
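
Concretely, the dispatch in hvm_save() ends up roughly as follows (a
simplified sketch of the save.c hunk below, with the error reporting
dropped):

    /* Per-vcpu records: one call per online vcpu, using the vcpu_id as
     * the instance.  Other multi-instance records: one call per instance,
     * up to the handler's registered count. */
    if ( is_per_vcpu_handler(hvm_sr_handlers[i]) )
    {
        struct vcpu *v;

        for_each_vcpu ( d, v )
            if ( handler(d, v->vcpu_id, h) < 0 )
                return -EFAULT;
    }
    else
    {
        int j;

        for ( j = 0; j < hvm_sr_handlers[i].num; ++j )
            if ( handler(d, j, h) < 0 )
                return -EFAULT;
    }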

This involves fairly substantial changes to each of the save handler bodies
for records expecting to use multiple instances (all the per-vcpu ones, and
the PIC record).

Where sensible, the refactoring also changes the handlers to write directly
into the context buffer, rather than filling a context structure on the
stack and then pointlessly copying it.
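
The resulting shape of such a handler is, schematically (a sketch based on
the vmce hunk below; only one field assignment shown):

    rc = _hvm_init_entry(h, HVM_SAVE_CODE(VMCE_VCPU),
                         inst, HVM_SAVE_LENGTH(VMCE_VCPU));
    if ( rc < 0 )
        return rc;

    /* Point at the space reserved after the descriptor and fill the
     * record in place, rather than memcpy()ing a stack copy. */
    rec = (struct hvm_vmce_vcpu *)&h->data[h->cur];
    h->cur += HVM_SAVE_LENGTH(VMCE_VCPU);

    rec->caps = v->arch.vmce.mcg_cap;
    /* ... remaining fields ... */

    return rc + HVM_SAVE_LENGTH(VMCE_VCPU);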

The only observable change should be that hvm_save_one now extracts the
correct data in all cases, rather than being wrong for PIC records,
variable-length records, and per-vcpu records when one or more vcpus are
offline.  There should be no difference whatsoever in the output of
hvm_save.
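
In particular, hvm_save_one() now invokes the handler for exactly the
requested instance and copies back just that one record, minus its
descriptor (again simplified from the save.c hunk below):

    rv = hvm_sr_handlers[typecode].save(d, instance, &ctxt);
    if ( rv < 0 )
        rv = -EFAULT;
    else if ( rv <= sizeof (struct hvm_save_descriptor) )
        rv = -ENODATA;
    else
        rv = copy_to_guest(handle,
                           ctxt.data + sizeof (struct hvm_save_descriptor),
                           rv - sizeof (struct hvm_save_descriptor))
            ? -EFAULT : 0;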

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Discovered-by: Don Slutz <dslutz@xxxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Don Slutz <dslutz@xxxxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c |   27 ++---
 xen/arch/x86/hvm/hvm.c         |  225 +++++++++++++++++++---------------------
 xen/arch/x86/hvm/mtrr.c        |   55 +++++-----
 xen/arch/x86/hvm/viridian.c    |   15 +--
 xen/arch/x86/hvm/vlapic.c      |   26 +----
 xen/arch/x86/hvm/vpic.c        |   13 +--
 xen/common/hvm/save.c          |   86 +++++++++------
 7 files changed, 213 insertions(+), 234 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 8ef40c3..18ef18a 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -299,22 +299,23 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
 static int vmce_save_vcpu_ctxt(struct domain *d, uint16_t inst,
                                hvm_domain_context_t *h)
 {
-    struct vcpu *v;
-    int err = 0;
+    struct vcpu *v = d->vcpu[inst];
+    int rc = 0;
+    struct hvm_vmce_vcpu *rec;
 
-    for_each_vcpu( d, v ) {
-        struct hvm_vmce_vcpu ctxt = {
-            .caps = v->arch.vmce.mcg_cap,
-            .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
-            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2
-        };
+    rc = _hvm_init_entry(h, HVM_SAVE_CODE(VMCE_VCPU),
+                         inst, HVM_SAVE_LENGTH(VMCE_VCPU));
+    if ( rc < 0 )
+        return rc;
 
-        err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
-        if ( err < 0 )
-            break;
-    }
+    rec = (struct hvm_vmce_vcpu *)&h->data[h->cur];
+    h->cur += HVM_SAVE_LENGTH(VMCE_VCPU);
+
+    rec->caps = v->arch.vmce.mcg_cap;
+    rec->mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+    rec->mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
 
-    return err;
+    return rc + HVM_SAVE_LENGTH(VMCE_VCPU);
 }
 
 static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 925e792..bf1901a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -665,19 +665,8 @@ void hvm_domain_destroy(struct domain *d)
 static int hvm_save_tsc_adjust(struct domain *d, uint16_t inst,
                                hvm_domain_context_t *h)
 {
-    struct vcpu *v;
-    struct hvm_tsc_adjust ctxt;
-    int err = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
-        err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
-        if ( err < 0 )
-            break;
-    }
-
-    return err;
+    return hvm_save_entry(TSC_ADJUST, inst, h,
+                          &d->vcpu[inst]->arch.hvm_vcpu.msr_tsc_adjust);
 }
 
 static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
@@ -706,112 +695,116 @@ HVM_REGISTER_SAVE_RESTORE_PER_VCPU(TSC_ADJUST, hvm_save_tsc_adjust,
 static int hvm_save_cpu_ctxt(struct domain *d, uint16_t inst,
                              hvm_domain_context_t *h)
 {
-    struct vcpu *v;
-    struct hvm_hw_cpu ctxt;
+    struct vcpu *v = d->vcpu[inst];
+    struct hvm_hw_cpu *rec;
     struct segment_register seg;
+    int rc = 0;
 
-    for_each_vcpu ( d, v )
-    {
-        /* We don't need to save state for a vcpu that is down; the restore 
-         * code will leave it down if there is nothing saved. */
-        if ( test_bit(_VPF_down, &v->pause_flags) ) 
-            continue;
+    /* We don't need to save state for a vcpu that is down; the restore
+     * code will leave it down if there is nothing saved. */
+    if ( test_bit(_VPF_down, &v->pause_flags) )
+        return rc;
 
-        /* Architecture-specific vmcs/vmcb bits */
-        hvm_funcs.save_cpu_ctxt(v, &ctxt);
-
-        ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
-
-        hvm_get_segment_register(v, x86_seg_idtr, &seg);
-        ctxt.idtr_limit = seg.limit;
-        ctxt.idtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
-        ctxt.gdtr_limit = seg.limit;
-        ctxt.gdtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_cs, &seg);
-        ctxt.cs_sel = seg.sel;
-        ctxt.cs_limit = seg.limit;
-        ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr.bytes;
-
-        hvm_get_segment_register(v, x86_seg_ds, &seg);
-        ctxt.ds_sel = seg.sel;
-        ctxt.ds_limit = seg.limit;
-        ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr.bytes;
-
-        hvm_get_segment_register(v, x86_seg_es, &seg);
-        ctxt.es_sel = seg.sel;
-        ctxt.es_limit = seg.limit;
-        ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr.bytes;
-
-        hvm_get_segment_register(v, x86_seg_ss, &seg);
-        ctxt.ss_sel = seg.sel;
-        ctxt.ss_limit = seg.limit;
-        ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr.bytes;
-
-        hvm_get_segment_register(v, x86_seg_fs, &seg);
-        ctxt.fs_sel = seg.sel;
-        ctxt.fs_limit = seg.limit;
-        ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr.bytes;
-
-        hvm_get_segment_register(v, x86_seg_gs, &seg);
-        ctxt.gs_sel = seg.sel;
-        ctxt.gs_limit = seg.limit;
-        ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr.bytes;
-
-        hvm_get_segment_register(v, x86_seg_tr, &seg);
-        ctxt.tr_sel = seg.sel;
-        ctxt.tr_limit = seg.limit;
-        ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr.bytes;
-
-        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
-        ctxt.ldtr_sel = seg.sel;
-        ctxt.ldtr_limit = seg.limit;
-        ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr.bytes;
-
-        if ( v->fpu_initialised )
-            memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
-        else 
-            memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
-
-        ctxt.rax = v->arch.user_regs.eax;
-        ctxt.rbx = v->arch.user_regs.ebx;
-        ctxt.rcx = v->arch.user_regs.ecx;
-        ctxt.rdx = v->arch.user_regs.edx;
-        ctxt.rbp = v->arch.user_regs.ebp;
-        ctxt.rsi = v->arch.user_regs.esi;
-        ctxt.rdi = v->arch.user_regs.edi;
-        ctxt.rsp = v->arch.user_regs.esp;
-        ctxt.rip = v->arch.user_regs.eip;
-        ctxt.rflags = v->arch.user_regs.eflags;
-        ctxt.r8  = v->arch.user_regs.r8;
-        ctxt.r9  = v->arch.user_regs.r9;
-        ctxt.r10 = v->arch.user_regs.r10;
-        ctxt.r11 = v->arch.user_regs.r11;
-        ctxt.r12 = v->arch.user_regs.r12;
-        ctxt.r13 = v->arch.user_regs.r13;
-        ctxt.r14 = v->arch.user_regs.r14;
-        ctxt.r15 = v->arch.user_regs.r15;
-        ctxt.dr0 = v->arch.debugreg[0];
-        ctxt.dr1 = v->arch.debugreg[1];
-        ctxt.dr2 = v->arch.debugreg[2];
-        ctxt.dr3 = v->arch.debugreg[3];
-        ctxt.dr6 = v->arch.debugreg[6];
-        ctxt.dr7 = v->arch.debugreg[7];
-
-        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) < 0 )
-            return -ENOSPC;
-    }
-    return 0;
+    rc = _hvm_init_entry(h, HVM_SAVE_CODE(CPU),
+                         inst, HVM_SAVE_LENGTH(CPU));
+    if ( rc < 0 )
+        return rc;
+
+    rec = (struct hvm_hw_cpu *)&h->data[h->cur];
+    h->cur += HVM_SAVE_LENGTH(CPU);
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(v, rec);
+
+    rec->msr_tsc_aux = hvm_msr_tsc_aux(v);
+
+    hvm_get_segment_register(v, x86_seg_idtr, &seg);
+    rec->idtr_limit = seg.limit;
+    rec->idtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+    rec->gdtr_limit = seg.limit;
+    rec->gdtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_cs, &seg);
+    rec->cs_sel = seg.sel;
+    rec->cs_limit = seg.limit;
+    rec->cs_base = seg.base;
+    rec->cs_arbytes = seg.attr.bytes;
+
+    hvm_get_segment_register(v, x86_seg_ds, &seg);
+    rec->ds_sel = seg.sel;
+    rec->ds_limit = seg.limit;
+    rec->ds_base = seg.base;
+    rec->ds_arbytes = seg.attr.bytes;
+
+    hvm_get_segment_register(v, x86_seg_es, &seg);
+    rec->es_sel = seg.sel;
+    rec->es_limit = seg.limit;
+    rec->es_base = seg.base;
+    rec->es_arbytes = seg.attr.bytes;
+
+    hvm_get_segment_register(v, x86_seg_ss, &seg);
+    rec->ss_sel = seg.sel;
+    rec->ss_limit = seg.limit;
+    rec->ss_base = seg.base;
+    rec->ss_arbytes = seg.attr.bytes;
+
+    hvm_get_segment_register(v, x86_seg_fs, &seg);
+    rec->fs_sel = seg.sel;
+    rec->fs_limit = seg.limit;
+    rec->fs_base = seg.base;
+    rec->fs_arbytes = seg.attr.bytes;
+
+    hvm_get_segment_register(v, x86_seg_gs, &seg);
+    rec->gs_sel = seg.sel;
+    rec->gs_limit = seg.limit;
+    rec->gs_base = seg.base;
+    rec->gs_arbytes = seg.attr.bytes;
+
+    hvm_get_segment_register(v, x86_seg_tr, &seg);
+    rec->tr_sel = seg.sel;
+    rec->tr_limit = seg.limit;
+    rec->tr_base = seg.base;
+    rec->tr_arbytes = seg.attr.bytes;
+
+    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+    rec->ldtr_sel = seg.sel;
+    rec->ldtr_limit = seg.limit;
+    rec->ldtr_base = seg.base;
+    rec->ldtr_arbytes = seg.attr.bytes;
+
+    if ( v->fpu_initialised )
+        memcpy(rec->fpu_regs, v->arch.fpu_ctxt, sizeof(rec->fpu_regs));
+    else
+        memset(rec->fpu_regs, 0, sizeof(rec->fpu_regs));
+
+    rec->rax = v->arch.user_regs.eax;
+    rec->rbx = v->arch.user_regs.ebx;
+    rec->rcx = v->arch.user_regs.ecx;
+    rec->rdx = v->arch.user_regs.edx;
+    rec->rbp = v->arch.user_regs.ebp;
+    rec->rsi = v->arch.user_regs.esi;
+    rec->rdi = v->arch.user_regs.edi;
+    rec->rsp = v->arch.user_regs.esp;
+    rec->rip = v->arch.user_regs.eip;
+    rec->rflags = v->arch.user_regs.eflags;
+    rec->r8  = v->arch.user_regs.r8;
+    rec->r9  = v->arch.user_regs.r9;
+    rec->r10 = v->arch.user_regs.r10;
+    rec->r11 = v->arch.user_regs.r11;
+    rec->r12 = v->arch.user_regs.r12;
+    rec->r13 = v->arch.user_regs.r13;
+    rec->r14 = v->arch.user_regs.r14;
+    rec->r15 = v->arch.user_regs.r15;
+    rec->dr0 = v->arch.debugreg[0];
+    rec->dr1 = v->arch.debugreg[1];
+    rec->dr2 = v->arch.debugreg[2];
+    rec->dr3 = v->arch.debugreg[3];
+    rec->dr6 = v->arch.debugreg[6];
+    rec->dr7 = v->arch.debugreg[7];
+
+    return rc + HVM_SAVE_LENGTH(CPU);
 }
 
 static bool_t hvm_efer_valid(struct domain *d,
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 40f58ed..389138d 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -603,39 +603,40 @@ int32_t hvm_set_mem_pinned_cacheattr(
 static int hvm_save_mtrr_msr(struct domain *d, uint16_t inst,
                              hvm_domain_context_t *h)
 {
-    int i;
-    struct vcpu *v;
-    struct hvm_hw_mtrr hw_mtrr;
-    struct mtrr_state *mtrr_state;
-    /* save mtrr&pat */
-    for_each_vcpu(d, v)
-    {
-        mtrr_state = &v->arch.hvm_vcpu.mtrr;
+    int i, rc;
+    struct vcpu *v = d->vcpu[inst];
+    struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
+    struct hvm_hw_mtrr *rec;
 
-        hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
+    rc = _hvm_init_entry(h, HVM_SAVE_CODE(MTRR),
+                         inst, HVM_SAVE_LENGTH(MTRR));
+    if ( rc < 0 )
+        return rc;
 
-        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
-                                | (mtrr_state->enabled << 10);
-        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
+    rec = (struct hvm_hw_mtrr *)&h->data[h->cur];
+    h->cur += HVM_SAVE_LENGTH(MTRR);
 
-        for ( i = 0; i < MTRR_VCNT; i++ )
-        {
-            /* save physbase */
-            hw_mtrr.msr_mtrr_var[i*2] =
-                ((uint64_t*)mtrr_state->var_ranges)[i*2];
-            /* save physmask */
-            hw_mtrr.msr_mtrr_var[i*2+1] =
-                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
-        }
+    hvm_get_guest_pat(v, &rec->msr_pat_cr);
 
-        for ( i = 0; i < NUM_FIXED_MSR; i++ )
-            hw_mtrr.msr_mtrr_fixed[i] =
-                ((uint64_t*)mtrr_state->fixed_ranges)[i];
+    rec->msr_mtrr_def_type = mtrr_state->def_type
+        | (mtrr_state->enabled << 10);
+    rec->msr_mtrr_cap = mtrr_state->mtrr_cap;
 
-        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) < 0 )
-            return -ENOSPC;
+    for ( i = 0; i < MTRR_VCNT; i++ )
+    {
+        /* save physbase */
+        rec->msr_mtrr_var[i*2] =
+            ((uint64_t*)mtrr_state->var_ranges)[i*2];
+        /* save physmask */
+        rec->msr_mtrr_var[i*2+1] =
+            ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
     }
-    return 0;
+
+    for ( i = 0; i < NUM_FIXED_MSR; i++ )
+        rec->msr_mtrr_fixed[i] =
+            ((uint64_t*)mtrr_state->fixed_ranges)[i];
+
+    return rc + HVM_SAVE_LENGTH(MTRR);
 }
 
 static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 4f9186d..dfb01d5 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -456,21 +456,12 @@ HVM_REGISTER_SAVE_RESTORE_PER_DOM(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
 static int viridian_save_vcpu_ctxt(struct domain *d, uint16_t inst,
                                    hvm_domain_context_t *h)
 {
-    struct vcpu *v;
-
     if ( !is_viridian_domain(d) )
         return 0;
 
-    for_each_vcpu( d, v ) {
-        struct hvm_viridian_vcpu_context ctxt;
-
-        ctxt.apic_assist = v->arch.hvm_vcpu.viridian.apic_assist.raw;
-
-        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) < 0 )
-            return -ENOSPC;
-    }
-
-    return 0;
+    return hvm_save_entry(
+        VIRIDIAN_VCPU, inst, h,
+        &d->vcpu[inst]->arch.hvm_vcpu.viridian.apic_assist.raw);
 }
 
 static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 81dfd3f..bb1438a 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1140,35 +1140,17 @@ static void lapic_rearm(struct vlapic *s)
 static int lapic_save_hidden(struct domain *d, uint16_t inst,
                              hvm_domain_context_t *h)
 {
-    struct vcpu *v;
-    struct vlapic *s;
-    int rc = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) < 0 )
-            break;
-    }
+    struct vlapic *s = vcpu_vlapic(d->vcpu[inst]);
 
-    return rc;
+    return hvm_save_entry(LAPIC, inst, h, &s->hw);
 }
 
 static int lapic_save_regs(struct domain *d, uint16_t inst,
                            hvm_domain_context_t *h)
 {
-    struct vcpu *v;
-    struct vlapic *s;
-    int rc = 0;
-
-    for_each_vcpu ( d, v )
-    {
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) < 0 )
-            break;
-    }
+    struct vlapic *s = vcpu_vlapic(d->vcpu[inst]);
 
-    return rc;
+    return hvm_save_entry(LAPIC_REGS, inst, h, &s->regs);
 }
 
 static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 7e4b64b..416e970 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -367,18 +367,9 @@ static int vpic_intercept_elcr_io(
 static int vpic_save(struct domain *d, uint16_t inst,
                      hvm_domain_context_t *h)
 {
-    struct hvm_hw_vpic *s;
-    int i;
+    struct hvm_hw_vpic *s = &d->arch.hvm_domain.vpic[inst];
 
-    /* Save the state of both PICs */
-    for ( i = 0; i < 2 ; i++ )
-    {
-        s = &d->arch.hvm_domain.vpic[i];
-        if ( hvm_save_entry(PIC, i, h, s) < 0 )
-            return -ENOSPC;
-    }
-
-    return 0;
+    return hvm_save_entry(PIC, inst, h, s);
 }
 
 static int vpic_load(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/common/hvm/save.c b/xen/common/hvm/save.c
index e9723e3..8eb9672 100644
--- a/xen/common/hvm/save.c
+++ b/xen/common/hvm/save.c
@@ -84,44 +84,42 @@ size_t hvm_save_size(struct domain *d)
 int hvm_save_one(struct domain *d, uint16_t typecode, uint16_t instance, 
                  XEN_GUEST_HANDLE_64(uint8) handle)
 {
-    int rv = 0;
-    size_t sz = 0;
-    struct vcpu *v;
-    hvm_domain_context_t ctxt = { 0, };
+    int rv;
+    hvm_domain_context_t ctxt = { 0 };
 
-    if ( d->is_dying 
-         || typecode > HVM_SAVE_CODE_MAX 
-         || hvm_sr_handlers[typecode].size < sizeof(struct hvm_save_descriptor)
-         || hvm_sr_handlers[typecode].save == NULL )
+    if ( d->is_dying || typecode > HVM_SAVE_CODE_MAX )
         return -EINVAL;
 
-    if ( is_per_vcpu_handler(hvm_sr_handlers[typecode]) )
-        for_each_vcpu(d, v)
-            sz += hvm_sr_handlers[typecode].size;
-    else 
-        sz = hvm_sr_handlers[typecode].size;
-    
-    if ( (instance + 1) * hvm_sr_handlers[typecode].size > sz )
+    if ( hvm_sr_handlers[typecode].size < sizeof(struct hvm_save_descriptor) ||
+         hvm_sr_handlers[typecode].save == NULL )
         return -EINVAL;
 
-    ctxt.size = sz;
-    ctxt.data = xmalloc_bytes(sz);
+    if ( (is_per_vcpu_handler(hvm_sr_handlers[typecode]) &&
+          (instance >= d->max_vcpus || d->vcpu[instance] == NULL)) ||
+         (instance >= hvm_sr_handlers[typecode].num) )
+        return -EBADSLT;
+
+    ctxt.size = hvm_sr_handlers[typecode].size;
+    ctxt.data = xmalloc_bytes(hvm_sr_handlers[typecode].size);
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( hvm_sr_handlers[typecode].save(d, 0, &ctxt) < 0 )
+    rv = hvm_sr_handlers[typecode].save(d, instance, &ctxt);
+
+    if ( rv < 0 )
     {
-        printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16"\n",
-               d->domain_id, typecode);
+        printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16
+               ", instance %"PRIu16"\n",
+               d->domain_id, typecode, instance);
         rv = -EFAULT;
     }
-    else if ( copy_to_guest(handle,
-                            ctxt.data 
-                            + (instance * hvm_sr_handlers[typecode].size) 
-                            + sizeof (struct hvm_save_descriptor), 
-                            hvm_sr_handlers[typecode].size
-                            - sizeof (struct hvm_save_descriptor)) )
-        rv = -EFAULT;
+    else if ( rv <= sizeof (struct hvm_save_descriptor) )
+        rv = -ENODATA;
+    else
+        rv = copy_to_guest(handle,
+                           ctxt.data + sizeof (struct hvm_save_descriptor),
+                           rv - sizeof (struct hvm_save_descriptor))
+            ? -EFAULT : 0;
 
     xfree(ctxt.data);
     return rv;
@@ -165,13 +163,35 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, 0, h) < 0 )
+
+            if ( is_per_vcpu_handler(hvm_sr_handlers[i]) )
+            {
+                struct vcpu *v;
+
+                for_each_vcpu( d, v )
+                    if ( handler(d, v->vcpu_id, h) < 0 )
+                    {
+                        printk(XENLOG_G_ERR
+                               "HVM%d save: failed to save type %"PRIu16
+                               ", instance %"PRIu16"\n",
+                               d->domain_id, i, v->vcpu_id);
+                        return -EFAULT;
+                    }
+            }
+            else
             {
-                printk(XENLOG_G_ERR
-                       "HVM%d save: failed to save type %"PRIu16"\n",
-                       d->domain_id, i);
-                return -EFAULT;
-            } 
+                int j;
+
+                for ( j = 0; j < hvm_sr_handlers[i].num; ++j )
+                    if ( handler(d, j, h) < 0 )
+                    {
+                        printk(XENLOG_G_ERR
+                               "HVM%d save: failed to save type %"PRIu16
+                               ", instance %"PRIu16"\n",
+                               d->domain_id, i, j);
+                        return -EFAULT;
+                    }
+            }
         }
     }
 
-- 
1.7.10.4

