[Xen-devel] [PATCH v7 15/15] x86/domctl: Don't pause the whole domain if only getting vcpu state
This patch moves the for loop to the caller, so we can now save info
for a single vcpu instance.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c |   3 +-
 xen/arch/x86/hvm/hpet.c        |   3 +-
 xen/arch/x86/hvm/hvm.c         |  12 ++---
 xen/arch/x86/hvm/i8254.c       |   3 +-
 xen/arch/x86/hvm/irq.c         |   9 ++--
 xen/arch/x86/hvm/mtrr.c        |   3 +-
 xen/arch/x86/hvm/pmtimer.c     |   3 +-
 xen/arch/x86/hvm/rtc.c         |   3 +-
 xen/arch/x86/hvm/save.c        | 118 ++++++++++++++++++++++++++++++-----------
 xen/arch/x86/hvm/vioapic.c     |   3 +-
 xen/arch/x86/hvm/viridian.c    |   7 +--
 xen/arch/x86/hvm/vlapic.c      |  34 ++++--------
 xen/arch/x86/hvm/vpic.c        |   3 +-
 xen/include/asm-x86/hvm/save.h |   2 +-
 14 files changed, 125 insertions(+), 81 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index ead1f73..88541b7 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,10 +349,9 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
-static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_vmce_vcpu ctxt;
-    struct vcpu *v = NULL;
 
     ctxt.caps = v->arch.vmce.mcg_cap;
     ctxt.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 2837709..3ed6547 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -516,8 +516,9 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
 };
 
 
-static int hpet_save(struct domain *d, hvm_domain_context_t *h)
+static int hpet_save(struct vcpu *vcpu, hvm_domain_context_t *h)
 {
+    struct domain *d = vcpu->domain;
     HPETState *hp = domain_vhpet(d);
     struct vcpu *v = pt_global_vcpu_target(d);
     int rc;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index a88efeb..70d90cc 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -740,9 +740,8 @@ void hvm_domain_destroy(struct domain *d)
     destroy_vpci_mmcfg(d);
 }
 
-static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v = NULL;
     struct hvm_tsc_adjust ctxt;
 
     ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
@@ -772,11 +771,10 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
-static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct segment_register seg;
     struct hvm_hw_cpu ctxt = {};
-    struct vcpu *v = NULL;
 
     /* We don't need to save state for a vcpu that is down; the restore
      * code will leave it down if there is nothing saved. */
@@ -1160,9 +1158,8 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
-static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_xsave_states(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v = NULL;
     struct hvm_hw_cpu_xsave *ctxt;
     unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
@@ -1322,9 +1319,8 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v = NULL;
     struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
     struct hvm_msr *ctxt;
     unsigned int i;
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 992f08d..e0d2255 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -390,8 +390,9 @@ void pit_stop_channel0_irq(PITState *pit)
     spin_unlock(&pit->lock);
 }
 
-static int pit_save(struct domain *d, hvm_domain_context_t *h)
+static int pit_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     PITState *pit = domain_vpit(d);
     int rc;
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index c85d004..72acb73 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -630,8 +630,9 @@ static int __init dump_irq_info_key_init(void)
 }
 __initcall(dump_irq_info_key_init);
 
-static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int asserted, pdev, pintx;
     int rc;
@@ -662,16 +663,18 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     return rc;
 }
 
-static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_isa(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
     /* Save ISA IRQ lines */
     return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
 }
 
-static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_link(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
     /* Save PCI-ISA link state */
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 4c1e850..ae73c78 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -666,9 +666,8 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     return 0;
 }
 
-static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v = NULL;
     struct hvm_hw_mtrr hw_mtrr;
     struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
     int i;
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 435647f..d8dcbc2 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -249,8 +249,9 @@ static int handle_pmt_io(
     return X86EMUL_OKAY;
 }
 
-static int acpi_save(struct domain *d, hvm_domain_context_t *h)
+static int acpi_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
     PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
     uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index cb75b99..58b70fc 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -737,8 +737,9 @@ void rtc_migrate_timers(struct vcpu *v)
 }
 
 /* Save RTC hardware state */
-static int rtc_save(struct domain *d, hvm_domain_context_t *h)
+static int rtc_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     RTCState *s = domain_vrtc(d);
     int rc;
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 8984a23..69f0fff 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -135,9 +135,12 @@ size_t hvm_save_size(struct domain *d)
 int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
                  XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz)
 {
-    int rv;
+    int rv = 0;
     hvm_domain_context_t ctxt = { };
     const struct hvm_save_descriptor *desc;
+    bool is_single_instance = false;
+    uint32_t off = 0;
+    struct vcpu *v;
 
     if ( d->is_dying ||
          typecode > HVM_SAVE_CODE_MAX ||
@@ -146,42 +149,85 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
         return -EINVAL;
 
     ctxt.size = hvm_sr_handlers[typecode].size;
-    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance == d->max_vcpus )
         ctxt.size *= d->max_vcpus;
     ctxt.data = xmalloc_bytes(ctxt.size);
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
-        printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
-               d->domain_id, typecode, rv);
-    else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
-    {
-        uint32_t off;
+    if( is_single_instance )
+        vcpu_pause(d->vcpu[instance]);
+    else
+        domain_pause(d);
 
-        for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
+    if( is_single_instance )
+    {
+        if ( (rv = hvm_sr_handlers[typecode].save(d->vcpu[instance],
+                                                  &ctxt)) != 0 )
         {
-            desc = (void *)(ctxt.data + off);
+            printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                   d->domain_id, typecode, rv);
+            vcpu_unpause(d->vcpu[instance]);
+        }
+        else if ( ctxt.cur >= sizeof(*desc) )
+        {
+            rv = -ENOENT;
+            desc = (void *)(ctxt.data);
             /* Move past header */
-            off += sizeof(*desc);
+            off = sizeof(*desc);
             if ( ctxt.cur < desc->length ||
-                 off > ctxt.cur - desc->length )
-                break;
-            if ( instance == desc->instance )
-            {
-                rv = 0;
-                if ( guest_handle_is_null(handle) )
-                    *bufsz = desc->length;
-                else if ( *bufsz < desc->length )
-                    rv = -ENOBUFS;
-                else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
-                    rv = -EFAULT;
-                else
-                    *bufsz = desc->length;
-                break;
-            }
+                 off > ctxt.cur - desc->length )
+                rv = -EFAULT;
+            rv = 0;
+            if ( guest_handle_is_null(handle) )
+                *bufsz = desc->length;
+            else if ( *bufsz < desc->length )
+                rv = -ENOBUFS;
+            else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                rv = -EFAULT;
+            else
+                *bufsz = desc->length;
+            vcpu_unpause(d->vcpu[instance]);
         }
     }
+    else
+    {
+        for_each_vcpu ( d, v )
+        {
+            if ( (rv = hvm_sr_handlers[typecode].save(d->vcpu[instance],
+                                                      &ctxt)) != 0 )
+            {
+                printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                       d->domain_id, typecode, rv);
+            }
+            else if ( ctxt.cur >= sizeof(*desc) )
+            {
+                rv = -ENOENT;
+                desc = (void *)(ctxt.data + off);
+                /* Move past header */
+                off += sizeof(*desc);
+                if ( ctxt.cur < desc->length ||
+                     off > ctxt.cur - desc->length )
+                    break;
+                if ( instance == desc->instance )
+                {
+                    rv = 0;
+                    if ( guest_handle_is_null(handle) )
+                        *bufsz = desc->length;
+                    else if ( *bufsz < desc->length )
+                        rv = -ENOBUFS;
+                    else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                        rv = -EFAULT;
+                    else
+                        *bufsz = desc->length;
+                    break;
+                }
+                off += desc->length;
+            }
+        }
+        domain_unpause(d);
+    }
 
     xfree(ctxt.data);
     return rv;
@@ -193,7 +239,8 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     struct hvm_save_header hdr;
     struct hvm_save_end end;
     hvm_save_handler handler;
-    unsigned int i;
+    unsigned int i, rc;
+    struct vcpu *v = NULL;
 
     if ( d->is_dying )
         return -EINVAL;
@@ -225,12 +272,19 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, h) != 0 )
+            for_each_vcpu ( d, v )
             {
-                printk(XENLOG_G_ERR
-                       "HVM%d save: failed to save type %"PRIu16"\n",
-                       d->domain_id, i);
-                return -EFAULT;
+                rc = handler(v, h);
+                if( rc == CONTINUE )
+                    continue;
+
+                if( rc != 0 )
+                {
+                    printk(XENLOG_G_ERR
+                           "HVM%d save: failed to save type %"PRIu16"\n",
+                           d->domain_id, i);
+                    return -EFAULT;
+                }
             }
         }
     }
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 97b419f..86d02cf 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -569,8 +569,9 @@ int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
     return vioapic->redirtbl[pin].fields.trig_mode;
 }
 
-static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
+static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_vioapic *s;
 
     if ( !has_vioapic(d) )
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 86a43ee..7ec7a2b 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -990,8 +990,9 @@ out:
     return HVM_HCALL_completed;
 }
 
-static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_domain_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_viridian_domain_context ctxt = {
         .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
         .hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw,
@@ -1027,9 +1028,9 @@ HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                           viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
 
-static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v = NULL;
+    struct domain *d = v->domain;
     struct hvm_viridian_vcpu_context ctxt = {
         .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
         .vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending,
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 1b9f00a..6337cdb 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1435,45 +1435,31 @@ static void lapic_rearm(struct vlapic *s)
     s->timer_last_update = s->pt.last_plt_gtime;
 }
 
-static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_hidden(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct domain *d = v->domain;
     struct vlapic *s;
-    int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
-            break;
-    }
-
-    return rc;
+    s = vcpu_vlapic(v);
+    return hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw);
 }
 
-static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_regs(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct domain *d = v->domain;
     struct vlapic *s;
-    int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        if ( hvm_funcs.sync_pir_to_irr )
-            hvm_funcs.sync_pir_to_irr(v);
-
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
-            break;
-    }
+    if ( hvm_funcs.sync_pir_to_irr )
+        hvm_funcs.sync_pir_to_irr(v);
 
-    return rc;
+    s = vcpu_vlapic(v);
+    return hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index e160bbd..bad5066 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -371,8 +371,9 @@ static int vpic_intercept_elcr_io(
     return X86EMUL_OKAY;
 }
 
-static int vpic_save(struct domain *d, hvm_domain_context_t *h)
+static int vpic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_hw_vpic *s;
     int i;
diff --git a/xen/include/asm-x86/hvm/save.h b/xen/include/asm-x86/hvm/save.h
index f889e8f..fe642ab 100644
--- a/xen/include/asm-x86/hvm/save.h
+++ b/xen/include/asm-x86/hvm/save.h
@@ -95,7 +95,7 @@ static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
  * The save handler may save multiple instances of a type into the buffer;
  * the load handler will be called once for each instance found when
  * restoring.  Both return non-zero on error. */
-typedef int (*hvm_save_handler) (struct domain *d,
+typedef int (*hvm_save_handler) (struct vcpu *v,
                                  hvm_domain_context_t *h);
 typedef int (*hvm_load_handler) (struct domain *d,
                                  hvm_domain_context_t *h);
-- 
2.7.4
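
For readers following the series: the interface change is that every save
handler now takes a struct vcpu * instead of a struct domain *, and the
per-vcpu iteration each handler used to do itself is hoisted into the
callers (hvm_save() and hvm_save_one()). A minimal sketch of the two
handler shapes this implies — the toy_* names are illustrative stand-ins,
not Xen's types:

    struct toy_domain { int domain_id; };
    struct toy_vcpu { struct toy_domain *domain; int vcpu_id; };
    typedef struct { unsigned char *data; unsigned int cur; } toy_ctx_t;

    /* New shape: the vcpu, not the domain, is the unit of work. */
    typedef int (*toy_save_handler)(struct toy_vcpu *v, toy_ctx_t *h);

    /* A per-vcpu handler reads state straight off v... */
    static int toy_save_per_vcpu(struct toy_vcpu *v, toy_ctx_t *h)
    {
        (void)h;
        /* ...e.g. serialise v's registers into h... */
        return v ? 0 : -1;
    }

    /* ...while a per-domain handler recovers d from v, the pattern this
     * patch adds to pit_save(), rtc_save(), ioapic_save() and friends. */
    static int toy_save_per_domain(struct toy_vcpu *v, toy_ctx_t *h)
    {
        struct toy_domain *d = v->domain;

        (void)h;
        /* ...serialise domain-wide device state into h... */
        return d ? 0 : -1;
    }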
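The behavioural goal named in the subject line lives in hvm_save_one():
pause only the vcpu whose state is requested, instead of the whole domain.
Note that as posted, is_single_instance is initialised to false and never
assigned, so the vcpu_pause() arm cannot be reached; the sketch below, in
the function's own terms, shows a presumably intended derivation — the
assignment is an assumption, not part of the patch:

    /* ASSUMPTION: is_single_instance is presumably meant to be derived
     * from the request; the patch as posted leaves it always false. */
    bool is_single_instance =
        hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
        instance < d->max_vcpus &&
        d->vcpu[instance] != NULL;

    if ( is_single_instance )
        vcpu_pause(d->vcpu[instance]);   /* freeze only the vcpu being read */
    else
        domain_pause(d);                 /* previous behaviour: freeze all */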
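Both branches of hvm_save_one() walk the serialised buffer as a sequence
of descriptor-plus-payload records, advancing by sizeof(descriptor) +
desc->length per step until the requested instance is found. A
self-contained sketch of that walk, with simplified stand-in types:

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified stand-in for struct hvm_save_descriptor. */
    struct toy_desc {
        uint16_t typecode;
        uint16_t instance;
        uint32_t length;     /* payload bytes that follow this header */
    };

    /* Return the byte offset of the payload for `instance`, or -1. */
    static long toy_find_instance(const uint8_t *buf, uint32_t cur,
                                  uint16_t instance)
    {
        uint32_t off = 0;

        while ( off + sizeof(struct toy_desc) <= cur )
        {
            const struct toy_desc *desc =
                (const struct toy_desc *)(buf + off);

            off += sizeof(struct toy_desc);   /* move past the header */
            if ( desc->length > cur - off )
                break;                        /* truncated record: give up */
            if ( desc->instance == instance )
                return (long)off;             /* payload starts right here */
            off += desc->length;              /* skip payload, next record */
        }
        return -1;
    }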
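One caveat for anyone applying this patch standalone: the CONTINUE value
tested in hvm_save() is not defined in this patch and presumably comes
from earlier in the series. It appears to let a handler tell the new
per-vcpu loop "nothing to emit for this vcpu, keep going", which
per-domain handlers would need so their record is not emitted once per
vcpu. A self-contained sketch of that convention, under that assumption
(TOY_CONTINUE is a made-up stand-in):

    #include <stdio.h>

    #define TOY_CONTINUE 1  /* assumed sentinel; Xen's CONTINUE is
                             * defined elsewhere in the series */

    struct toy_vcpu { int vcpu_id; struct toy_vcpu *next; };

    typedef int (*toy_handler)(struct toy_vcpu *v);

    /* A domain-wide handler does its work once, for vcpu 0, and asks the
     * loop to continue past the others. */
    static int toy_save_domain_wide(struct toy_vcpu *v)
    {
        if ( v->vcpu_id != 0 )
            return TOY_CONTINUE;        /* skip: record already emitted */
        printf("emitting one domain-wide record\n");
        return 0;
    }

    static int toy_save(struct toy_vcpu *vcpus, toy_handler handler)
    {
        for ( struct toy_vcpu *v = vcpus; v; v = v->next )
        {
            int rc = handler(v);
            if ( rc == TOY_CONTINUE )
                continue;               /* not an error, nothing saved */
            if ( rc != 0 )
                return -1;              /* real failure aborts the save */
        }
        return 0;
    }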