[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [RFC Patch 25/25] sync mmu before resuming secondary vm
At 07/24/2014 06:59 PM, Tim Deegan Write: > At 19:39 +0800 on 18 Jul (1405708750), Wen Congyang wrote: >> In out test, we find secondary vm will be bluescreen due to memory >> related problem. If we sync mmu, the problem will disappear. > > Erk. Do you understand _why_ this happens? Do you need both the TLB > flush and the EPT flush to fix it? > > The TLB flush sounds plausible because the migration may have changed > some pagetables and you've elided any TLB flushes that happened on the > source. For that case I think I'd prefer to make the TLB flush > implicit in the HVM load operation, e.g., by putting a call to > hvm_asid_flush_vcpu(v) into hvm_load_cpu_ctxt() (on the grounds that the > TLB is part of the vcpu state). Hmm, I don't investigate this problem deeply. I will try you suggestion. Thanks Wen Congyang > > Cheers, > > Tim. > >> TODO: only vmx+ept is done. >> Signed-off-by: Wen Congyang <wency@xxxxxxxxxxxxxx> >> --- >> tools/libxc/xc_domain.c | 9 +++++++++ >> tools/libxc/xenctrl.h | 2 ++ >> tools/libxl/libxl_colo_restore.c | 6 +++++- >> xen/arch/x86/domctl.c | 15 +++++++++++++++ >> xen/arch/x86/hvm/save.c | 6 ++++++ >> xen/arch/x86/hvm/vmx/vmcs.c | 8 ++++++++ >> xen/arch/x86/hvm/vmx/vmx.c | 1 + >> xen/include/asm-x86/hvm/hvm.h | 1 + >> xen/include/asm-x86/hvm/vmx/vmcs.h | 1 + >> xen/include/public/domctl.h | 1 + >> xen/include/xen/hvm/save.h | 2 ++ >> 11 files changed, 51 insertions(+), 1 deletion(-) >> >> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c >> index 0230c6c..0b47bdd 100644 >> --- a/tools/libxc/xc_domain.c >> +++ b/tools/libxc/xc_domain.c >> @@ -2123,6 +2123,15 @@ int xc_domain_set_max_evtchn(xc_interface *xch, >> uint32_t domid, >> return do_domctl(xch, &domctl); >> } >> >> +int xc_domain_hvm_sync_mmu(xc_interface *xch, uint32_t domid) >> +{ >> + DECLARE_DOMCTL; >> + >> + domctl.cmd = XEN_DOMCTL_hvm_sync_mmu; >> + domctl.domain = domid; >> + return do_domctl(xch, &domctl); >> +} >> + >> /* >> * Local variables: >> * mode: C >> 
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h >> index 3578b09..a83364a 100644 >> --- a/tools/libxc/xenctrl.h >> +++ b/tools/libxc/xenctrl.h >> @@ -961,6 +961,8 @@ int xc_domain_set_virq_handler(xc_interface *xch, >> uint32_t domid, int virq); >> int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid, >> uint32_t max_port); >> >> +int xc_domain_hvm_sync_mmu(xc_interface *xch, uint32_t domid); >> + >> /* >> * CPUPOOL MANAGEMENT FUNCTIONS >> */ >> diff --git a/tools/libxl/libxl_colo_restore.c >> b/tools/libxl/libxl_colo_restore.c >> index aea3feb..730b492 100644 >> --- a/tools/libxl/libxl_colo_restore.c >> +++ b/tools/libxl/libxl_colo_restore.c >> @@ -124,11 +124,15 @@ static void colo_resume_vm(libxl__egc *egc, >> STATE_AO_GC(crs->ao); >> >> if (!crs->saved_cb) { >> - /* TODO: sync mmu for hvm? */ >> + rc = xc_domain_hvm_sync_mmu(CTX->xch, crs->domid); >> + if (rc) >> + goto fail; >> + >> rc = libxl__domain_resume(gc, crs->domid, 0, 1); >> if (rc) >> LOG(ERROR, "cannot resume secondary vm"); >> >> +fail: >> crcs->callback(egc, crcs, rc); >> return; >> } >> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c >> index d62c715..d0dfad7 100644 >> --- a/xen/arch/x86/domctl.c >> +++ b/xen/arch/x86/domctl.c >> @@ -1395,6 +1395,21 @@ long arch_do_domctl( >> } >> break; >> >> + case XEN_DOMCTL_hvm_sync_mmu: >> + { >> + struct domain *d; >> + >> + ret = -ESRCH; >> + d = rcu_lock_domain_by_id(domctl->domain); >> + if ( d != NULL ) >> + { >> + arch_hvm_sync_mmu(d); >> + rcu_unlock_domain(d); >> + ret = 0; >> + } >> + } >> + break; >> + >> default: >> ret = iommu_do_domctl(domctl, d, u_domctl); >> break; >> diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c >> index 6af19be..7a07ebf 100644 >> --- a/xen/arch/x86/hvm/save.c >> +++ b/xen/arch/x86/hvm/save.c >> @@ -79,6 +79,12 @@ int arch_hvm_load(struct domain *d, struct >> hvm_save_header *hdr) >> return 0; >> } >> >> +void arch_hvm_sync_mmu(struct domain *d) >> +{ >> + if 
(hvm_funcs.sync_mmu) >> + hvm_funcs.sync_mmu(d); >> +} >> + >> /* >> * Local variables: >> * mode: C >> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c >> index 8ffc562..4be9b4d 100644 >> --- a/xen/arch/x86/hvm/vmx/vmcs.c >> +++ b/xen/arch/x86/hvm/vmx/vmcs.c >> @@ -596,6 +596,14 @@ void vmx_cpu_down(void) >> local_irq_restore(flags); >> } >> >> +void vmx_sync_mmu(struct domain *d) >> +{ >> + ept_sync_domain(p2m_get_hostp2m(d)); >> + >> + /* flush tlb */ >> + flush_all(FLUSH_TLB_GLOBAL); >> +} >> + >> struct foreign_vmcs { >> struct vcpu *v; >> unsigned int count; >> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c >> index eb73412..b46b4dd 100644 >> --- a/xen/arch/x86/hvm/vmx/vmx.c >> +++ b/xen/arch/x86/hvm/vmx/vmx.c >> @@ -1719,6 +1719,7 @@ static struct hvm_function_table __initdata >> vmx_function_table = { >> .event_pending = vmx_event_pending, >> .cpu_up = vmx_cpu_up, >> .cpu_down = vmx_cpu_down, >> + .sync_mmu = vmx_sync_mmu, >> .cpuid_intercept = vmx_cpuid_intercept, >> .wbinvd_intercept = vmx_wbinvd_intercept, >> .fpu_dirty_intercept = vmx_fpu_dirty_intercept, >> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h >> index 0ebd478..b4f89a7 100644 >> --- a/xen/include/asm-x86/hvm/hvm.h >> +++ b/xen/include/asm-x86/hvm/hvm.h >> @@ -151,6 +151,7 @@ struct hvm_function_table { >> >> int (*cpu_up)(void); >> void (*cpu_down)(void); >> + void (*sync_mmu)(struct domain *d); >> >> /* Copy up to 15 bytes from cached instruction bytes at current rIP. 
*/ >> unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf); >> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h >> b/xen/include/asm-x86/hvm/vmx/vmcs.h >> index 215d93c..664741a 100644 >> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h >> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h >> @@ -29,6 +29,7 @@ extern int vmx_cpu_up_prepare(unsigned int cpu); >> extern void vmx_cpu_dead(unsigned int cpu); >> extern int vmx_cpu_up(void); >> extern void vmx_cpu_down(void); >> +extern void vmx_sync_mmu(struct domain *d); >> extern void vmx_save_host_msrs(void); >> >> struct vmcs_struct { >> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h >> index 5b11bbf..11e5a26 100644 >> --- a/xen/include/public/domctl.h >> +++ b/xen/include/public/domctl.h >> @@ -1008,6 +1008,7 @@ struct xen_domctl { >> #define XEN_DOMCTL_cacheflush 71 >> #define XEN_DOMCTL_get_vcpu_msrs 72 >> #define XEN_DOMCTL_set_vcpu_msrs 73 >> +#define XEN_DOMCTL_hvm_sync_mmu 74 >> #define XEN_DOMCTL_gdbsx_guestmemio 1000 >> #define XEN_DOMCTL_gdbsx_pausevcpu 1001 >> #define XEN_DOMCTL_gdbsx_unpausevcpu 1002 >> diff --git a/xen/include/xen/hvm/save.h b/xen/include/xen/hvm/save.h >> index ae6f0bb..049fdb8 100644 >> --- a/xen/include/xen/hvm/save.h >> +++ b/xen/include/xen/hvm/save.h >> @@ -135,4 +135,6 @@ struct hvm_save_header; >> void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr); >> int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr); >> >> +void arch_hvm_sync_mmu(struct domain *d); >> + >> #endif /* __XEN_HVM_SAVE_H__ */ >> -- >> 1.9.3 >> >> >> _______________________________________________ >> Xen-devel mailing list >> Xen-devel@xxxxxxxxxxxxx >> http://lists.xen.org/xen-devel > . > -- ÃÃÃÃÂÃÂÂ ÂÃÂÃÂÂÂÂÂÂÃÂÂÂÂÂÂÃÂÂ ÃÃÂÂÃÃ -------------------------------------------------- Wen Congyang Development Dept.I Nanjing Fujitsu Nanda Software Tech. Co., Ltd.(FNST) No. 
6 Wenzhu Road, Nanjing, 210012, China Postcode: 210012 TEL: +86+25-86630566-8503 FUJITSU INTERNAL: 79955-8503 FAX: +86+25-83317685 Mail: wency@xxxxxxxxxxxxxx -------------------------------------------------- This communication is for use by the intended recipient(s) only and may contain information that is privileged, confidential and exempt from disclosure under applicable law. If you are not an intended recipient of this communication, you are hereby notified that any dissemination, distribution or copying hereof is strictly prohibited. If you have received this communication in error, please notify me by reply e-mail, permanently delete this communication from your system, and destroy any hard copies you may have printed. _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.