[Xen-changelog] [xen-unstable] X86/vMCE: handle broken page with regard to migration
# HG changeset patch
# User Liu Jinsong <jinsong.liu@xxxxxxxxx>
# Date 1354790842 0
# Node ID dc81777ca115fd8cb11023f41df8f969c41ce8cc
# Parent 89bd3c43f883620e481f4fa019e30c96044aec55
X86/vMCE: handle broken page with regard to migration

At the sender, xc_domain_save has a key point: querying the types of all
the pages with xc_get_pfn_type_batch.
1) If a page breaks before this point, migration works fine, since the
   proper pfn_type and pfn number are transferred to the target, which
   then takes the appropriate action;
2) If a page breaks after this point, the whole system crashes and
   migration no longer matters.

At the target, pages are populated for the guest. For a broken page we
prefer to keep its type, for the sake of seamless migration, so the
target sets its p2m entry to p2m_ram_broken. If the guest accesses the
broken page again, it kills itself as expected.

Suggested-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Liu Jinsong <jinsong.liu@xxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---

diff -r 89bd3c43f883 -r dc81777ca115 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Thu Dec 06 10:19:08 2012 +0000
+++ b/tools/libxc/xc_domain.c	Thu Dec 06 10:47:22 2012 +0000
@@ -283,6 +283,22 @@ int xc_domain_getinfolist(xc_interface *
     return ret;
 }
 
+/* set broken page p2m */
+int xc_set_broken_page_p2m(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned long pfn)
+{
+    int ret;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_set_broken_page_p2m;
+    domctl.domain = (domid_t)domid;
+    domctl.u.set_broken_page_p2m.pfn = pfn;
+    ret = do_domctl(xch, &domctl);
+
+    return ret ? -1 : 0;
+}
+
 /* get info from hvm guest for save */
 int xc_domain_hvm_getcontext(xc_interface *xch,
                              uint32_t domid,
diff -r 89bd3c43f883 -r dc81777ca115 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Thu Dec 06 10:19:08 2012 +0000
+++ b/tools/libxc/xc_domain_restore.c	Thu Dec 06 10:47:22 2012 +0000
@@ -1023,9 +1023,15 @@ static int pagebuf_get_one(xc_interface
     countpages = count;
     for (i = oldcount; i < buf->nr_pages; ++i)
-        if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB
-            ||(buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XALLOC)
+    {
+        unsigned long pagetype;
+
+        pagetype = buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK;
+        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB ||
+             pagetype == XEN_DOMCTL_PFINFO_BROKEN ||
+             pagetype == XEN_DOMCTL_PFINFO_XALLOC )
             --countpages;
+    }
 
     if (!countpages)
         return count;
@@ -1267,6 +1273,17 @@ static int apply_batch(xc_interface *xch
             /* a bogus/unmapped/allocate-only page: skip it */
             continue;
 
+        if ( pagetype == XEN_DOMCTL_PFINFO_BROKEN )
+        {
+            if ( xc_set_broken_page_p2m(xch, dom, pfn) )
+            {
+                ERROR("Set p2m for broken page failed, "
+                      "dom=%d, pfn=%lx\n", dom, pfn);
+                goto err_mapped;
+            }
+            continue;
+        }
+
         if (pfn_err[i])
         {
             ERROR("unexpected PFN mapping failure pfn %lx map_mfn %lx p2m_mfn %lx",
diff -r 89bd3c43f883 -r dc81777ca115 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Thu Dec 06 10:19:08 2012 +0000
+++ b/tools/libxc/xc_domain_save.c	Thu Dec 06 10:47:22 2012 +0000
@@ -1277,6 +1277,13 @@ int xc_domain_save(xc_interface *xch, in
                 if ( !hvm )
                     gmfn = pfn_to_mfn(gmfn);
 
+                if ( pfn_type[j] == XEN_DOMCTL_PFINFO_BROKEN )
+                {
+                    pfn_type[j] |= pfn_batch[j];
+                    ++run;
+                    continue;
+                }
+
                 if ( pfn_err[j] )
                 {
                     if ( pfn_type[j] == XEN_DOMCTL_PFINFO_XTAB )
@@ -1371,8 +1378,12 @@ int xc_domain_save(xc_interface *xch, in
                 }
             }
 
-            /* skip pages that aren't present or are alloc-only */
+            /*
+             * skip pages that aren't present,
+             * or are broken, or are alloc-only
+             */
             if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+                 || pagetype == XEN_DOMCTL_PFINFO_BROKEN
                  || pagetype == XEN_DOMCTL_PFINFO_XALLOC )
                 continue;
 
diff -r 89bd3c43f883 -r dc81777ca115 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Thu Dec 06 10:19:08 2012 +0000
+++ b/tools/libxc/xenctrl.h	Thu Dec 06 10:47:22 2012 +0000
@@ -575,6 +575,17 @@ int xc_domain_getinfolist(xc_interface *
                           xc_domaininfo_t *info);
 
 /**
+ * This function sets the p2m type of a broken page
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id the broken page belongs to
+ * @parm pfn the pfn number of the broken page
+ * @return 0 on success, -1 on failure
+ */
+int xc_set_broken_page_p2m(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned long pfn);
+
+/**
  * This function returns information about the context of a hvm domain
  * @parm xch a handle to an open hypervisor interface
  * @parm domid the domain to get information from
diff -r 89bd3c43f883 -r dc81777ca115 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Thu Dec 06 10:19:08 2012 +0000
+++ b/xen/arch/x86/domctl.c	Thu Dec 06 10:47:22 2012 +0000
@@ -209,12 +209,18 @@ long arch_do_domctl(
             for ( j = 0; j < k; j++ )
             {
                 unsigned long type = 0;
+                p2m_type_t t;
 
-                page = get_page_from_gfn(d, arr[j], NULL, P2M_ALLOC);
+                page = get_page_from_gfn(d, arr[j], &t, P2M_ALLOC);
 
                 if ( unlikely(!page) ||
                      unlikely(is_xen_heap_page(page)) )
-                    type = XEN_DOMCTL_PFINFO_XTAB;
+                {
+                    if ( p2m_is_broken(t) )
+                        type = XEN_DOMCTL_PFINFO_BROKEN;
+                    else
+                        type = XEN_DOMCTL_PFINFO_XTAB;
+                }
                 else
                 {
                     switch( page->u.inuse.type_info & PGT_type_mask )
@@ -235,6 +241,9 @@ long arch_do_domctl(
 
                     if ( page->u.inuse.type_info & PGT_pinned )
                         type |= XEN_DOMCTL_PFINFO_LPINTAB;
+
+                    if ( page->count_info & PGC_broken )
+                        type = XEN_DOMCTL_PFINFO_BROKEN;
                 }
 
                 if ( page )
@@ -1568,6 +1577,29 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_set_broken_page_p2m:
+    {
+        struct domain *d;
+
+        d = rcu_lock_domain_by_id(domctl->domain);
+        if ( d != NULL )
+        {
+            p2m_type_t pt;
+            unsigned long pfn = domctl->u.set_broken_page_p2m.pfn;
+            mfn_t mfn = get_gfn_query(d, pfn, &pt);
+
+            if ( unlikely(!mfn_valid(mfn_x(mfn)) || !p2m_is_ram(pt) ||
+                          (p2m_change_type(d, pfn, pt, p2m_ram_broken) != pt)) )
+                ret = -EINVAL;
+
+            put_gfn(d, pfn);
+            rcu_unlock_domain(d);
+        }
+        else
+            ret = -ESRCH;
+    }
+    break;
+
     default:
         ret = iommu_do_domctl(domctl, u_domctl);
         break;
diff -r 89bd3c43f883 -r dc81777ca115 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h	Thu Dec 06 10:19:08 2012 +0000
+++ b/xen/include/public/domctl.h	Thu Dec 06 10:47:22 2012 +0000
@@ -136,6 +136,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getme
 #define XEN_DOMCTL_PFINFO_LPINTAB   (0x1U<<31)
 #define XEN_DOMCTL_PFINFO_XTAB      (0xfU<<28) /* invalid page */
 #define XEN_DOMCTL_PFINFO_XALLOC    (0xeU<<28) /* allocate-only page */
+#define XEN_DOMCTL_PFINFO_BROKEN    (0xdU<<28) /* broken page */
 #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
 
 struct xen_domctl_getpageframeinfo {
@@ -834,6 +835,12 @@ struct xen_domctl_set_access_required {
 typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
 
+struct xen_domctl_set_broken_page_p2m {
+    uint64_aligned_t pfn;
+};
+typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -899,6 +906,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_set_access_required           64
 #define XEN_DOMCTL_audit_p2m                     65
 #define XEN_DOMCTL_set_virq_handler              66
+#define XEN_DOMCTL_set_broken_page_p2m           67
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -954,6 +962,7 @@ struct xen_domctl {
         struct xen_domctl_audit_p2m         audit_p2m;
         struct xen_domctl_set_virq_handler  set_virq_handler;
         struct xen_domctl_gdbsx_memio       gdbsx_guest_memio;
+        struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
         uint8_t                             pad[128];
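
For readers of the save/restore stream: each pfn_types[] entry carries the
page type in its top nibble, and the three types XTAB, XALLOC and the new
BROKEN all denote pages whose contents are not present in the migration
stream. Below is a minimal, illustrative sketch (not part of the patch) of
how a consumer can decode that nibble; the constants duplicate the values
from xen/include/public/domctl.h as extended by this changeset, and the
helper name pfn_carries_no_data is hypothetical.

/* Illustrative only: decode the type nibble of a pfn_types[] entry.
 * A real consumer would include <xen/domctl.h> instead of redefining
 * the constants; the values below match the public header. */
#include <stdbool.h>

#define XEN_DOMCTL_PFINFO_XTAB      (0xfU << 28) /* invalid page */
#define XEN_DOMCTL_PFINFO_XALLOC    (0xeU << 28) /* allocate-only page */
#define XEN_DOMCTL_PFINFO_BROKEN    (0xdU << 28) /* broken page */
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU << 28)

/* Pages of these three types carry no page body in the stream, so the
 * receiver must not count them when working out how many page bodies
 * follow -- the same test the countpages loop in pagebuf_get_one() makes. */
static bool pfn_carries_no_data(unsigned long pfn_type)
{
    unsigned long t = pfn_type & XEN_DOMCTL_PFINFO_LTAB_MASK;

    return t == XEN_DOMCTL_PFINFO_XTAB ||
           t == XEN_DOMCTL_PFINFO_XALLOC ||
           t == XEN_DOMCTL_PFINFO_BROKEN;
}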
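
On the toolstack side, xc_domain_restore calls the new libxc function for
every pfn received with type XEN_DOMCTL_PFINFO_BROKEN, but it can also be
driven directly. A hedged usage sketch follows, assuming a standalone
test tool: the program and its argument handling are hypothetical, while
xc_interface_open/xc_interface_close are the usual libxc entry points and
xc_set_broken_page_p2m is the call added by this changeset.

/* Illustrative only: mark a guest pfn broken via the new libxc call. */
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

int main(int argc, char *argv[])
{
    xc_interface *xch;
    uint32_t domid;
    unsigned long pfn;

    if ( argc != 3 )
    {
        fprintf(stderr, "usage: %s <domid> <pfn>\n", argv[0]);
        return 1;
    }

    domid = strtoul(argv[1], NULL, 0);
    pfn   = strtoul(argv[2], NULL, 0);

    xch = xc_interface_open(NULL, NULL, 0);
    if ( !xch )
    {
        fprintf(stderr, "failed to open libxc interface\n");
        return 1;
    }

    /* Issues XEN_DOMCTL_set_broken_page_p2m, which changes the p2m entry
     * for this pfn to p2m_ram_broken; Xen rejects non-RAM pfns with
     * -EINVAL, as in the domctl handler above. */
    if ( xc_set_broken_page_p2m(xch, domid, pfn) )
        fprintf(stderr, "xc_set_broken_page_p2m failed for dom %u pfn %#lx\n",
                domid, pfn);

    xc_interface_close(xch);
    return 0;
}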