[Xen-changelog] [xen stable-4.3] Revert "x86/paging: make log-dirty operations preemptible"
commit ffd7f8db506c77de5b6141c15f87cd7ce293bf41
Author:     Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
AuthorDate: Tue Aug 19 12:52:25 2014 +0100
Commit:     Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CommitDate: Tue Aug 19 12:52:25 2014 +0100

    Revert "x86/paging: make log-dirty operations preemptible"

    This reverts commit b497ecca359466294d169b07a62b98eef7dc0a36.

    This fix has been discovered to cause regressions.  Investigations
    are ongoing.

    Revert-requested-by: Jan Beulich <JBeulich@xxxxxxxx>
---
 xen/arch/x86/domain.c           |    4 +-
 xen/arch/x86/domctl.c           |    3 -
 xen/arch/x86/mm/hap/hap.c       |    3 +-
 xen/arch/x86/mm/paging.c        |  199 +++++++-------------------------
 xen/arch/x86/mm/shadow/common.c |    3 +-
 xen/common/domain.c             |    1 +
 xen/include/asm-x86/domain.h    |   14 ---
 xen/include/asm-x86/paging.h    |    8 ++-
 8 files changed, 49 insertions(+), 186 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 336cd79..ddf9c64 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1867,9 +1867,7 @@ int domain_relinquish_resources(struct domain *d)
         pci_release_devices(d);

         /* Tear down paging-assistance stuff. */
-        ret = paging_teardown(d);
-        if ( ret )
-            return ret;
+        paging_teardown(d);

         /* Drop the in-use references to page-table bases. */
         for_each_vcpu ( d, v )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 060912e..67afe8e 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -61,9 +61,6 @@ long arch_do_domctl(
         ret = paging_domctl(d,
                             &domctl->u.shadow_op,
                             guest_handle_cast(u_domctl, void));
-        if ( ret == -EAGAIN )
-            return hypercall_create_continuation(__HYPERVISOR_domctl,
-                                                 "h", u_domctl);
         copyback = 1;
     }
     break;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 3817b71..66239b7 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -565,7 +565,8 @@ int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
         paging_unlock(d);
         if ( preempted )
             /* Not finished.  Set up to re-run the call. */
-            rc = -EAGAIN;
+            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
+                                               u_domctl);
         else
             /* Finished.  Return the new allocation */
             sc->mb = hap_get_allocation(d);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index f713901..21344e5 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -26,7 +26,6 @@
 #include <asm/shadow.h>
 #include <asm/p2m.h>
 #include <asm/hap.h>
-#include <asm/event.h>
 #include <asm/hvm/nestedhvm.h>
 #include <xen/numa.h>
 #include <xsm/xsm.h>
@@ -117,46 +116,26 @@ static void paging_free_log_dirty_page(struct domain *d, mfn_t mfn)
     d->arch.paging.free_page(d, mfn_to_page(mfn));
 }

-static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
+void paging_free_log_dirty_bitmap(struct domain *d)
 {
     mfn_t *l4, *l3, *l2;
     int i4, i3, i2;

-    paging_lock(d);
-
     if ( !mfn_valid(d->arch.paging.log_dirty.top) )
-    {
-        paging_unlock(d);
-        return 0;
-    }
+        return;

-    if ( !d->arch.paging.preempt.vcpu )
-    {
-        memset(&d->arch.paging.preempt.log_dirty, 0,
-               sizeof(d->arch.paging.preempt.log_dirty));
-        ASSERT(rc <= 0);
-        d->arch.paging.preempt.log_dirty.done = -rc;
-    }
-    else if ( d->arch.paging.preempt.vcpu != current ||
-              d->arch.paging.preempt.op != XEN_DOMCTL_SHADOW_OP_OFF )
-    {
-        paging_unlock(d);
-        return -EBUSY;
-    }
+    paging_lock(d);

     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
-    i4 = d->arch.paging.preempt.log_dirty.i4;
-    i3 = d->arch.paging.preempt.log_dirty.i3;
-    rc = 0;

-    for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
+    for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
     {
         if ( !mfn_valid(l4[i4]) )
             continue;

         l3 = map_domain_page(mfn_x(l4[i4]));

-        for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+        for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
         {
             if ( !mfn_valid(l3[i3]) )
                 continue;
@@ -169,54 +148,20 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
             unmap_domain_page(l2);
             paging_free_log_dirty_page(d, l3[i3]);
-            l3[i3] = _mfn(INVALID_MFN);
-
-            if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
-            {
-                d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
-                d->arch.paging.preempt.log_dirty.i4 = i4;
-                rc = -EAGAIN;
-                break;
-            }
         }

         unmap_domain_page(l3);
-        if ( rc )
-            break;
         paging_free_log_dirty_page(d, l4[i4]);
-        l4[i4] = _mfn(INVALID_MFN);
-
-        if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
-        {
-            d->arch.paging.preempt.log_dirty.i3 = 0;
-            d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
-            rc = -EAGAIN;
-            break;
-        }
     }

     unmap_domain_page(l4);
+    paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);

-    if ( !rc )
-    {
-        paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
-        d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
-
-        ASSERT(d->arch.paging.log_dirty.allocs == 0);
-        d->arch.paging.log_dirty.failed_allocs = 0;
-
-        rc = -d->arch.paging.preempt.log_dirty.done;
-        d->arch.paging.preempt.vcpu = NULL;
-    }
-    else
-    {
-        d->arch.paging.preempt.vcpu = current;
-        d->arch.paging.preempt.op = XEN_DOMCTL_SHADOW_OP_OFF;
-    }
+    ASSERT(d->arch.paging.log_dirty.allocs == 0);
+    d->arch.paging.log_dirty.failed_allocs = 0;

     paging_unlock(d);
-
-    return rc;
 }

 int paging_log_dirty_enable(struct domain *d)
@@ -233,25 +178,15 @@ int paging_log_dirty_enable(struct domain *d)
     return ret;
 }

-static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
+int paging_log_dirty_disable(struct domain *d)
 {
-    int ret = 1;
-
-    if ( !resuming )
-    {
-        domain_pause(d);
-        /* Safe because the domain is paused. */
-        ret = d->arch.paging.log_dirty.disable_log_dirty(d);
-        ASSERT(ret <= 0);
-    }
+    int ret;

+    domain_pause(d);
+    /* Safe because the domain is paused. */
+    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
     if ( !paging_mode_log_dirty(d) )
-    {
-        ret = paging_free_log_dirty_bitmap(d, ret);
-        if ( ret == -EAGAIN )
-            return ret;
-    }
-
+        paging_free_log_dirty_bitmap(d);
     domain_unpause(d);

     return ret;
@@ -391,9 +326,7 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)

 /* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
-static int paging_log_dirty_op(struct domain *d,
-                               struct xen_domctl_shadow_op *sc,
-                               bool_t resuming)
+int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
 {
     int rv = 0, clean = 0, peek = 1;
     unsigned long pages = 0;
@@ -401,22 +334,9 @@ static int paging_log_dirty_op(struct domain *d,
     unsigned long *l1 = NULL;
     int i4, i3, i2;

-    if ( !resuming )
-        domain_pause(d);
+    domain_pause(d);
     paging_lock(d);

-    if ( !d->arch.paging.preempt.vcpu )
-        memset(&d->arch.paging.preempt.log_dirty, 0,
-               sizeof(d->arch.paging.preempt.log_dirty));
-    else if ( d->arch.paging.preempt.vcpu != current ||
-              d->arch.paging.preempt.op != sc->op )
-    {
-        paging_unlock(d);
-        ASSERT(!resuming);
-        domain_unpause(d);
-        return -EBUSY;
-    }
-
     clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);

     PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
@@ -445,15 +365,17 @@ static int paging_log_dirty_op(struct domain *d,
         goto out;
     }

+    pages = 0;
     l4 = paging_map_log_dirty_bitmap(d);
-    i4 = d->arch.paging.preempt.log_dirty.i4;
-    i3 = d->arch.paging.preempt.log_dirty.i3;
-    pages = d->arch.paging.preempt.log_dirty.done;

-    for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
+    for ( i4 = 0;
+          (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
+          i4++ )
     {
         l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
-        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+        for ( i3 = 0;
+              (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+              i3++ )
         {
             l2 = ((l3 && mfn_valid(l3[i3])) ?
                   map_domain_page(mfn_x(l3[i3])) : NULL);
@@ -488,51 +410,18 @@ static int paging_log_dirty_op(struct domain *d,
             }
             if ( l2 )
                 unmap_domain_page(l2);
-
-            if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
-            {
-                d->arch.paging.preempt.log_dirty.i4 = i4;
-                d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
-                rv = -EAGAIN;
-                break;
-            }
         }
         if ( l3 )
             unmap_domain_page(l3);
-
-        if ( !rv && i4 < LOGDIRTY_NODE_ENTRIES - 1 &&
-             hypercall_preempt_check() )
-        {
-            d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
-            d->arch.paging.preempt.log_dirty.i3 = 0;
-            rv = -EAGAIN;
-        }
-        if ( rv )
-            break;
     }
     if ( l4 )
         unmap_domain_page(l4);

-    if ( !rv )
-        d->arch.paging.preempt.vcpu = NULL;
-    else
-    {
-        d->arch.paging.preempt.vcpu = current;
-        d->arch.paging.preempt.op = sc->op;
-        d->arch.paging.preempt.log_dirty.done = pages;
-    }
+    if ( pages < sc->pages )
+        sc->pages = pages;

     paging_unlock(d);

-    if ( rv )
-    {
-        /* Never leave the domain paused for other errors. */
-        ASSERT(rv == -EAGAIN);
-        return rv;
-    }
-
-    if ( pages < sc->pages )
-        sc->pages = pages;
-
     if ( clean )
     {
         /* We need to further call clean_dirty_bitmap() functions of specific
@@ -543,7 +432,6 @@ static int paging_log_dirty_op(struct domain *d,
     return rv;

  out:
-    d->arch.paging.preempt.vcpu = NULL;
     paging_unlock(d);
     domain_unpause(d);

@@ -610,6 +498,12 @@ void paging_log_dirty_init(struct domain *d,
     d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
 }

+/* This function frees log dirty bitmap resources. */
+static void paging_log_dirty_teardown(struct domain *d)
+{
+    paging_free_log_dirty_bitmap(d);
+}
+
 /************************************************/
 /*           CODE FOR PAGING SUPPORT            */
 /************************************************/
@@ -653,7 +547,6 @@ void paging_vcpu_init(struct vcpu *v)
 int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
                   XEN_GUEST_HANDLE_PARAM(void) u_domctl)
 {
-    bool_t resuming = 0;
     int rc;

     if ( unlikely(d == current->domain) )
@@ -676,20 +569,6 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
         return -EINVAL;
     }

-    if ( d->arch.paging.preempt.vcpu )
-    {
-        if ( d->arch.paging.preempt.vcpu != current ||
-             d->arch.paging.preempt.op != sc->op )
-        {
-            printk(XENLOG_G_DEBUG
-                   "d%d:v%d: Paging op %#x on Dom%u with unfinished prior op %#x\n",
-                   current->domain->domain_id, current->vcpu_id,
-                   sc->op, d->domain_id, d->arch.paging.preempt.op);
-            return -EBUSY;
-        }
-        resuming = 1;
-    }
-
     rc = xsm_shadow_control(XSM_HOOK, d, sc->op);
     if ( rc )
         return rc;
@@ -715,13 +594,13 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,

     case XEN_DOMCTL_SHADOW_OP_OFF:
         if ( paging_mode_log_dirty(d) )
-            if ( (rc = paging_log_dirty_disable(d, resuming)) != 0 )
+            if ( (rc = paging_log_dirty_disable(d)) != 0 )
                 return rc;
         break;

     case XEN_DOMCTL_SHADOW_OP_CLEAN:
     case XEN_DOMCTL_SHADOW_OP_PEEK:
-        return paging_log_dirty_op(d, sc, resuming);
+        return paging_log_dirty_op(d, sc);
     }

     /* Here, dispatch domctl to the appropriate paging code */
@@ -732,24 +611,18 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
 }

 /* Call when destroying a domain */
-int paging_teardown(struct domain *d)
+void paging_teardown(struct domain *d)
 {
-    int rc;
-
     if ( hap_enabled(d) )
         hap_teardown(d);
     else
         shadow_teardown(d);

     /* clean up log dirty resources. */
-    rc = paging_free_log_dirty_bitmap(d, 0);
-    if ( rc == -EAGAIN )
-        return rc;
+    paging_log_dirty_teardown(d);

     /* Move populate-on-demand cache back to domain_list for destruction */
     p2m_pod_empty_cache(d);
-
-    return rc;
 }

 /* Call once all of the references to the domain have gone away */
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 8f378d6..72f5718 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3706,7 +3706,8 @@ int shadow_domctl(struct domain *d,
         paging_unlock(d);
         if ( preempted )
             /* Not finished.  Set up to re-run the call. */
-            rc = -EAGAIN;
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_domctl, "h", u_domctl);
         else
             /* Finished.  Return the new allocation */
             sc->mb = shadow_get_allocation(d);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 9980d83..6dbfaa5 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -527,6 +527,7 @@ int domain_kill(struct domain *d)
         rc = domain_relinquish_resources(d);
         if ( rc != 0 )
         {
+            BUG_ON(rc != -EAGAIN);
             break;
         }
         if ( sched_move_domain(d, cpupool0) )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index d6a4c89..1df8ea9 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -186,20 +186,6 @@ struct paging_domain {
     struct hap_domain    hap;
     /* log dirty support */
     struct log_dirty_domain log_dirty;
-
-    /* preemption handling */
-    struct {
-        struct vcpu *vcpu;
-        unsigned int op;
-        union {
-            struct {
-                unsigned long done:PADDR_BITS - PAGE_SHIFT;
-                unsigned long i4:PAGETABLE_ORDER;
-                unsigned long i3:PAGETABLE_ORDER;
-            } log_dirty;
-        };
-    } preempt;
-
     /* alloc/free pages from the pool for paging-assistance structures
      * (used by p2m and log-dirty code for their tries) */
     struct page_info * (*alloc_page)(struct domain *d);
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index ed63987..cd7ee3b 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -133,6 +133,9 @@ struct paging_mode {
 /*****************************************************************************
  * Log dirty code */

+/* free log dirty bitmap resource */
+void paging_free_log_dirty_bitmap(struct domain *d);
+
 /* get the dirty bitmap for a specific range of pfns */
 void paging_log_dirty_range(struct domain *d,
                             unsigned long begin_pfn,
@@ -142,6 +145,9 @@ void paging_log_dirty_range(struct domain *d,
 /* enable log dirty */
 int paging_log_dirty_enable(struct domain *d);

+/* disable log dirty */
+int paging_log_dirty_disable(struct domain *d);
+
 /* log dirty initialization */
 void paging_log_dirty_init(struct domain *d,
                            int (*enable_log_dirty)(struct domain *d),
@@ -200,7 +206,7 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
                   XEN_GUEST_HANDLE_PARAM(void) u_domctl);

 /* Call when destroying a domain */
-int paging_teardown(struct domain *d);
+void paging_teardown(struct domain *d);

 /* Call once all of the references to the domain have gone away */
 void paging_final_teardown(struct domain *d);
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.3