|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen stable-4.2] Revert "x86/paging: make log-dirty operations preemptible"
commit 09a293a57fa38331dcb70904a0306282a98525e7
Author: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
AuthorDate: Tue Aug 19 12:54:06 2014 +0100
Commit: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CommitDate: Tue Aug 19 12:54:06 2014 +0100
Revert "x86/paging: make log-dirty operations preemptible"
This reverts commit 6fbd99d7cc46a91566839e85bffb9bff406adbae.
This fix has been discovered to cause regressions. Investigations are
ongoing.
Revert-requested-by: Jan Beulich <JBeulich@xxxxxxxx>
---
xen/arch/x86/domain.c | 4 +-
xen/arch/x86/domctl.c | 3 -
xen/arch/x86/mm/hap/hap.c | 3 +-
xen/arch/x86/mm/paging.c | 199 +++++++--------------------------------
xen/arch/x86/mm/shadow/common.c | 3 +-
xen/common/domain.c | 1 +
xen/include/asm-x86/domain.h | 14 ---
xen/include/asm-x86/paging.h | 8 ++-
8 files changed, 49 insertions(+), 186 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 2799e86..8f11473 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2136,9 +2136,7 @@ int domain_relinquish_resources(struct domain *d)
pci_release_devices(d);
/* Tear down paging-assistance stuff. */
- ret = paging_teardown(d);
- if ( ret )
- return ret;
+ paging_teardown(d);
/* Drop the in-use references to page-table bases. */
for_each_vcpu ( d, v )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index e392cca..48de4bf 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -66,9 +66,6 @@ long arch_do_domctl(
&domctl->u.shadow_op,
guest_handle_cast(u_domctl, void));
rcu_unlock_domain(d);
- if ( ret == -EAGAIN )
- return hypercall_create_continuation(__HYPERVISOR_domctl,
- "h", u_domctl);
copy_to_guest(u_domctl, domctl, 1);
}
}
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index acdc192..c198323 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -678,7 +678,8 @@ int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
paging_unlock(d);
if ( preempted )
/* Not finished. Set up to re-run the call. */
- rc = -EAGAIN;
+ rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
+ u_domctl);
else
/* Finished. Return the new allocation */
sc->mb = hap_get_allocation(d);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 7a7fdf6..0fd1199 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -26,7 +26,6 @@
#include <asm/shadow.h>
#include <asm/p2m.h>
#include <asm/hap.h>
-#include <asm/event.h>
#include <asm/hvm/nestedhvm.h>
#include <xen/numa.h>
#include <xsm/xsm.h>
@@ -117,46 +116,26 @@ static void paging_free_log_dirty_page(struct domain *d,
mfn_t mfn)
d->arch.paging.free_page(d, mfn_to_page(mfn));
}
-static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
+void paging_free_log_dirty_bitmap(struct domain *d)
{
mfn_t *l4, *l3, *l2;
int i4, i3, i2;
- paging_lock(d);
-
if ( !mfn_valid(d->arch.paging.log_dirty.top) )
- {
- paging_unlock(d);
- return 0;
- }
+ return;
- if ( !d->arch.paging.preempt.vcpu )
- {
- memset(&d->arch.paging.preempt.log_dirty, 0,
- sizeof(d->arch.paging.preempt.log_dirty));
- ASSERT(rc <= 0);
- d->arch.paging.preempt.log_dirty.done = -rc;
- }
- else if ( d->arch.paging.preempt.vcpu != current ||
- d->arch.paging.preempt.op != XEN_DOMCTL_SHADOW_OP_OFF )
- {
- paging_unlock(d);
- return -EBUSY;
- }
+ paging_lock(d);
l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
- i4 = d->arch.paging.preempt.log_dirty.i4;
- i3 = d->arch.paging.preempt.log_dirty.i3;
- rc = 0;
- for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
+ for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
{
if ( !mfn_valid(l4[i4]) )
continue;
l3 = map_domain_page(mfn_x(l4[i4]));
- for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+ for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
{
if ( !mfn_valid(l3[i3]) )
continue;
@@ -169,54 +148,20 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
unmap_domain_page(l2);
paging_free_log_dirty_page(d, l3[i3]);
- l3[i3] = _mfn(INVALID_MFN);
-
- if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
- {
- d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
- d->arch.paging.preempt.log_dirty.i4 = i4;
- rc = -EAGAIN;
- break;
- }
}
unmap_domain_page(l3);
- if ( rc )
- break;
paging_free_log_dirty_page(d, l4[i4]);
- l4[i4] = _mfn(INVALID_MFN);
-
- if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
- {
- d->arch.paging.preempt.log_dirty.i3 = 0;
- d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
- rc = -EAGAIN;
- break;
- }
}
unmap_domain_page(l4);
+ paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+ d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
- if ( !rc )
- {
- paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
- d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
-
- ASSERT(d->arch.paging.log_dirty.allocs == 0);
- d->arch.paging.log_dirty.failed_allocs = 0;
-
- rc = -d->arch.paging.preempt.log_dirty.done;
- d->arch.paging.preempt.vcpu = NULL;
- }
- else
- {
- d->arch.paging.preempt.vcpu = current;
- d->arch.paging.preempt.op = XEN_DOMCTL_SHADOW_OP_OFF;
- }
+ ASSERT(d->arch.paging.log_dirty.allocs == 0);
+ d->arch.paging.log_dirty.failed_allocs = 0;
paging_unlock(d);
-
- return rc;
}
int paging_log_dirty_enable(struct domain *d)
@@ -233,25 +178,15 @@ int paging_log_dirty_enable(struct domain *d)
return ret;
}
-static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
+int paging_log_dirty_disable(struct domain *d)
{
- int ret = 1;
-
- if ( !resuming )
- {
- domain_pause(d);
- /* Safe because the domain is paused. */
- ret = d->arch.paging.log_dirty.disable_log_dirty(d);
- ASSERT(ret <= 0);
- }
+ int ret;
+ domain_pause(d);
+ /* Safe because the domain is paused. */
+ ret = d->arch.paging.log_dirty.disable_log_dirty(d);
if ( !paging_mode_log_dirty(d) )
- {
- ret = paging_free_log_dirty_bitmap(d, ret);
- if ( ret == -EAGAIN )
- return ret;
- }
-
+ paging_free_log_dirty_bitmap(d);
domain_unpause(d);
return ret;
@@ -391,9 +326,7 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
/* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
* clear the bitmap and stats as well. */
-static int paging_log_dirty_op(struct domain *d,
- struct xen_domctl_shadow_op *sc,
- bool_t resuming)
+int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
{
int rv = 0, clean = 0, peek = 1;
unsigned long pages = 0;
@@ -401,22 +334,9 @@ static int paging_log_dirty_op(struct domain *d,
unsigned long *l1 = NULL;
int i4, i3, i2;
- if ( !resuming )
- domain_pause(d);
+ domain_pause(d);
paging_lock(d);
- if ( !d->arch.paging.preempt.vcpu )
- memset(&d->arch.paging.preempt.log_dirty, 0,
- sizeof(d->arch.paging.preempt.log_dirty));
- else if ( d->arch.paging.preempt.vcpu != current ||
- d->arch.paging.preempt.op != sc->op )
- {
- paging_unlock(d);
- ASSERT(!resuming);
- domain_unpause(d);
- return -EBUSY;
- }
-
clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
@@ -445,15 +365,17 @@ static int paging_log_dirty_op(struct domain *d,
goto out;
}
+ pages = 0;
l4 = paging_map_log_dirty_bitmap(d);
- i4 = d->arch.paging.preempt.log_dirty.i4;
- i3 = d->arch.paging.preempt.log_dirty.i3;
- pages = d->arch.paging.preempt.log_dirty.done;
- for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
+ for ( i4 = 0;
+ (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
+ i4++ )
{
l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
- for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+ for ( i3 = 0;
+ (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+ i3++ )
{
l2 = ((l3 && mfn_valid(l3[i3])) ?
map_domain_page(mfn_x(l3[i3])) : NULL);
@@ -488,51 +410,18 @@ static int paging_log_dirty_op(struct domain *d,
}
if ( l2 )
unmap_domain_page(l2);
-
- if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
- {
- d->arch.paging.preempt.log_dirty.i4 = i4;
- d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
- rv = -EAGAIN;
- break;
- }
}
if ( l3 )
unmap_domain_page(l3);
-
- if ( !rv && i4 < LOGDIRTY_NODE_ENTRIES - 1 &&
- hypercall_preempt_check() )
- {
- d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
- d->arch.paging.preempt.log_dirty.i3 = 0;
- rv = -EAGAIN;
- }
- if ( rv )
- break;
}
if ( l4 )
unmap_domain_page(l4);
- if ( !rv )
- d->arch.paging.preempt.vcpu = NULL;
- else
- {
- d->arch.paging.preempt.vcpu = current;
- d->arch.paging.preempt.op = sc->op;
- d->arch.paging.preempt.log_dirty.done = pages;
- }
+ if ( pages < sc->pages )
+ sc->pages = pages;
paging_unlock(d);
- if ( rv )
- {
- /* Never leave the domain paused for other errors. */
- ASSERT(rv == -EAGAIN);
- return rv;
- }
-
- if ( pages < sc->pages )
- sc->pages = pages;
if ( clean )
{
/* We need to further call clean_dirty_bitmap() functions of specific
@@ -543,7 +432,6 @@ static int paging_log_dirty_op(struct domain *d,
return rv;
out:
- d->arch.paging.preempt.vcpu = NULL;
paging_unlock(d);
domain_unpause(d);
@@ -610,6 +498,12 @@ void paging_log_dirty_init(struct domain *d,
d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
}
+/* This function frees log dirty bitmap resources. */
+static void paging_log_dirty_teardown(struct domain*d)
+{
+ paging_free_log_dirty_bitmap(d);
+}
+
/************************************************/
/* CODE FOR PAGING SUPPORT */
/************************************************/
@@ -653,7 +547,6 @@ void paging_vcpu_init(struct vcpu *v)
int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
XEN_GUEST_HANDLE(void) u_domctl)
{
- bool_t resuming = 0;
int rc;
if ( unlikely(d == current->domain) )
@@ -676,20 +569,6 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
return -EINVAL;
}
- if ( d->arch.paging.preempt.vcpu )
- {
- if ( d->arch.paging.preempt.vcpu != current ||
- d->arch.paging.preempt.op != sc->op )
- {
- printk(XENLOG_G_DEBUG
"d%d:v%d: Paging op %#x on Dom%u with unfinished prior op %#x\n",
- current->domain->domain_id, current->vcpu_id,
- sc->op, d->domain_id, d->arch.paging.preempt.op);
- return -EBUSY;
- }
- resuming = 1;
- }
-
rc = xsm_shadow_control(d, sc->op);
if ( rc )
return rc;
@@ -715,13 +594,13 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
case XEN_DOMCTL_SHADOW_OP_OFF:
if ( paging_mode_log_dirty(d) )
- if ( (rc = paging_log_dirty_disable(d, resuming)) != 0 )
+ if ( (rc = paging_log_dirty_disable(d)) != 0 )
return rc;
break;
case XEN_DOMCTL_SHADOW_OP_CLEAN:
case XEN_DOMCTL_SHADOW_OP_PEEK:
- return paging_log_dirty_op(d, sc, resuming);
+ return paging_log_dirty_op(d, sc);
}
/* Here, dispatch domctl to the appropriate paging code */
@@ -732,24 +611,18 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
}
/* Call when destroying a domain */
-int paging_teardown(struct domain *d)
+void paging_teardown(struct domain *d)
{
- int rc;
-
if ( hap_enabled(d) )
hap_teardown(d);
else
shadow_teardown(d);
/* clean up log dirty resources. */
- rc = paging_free_log_dirty_bitmap(d, 0);
- if ( rc == -EAGAIN )
- return rc;
+ paging_log_dirty_teardown(d);
/* Move populate-on-demand cache back to domain_list for destruction */
p2m_pod_empty_cache(d);
-
- return rc;
}
/* Call once all of the references to the domain have gone away */
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index d5c5905..0362c85 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3829,7 +3829,8 @@ int shadow_domctl(struct domain *d,
paging_unlock(d);
if ( preempted )
/* Not finished. Set up to re-run the call. */
- rc = -EAGAIN;
+ rc = hypercall_create_continuation(
+ __HYPERVISOR_domctl, "h", u_domctl);
else
/* Finished. Return the new allocation */
sc->mb = shadow_get_allocation(d);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 45d90bf..73842f9 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -479,6 +479,7 @@ int domain_kill(struct domain *d)
rc = domain_relinquish_resources(d);
if ( rc != 0 )
{
+ BUG_ON(rc != -EAGAIN);
break;
}
if ( sched_move_domain(d, cpupool0) )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index bd170ed..6c62c25 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -193,20 +193,6 @@ struct paging_domain {
struct hap_domain hap;
/* log dirty support */
struct log_dirty_domain log_dirty;
-
- /* preemption handling */
- struct {
- struct vcpu *vcpu;
- unsigned int op;
- union {
- struct {
- unsigned long done:PADDR_BITS - PAGE_SHIFT;
- unsigned long i4:PAGETABLE_ORDER;
- unsigned long i3:PAGETABLE_ORDER;
- } log_dirty;
- };
- } preempt;
-
/* alloc/free pages from the pool for paging-assistance structures
* (used by p2m and log-dirty code for their tries) */
struct page_info * (*alloc_page)(struct domain *d);
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 21daf04..1e77e78 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -141,6 +141,9 @@ struct paging_mode {
/*****************************************************************************
* Log dirty code */
+/* free log dirty bitmap resource */
+void paging_free_log_dirty_bitmap(struct domain *d);
+
/* get the dirty bitmap for a specific range of pfns */
void paging_log_dirty_range(struct domain *d,
unsigned long begin_pfn,
@@ -150,6 +153,9 @@ void paging_log_dirty_range(struct domain *d,
/* enable log dirty */
int paging_log_dirty_enable(struct domain *d);
+/* disable log dirty */
+int paging_log_dirty_disable(struct domain *d);
+
/* log dirty initialization */
void paging_log_dirty_init(struct domain *d,
int (*enable_log_dirty)(struct domain *d),
@@ -212,7 +218,7 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
XEN_GUEST_HANDLE(void) u_domctl);
/* Call when destroying a domain */
-int paging_teardown(struct domain *d);
+void paging_teardown(struct domain *d);
/* Call once all of the references to the domain have gone away */
void paging_final_teardown(struct domain *d);
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |