[Xen-devel] [PATCH] domctl: improve locking during domain destruction
There is no need to hold the global domctl lock across domain_kill() -
the per-domain lock is fully sufficient here, and parallel cleanup of
multiple domains performs quite a bit better this way.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Changes since RFC: Comment added.
---
Obviously other domctl-s could benefit from similar adjustments, so
this is meant to be just a start.
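
To make the change concrete, here is a condensed before/after view of the
XEN_DOMCTL_destroydomain path (simplified from the hunks below; the
surrounding do_domctl() plumbing and error handling are omitted):

    /* Before: every destruction serializes on the one global domctl
     * lock, which do_domctl() acquired on entry. */
    ret = domain_kill(d);
    ...
    domctl_lock_release();

    /* After: the global lock is released up front, and destruction is
     * serialized only against other operations on the same domain, so
     * several domains can be torn down in parallel. */
    domctl_lock_release();
    domain_lock(d);
    ret = domain_kill(d);
    domain_unlock(d);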
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -615,13 +615,21 @@ int domain_kill(struct domain *d)
     if ( d == current->domain )
         return -EINVAL;
 
-    /* Protected by domctl_lock. */
+    /* Protected by d->domain_lock. */
     switch ( d->is_dying )
     {
     case DOMDYING_alive:
+        domain_unlock(d);
         domain_pause(d);
+        domain_lock(d);
+        /*
+         * With the domain lock dropped, d->is_dying may have changed. Call
+         * ourselves recursively if so, which is safe as then we won't come
+         * back here.
+         */
+        if ( d->is_dying != DOMDYING_alive )
+            return domain_kill(d);
         d->is_dying = DOMDYING_dying;
-        spin_barrier(&d->domain_lock);
         evtchn_destroy(d);
         gnttab_release_mappings(d);
         tmem_destroy(d->tmem_client);
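
The hunk above follows a standard pattern: drop the lock around a call
that may block (domain_pause() has to wait for the domain to be fully
paused), then re-take the lock and re-validate the state it protects. A
minimal standalone sketch of that pattern, using POSIX mutexes and
made-up names rather than the actual Xen code:

    #include <pthread.h>

    enum dying { ALIVE, DYING, DEAD };

    struct dom {
        pthread_mutex_t lock;
        enum dying is_dying;  /* only advances: ALIVE -> DYING -> DEAD */
    };

    /* Stand-in for a blocking wait; must not be called with d->lock
     * held. */
    static void pause_dom(struct dom *d) { (void)d; }

    /* Called with d->lock held. */
    int kill_dom(struct dom *d)
    {
        if ( d->is_dying == ALIVE )
        {
            /* Drop the lock around the blocking call... */
            pthread_mutex_unlock(&d->lock);
            pause_dom(d);
            pthread_mutex_lock(&d->lock);

            /* ...and re-check: another thread may have advanced the
             * state while the lock was dropped.  Recursing is safe
             * because the state only moves forward, so the recursive
             * call cannot reach this branch again. */
            if ( d->is_dying != ALIVE )
                return kill_dom(d);

            d->is_dying = DYING;
        }
        /* ...actual teardown would continue here... */
        return 0;
    }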
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -665,11 +665,14 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         break;
 
     case XEN_DOMCTL_destroydomain:
+        domctl_lock_release();
+        domain_lock(d);
         ret = domain_kill(d);
+        domain_unlock(d);
         if ( ret == -ERESTART )
             ret = hypercall_create_continuation(
                 __HYPERVISOR_domctl, "h", u_domctl);
-        break;
+        goto domctl_out_unlock_domonly;
 
     case XEN_DOMCTL_setnodeaffinity:
     {
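
For context on the -ERESTART handling kept above: domain_kill() is
preemptible, returning -ERESTART while teardown still has work left, and
hypercall_create_continuation() arranges for the same domctl to be
re-issued so teardown resumes where it stopped. A rough standalone
sketch of that shape, with made-up helpers rather than the actual Xen
code:

    struct dom { int resources_left; int ticks; };

    #define ERESTART 85                /* stand-in for Xen's own value */

    /* Pretend to release one resource; returns 0 once nothing remains. */
    static int release_one_resource(struct dom *d)
    {
        return d->resources_left ? d->resources_left-- : 0;
    }

    /* Pretend a preemption point fires every so often. */
    static int preemption_due(struct dom *d)
    {
        return ++d->ticks % 64 == 0;
    }

    /* Returns 0 when teardown is complete, -ERESTART when the caller
     * should create a hypercall continuation and re-enter later. */
    int kill_dom(struct dom *d)
    {
        while ( release_one_resource(d) )
            if ( preemption_due(d) )
                return -ERESTART;
        return 0;
    }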