|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 2/2] x86: fix domain cleanup
Free d->arch.cpuids on both the creation error path and during
destruction.
Don't bypass iommu_domain_destroy().
Move psr_domain_init() up so that hvm_domain_initialise() is once again the
last thing which can fail, and hence doesn't require undoing later on.
Move psr_domain_free() up on the creation error path, so that cleanup
once again gets done in reverse order of setup.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -642,21 +642,18 @@ int arch_domain_create(struct domain *d,
}
spin_lock_init(&d->arch.e820_lock);
+ if ( (rc = psr_domain_init(d)) != 0 )
+ goto fail;
+
if ( has_hvm_container_domain(d) )
{
if ( (rc = hvm_domain_initialise(d)) != 0 )
- {
- iommu_domain_destroy(d);
goto fail;
- }
}
else
/* 64-bit PV guest by default. */
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- if ( (rc = psr_domain_init(d)) != 0 )
- goto fail;
-
/* initialize default tsc behavior in case tools don't */
tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
spin_lock_init(&d->arch.vtsc_lock);
@@ -674,8 +671,11 @@ int arch_domain_create(struct domain *d,
fail:
d->is_dying = DOMDYING_dead;
+ psr_domain_free(d);
+ iommu_domain_destroy(d);
cleanup_domain_irq_mapping(d);
free_xenheap_page(d->shared_info);
+ xfree(d->arch.cpuids);
if ( paging_initialised )
paging_final_teardown(d);
free_perdomain_mappings(d);
@@ -684,7 +684,6 @@ int arch_domain_create(struct domain *d,
xfree(d->arch.pv_domain.cpuidmasks);
free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
}
- psr_domain_free(d);
return rc;
}
@@ -694,6 +693,7 @@ void arch_domain_destroy(struct domain *
hvm_domain_destroy(d);
xfree(d->arch.e820);
+ xfree(d->arch.cpuids);
free_domain_pirqs(d);
if ( !is_idle_domain(d) )
Attachment:
x86-free-cpuids.patch _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |