[PATCH v2 1/5] x86/paging: fold most HAP and shadow final teardown
HAP does a few things beyond what's common, which are left there at
least for now. Common operations, however, are moved to
paging_final_teardown(), allowing shadow_final_teardown() to go away.
While moving (and hence generalizing) the respective SHADOW_PRINTK()s,
drop the logging of total_pages from the 2nd instance - the value is
necessarily zero after {hap,shadow}_set_allocation() - and shorten the
messages, in part accounting for PAGING_PRINTK() logging __func__
already.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
The remaining parts of hap_final_teardown() could be moved as well, at
the price of a CONFIG_HVM conditional (as sketched below). I wasn't sure
whether that would be deemed reasonable.
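
For illustration only, such a move might look roughly like the sketch
below. This is not part of the patch; it assumes hap_final_teardown()
is by then left with just its altp2m / nested-p2m loops, and the exact
#ifdef placement is a guess:

    void paging_final_teardown(struct domain *d)
    {
        bool hap = hap_enabled(d);

    #ifdef CONFIG_HVM
        if ( hap )
        {
            unsigned int i;

            /* What hap_final_teardown() is left doing after this patch. */
            if ( hvm_altp2m_supported() )
                for ( i = 0; i < MAX_ALTP2M; i++ )
                    p2m_teardown(d->arch.altp2m_p2m[i], true, NULL);

            for ( i = 0; i < MAX_NESTEDP2M; i++ )
                p2m_teardown(d->arch.nested_p2m[i], true, NULL);
        }
    #endif

        /* ... remainder as introduced by this patch ... */
    }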
---
v2: Shorten PAGING_PRINTK() messages. Adjust comments while being
moved.
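
For reference, the net effect on paging_final_teardown() (condensed
for illustration from the paging.c hunk below, with the PRINTK()s and
ASSERT()s omitted):

    void paging_final_teardown(struct domain *d)
    {
        bool hap = hap_enabled(d);

        if ( hap )
            hap_final_teardown(d);     /* altp2m / nested p2m teardown */

        /* Drain what certain error paths may have left in the pool. */
        if ( d->arch.paging.total_pages )
        {
            if ( hap )
                hap_teardown(d, NULL);
            else
                shadow_teardown(d, NULL);
        }

        /* Only now is it safe to pull down the host p2m ... */
        p2m_teardown(p2m_get_hostp2m(d), true, NULL);

        /* ... and to free whatever that released. */
        paging_lock(d);
        if ( hap )
            hap_set_allocation(d, 0, NULL);
        else
            shadow_set_allocation(d, 0, NULL);
        paging_unlock(d);

        p2m_final_teardown(d);
    }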
--- a/xen/arch/x86/include/asm/shadow.h
+++ b/xen/arch/x86/include/asm/shadow.h
@@ -78,9 +78,6 @@ int shadow_domctl(struct domain *d,
 void shadow_vcpu_teardown(struct vcpu *v);
 void shadow_teardown(struct domain *d, bool *preempted);
 
-/* Call once all of the references to the domain have gone away */
-void shadow_final_teardown(struct domain *d);
-
 void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);
 
 /* Adjust shadows ready for a guest page to change its type. */
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -268,8 +268,8 @@ static void hap_free(struct domain *d, m
 
     /*
      * For dying domains, actually free the memory here. This way less work is
-     * left to hap_final_teardown(), which cannot easily have preemption checks
-     * added.
+     * left to paging_final_teardown(), which cannot easily have preemption
+     * checks added.
      */
     if ( unlikely(d->is_dying) )
     {
@@ -552,18 +552,6 @@ void hap_final_teardown(struct domain *d
     for (i = 0; i < MAX_NESTEDP2M; i++) {
         p2m_teardown(d->arch.nested_p2m[i], true, NULL);
     }
-
-    if ( d->arch.paging.total_pages != 0 )
-        hap_teardown(d, NULL);
-
-    p2m_teardown(p2m_get_hostp2m(d), true, NULL);
-    /* Free any memory that the p2m teardown released */
-    paging_lock(d);
-    hap_set_allocation(d, 0, NULL);
-    ASSERT(d->arch.paging.p2m_pages == 0);
-    ASSERT(d->arch.paging.free_pages == 0);
-    ASSERT(d->arch.paging.total_pages == 0);
-    paging_unlock(d);
 }
 
 void hap_vcpu_teardown(struct vcpu *v)
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -842,10 +842,45 @@ int paging_teardown(struct domain *d)
 /* Call once all of the references to the domain have gone away */
 void paging_final_teardown(struct domain *d)
 {
-    if ( hap_enabled(d) )
+    bool hap = hap_enabled(d);
+
+    PAGING_PRINTK("%pd start: total = %u, free = %u, p2m = %u\n",
+                  d, d->arch.paging.total_pages,
+                  d->arch.paging.free_pages, d->arch.paging.p2m_pages);
+
+    if ( hap )
         hap_final_teardown(d);
+
+    /*
+     * Remove remaining paging memory. This can be nonzero on certain error
+     * paths.
+     */
+    if ( d->arch.paging.total_pages )
+    {
+        if ( hap )
+            hap_teardown(d, NULL);
+        else
+            shadow_teardown(d, NULL);
+    }
+
+    /* It is now safe to pull down the p2m map. */
+    p2m_teardown(p2m_get_hostp2m(d), true, NULL);
+
+    /* Free any paging memory that the p2m teardown released. */
+    paging_lock(d);
+
+    if ( hap )
+        hap_set_allocation(d, 0, NULL);
     else
-        shadow_final_teardown(d);
+        shadow_set_allocation(d, 0, NULL);
+
+    PAGING_PRINTK("%pd done: free = %u, p2m = %u\n",
+                  d, d->arch.paging.free_pages, d->arch.paging.p2m_pages);
+    ASSERT(!d->arch.paging.p2m_pages);
+    ASSERT(!d->arch.paging.free_pages);
+    ASSERT(!d->arch.paging.total_pages);
+
+    paging_unlock(d);
 
     p2m_final_teardown(d);
 }
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1194,7 +1194,7 @@ void shadow_free(struct domain *d, mfn_t
 
         /*
          * For dying domains, actually free the memory here. This way less
-         * work is left to shadow_final_teardown(), which cannot easily have
+         * work is left to paging_final_teardown(), which cannot easily have
          * preemption checks added.
          */
         if ( unlikely(dying) )
@@ -2898,35 +2898,6 @@ out:
     }
 }
 
-void shadow_final_teardown(struct domain *d)
-/* Called by arch_domain_destroy(), when it's safe to pull down the p2m map. */
-{
-    SHADOW_PRINTK("dom %u final teardown starts."
-                  " Shadow pages total = %u, free = %u, p2m=%u\n",
-                  d->domain_id, d->arch.paging.total_pages,
-                  d->arch.paging.free_pages, d->arch.paging.p2m_pages);
-
-    /* Double-check that the domain didn't have any shadow memory.
-     * It is possible for a domain that never got domain_kill()ed
-     * to get here with its shadow allocation intact. */
-    if ( d->arch.paging.total_pages != 0 )
-        shadow_teardown(d, NULL);
-
-    /* It is now safe to pull down the p2m map. */
-    p2m_teardown(p2m_get_hostp2m(d), true, NULL);
-
-    /* Free any shadow memory that the p2m teardown released */
-    paging_lock(d);
-    shadow_set_allocation(d, 0, NULL);
-    SHADOW_PRINTK("dom %u final teardown done."
-                  " Shadow pages total = %u, free = %u, p2m=%u\n",
-                  d->domain_id, d->arch.paging.total_pages,
-                  d->arch.paging.free_pages, d->arch.paging.p2m_pages);
-    ASSERT(d->arch.paging.p2m_pages == 0);
-    ASSERT(d->arch.paging.free_pages == 0);
-    ASSERT(d->arch.paging.total_pages == 0);
-    paging_unlock(d);
-}
-
 static int shadow_one_bit_enable(struct domain *d, u32 mode)
 /* Turn on a single shadow mode feature */
 {