[xen staging] xen/x86: Fix memory leak in vcpu_create() error path
commit d162f36848c4a98a782cc05820b0aa7ec1ae297d
Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Sep 28 15:25:44 2020 +0100
Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon Dec 21 14:11:25 2020 +0000
xen/x86: Fix memory leak in vcpu_create() error path
Various paths in vcpu_create() end up calling paging_update_paging_modes(),
which eventually allocates a monitor pagetable if one doesn't exist.
However, an error in vcpu_create() results in the vcpu being cleaned up
locally, and not put onto the domain's vcpu list. Therefore, the monitor
table is not freed by {hap,shadow}_teardown()'s loop. This is caught by
assertions later that we've successfully freed the entire hap/shadow memory
pool.
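For illustration, a minimal, self-contained sketch of the leak pattern in plain
C (made-up names, not Xen code): the per-object resource is only freed by
walking the container's list, so an object discarded before being enrolled in
that list leaks its resource.

    #include <stdlib.h>

    struct vcpu_like {
        void *monitor;               /* stands in for the monitor pagetable */
        struct vcpu_like *next;
    };

    struct domain_like {
        struct vcpu_like *vcpus;     /* teardown only walks this list */
    };

    static int create_vcpu(struct domain_like *d, int fail)
    {
        struct vcpu_like *v = calloc(1, sizeof(*v));

        if ( !v )
            return -1;

        v->monitor = malloc(4096);   /* allocated as a side effect of setup */

        if ( fail )
        {
            /* Error path: v is freed locally and never linked into d->vcpus,
             * so a teardown that only walks the domain's list cannot find and
             * free v->monitor -- it leaks, just like the monitor pagetable. */
            free(v);
            return -1;
        }

        v->next = d->vcpus;
        d->vcpus = v;
        return 0;
    }

The fix below mirrors this: give the vcpu its own teardown helper and call it
on the error path before the vcpu is discarded.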
The per-vcpu loops in the domain teardown logic are conceptually wrong, but
exist due to insufficient structure in the existing logic.
Break paging_vcpu_teardown() out of paging_teardown(), with mirrored breakouts
in the hap/shadow code, and use it from arch_vcpu_create()'s error path. This
fixes the memory leak.
The new {hap,shadow}_vcpu_teardown() must be idempotent, and are written to be
as tolerant as possible, with the minimum number of safety checks. In
particular, drop the mfn_valid() check - if these fields are junk, then Xen is
going to explode anyway.
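The shape that makes these helpers idempotent is "free if set, then clear", so
a second call finds nothing to do. A stand-alone sketch of the pattern (not Xen
code):

    #include <stdlib.h>

    struct obj {
        void *table;
    };

    /* Safe to call any number of times, including on an object whose table
     * was never allocated (still NULL). */
    static void obj_teardown(struct obj *o)
    {
        if ( o->table )
        {
            free(o->table);
            o->table = NULL;
        }
    }

This is also why arch_vcpu_create()'s error path below can call
paging_vcpu_teardown() unconditionally, regardless of how far construction got.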
Reported-by: Michał Leszczyński <michal.leszczynski@xxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/domain.c | 1 +
xen/arch/x86/mm/hap/hap.c | 39 ++++++++++++++++------------
xen/arch/x86/mm/paging.c | 8 ++++++
xen/arch/x86/mm/shadow/common.c | 56 +++++++++++++++++++++++------------------
xen/include/asm-x86/hap.h | 1 +
xen/include/asm-x86/paging.h | 3 ++-
xen/include/asm-x86/shadow.h | 3 ++-
7 files changed, 69 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4932ed62f1..b9ba04633e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -588,6 +588,7 @@ int arch_vcpu_create(struct vcpu *v)
return rc;
fail:
+ paging_vcpu_teardown(v);
vcpu_destroy_fpu(v);
xfree(v->arch.msrs);
v->arch.msrs = NULL;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 0fdb7d4a59..73575deb0d 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -563,30 +563,37 @@ void hap_final_teardown(struct domain *d)
paging_unlock(d);
}
+void hap_vcpu_teardown(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ mfn_t mfn;
+
+ paging_lock(d);
+
+ if ( !paging_mode_hap(d) || !v->arch.paging.mode )
+ goto out;
+
+ mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
+ if ( mfn_x(mfn) )
+ hap_destroy_monitor_table(v, mfn);
+ v->arch.hvm.monitor_table = pagetable_null();
+
+ out:
+ paging_unlock(d);
+}
+
void hap_teardown(struct domain *d, bool *preempted)
{
struct vcpu *v;
- mfn_t mfn;
ASSERT(d->is_dying);
ASSERT(d != current->domain);
- paging_lock(d); /* Keep various asserts happy */
+ /* TODO - Remove when the teardown path is better structured. */
+ for_each_vcpu ( d, v )
+ hap_vcpu_teardown(v);
- if ( paging_mode_enabled(d) )
- {
- /* release the monitor table held by each vcpu */
- for_each_vcpu ( d, v )
- {
- if ( paging_get_hostmode(v) && paging_mode_external(d) )
- {
- mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
- if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
- hap_destroy_monitor_table(v, mfn);
- v->arch.hvm.monitor_table = pagetable_null();
- }
- }
- }
+ paging_lock(d); /* Keep various asserts happy */
if ( d->arch.paging.hap.total_pages != 0 )
{
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 3aaec2ee3a..8bc14df943 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -794,6 +794,14 @@ long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
}
#endif /* PG_log_dirty */
+void paging_vcpu_teardown(struct vcpu *v)
+{
+ if ( hap_enabled(v->domain) )
+ hap_vcpu_teardown(v);
+ else
+ shadow_vcpu_teardown(v);
+}
+
/* Call when destroying a domain */
int paging_teardown(struct domain *d)
{
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index a33e100861..3298711972 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2779,6 +2779,34 @@ int shadow_enable(struct domain *d, u32 mode)
return rv;
}
+void shadow_vcpu_teardown(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+
+ paging_lock(d);
+
+ if ( !paging_mode_shadow(d) || !v->arch.paging.mode )
+ goto out;
+
+ v->arch.paging.mode->shadow.detach_old_tables(v);
+#ifdef CONFIG_HVM
+ if ( shadow_mode_external(d) )
+ {
+ mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
+
+ if ( mfn_x(mfn) )
+ sh_destroy_monitor_table(
+ v, mfn,
+ v->arch.paging.mode->shadow.shadow_levels);
+
+ v->arch.hvm.monitor_table = pagetable_null();
+ }
+#endif
+
+ out:
+ paging_unlock(d);
+}
+
void shadow_teardown(struct domain *d, bool *preempted)
/* Destroy the shadow pagetables of this domain and free its shadow memory.
* Should only be called for dying domains. */
@@ -2789,31 +2817,11 @@ void shadow_teardown(struct domain *d, bool *preempted)
ASSERT(d->is_dying);
ASSERT(d != current->domain);
- paging_lock(d);
-
- if ( shadow_mode_enabled(d) )
- {
- /* Release the shadow and monitor tables held by each vcpu */
- for_each_vcpu(d, v)
- {
- if ( v->arch.paging.mode )
- {
- v->arch.paging.mode->shadow.detach_old_tables(v);
-#ifdef CONFIG_HVM
- if ( shadow_mode_external(d) )
- {
- mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
+ /* TODO - Remove when the teardown path is better structured. */
+ for_each_vcpu ( d, v )
+ shadow_vcpu_teardown(v);
- if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
- sh_destroy_monitor_table(
- v, mfn,
- v->arch.paging.mode->shadow.shadow_levels);
- v->arch.hvm.monitor_table = pagetable_null();
- }
-#endif /* CONFIG_HVM */
- }
- }
- }
+ paging_lock(d);
#if (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC))
/* Free the virtual-TLB array attached to each vcpu */
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index d489df3812..90dece29de 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -36,6 +36,7 @@ int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
int hap_enable(struct domain *d, u32 mode);
void hap_final_teardown(struct domain *d);
+void hap_vcpu_teardown(struct vcpu *v);
void hap_teardown(struct domain *d, bool *preempted);
void hap_vcpu_init(struct vcpu *v);
int hap_track_dirty_vram(struct domain *d,
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 5cf128cf34..7332a9b506 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -237,7 +237,8 @@ int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
/* Helper hypercall for dealing with continuations. */
long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-/* Call when destroying a domain */
+/* Call when destroying a vcpu/domain */
+void paging_vcpu_teardown(struct vcpu *v);
int paging_teardown(struct domain *d);
/* Call once all of the references to the domain have gone away */
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 76e47f257f..29a86ed78e 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -74,7 +74,8 @@ int shadow_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
-/* Call when destroying a domain */
+/* Call when destroying a vcpu/domain */
+void shadow_vcpu_teardown(struct vcpu *v);
void shadow_teardown(struct domain *d, bool *preempted);
/* Call once all of the references to the domain have gone away */
--
generated by git-patchbot for /home/xen/git/xen.git#staging