[Xen-devel] [PATCH v10 1/5] dmar: device scope mem leak fix
From: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx>

Release memory allocated for scope.devices of dmar units on various
failure paths and when disabling dmar. Set device count after
successful memory allocation, not before, in the device scope parsing
function.

Signed-off-by: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx>
---
Changes in v10:
  - mark patch v6 as v10 and include it in the series of patches which
    add the RMRR command line option for Xen;

Changes in v6:
  - eliminated unrelated code move;
  - fix memory leak introduced in v5;

Changes in v5:
  - make scope_devices_free actually safe;

Changes in v4:
  - make scope_devices_free safe to call with NULL scope pointer;
  - since scope_devices_free is safe to call, use it in failure path
    in acpi_parse_one_drhd;

Changes in v3:
  - make freeing memory for scope devices and zeroing the device
    counter a separate function;
  - make sure parse_one_rmrr has the memory leak fix in this patch;
  - make sure ret values are not lost in acpi_parse_one_drhd;

Changes in v2:
  - release memory for devices scope on error paths in
    acpi_parse_one_drhd and acpi_parse_one_atsr and set the count
    to zero;

 xen/drivers/passthrough/vtd/dmar.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 2b07be9..8ed1e24 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -81,6 +81,15 @@ static int __init acpi_register_rmrr_unit(struct acpi_rmrr_unit *rmrr)
     return 0;
 }
 
+static void scope_devices_free(struct dmar_scope *scope)
+{
+    if ( !scope )
+        return;
+
+    scope->devices_cnt = 0;
+    xfree(scope->devices);
+}
+
 static void __init disable_all_dmar_units(void)
 {
     struct acpi_drhd_unit *drhd, *_drhd;
@@ -90,16 +99,19 @@ static void __init disable_all_dmar_units(void)
     list_for_each_entry_safe ( drhd, _drhd, &acpi_drhd_units, list )
     {
         list_del(&drhd->list);
+        scope_devices_free(&drhd->scope);
         xfree(drhd);
     }
     list_for_each_entry_safe ( rmrr, _rmrr, &acpi_rmrr_units, list )
     {
         list_del(&rmrr->list);
+        scope_devices_free(&rmrr->scope);
         xfree(rmrr);
     }
     list_for_each_entry_safe ( atsr, _atsr, &acpi_atsr_units, list )
     {
         list_del(&atsr->list);
+        scope_devices_free(&atsr->scope);
         xfree(atsr);
     }
 }
@@ -318,13 +330,13 @@ static int __init acpi_parse_dev_scope(
     if ( (cnt = scope_device_count(start, end)) < 0 )
         return cnt;
 
-    scope->devices_cnt = cnt;
     if ( cnt > 0 )
     {
         scope->devices = xzalloc_array(u16, cnt);
         if ( !scope->devices )
             return -ENOMEM;
     }
+    scope->devices_cnt = cnt;
 
     while ( start < end )
     {
@@ -427,7 +439,7 @@ static int __init acpi_parse_dev_scope(
 
  out:
     if ( ret )
-        xfree(scope->devices);
+        scope_devices_free(scope);
 
     return ret;
 }
@@ -542,6 +554,7 @@ acpi_parse_one_drhd(struct acpi_dmar_header *header)
                     " Workaround BIOS bug: ignore the DRHD due to all "
                     "devices under its scope are not PCI discoverable!\n");
 
+            scope_devices_free(&dmaru->scope);
             iommu_free(dmaru);
             xfree(dmaru);
         }
@@ -562,9 +575,11 @@ acpi_parse_one_drhd(struct acpi_dmar_header *header)
 out:
     if ( ret )
     {
+        scope_devices_free(&dmaru->scope);
         iommu_free(dmaru);
         xfree(dmaru);
     }
+
     return ret;
 }
@@ -658,6 +673,7 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header)
                 " Ignore the RMRR (%"PRIx64", %"PRIx64") due to "
                 "devices under its scope are not PCI discoverable!\n",
                 rmrru->base_address, rmrru->end_address);
+        scope_devices_free(&rmrru->scope);
         xfree(rmrru);
     }
     else if ( base_addr > end_addr )
@@ -665,6 +681,7 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header)
         dprintk(XENLOG_WARNING VTDPREFIX,
                 " The RMRR (%"PRIx64", %"PRIx64") is incorrect!\n",
                 rmrru->base_address, rmrru->end_address);
+        scope_devices_free(&rmrru->scope);
         xfree(rmrru);
         ret = -EFAULT;
     }
@@ -727,7 +744,10 @@ acpi_parse_one_atsr(struct acpi_dmar_header *header)
     }
 
     if ( ret )
+    {
+        scope_devices_free(&atsru->scope);
         xfree(atsru);
+    }
     else
         acpi_register_atsr_unit(atsru);
     return ret;
--
2.1.3

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel