[Xen-devel] [PATCH v5 05/10] AMD/IOMMU: let callers of amd_iommu_alloc_intremap_table() handle errors
Additional users of the function will want to handle errors more
gracefully. Remove the BUG_ON()s and make the current caller panic()
instead.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v5: New.

--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -86,6 +86,10 @@ static void __init add_ivrs_mapping_entr
             ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
             ivrs_mappings[alias_id].intremap_inuse = shared_intremap_inuse;
         }
+
+        if ( !ivrs_mappings[alias_id].intremap_table )
+            panic("No memory for %04x:%02x:%02x.%u's IRT\n", iommu->seg,
+                  PCI_BUS(alias_id), PCI_SLOT(alias_id), PCI_FUNC(alias_id));
     }
 
     ivrs_mappings[alias_id].valid = true;
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -817,12 +817,22 @@ int __init amd_iommu_free_intremap_table
 void *__init amd_iommu_alloc_intremap_table(
     const struct amd_iommu *iommu, unsigned long **inuse_map)
 {
-    void *tb = __alloc_amd_iommu_tables(intremap_table_order(iommu));
+    unsigned int order = intremap_table_order(iommu);
+    void *tb = __alloc_amd_iommu_tables(order);
+
+    if ( tb )
+    {
+        *inuse_map = xzalloc_array(unsigned long,
+                                   BITS_TO_LONGS(INTREMAP_ENTRIES));
+        if ( *inuse_map )
+            memset(tb, 0, PAGE_SIZE << order);
+        else
+        {
+            __free_amd_iommu_tables(tb, order);
+            tb = NULL;
+        }
+    }
 
-    BUG_ON(tb == NULL);
-    memset(tb, 0, PAGE_SIZE << intremap_table_order(iommu));
-    *inuse_map = xzalloc_array(unsigned long, BITS_TO_LONGS(INTREMAP_ENTRIES));
-    BUG_ON(*inuse_map == NULL);
     return tb;
 }
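
For illustration only, the sketch below shows roughly the shape a later caller could take once the allocation failure is reported to it rather than tripping a BUG_ON(). The helper name and its exact error handling are assumptions for this sketch, not part of the patch:

    /*
     * Hypothetical caller sketch (not part of this patch): report a failed
     * IRT allocation as -ENOMEM and let the caller decide how to degrade,
     * instead of bringing the host down.
     */
    static int set_up_device_irt(const struct amd_iommu *iommu,
                                 struct ivrs_mappings *mapping)
    {
        if ( !mapping->intremap_table )
        {
            mapping->intremap_table =
                amd_iommu_alloc_intremap_table(iommu,
                                               &mapping->intremap_inuse);
            if ( !mapping->intremap_table )
                return -ENOMEM;
        }

        return 0;
    }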