[xen stable-4.13] AMD/IOMMU: re-arrange/complete re-assignment handling
commit 2357043846efeaa7a8b14791413103bb693dca52
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Aug 25 15:23:43 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Aug 25 15:23:43 2021 +0200

    AMD/IOMMU: re-arrange/complete re-assignment handling

    Prior to the assignment step having completed successfully, devices
    should not get associated with their new owner. Hand the device to
    DomIO (perhaps temporarily), until after the de-assignment step has
    completed.

    De-assignment of a device (from other than Dom0) as well as failure
    of reassign_device() during assignment should result in unity
    mappings getting torn down. This in turn requires switching to a
    refcounted mapping approach, as was already used by VT-d for its
    RMRRs, to prevent unmapping a region used by multiple devices.

    This is CVE-2021-28696 / part of XSA-378.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
    master commit: 899272539cbe1acda736a850015416fff653a1b6
    master date: 2021-08-25 14:16:26 +0200
---
 xen/drivers/passthrough/amd/iommu_map.c       | 63 ++++++++++++++++-----------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   | 54 ++++++++++++++++++-----
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  6 ++-
 3 files changed, 83 insertions(+), 40 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 85b8df9abd..927d6224a9 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -430,38 +430,49 @@ int amd_iommu_flush_iotlb_all(struct domain *d)
     return 0;
 }
 
-int amd_iommu_reserve_domain_unity_map(struct domain *domain,
-                                       paddr_t phys_addr,
-                                       unsigned long size, int iw, int ir)
+int amd_iommu_reserve_domain_unity_map(struct domain *d,
+                                       const struct ivrs_unity_map *map,
+                                       unsigned int flag)
 {
-    unsigned long npages, i;
-    unsigned long gfn;
-    unsigned int flags = !!ir;
-    unsigned int flush_flags = 0;
-    int rt = 0;
-
-    if ( iw )
-        flags |= IOMMUF_writable;
-
-    npages = region_to_pages(phys_addr, size);
-    gfn = phys_addr >> PAGE_SHIFT;
-    for ( i = 0; i < npages; i++ )
+    int rc;
+
+    if ( d == dom_io )
+        return 0;
+
+    for ( rc = 0; !rc && map; map = map->next )
     {
-        unsigned long frame = gfn + i;
+        p2m_access_t p2ma = p2m_access_n;
 
-        rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags,
-                                &flush_flags);
-        if ( rt != 0 )
-            break;
+        if ( map->read )
+            p2ma |= p2m_access_r;
+        if ( map->write )
+            p2ma |= p2m_access_w;
+
+        rc = iommu_identity_mapping(d, p2ma, map->addr,
+                                    map->addr + map->length - 1, flag);
     }
 
-    /* Use while-break to avoid compiler warning */
-    while ( flush_flags &&
-            amd_iommu_flush_iotlb_pages(domain, _dfn(gfn),
-                                        npages, flush_flags) )
-        break;
+    return rc;
+}
+
+int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
+                                         const struct ivrs_unity_map *map)
+{
+    int rc;
+
+    if ( d == dom_io )
+        return 0;
+
+    for ( rc = 0; map; map = map->next )
+    {
+        int ret = iommu_identity_mapping(d, p2m_access_x, map->addr,
+                                         map->addr + map->length - 1, 0);
+
+        if ( ret && ret != -ENOENT && !rc )
+            rc = ret;
+    }
 
-    return rt;
+    return rc;
 }
 
 int __init amd_iommu_quarantine_init(struct domain *d)
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 8cd5394e6b..6c730f1a72 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -330,6 +330,7 @@ static int reassign_device(struct domain *source, struct domain *target,
     struct amd_iommu *iommu;
     int bdf, rc;
     struct domain_iommu *t = dom_iommu(target);
+    const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
 
     bdf = PCI_BDF2(pdev->bus, pdev->devfn);
     iommu = find_iommu_for_device(pdev->seg, bdf);
@@ -344,10 +345,24 @@ static int reassign_device(struct domain *source, struct domain *target,
 
     amd_iommu_disable_domain_device(source, iommu, devfn, pdev);
 
-    if ( devfn == pdev->devfn )
+    /*
+     * If the device belongs to the hardware domain, and it has a unity mapping,
+     * don't remove it from the hardware domain, because BIOS may reference that
+     * mapping.
+     */
+    if ( !is_hardware_domain(source) )
     {
-        list_move(&pdev->domain_list, &target->pdev_list);
-        pdev->domain = target;
+        rc = amd_iommu_reserve_domain_unity_unmap(
+                 source,
+                 ivrs_mappings[get_dma_requestor_id(pdev->seg, bdf)].unity_map);
+        if ( rc )
+            return rc;
+    }
+
+    if ( devfn == pdev->devfn && pdev->domain != dom_io )
+    {
+        list_move(&pdev->domain_list, &dom_io->pdev_list);
+        pdev->domain = dom_io;
     }
 
     rc = allocate_domain_resources(t);
@@ -359,6 +374,12 @@ static int reassign_device(struct domain *source, struct domain *target,
                     pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                     source->domain_id, target->domain_id);
 
+    if ( devfn == pdev->devfn && pdev->domain != target )
+    {
+        list_move(&pdev->domain_list, &target->pdev_list);
+        pdev->domain = target;
+    }
+
     return 0;
 }
 
@@ -369,20 +390,28 @@ static int amd_iommu_assign_device(struct domain *d, u8 devfn,
     struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
     int bdf = PCI_BDF2(pdev->bus, devfn);
     int req_id = get_dma_requestor_id(pdev->seg, bdf);
-    const struct ivrs_unity_map *unity_map;
+    int rc = amd_iommu_reserve_domain_unity_map(
+                 d, ivrs_mappings[req_id].unity_map, flag);
+
+    if ( !rc )
+        rc = reassign_device(pdev->domain, d, devfn, pdev);
 
-    for ( unity_map = ivrs_mappings[req_id].unity_map; unity_map;
-          unity_map = unity_map->next )
+    if ( rc && !is_hardware_domain(d) )
     {
-        int rc = amd_iommu_reserve_domain_unity_map(
-                     d, unity_map->addr, unity_map->length,
-                     unity_map->write, unity_map->read);
+        int ret = amd_iommu_reserve_domain_unity_unmap(
+                      d, ivrs_mappings[req_id].unity_map);
 
-        if ( rc )
-            return rc;
+        if ( ret )
+        {
+            printk(XENLOG_ERR "AMD-Vi: "
+                   "unity-unmap for %pd/%04x:%02x:%02x.%u failed (%d)\n",
+                   d, pdev->seg, pdev->bus,
+                   PCI_SLOT(devfn), PCI_FUNC(devfn), ret);
+            domain_crash(d);
+        }
     }
 
-    return reassign_device(pdev->domain, d, devfn, pdev);
+    return rc;
 }
 
 static void deallocate_next_page_table(struct page_info *pg, int level)
@@ -441,6 +470,7 @@ static void deallocate_iommu_page_tables(struct domain *d)
 
 static void amd_iommu_domain_destroy(struct domain *d)
 {
+    iommu_identity_map_teardown(d);
     deallocate_iommu_page_tables(d);
     amd_iommu_flush_all_pages(d);
 }
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 94e288f52d..8726e62558 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -64,8 +64,10 @@ int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
                                       unsigned int *flush_flags);
 int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
-                                       paddr_t phys_addr, unsigned long size,
-                                       int iw, int ir);
+                                       const struct ivrs_unity_map *map,
+                                       unsigned int flag);
+int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
+                                         const struct ivrs_unity_map *map);
 int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
                                              unsigned int page_count,
                                              unsigned int flush_flags);
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.13
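
The ordering asked for in the first paragraph of the description can be
summarised by the small, self-contained model below. Every name in it
(reassign_model, quiesce_device, and so on) is a stand-in invented for
illustration, not Xen code; the real logic is reassign_device() in
pci_amd_iommu.c as shown in the patch.

/*
 * Minimal model of the reassignment ordering described above.  All names
 * are hypothetical stand-ins; only the ordering mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct domain { const char *name; bool is_hwdom; };
struct pci_dev { struct domain *owner; };

static struct domain dom_io = { "DomIO", false };

/* Stubs standing in for the real quiesce / unmap / setup steps. */
static void quiesce_device(struct domain *d) { printf("quiesce for %s\n", d->name); }
static int drop_unity_maps(struct domain *d) { printf("unmap unity regions of %s\n", d->name); return 0; }
static int setup_device(struct domain *d) { printf("set up tables/DTE for %s\n", d->name); return 0; }

static int reassign_model(struct domain *source, struct domain *target,
                          struct pci_dev *pdev)
{
    int rc;

    quiesce_device(source);

    /* Unity maps of the old owner go away, except for the hardware domain. */
    if ( !source->is_hwdom )
    {
        rc = drop_unity_maps(source);
        if ( rc )
            return rc;
    }

    /* Park the device on DomIO until the new owner is fully set up. */
    pdev->owner = &dom_io;

    rc = setup_device(target);
    if ( rc )
        return rc;              /* on failure the device stays with DomIO */

    /* Only now does the device become associated with its new owner. */
    pdev->owner = target;
    return 0;
}

int main(void)
{
    struct domain dom0 = { "Dom0", true }, domU = { "DomU", false };
    struct pci_dev dev = { &dom0 };

    return reassign_model(&dom0, &domU, &dev) ? 1 : 0;
}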
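
The refcounted mapping approach mentioned in the second paragraph (and
provided by iommu_identity_mapping() in the patch) can likewise be shown
in isolation. The sketch below models only the bookkeeping -- a list node
per range, get/put operations, and the rule that a range is unmapped only
when its last user drops it. The structure and function names are invented
for this example; the real code also has to establish and tear down the
actual IOMMU mappings.

#include <stdint.h>
#include <stdlib.h>

/* One tracked identity/unity range; devices sharing it bump the refcount. */
struct unity_map_ref {
    struct unity_map_ref *next;
    uint64_t base, end;          /* inclusive address range */
    unsigned int refcnt;
};

/* Take a reference; the range is (notionally) mapped only on first use. */
static int unity_map_get(struct unity_map_ref **list,
                         uint64_t base, uint64_t end)
{
    struct unity_map_ref *e;

    for ( e = *list; e; e = e->next )
        if ( e->base == base && e->end == end )
        {
            ++e->refcnt;
            return 0;
        }

    e = calloc(1, sizeof(*e));
    if ( !e )
        return -1;               /* would be -ENOMEM in hypervisor code */

    e->base = base;
    e->end = end;
    e->refcnt = 1;
    e->next = *list;
    *list = e;
    /* A real implementation would create the IOMMU mapping here. */
    return 0;
}

/* Drop a reference; the range is torn down only when the last user goes. */
static void unity_map_put(struct unity_map_ref **list,
                          uint64_t base, uint64_t end)
{
    struct unity_map_ref **pe, *e;

    for ( pe = list; (e = *pe) != NULL; pe = &e->next )
        if ( e->base == base && e->end == end )
        {
            if ( --e->refcnt )
                return;          /* still in use by another device */
            *pe = e->next;
            /* A real implementation would remove the IOMMU mapping here. */
            free(e);
            return;
        }
}

In these terms, assignment roughly corresponds to a get on the target
domain's list and de-assignment (or rollback of a failed assignment) to a
put, which is why a region referenced by several devices survives until
the last of them is released.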