
[Xen-devel] [PATCH v3 04/10] vt-d: propagate IOMMU Device-TLB flush error up to IOMMU unmapping.



Propagate the IOMMU Device-TLB flush error up to IOMMU unmapping.
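
As an illustration (not part of this patch), once dma_pte_clear_one()
returns an error code, intel_iommu_unmap_page() can hand it back to the
generic unmap path, whose callers may then act on it. A hypothetical
caller-side sketch (handle_iommu_error() is made up for illustration):

    int rc = iommu_unmap_page(d, gfn);

    if ( rc )
        handle_iommu_error(rc); /* e.g. unwind the mapping or report failure */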

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>

CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Feng Wu <feng.wu@xxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/drivers/passthrough/vtd/iommu.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 6e2e43a..e908046 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -618,11 +618,12 @@ static void intel_iommu_iotlb_flush_all(struct domain *d)
 }
 
 /* clear one page's page table */
-static void dma_pte_clear_one(struct domain *domain, u64 addr)
+static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct dma_pte *page = NULL, *pte = NULL;
     u64 pg_maddr;
+    int rc = 0;
 
     spin_lock(&hd->arch.mapping_lock);
     /* get last level pte */
@@ -630,7 +631,7 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     if ( pg_maddr == 0 )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        return;
+        return 0;
     }
 
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -640,7 +641,7 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     {
         spin_unlock(&hd->arch.mapping_lock);
         unmap_vtd_domain_page(page);
-        return;
+        return 0;
     }
 
     dma_clear_pte(*pte);
@@ -648,9 +649,11 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        iommu_flush_iotlb(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+        rc = iommu_flush_iotlb(domain, addr >> PAGE_SHIFT_4K, 1, 1);
 
     unmap_vtd_domain_page(page);
+
+    return rc;
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1771,9 +1774,7 @@ static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
     if ( iommu_passthrough && is_hardware_domain(d) )
         return 0;
 
-    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
-
-    return 0;
+    return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
 }
 
 int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
-- 
1.9.1
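
For reference, the __must_check annotation (in Xen, typically a wrapper
around GCC's warn_unused_result attribute) makes the compiler warn
whenever a caller silently drops the new return value. A minimal
standalone sketch of the mechanism, under that assumption:

    /* Stand-in for Xen's own definition; illustration only. */
    #define __must_check __attribute__((__warn_unused_result__))

    static int __must_check flush(void)
    {
        return -1; /* pretend the flush failed */
    }

    int main(void)
    {
        flush();                 /* gcc warns: result ignored */
        return flush() ? 1 : 0;  /* fine: result is consumed */
    }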

