Re: [Xen-devel] [PATCH v2 01/11] vt-d: fix the IOMMU flush issue
> From: Xu, Quan
> Sent: Monday, April 18, 2016 10:00 PM
>
> The value propagated from the IOMMU flush interfaces may be positive, which
> indicates that callers need to flush the cache; it is not a failure.
>
> When the propagated value is positive, this patch fixes the flush issue
> as follows:
> - call iommu_flush_write_buffer() to flush cache.
> - return zero.
>
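In other words, at each flush call site the patch applies roughly the
following pattern (a minimal sketch rather than the patch itself;
flush_one() is a hypothetical stand-in for any VT-d flush primitive that
can return a positive value):

    /* Sketch only: a positive return means "flush the write buffer", which
     * is handled locally and treated as success; negative values are
     * propagated as errors. */
    static int flush_and_propagate(struct iommu *iommu)
    {
        int rc = flush_one(iommu);           /* < 0, 0, or > 0 */

        if ( rc > 0 )
        {
            iommu_flush_write_buffer(iommu); /* requested cache flush */
            rc = 0;                          /* not a failure */
        }

        return rc;                           /* 0 or -errno */
    }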
> Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> CC: Feng Wu <feng.wu@xxxxxxxxx>
> CC: Keir Fraser <keir@xxxxxxx>
> CC: Jan Beulich <jbeulich@xxxxxxxx>
> CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> xen/drivers/passthrough/vtd/iommu.c | 94 ++++++++++++++++++++++++-------------
> xen/include/asm-x86/iommu.h         |  2 +-
> 2 files changed, 63 insertions(+), 33 deletions(-)
>
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 5ad25dc..50d98ac 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -558,14 +558,16 @@ static void iommu_flush_all(void)
> }
> }
>
> -static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
> - int dma_old_pte_present, unsigned int page_count)
> +static int iommu_flush_iotlb(struct domain *d, unsigned long gfn,
Any reason for the renaming here?
> + int dma_old_pte_present,
> + unsigned int page_count)
> {
> struct hvm_iommu *hd = domain_hvm_iommu(d);
> struct acpi_drhd_unit *drhd;
> struct iommu *iommu;
> int flush_dev_iotlb;
> int iommu_domid;
> + int rc = 0;
>
> /*
> * No need pcideves_lock here because we have flush
> @@ -584,29 +586,34 @@ static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
> continue;
>
> if ( page_count != 1 || gfn == INVALID_GFN )
> - {
> - if ( iommu_flush_iotlb_dsi(iommu, iommu_domid,
> - 0, flush_dev_iotlb) )
> - iommu_flush_write_buffer(iommu);
> - }
> + rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
> + 0, flush_dev_iotlb);
> else
> + rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
> + (paddr_t)gfn << PAGE_SHIFT_4K,
> + PAGE_ORDER_4K,
> + !dma_old_pte_present,
> + flush_dev_iotlb);
> +
> + if ( rc > 0 )
> {
> - if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
> - (paddr_t)gfn << PAGE_SHIFT_4K, PAGE_ORDER_4K,
> - !dma_old_pte_present, flush_dev_iotlb) )
> - iommu_flush_write_buffer(iommu);
> - }
> + iommu_flush_write_buffer(iommu);
> + rc = 0;
> + } else if ( rc < 0 )
> + break;
> }
> +
> + return rc;
> }
>
> static void intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
>                                     unsigned int page_count)
> {
> - __intel_iommu_iotlb_flush(d, gfn, 1, page_count);
> + iommu_flush_iotlb(d, gfn, 1, page_count);
> }
>
> static void intel_iommu_iotlb_flush_all(struct domain *d)
> {
> - __intel_iommu_iotlb_flush(d, INVALID_GFN, 0, 0);
> + iommu_flush_iotlb(d, INVALID_GFN, 0, 0);
> }
Do you plan to change the return value of the above two functions in another
patch, or will you keep the current form without propagation?
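(For reference, full propagation would presumably look something like the
sketch below, together with matching changes to the iommu_ops .iotlb_flush /
.iotlb_flush_all hook prototypes; this is only an illustration of the
question, not code from this series.)

    /* Hypothetical illustration only: let the wrappers hand the
     * iommu_flush_iotlb() result back to their callers instead of
     * discarding it. */
    static int intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                                       unsigned int page_count)
    {
        return iommu_flush_iotlb(d, gfn, 1, page_count);
    }

    static int intel_iommu_iotlb_flush_all(struct domain *d)
    {
        return iommu_flush_iotlb(d, INVALID_GFN, 0, 0);
    }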
>
> /* clear one page's page table */
> @@ -640,7 +647,7 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
> iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
>
> if ( !this_cpu(iommu_dont_flush_iotlb) )
> - __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
> + iommu_flush_iotlb(domain, addr >> PAGE_SHIFT_4K, 1, 1);
>
> unmap_vtd_domain_page(page);
> }
> @@ -1281,6 +1288,7 @@ int domain_context_mapping_one(
> u64 maddr, pgd_maddr;
> u16 seg = iommu->intel->drhd->segment;
> int agaw;
> + int rc;
>
> ASSERT(pcidevs_locked());
> spin_lock(&iommu->lock);
> @@ -1394,13 +1402,19 @@ int domain_context_mapping_one(
> spin_unlock(&iommu->lock);
>
> /* Context entry was previously non-present (with domid 0). */
> - if ( iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
> - DMA_CCMD_MASK_NOBIT, 1) )
> - iommu_flush_write_buffer(iommu);
> - else
> + rc = iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
> + DMA_CCMD_MASK_NOBIT, 1);
> +
> + if ( !rc )
> {
> int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
> - iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
> + rc = iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
> + }
> +
> + if ( rc > 0 )
> + {
> + iommu_flush_write_buffer(iommu);
> + rc = 0;
What about the case where iommu_flush_context_device returns rc > 0 while
iommu_flush_iotlb_dsi returns rc < 0? That would leave the write buffer not
flushed.
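(One way to avoid that kind of interaction, sketched purely to illustrate the
concern, is to resolve each flush result immediately after the call that
produced it, e.g. with a small helper; nothing like this is in the posted
patch.)

    /* Illustration only: resolving each result where it is produced means a
     * later failure can no longer swallow an earlier request to flush the
     * write buffer. */
    static int resolve_flush_result(struct iommu *iommu, int rc)
    {
        if ( rc > 0 )
        {
            iommu_flush_write_buffer(iommu);
            rc = 0;
        }

        return rc;
    }

    /* e.g. in domain_context_mapping_one():
     *
     *   rc = resolve_flush_result(iommu,
     *            iommu_flush_context_device(iommu, 0,
     *                                       (((u16)bus) << 8) | devfn,
     *                                       DMA_CCMD_MASK_NOBIT, 1));
     *   if ( !rc )
     *       rc = resolve_flush_result(iommu,
     *                iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb));
     */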
> }
>
> set_bit(iommu->index, &hd->arch.iommu_bitmap);
> @@ -1410,7 +1424,7 @@ int domain_context_mapping_one(
> if ( !seg )
> me_wifi_quirk(domain, bus, devfn, MAP_ME_PHANTOM_FUNC);
>
> - return 0;
> + return rc;
> }
>
> static int domain_context_mapping(
> @@ -1505,6 +1519,7 @@ int domain_context_unmap_one(
> struct context_entry *context, *context_entries;
> u64 maddr;
> int iommu_domid;
> + int rc;
>
> ASSERT(pcidevs_locked());
> spin_lock(&iommu->lock);
> @@ -1532,14 +1547,20 @@ int domain_context_unmap_one(
> return -EINVAL;
> }
>
> - if ( iommu_flush_context_device(iommu, iommu_domid,
> + rc = iommu_flush_context_device(iommu, iommu_domid,
> (((u16)bus) << 8) | devfn,
> - DMA_CCMD_MASK_NOBIT, 0) )
> - iommu_flush_write_buffer(iommu);
> - else
> + DMA_CCMD_MASK_NOBIT, 0);
> +
> + if ( !rc )
> {
> int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
> - iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
> + rc = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
> + }
> +
> + if ( rc > 0 )
> + {
> + iommu_flush_write_buffer(iommu);
> + rc = 0;
> }
ditto
Thanks
Kevin