
Re: [Xen-devel] [PATCH v14 3/9] iommu: push use of type-safe DFN and MFN into iommu_ops



> -----Original Message-----
> From: Paul Durrant [mailto:paul.durrant@xxxxxxxxxx]
> Sent: 04 October 2018 11:46
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Andrew Cooper
> <Andrew.Cooper3@xxxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>
> Subject: [PATCH v14 3/9] iommu: push use of type-safe DFN and MFN into
> iommu_ops
> 
> This patch modifies the methods in struct iommu_ops to use type-safe DFN
> and MFN. This follows on from the prior patch that modified the functions
> exported in xen/iommu.h.
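
[ Aside for anyone reviewing who hasn't met the typesafe types before:
dfn_t/mfn_t follow the usual TYPE_SAFE() wrapper idiom, so mixing up
frame-number spaces becomes a compile-time error rather than a latent
bug. A minimal sketch of the pattern (illustrative only, not the exact
Xen definitions):

    /* Sketch of the TYPE_SAFE() idiom; not the literal Xen code. */

    /* Distinct wrapper structs: a dfn_t cannot be passed where an
     * mfn_t (or a raw unsigned long) is expected, and vice versa. */
    typedef struct { unsigned long dfn; } dfn_t; /* device frame number */
    typedef struct { unsigned long mfn; } mfn_t; /* machine frame number */

    /* Explicit constructors and accessors mark every conversion. */
    static inline dfn_t _dfn(unsigned long n) { return (dfn_t){ n }; }
    static inline unsigned long dfn_x(dfn_t d) { return d.dfn; }
    static inline mfn_t _mfn(unsigned long n) { return (mfn_t){ n }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

Hence the dfn_x()/mfn_x() unwrapping below at the points where the
per-vendor code still wants a raw frame number. ]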
> 
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

Julien, you should have been cc-ed on this one too. Not sure why I missed you 
off the list... sorry about that. Could I get an ack or otherwise please?

  Paul

> Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> Reviewed-by: Roger Pau Monne <roger.pau@xxxxxxxxxx>
> Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
> Acked-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> ---
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
> 
> v14:
>  - Re-base.
> 
> v9:
>  - Re-base.
> 
> v7:
>  - Re-base and re-name BFN -> DFN.
>  - Added Jan's A-b since re-naming was purely mechanical.
> 
> v6:
>  - Re-base.
> 
> v3:
>  - Remove some use of intermediate 'frame' variables.
> 
> v2:
>  - Addressed comments from Jan.
>  - Extend use of intermediate 'frame' variable to avoid directly
>    encapsulating gfn values as bfns (now dfns) or vice versa.
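
[ The 'frame' idiom referred to in the v2 note is visible in the unity
map hunk below: rather than writing _dfn(gfn + i) and _mfn(gfn + i)
directly, which would silently re-badge a gfn into two other frame-number
spaces at the call site, the value goes through a neutral local first:

    unsigned long frame = gfn + i;

    rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags);

so the (identity-map) conversion is explicit and easy to grep for. ]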
> ---
>  xen/drivers/passthrough/amd/iommu_map.c       | 46 ++++++++++++++++-----------
>  xen/drivers/passthrough/arm/smmu.c            | 16 +++++-----
>  xen/drivers/passthrough/iommu.c               |  9 +++---
>  xen/drivers/passthrough/vtd/iommu.c           | 26 +++++++--------
>  xen/drivers/passthrough/x86/iommu.c           |  2 +-
>  xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  8 ++---
>  xen/include/xen/iommu.h                       | 13 +++++---
>  7 files changed, 66 insertions(+), 54 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
> index 61ade71850..c89c54fdb6 100644
> --- a/xen/drivers/passthrough/amd/iommu_map.c
> +++ b/xen/drivers/passthrough/amd/iommu_map.c
> @@ -631,7 +631,7 @@ static int update_paging_mode(struct domain *d, unsigned long dfn)
>      return 0;
>  }
> 
> -int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
> +int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
>                         unsigned int flags)
>  {
>      bool_t need_flush = 0;
> @@ -651,7 +651,8 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
>      if ( rc )
>      {
>          spin_unlock(&hd->arch.mapping_lock);
> -        AMD_IOMMU_DEBUG("Root table alloc failed, dfn = %lx\n", dfn);
> +        AMD_IOMMU_DEBUG("Root table alloc failed, dfn = %"PRI_dfn"\n",
> +                        dfn_x(dfn));
>          domain_crash(d);
>          return rc;
>      }
> @@ -660,25 +661,27 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
>       * we might need a deeper page table for wider dfn now */
>      if ( is_hvm_domain(d) )
>      {
> -        if ( update_paging_mode(d, dfn) )
> +        if ( update_paging_mode(d, dfn_x(dfn)) )
>          {
>              spin_unlock(&hd->arch.mapping_lock);
> -            AMD_IOMMU_DEBUG("Update page mode failed dfn = %lx\n", dfn);
> +            AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n",
> +                            dfn_x(dfn));
>              domain_crash(d);
>              return -EFAULT;
>          }
>      }
> 
> -    if ( iommu_pde_from_dfn(d, dfn, pt_mfn) || (pt_mfn[1] == 0) )
> +    if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) )
>      {
>          spin_unlock(&hd->arch.mapping_lock);
> -        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %lx\n", dfn);
> +        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n",
> +                        dfn_x(dfn));
>          domain_crash(d);
>          return -EFAULT;
>      }
> 
>      /* Install 4k mapping first */
> -    need_flush = set_iommu_pte_present(pt_mfn[1], dfn, mfn,
> +    need_flush = set_iommu_pte_present(pt_mfn[1], dfn_x(dfn), mfn_x(mfn),
>                                         IOMMU_PAGING_MODE_LEVEL_1,
>                                         !!(flags & IOMMUF_writable),
>                                         !!(flags & IOMMUF_readable));
> @@ -690,7 +693,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
>      /* 4K mapping for PV guests never changes,
>       * no need to flush if we trust non-present bits */
>      if ( is_hvm_domain(d) )
> -        amd_iommu_flush_pages(d, dfn, 0);
> +        amd_iommu_flush_pages(d, dfn_x(dfn), 0);
> 
>      for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
>            merge_level <= hd->arch.paging_mode; merge_level++ )
> @@ -698,15 +701,16 @@ int amd_iommu_map_page(struct domain *d, unsigned long dfn, unsigned long mfn,
>          if ( pt_mfn[merge_level] == 0 )
>              break;
>          if ( !iommu_update_pde_count(d, pt_mfn[merge_level],
> -                                     dfn, mfn, merge_level) )
> +                                     dfn_x(dfn), mfn_x(mfn), merge_level) )
>              break;
> 
> -        if ( iommu_merge_pages(d, pt_mfn[merge_level], dfn,
> +        if ( iommu_merge_pages(d, pt_mfn[merge_level], dfn_x(dfn),
>                                 flags, merge_level) )
>          {
>              spin_unlock(&hd->arch.mapping_lock);
>              AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
> -                            "dfn = %lx mfn = %lx\n", merge_level, dfn, mfn);
> +                            "dfn = %"PRI_dfn" mfn = %"PRI_mfn"\n",
> +                            merge_level, dfn_x(dfn), mfn_x(mfn));
>              domain_crash(d);
>              return -EFAULT;
>          }
> @@ -720,7 +724,7 @@ out:
>      return 0;
>  }
> 
> -int amd_iommu_unmap_page(struct domain *d, unsigned long dfn)
> +int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
>  {
>      unsigned long pt_mfn[7];
>      struct domain_iommu *hd = dom_iommu(d);
> @@ -742,31 +746,33 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long dfn)
>       * we might need a deeper page table for larger dfn now */
>      if ( is_hvm_domain(d) )
>      {
> -        int rc = update_paging_mode(d, dfn);
> +        int rc = update_paging_mode(d, dfn_x(dfn));
> 
>          if ( rc )
>          {
>              spin_unlock(&hd->arch.mapping_lock);
> -            AMD_IOMMU_DEBUG("Update page mode failed dfn = %lx\n", dfn);
> +            AMD_IOMMU_DEBUG("Update page mode failed dfn = %"PRI_dfn"\n",
> +                            dfn_x(dfn));
>              if ( rc != -EADDRNOTAVAIL )
>                  domain_crash(d);
>              return rc;
>          }
>      }
> 
> -    if ( iommu_pde_from_dfn(d, dfn, pt_mfn) || (pt_mfn[1] == 0) )
> +    if ( iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn) || (pt_mfn[1] == 0) )
>      {
>          spin_unlock(&hd->arch.mapping_lock);
> -        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %lx\n", dfn);
> +        AMD_IOMMU_DEBUG("Invalid IO pagetable entry dfn = %"PRI_dfn"\n",
> +                        dfn_x(dfn));
>          domain_crash(d);
>          return -EFAULT;
>      }
> 
>      /* mark PTE as 'page not present' */
> -    clear_iommu_pte_present(pt_mfn[1], dfn);
> +    clear_iommu_pte_present(pt_mfn[1], dfn_x(dfn));
>      spin_unlock(&hd->arch.mapping_lock);
> 
> -    amd_iommu_flush_pages(d, dfn, 0);
> +    amd_iommu_flush_pages(d, dfn_x(dfn), 0);
> 
>      return 0;
>  }
> @@ -787,7 +793,9 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
>      gfn = phys_addr >> PAGE_SHIFT;
>      for ( i = 0; i < npages; i++ )
>      {
> -        rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags);
> +        unsigned long frame = gfn + i;
> +
> +        rt = amd_iommu_map_page(domain, _dfn(frame), _mfn(frame), flags);
>          if ( rt != 0 )
>              return rt;
>      }
> diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
> index 1eda96a72a..53e5823d05 100644
> --- a/xen/drivers/passthrough/arm/smmu.c
> +++ b/xen/drivers/passthrough/arm/smmu.c
> @@ -2550,8 +2550,7 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
>       return 0;
>  }
> 
> -static int __must_check arm_smmu_iotlb_flush(struct domain *d,
> -                                             unsigned long dfn,
> +static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
>                                               unsigned int page_count)
>  {
>       /* ARM SMMU v1 doesn't have flush by VMA and VMID */
> @@ -2748,8 +2747,8 @@ static void arm_smmu_iommu_domain_teardown(struct domain *d)
>       xfree(xen_domain);
>  }
> 
> -static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
> -                     unsigned long mfn, unsigned int flags)
> +static int __must_check arm_smmu_map_page(struct domain *d, dfn_t dfn,
> +                                       mfn_t mfn, unsigned int flags)
>  {
>       p2m_type_t t;
> 
> @@ -2762,7 +2761,7 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
>        * function should only be used by gnttab code with gfn == mfn == dfn.
>        */
>       BUG_ON(!is_domain_direct_mapped(d));
> -     BUG_ON(mfn != dfn);
> +     BUG_ON(mfn_x(mfn) != dfn_x(dfn));
> 
>       /* We only support readable and writable flags */
>       if (!(flags & (IOMMUF_readable | IOMMUF_writable)))
> @@ -2774,10 +2773,11 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long dfn,
>        * The function guest_physmap_add_entry replaces the current mapping
>        * if there is already one...
>        */
> -     return guest_physmap_add_entry(d, _gfn(dfn), _mfn(dfn), 0, t);
> +     return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)),
> +                                    0, t);
>  }
> 
> -static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long dfn)
> +static int __must_check arm_smmu_unmap_page(struct domain *d, dfn_t dfn)
>  {
>       /*
>        * This function should only be used by gnttab code when the domain
> @@ -2786,7 +2786,7 @@ static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long dfn)
>       if ( !is_domain_direct_mapped(d) )
>               return -EINVAL;
> 
> -     return guest_physmap_remove_page(d, _gfn(dfn), _mfn(dfn), 0);
> +     return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), 0);
>  }
> 
>  static const struct iommu_ops arm_smmu_iommu_ops = {
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index ef9d651317..d61fbbf439 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -224,7 +224,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
>                    == PGT_writable_page) )
>                  mapping |= IOMMUF_writable;
> 
> -            ret = hd->platform_ops->map_page(d, dfn, mfn, mapping);
> +            ret = hd->platform_ops->map_page(d, _dfn(dfn), _mfn(mfn),
> +                                             mapping);
>              if ( !rc )
>                  rc = ret;
> 
> @@ -294,7 +295,7 @@ int iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
>      if ( !iommu_enabled || !hd->platform_ops )
>          return 0;
> 
> -    rc = hd->platform_ops->map_page(d, dfn_x(dfn), mfn_x(mfn), flags);
> +    rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
>      if ( unlikely(rc) )
>      {
>          if ( !d->is_shutting_down && printk_ratelimit() )
> @@ -317,7 +318,7 @@ int iommu_unmap_page(struct domain *d, dfn_t dfn)
>      if ( !iommu_enabled || !hd->platform_ops )
>          return 0;
> 
> -    rc = hd->platform_ops->unmap_page(d, dfn_x(dfn));
> +    rc = hd->platform_ops->unmap_page(d, dfn);
>      if ( unlikely(rc) )
>      {
>          if ( !d->is_shutting_down && printk_ratelimit() )
> @@ -357,7 +358,7 @@ int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
>      if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
>          return 0;
> 
> -    rc = hd->platform_ops->iotlb_flush(d, dfn_x(dfn), page_count);
> +    rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
>      if ( unlikely(rc) )
>      {
>          if ( !d->is_shutting_down && printk_ratelimit() )
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 485704bcce..7e185c43f3 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -584,8 +584,7 @@ static int __must_check iommu_flush_all(void)
>      return rc;
>  }
> 
> -static int __must_check iommu_flush_iotlb(struct domain *d,
> -                                          unsigned long dfn,
> +static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
>                                            bool_t dma_old_pte_present,
>                                            unsigned int page_count)
>  {
> @@ -612,12 +611,12 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
>          if ( iommu_domid == -1 )
>              continue;
> 
> -        if ( page_count != 1 || dfn == dfn_x(INVALID_DFN) )
> +        if ( page_count != 1 || dfn_eq(dfn, INVALID_DFN) )
>              rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
>                                         0, flush_dev_iotlb);
>          else
>              rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
> -                                       __dfn_to_daddr(dfn),
> +                                       dfn_to_daddr(dfn),
>                                         PAGE_ORDER_4K,
>                                         !dma_old_pte_present,
>                                         flush_dev_iotlb);
> @@ -633,7 +632,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
>  }
> 
>  static int __must_check iommu_flush_iotlb_pages(struct domain *d,
> -                                                unsigned long dfn,
> +                                                dfn_t dfn,
>                                                  unsigned int page_count)
>  {
>      return iommu_flush_iotlb(d, dfn, 1, page_count);
> @@ -641,7 +640,7 @@ static int __must_check iommu_flush_iotlb_pages(struct domain *d,
> 
>  static int __must_check iommu_flush_iotlb_all(struct domain *d)
>  {
> -    return iommu_flush_iotlb(d, dfn_x(INVALID_DFN), 0, 0);
> +    return iommu_flush_iotlb(d, INVALID_DFN, 0, 0);
>  }
> 
>  /* clear one page's page table */
> @@ -676,7 +675,7 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
>      iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
> 
>      if ( !this_cpu(iommu_dont_flush_iotlb) )
> -        rc = iommu_flush_iotlb_pages(domain, addr >> PAGE_SHIFT_4K, 1);
> +        rc = iommu_flush_iotlb_pages(domain, daddr_to_dfn(addr), 1);
> 
>      unmap_vtd_domain_page(page);
> 
> @@ -1763,8 +1762,7 @@ static void iommu_domain_teardown(struct domain *d)
>  }
> 
>  static int __must_check intel_iommu_map_page(struct domain *d,
> -                                             unsigned long dfn,
> -                                             unsigned long mfn,
> +                                             dfn_t dfn, mfn_t mfn,
>                                               unsigned int flags)
>  {
>      struct domain_iommu *hd = dom_iommu(d);
> @@ -1782,16 +1780,16 @@ static int __must_check intel_iommu_map_page(struct domain *d,
> 
>      spin_lock(&hd->arch.mapping_lock);
> 
> -    pg_maddr = addr_to_dma_page_maddr(d, __dfn_to_daddr(dfn), 1);
> +    pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), 1);
>      if ( pg_maddr == 0 )
>      {
>          spin_unlock(&hd->arch.mapping_lock);
>          return -ENOMEM;
>      }
>      page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
> -    pte = page + (dfn & LEVEL_MASK);
> +    pte = page + (dfn_x(dfn) & LEVEL_MASK);
>      old = *pte;
> -    dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
> +    dma_set_pte_addr(new, mfn_to_maddr(mfn));
>      dma_set_pte_prot(new,
>                       ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
>                       ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
> @@ -1819,13 +1817,13 @@ static int __must_check intel_iommu_map_page(struct domain *d,
>  }
> 
>  static int __must_check intel_iommu_unmap_page(struct domain *d,
> -                                               unsigned long dfn)
> +                                               dfn_t dfn)
>  {
>      /* Do nothing if hardware domain and iommu supports pass thru. */
>      if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
>          return 0;
> 
> -    return dma_pte_clear_one(d, __dfn_to_daddr(dfn));
> +    return dma_pte_clear_one(d, dfn_to_daddr(dfn));
>  }
> 
>  int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
> diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
> index 34727b6653..f410717a59 100644
> --- a/xen/drivers/passthrough/x86/iommu.c
> +++ b/xen/drivers/passthrough/x86/iommu.c
> @@ -68,7 +68,7 @@ int arch_iommu_populate_page_table(struct domain *d)
>              {
>                  ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
>                  BUG_ON(SHARED_M2P(gfn));
> -                rc = hd->platform_ops->map_page(d, gfn, mfn,
> +                rc = hd->platform_ops->map_page(d, _dfn(gfn), _mfn(mfn),
>                                                  IOMMUF_readable |
>                                                  IOMMUF_writable);
>              }
> diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> index 99bc21c7b3..3083d625bd 100644
> --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
> @@ -52,9 +52,9 @@ int amd_iommu_init(void);
>  int amd_iommu_update_ivrs_mapping_acpi(void);
> 
>  /* mapping functions */
> -int __must_check amd_iommu_map_page(struct domain *d, unsigned long gfn,
> -                                    unsigned long mfn, unsigned int flags);
> -int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
> +int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
> +                                    mfn_t mfn, unsigned int flags);
> +int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn);
>  u64 amd_iommu_get_next_table_from_pte(u32 *entry);
>  int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
>  int amd_iommu_reserve_domain_unity_map(struct domain *domain,
> @@ -77,7 +77,7 @@ void iommu_dte_set_guest_cr3(u32 *dte, u16 dom_id, u64 gcr3,
> 
>  /* send cmd to iommu */
>  void amd_iommu_flush_all_pages(struct domain *d);
> -void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
> +void amd_iommu_flush_pages(struct domain *d, unsigned long dfn,
>                             unsigned int order);
>  void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
>                             uint64_t gaddr, unsigned int order);
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index f9d86fc816..7313957c81 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -48,6 +48,11 @@ static inline dfn_t dfn_add(dfn_t dfn, unsigned long i)
>      return _dfn(dfn_x(dfn) + i);
>  }
> 
> +static inline bool_t dfn_eq(dfn_t x, dfn_t y)
> +{
> +    return dfn_x(x) == dfn_x(y);
> +}
> +
>  extern bool_t iommu_enable, iommu_enabled;
>  extern bool_t force_iommu, iommu_verbose;
>  extern bool_t iommu_workaround_bios_bug, iommu_igfx;
> @@ -174,9 +179,9 @@ struct iommu_ops {
>  #endif /* HAS_PCI */
> 
>      void (*teardown)(struct domain *d);
> -    int __must_check (*map_page)(struct domain *d, unsigned long dfn,
> -                                 unsigned long mfn, unsigned int flags);
> -    int __must_check (*unmap_page)(struct domain *d, unsigned long dfn);
> +    int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
> +                                 unsigned int flags);
> +    int __must_check (*unmap_page)(struct domain *d, dfn_t dfn);
>      void (*free_page_table)(struct page_info *);
>  #ifdef CONFIG_X86
>      void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
> @@ -187,7 +192,7 @@ struct iommu_ops {
>      void (*resume)(void);
>      void (*share_p2m)(struct domain *d);
>      void (*crash_shutdown)(void);
> -    int __must_check (*iotlb_flush)(struct domain *d, unsigned long dfn,
> +    int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
>                                      unsigned int page_count);
>      int __must_check (*iotlb_flush_all)(struct domain *d);
>      int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
> --
> 2.11.0

