
Re: [Xen-devel] [PATCH 5/6] x86/vtd: Drop struct iommu_flush




> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 22 February 2019 19:13
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich <JBeulich@xxxxxxxx>;
>  Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Kevin Tian <kevin.tian@xxxxxxxxx>
> Subject: [PATCH 5/6] x86/vtd: Drop struct iommu_flush
> 
> It is unclear why this abstraction exists, but iommu_get_flush() can return
> NULL and every caller unconditionally dereferences the result.  In practice,
> I can't spot a path where iommu is NULL, so the NULL case appears to be dead
> code.
> 
> Move the two function pointers into struct vtd_iommu (using a flush_ prefix),
> and delete iommu_get_flush().  Furthermore, there is no need to pass the
> IOMMU pointer to the callbacks via a void pointer, so change the parameter to
> be correctly typed as struct vtd_iommu *.  Clean up bool_t to bool in the
> surrounding context.
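
To illustrate the net effect on callers of the flush hooks (the argument
names below are placeholders, not taken from the patch):

    /* Before: fetch the hooks via iommu_get_flush(), which may return NULL. */
    struct iommu_flush *flush = iommu_get_flush(iommu);
    rc = flush->context(iommu, did, sid, fm, type, fnpe);

    /* After: a direct, correctly-typed member call. */
    rc = iommu->flush_context(iommu, did, sid, fm, type, fnpe);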
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
>  xen/drivers/passthrough/vtd/iommu.c  | 62 ++++++++++++++++--------------------
>  xen/drivers/passthrough/vtd/iommu.h  | 24 +++++---------
>  xen/drivers/passthrough/vtd/qinval.c | 21 +++++-------
>  3 files changed, 44 insertions(+), 63 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 05dc7ff..7fc6fe0 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -334,11 +334,11 @@ static void iommu_flush_write_buffer(struct vtd_iommu *iommu)
>  }
> 
>  /* return value determine if we need a write buffer flush */
> -static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id,
> -                                          u8 function_mask, u64 type,
> -                                          bool_t flush_non_present_entry)
> +static int __must_check flush_context_reg(struct vtd_iommu *iommu, u16 did,
> +                                          u16 source_id, u8 function_mask,
> +                                          u64 type,
> +                                          bool flush_non_present_entry)

More u8, u16 and u64 cleanup needed.
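
For instance, the fully cleaned-up prototype might look something like the
below (a sketch only, not part of the patch, with the usual uintN_t
substitutions assumed):

    /* Sketch: the same prototype with u8/u16/u64 swapped for uintN_t. */
    static int __must_check flush_context_reg(struct vtd_iommu *iommu,
                                              uint16_t did, uint16_t source_id,
                                              uint8_t function_mask,
                                              uint64_t type,
                                              bool flush_non_present_entry);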

>  {
> -    struct vtd_iommu *iommu = _iommu;
>      u64 val = 0;
>      unsigned long flags;
> 
> @@ -387,31 +387,28 @@ static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id,
>  }
> 
>  static int __must_check iommu_flush_context_global(struct vtd_iommu *iommu,
> -                                                   bool_t flush_non_present_entry)
> +                                                   bool flush_non_present_entry)
>  {
> -    struct iommu_flush *flush = iommu_get_flush(iommu);
> -    return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
> -                                 flush_non_present_entry);
> +    return iommu->flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
> +                                flush_non_present_entry);
>  }
> 
>  static int __must_check iommu_flush_context_device(struct vtd_iommu *iommu,
>                                                     u16 did, u16 source_id,

And here.

>                                                     u8 function_mask,
> -                                                   bool_t flush_non_present_entry)
> +                                                   bool flush_non_present_entry)
>  {
> -    struct iommu_flush *flush = iommu_get_flush(iommu);
> -    return flush->context(iommu, did, source_id, function_mask,
> -                                 DMA_CCMD_DEVICE_INVL,
> -                                 flush_non_present_entry);
> +    return iommu->flush_context(iommu, did, source_id, function_mask,
> +                                DMA_CCMD_DEVICE_INVL, flush_non_present_entry);
>  }
> 
>  /* return value determine if we need a write buffer flush */
> -static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr,
> +static int __must_check flush_iotlb_reg(struct vtd_iommu *iommu, u16 did,
> +                                        u64 addr,

And here.

>                                          unsigned int size_order, u64 type,
> -                                        bool_t flush_non_present_entry,
> -                                        bool_t flush_dev_iotlb)
> +                                        bool flush_non_present_entry,
> +                                        bool flush_dev_iotlb)
>  {
> -    struct vtd_iommu *iommu = _iommu;
>      int tlb_offset = ecap_iotlb_offset(iommu->ecap);
>      u64 val = 0;
>      unsigned long flags;
> @@ -474,17 +471,16 @@ static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr,
>  }
> 
>  static int __must_check iommu_flush_iotlb_global(struct vtd_iommu *iommu,
> -                                                 bool_t flush_non_present_entry,
> -                                                 bool_t flush_dev_iotlb)
> +                                                 bool flush_non_present_entry,
> +                                                 bool flush_dev_iotlb)
>  {
> -    struct iommu_flush *flush = iommu_get_flush(iommu);
>      int status;
> 
>      /* apply platform specific errata workarounds */
>      vtd_ops_preamble_quirk(iommu);
> 
> -    status = flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
> -                        flush_non_present_entry, flush_dev_iotlb);
> +    status = iommu->flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
> +                                flush_non_present_entry, flush_dev_iotlb);
> 
>      /* undo platform specific errata workarounds */
>      vtd_ops_postamble_quirk(iommu);
> @@ -496,14 +492,13 @@ static int __must_check iommu_flush_iotlb_dsi(struct vtd_iommu *iommu, u16 did,

And here.

>                                                bool_t flush_non_present_entry,
>                                                bool_t flush_dev_iotlb)
>  {
> -    struct iommu_flush *flush = iommu_get_flush(iommu);
>      int status;
> 
>      /* apply platform specific errata workarounds */
>      vtd_ops_preamble_quirk(iommu);
> 
> -    status =  flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
> -                        flush_non_present_entry, flush_dev_iotlb);
> +    status = iommu->flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
> +                                flush_non_present_entry, flush_dev_iotlb);
> 
>      /* undo platform specific errata workarounds */
>      vtd_ops_postamble_quirk(iommu);
> @@ -516,18 +511,19 @@ static int __must_check iommu_flush_iotlb_psi(struct vtd_iommu *iommu, u16 did,

And here.

>                                                bool_t flush_non_present_entry,
>                                                bool_t flush_dev_iotlb)
>  {
> -    struct iommu_flush *flush = iommu_get_flush(iommu);
>      int status;
> 
>      ASSERT(!(addr & (~PAGE_MASK_4K)));
> 
>      /* Fallback to domain selective flush if no PSI support */
>      if ( !cap_pgsel_inv(iommu->cap) )
> -        return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
> +        return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
> +                                     flush_dev_iotlb);
> 
>      /* Fallback to domain selective flush if size is too big */
>      if ( order > cap_max_amask_val(iommu->cap) )
> -        return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
> +        return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
> +                                     flush_dev_iotlb);
> 
>      addr >>= PAGE_SHIFT_4K + order;
>      addr <<= PAGE_SHIFT_4K + order;
> @@ -535,8 +531,8 @@ static int __must_check iommu_flush_iotlb_psi(struct vtd_iommu *iommu, u16 did,

And here.

>      /* apply platform specific errata workarounds */
>      vtd_ops_preamble_quirk(iommu);
> 
> -    status = flush->iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH,
> -                        flush_non_present_entry, flush_dev_iotlb);
> +    status = iommu->flush_iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH,
> +                                flush_non_present_entry, flush_dev_iotlb);
> 
>      /* undo platform specific errata workarounds */
>      vtd_ops_postamble_quirk(iommu);
> @@ -2158,7 +2154,6 @@ static int __must_check init_vtd_hw(void)
>  {
>      struct acpi_drhd_unit *drhd;
>      struct vtd_iommu *iommu;
> -    struct iommu_flush *flush = NULL;
>      int ret;
>      unsigned long flags;
>      u32 sts;
> @@ -2193,9 +2188,8 @@ static int __must_check init_vtd_hw(void)
>           */
>          if ( enable_qinval(iommu) != 0 )
>          {
> -            flush = iommu_get_flush(iommu);
> -            flush->context = flush_context_reg;
> -            flush->iotlb = flush_iotlb_reg;
> +            iommu->flush_context = flush_context_reg;
> +            iommu->flush_iotlb = flush_iotlb_reg;
>          }
>      }
> 
> diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
> index 97d0e6b..a8cffba 100644
> --- a/xen/drivers/passthrough/vtd/iommu.h
> +++ b/xen/drivers/passthrough/vtd/iommu.h
> @@ -506,18 +506,7 @@ extern struct list_head acpi_drhd_units;
>  extern struct list_head acpi_rmrr_units;
>  extern struct list_head acpi_ioapic_units;
> 
> -struct iommu_flush {
> -    int __must_check (*context)(void *iommu, u16 did, u16 source_id,
> -                                u8 function_mask, u64 type,
> -                                bool_t non_present_entry_flush);
> -    int __must_check (*iotlb)(void *iommu, u16 did, u64 addr,
> -                              unsigned int size_order, u64 type,
> -                              bool_t flush_non_present_entry,
> -                              bool_t flush_dev_iotlb);
> -};
> -
>  struct intel_iommu {
> -    struct iommu_flush flush;
>      struct acpi_drhd_unit *drhd;
>  };
> 
> @@ -540,16 +529,19 @@ struct vtd_iommu {
>      unsigned int iremap_num; /* total num of used interrupt remap entry */
>      spinlock_t iremap_lock;  /* lock for irq remapping table */
> 
> +    int __must_check (*flush_context)(struct vtd_iommu *iommu, u16 did,
> +                                      u16 source_id, u8 function_mask, u64 type,
> +                                      bool non_present_entry_flush);

Certainly here, since you're moving code.
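
i.e. with the type cleanup folded in while the declarations are being moved,
they might read something like (sketch only, untested):

    int __must_check (*flush_context)(struct vtd_iommu *iommu, uint16_t did,
                                      uint16_t source_id,
                                      uint8_t function_mask, uint64_t type,
                                      bool non_present_entry_flush);
    int __must_check (*flush_iotlb)(struct vtd_iommu *iommu, uint16_t did,
                                    uint64_t addr, unsigned int size_order,
                                    uint64_t type,
                                    bool flush_non_present_entry,
                                    bool flush_dev_iotlb);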

> +    int __must_check (*flush_iotlb)(struct vtd_iommu *iommu, u16 did, u64 addr,
> +                                    unsigned int size_order, u64 type,
> +                                    bool flush_non_present_entry,
> +                                    bool flush_dev_iotlb);
> +
>      struct list_head ats_devices;
>      unsigned long *domid_bitmap;  /* domain id bitmap */
>      u16 *domid_map;               /* domain id mapping array */
>  };
> 
> -static inline struct iommu_flush *iommu_get_flush(struct vtd_iommu *iommu)
> -{
> -    return iommu ? &iommu->intel->flush : NULL;
> -}
> -
>  #define INTEL_IOMMU_DEBUG(fmt, args...) \
>      do  \
>      {   \
> diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
> index f6fcee5..99e98e7 100644
> --- a/xen/drivers/passthrough/vtd/qinval.c
> +++ b/xen/drivers/passthrough/vtd/qinval.c
> @@ -317,12 +317,10 @@ int iommu_flush_iec_index(struct vtd_iommu *iommu, u8 im, u16 iidx)
>      return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
>  }
> 
> -static int __must_check flush_context_qi(void *_iommu, u16 did,
> +static int __must_check flush_context_qi(struct vtd_iommu *iommu, u16 did,
>                                           u16 sid, u8 fm, u64 type,

More here.

> -                                         bool_t flush_non_present_entry)
> +                                         bool flush_non_present_entry)
>  {
> -    struct vtd_iommu *iommu = _iommu;
> -
>      ASSERT(iommu->qinval_maddr);
> 
>      /*
> @@ -343,14 +341,14 @@ static int __must_check flush_context_qi(void *_iommu, u16 did,
>                                           type >> DMA_CCMD_INVL_GRANU_OFFSET);
>  }
> 
> -static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
> +static int __must_check flush_iotlb_qi(struct vtd_iommu *iommu, u16 did,
> +                                       u64 addr,

And here.

>                                         unsigned int size_order, u64 type,
> -                                       bool_t flush_non_present_entry,
> -                                       bool_t flush_dev_iotlb)
> +                                       bool flush_non_present_entry,
> +                                       bool flush_dev_iotlb)
>  {
>      u8 dr = 0, dw = 0;
>      int ret = 0, rc;
> -    struct vtd_iommu *iommu = _iommu;
> 
>      ASSERT(iommu->qinval_maddr);
> 
> @@ -392,15 +390,12 @@ static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,

And here.

>  int enable_qinval(struct vtd_iommu *iommu)
>  {
>      struct acpi_drhd_unit *drhd;
> -    struct iommu_flush *flush;
>      u32 sts;

And here.

 Paul

>      unsigned long flags;
> 
>      if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
>          return -ENOENT;
> 
> -    flush = iommu_get_flush(iommu);
> -
>      /* Return if already enabled by Xen */
>      sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
>      if ( (sts & DMA_GSTS_QIES) && iommu->qinval_maddr )
> @@ -417,8 +412,8 @@ int enable_qinval(struct vtd_iommu *iommu)
>              return -ENOMEM;
>      }
> 
> -    flush->context = flush_context_qi;
> -    flush->iotlb = flush_iotlb_qi;
> +    iommu->flush_context = flush_context_qi;
> +    iommu->flush_iotlb   = flush_iotlb_qi;
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
> 
> --
> 2.1.4



 

