
Re: [Xen-devel] [v8][PATCH 04/17] update the existing hypercall to support XEN_DOMCTL_set_rdm



> From: Chen, Tiejun
> Sent: Monday, December 01, 2014 5:24 PM
> 
> Since we intend to expose that hypercall explicitly based on
> XEN_DOMCTL_set_rdm, we need this rebase. I hope we can squash
> this into the previous patch once Jan acks it.

Better to merge them together, since that's the right thing to do based on
the previous discussion.

One question about 'd->arch.hvm_domain.pci_force'. My impression is
that this flag enables the force check, and that while it is enabled you
always do selective BDF filtering by default. However, from the code below
it seems pci_force is instead used to decide whether to report all regions
or only the selected ones. Am I reading it wrong?
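
To make sure we're reading it the same way, here is a minimal standalone
model of the reporting decision as I understand it from the hunks below.
The structures and the MODEL_SBDF() helper are simplified stand-ins for
illustration only, not the real Xen code:

/*
 * Standalone model (not the real Xen code) of how I read the logic in
 * get_reserved_device_memory() below.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_SBDF(seg, bdf) ((((uint32_t)(seg) & 0xffff) << 16) | ((bdf) & 0xffff))

struct model_domain {
    bool pci_force;             /* stands in for d->arch.hvm_domain.pci_force */
    unsigned int num_pcidevs;   /* number of devices assigned to the domain */
    uint32_t assigned_sbdf[8];  /* SBDFs of those devices */
};

/* Is the RMRR entry owned by device 'id' reported for this domain? */
static bool entry_reported(const struct model_domain *d, uint32_t id)
{
    unsigned int i;

    if ( d->pci_force )
        return true;            /* report every RMRR, regardless of device */

    for ( i = 0; i < d->num_pcidevs; i++ )
        if ( d->assigned_sbdf[i] == id )
            return true;        /* report only RMRRs of assigned devices */

    return false;
}

int main(void)
{
    struct model_domain d = {
        .pci_force = false,
        .num_pcidevs = 1,
        .assigned_sbdf = { MODEL_SBDF(0, 0x00f8) },  /* e.g. 0000:00:1f.0 */
    };

    printf("assigned device's RMRR reported: %d\n",
           entry_reported(&d, MODEL_SBDF(0, 0x00f8)));
    printf("other device's RMRR reported:    %d\n",
           entry_reported(&d, MODEL_SBDF(0, 0x0010)));
    return 0;
}

If that model is right, pci_force only selects between "report every RMRR"
and "report the RMRRs of the assigned devices", rather than enabling a
stricter check as I expected.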

> 
> Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
> ---
>  xen/common/compat/memory.c         | 75 ++++++++++++++++++++++++++++++--------
>  xen/common/memory.c                | 71 +++++++++++++++++++++++++++++-------
>  xen/drivers/passthrough/vtd/dmar.c | 32 ++++++++++++----
>  xen/include/public/memory.h        |  5 +++
>  xen/include/xen/iommu.h            |  2 +-
>  xen/include/xen/pci.h              |  2 +
>  6 files changed, 148 insertions(+), 39 deletions(-)
> 
> diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
> index 60512fa..e6a256e 100644
> --- a/xen/common/compat/memory.c
> +++ b/xen/common/compat/memory.c
> @@ -22,27 +22,66 @@ struct get_reserved_device_memory {
>      unsigned int used_entries;
>  };
> 
> -static int get_reserved_device_memory(xen_pfn_t start,
> -                                      xen_ulong_t nr, void *ctxt)
> +static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
> +                                      u32 id, void *ctxt)
>  {
>      struct get_reserved_device_memory *grdm = ctxt;
> +    struct domain *d;
> +    unsigned int i;
> +    u32 sbdf;
> +    struct compat_reserved_device_memory rdm = {
> +        .start_pfn = start, .nr_pages = nr
> +    };
> 
> -    if ( grdm->used_entries < grdm->map.nr_entries )
> -    {
> -        struct compat_reserved_device_memory rdm = {
> -            .start_pfn = start, .nr_pages = nr
> -        };
> +    if ( rdm.start_pfn != start || rdm.nr_pages != nr )
> +        return -ERANGE;
> 
> -        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
> -            return -ERANGE;
> +    d = rcu_lock_domain_by_any_id(grdm->map.domid);
> +    if ( d == NULL )
> +        return -ESRCH;
> 
> -        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
> -                                     &rdm, 1) )
> -            return -EFAULT;
> +    if ( d )
> +    {
> +        if ( d->arch.hvm_domain.pci_force )
> +        {
> +            if ( grdm->used_entries < grdm->map.nr_entries )
> +            {
> +                if ( __copy_to_compat_offset(grdm->map.buffer,
> +                                             grdm->used_entries,
> +                                             &rdm, 1) )
> +                {
> +                    rcu_unlock_domain(d);
> +                    return -EFAULT;
> +                }
> +            }
> +            ++grdm->used_entries;
> +        }
> +        else
> +        {
> +            for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
> +            {
> +                sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[i].seg,
> +                                 d->arch.hvm_domain.pcidevs[i].bus,
> +                                 d->arch.hvm_domain.pcidevs[i].devfn);
> +                if ( sbdf == id )
> +                {
> +                    if ( grdm->used_entries < grdm->map.nr_entries )
> +                    {
> +                        if ( __copy_to_compat_offset(grdm->map.buffer,
> +                                                     grdm->used_entries,
> +                                                     &rdm, 1) )
> +                        {
> +                            rcu_unlock_domain(d);
> +                            return -EFAULT;
> +                        }
> +                    }
> +                    ++grdm->used_entries;
> +                }
> +            }
> +        }
>      }
> 
> -    ++grdm->used_entries;
> -
> +    rcu_unlock_domain(d);
>      return 0;
>  }
>  #endif
> @@ -319,9 +358,13 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
> 
>              if ( !rc && grdm.map.nr_entries < grdm.used_entries )
>                  rc = -ENOBUFS;
> +
>              grdm.map.nr_entries = grdm.used_entries;
> -            if ( __copy_to_guest(compat, &grdm.map, 1) )
> -                rc = -EFAULT;
> +            if ( grdm.map.nr_entries )
> +            {
> +                if ( __copy_to_guest(compat, &grdm.map, 1) )
> +                    rc = -EFAULT;
> +            }
> 
>              return rc;
>          }
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 4788acc..9ce82b1 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -698,24 +698,63 @@ struct get_reserved_device_memory {
>      unsigned int used_entries;
>  };
> 
> -static int get_reserved_device_memory(xen_pfn_t start,
> -                                      xen_ulong_t nr, void *ctxt)
> +static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
> +                                      u32 id, void *ctxt)
>  {
>      struct get_reserved_device_memory *grdm = ctxt;
> +    struct domain *d;
> +    unsigned int i;
> +    u32 sbdf;
> +    struct xen_reserved_device_memory rdm = {
> +        .start_pfn = start, .nr_pages = nr
> +    };
> 
> -    if ( grdm->used_entries < grdm->map.nr_entries )
> -    {
> -        struct xen_reserved_device_memory rdm = {
> -            .start_pfn = start, .nr_pages = nr
> -        };
> +    d = rcu_lock_domain_by_any_id(grdm->map.domid);
> +    if ( d == NULL )
> +        return -ESRCH;
> 
> -        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
> -                                    &rdm, 1) )
> -            return -EFAULT;
> +    if ( d )
> +    {
> +        if ( d->arch.hvm_domain.pci_force )
> +        {
> +            if ( grdm->used_entries < grdm->map.nr_entries )
> +            {
> +                if ( __copy_to_guest_offset(grdm->map.buffer,
> +                                            grdm->used_entries,
> +                                            &rdm, 1) )
> +                {
> +                    rcu_unlock_domain(d);
> +                    return -EFAULT;
> +                }
> +            }
> +            ++grdm->used_entries;
> +        }
> +        else
> +        {
> +            for ( i = 0; i < d->arch.hvm_domain.num_pcidevs; i++ )
> +            {
> +                sbdf = PCI_SBDF2(d->arch.hvm_domain.pcidevs[i].seg,
> +                                 d->arch.hvm_domain.pcidevs[i].bus,
> +                                 d->arch.hvm_domain.pcidevs[i].devfn);
> +                if ( sbdf == id )
> +                {
> +                    if ( grdm->used_entries < grdm->map.nr_entries )
> +                    {
> +                        if ( __copy_to_guest_offset(grdm->map.buffer,
> +                                                    grdm->used_entries,
> +                                                    &rdm, 1) )
> +                        {
> +                            rcu_unlock_domain(d);
> +                            return -EFAULT;
> +                        }
> +                    }
> +                    ++grdm->used_entries;
> +                }
> +            }
> +        }
>      }
> 
> -    ++grdm->used_entries;
> -
> +    rcu_unlock_domain(d);
>      return 0;
>  }
>  #endif
> @@ -1144,9 +1183,13 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
> 
>          if ( !rc && grdm.map.nr_entries < grdm.used_entries )
>              rc = -ENOBUFS;
> +
>          grdm.map.nr_entries = grdm.used_entries;
> -        if ( __copy_to_guest(arg, &grdm.map, 1) )
> -            rc = -EFAULT;
> +        if ( grdm.map.nr_entries )
> +        {
> +            if ( __copy_to_guest(arg, &grdm.map, 1) )
> +                rc = -EFAULT;
> +        }
> 
>          break;
>      }
> diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
> index 86cfad3..c5bc8d6 100644
> --- a/xen/drivers/passthrough/vtd/dmar.c
> +++ b/xen/drivers/passthrough/vtd/dmar.c
> @@ -904,17 +904,33 @@ int platform_supports_x2apic(void)
> 
>  int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
>  {
> -    struct acpi_rmrr_unit *rmrr;
> +    struct acpi_rmrr_unit *rmrr, *rmrr_cur = NULL;
>      int rc = 0;
> +    unsigned int i;
> +    u16 bdf;
> 
> -    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
> +    for_each_rmrr_device ( rmrr, bdf, i )
>      {
> -        rc = func(PFN_DOWN(rmrr->base_address),
> -                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
> -                  ctxt);
> -        if ( rc )
> -            break;
> +        if ( rmrr != rmrr_cur )
> +        {
> +            rc = func(PFN_DOWN(rmrr->base_address),
> +                      PFN_UP(rmrr->end_address) -
> +                        PFN_DOWN(rmrr->base_address),
> +                      PCI_SBDF(rmrr->segment, bdf),
> +                      ctxt);
> +
> +            if ( unlikely(rc < 0) )
> +                return rc;
> +
> +            /* Just go next. */
> +            if ( !rc )
> +                rmrr_cur = rmrr;
> +
> +            /* Now just return specific to user requirement. */
> +            if ( rc > 0 )
> +                return rc;
> +        }
>      }
> 
> -    return rc;
> +    return 0;
>  }
> diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
> index cee4535..0d0544e 100644
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -586,6 +586,11 @@ typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
> 
>  struct xen_reserved_device_memory_map {
> +    /*
> +     * Domain whose reservation is being changed.
> +     * Unprivileged domains can specify only DOMID_SELF.
> +     */
> +    domid_t        domid;
>      /* IN/OUT */
>      unsigned int nr_entries;
>      /* OUT */
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index 409f6f8..8fc6d6d 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -120,7 +120,7 @@ void iommu_dt_domain_destroy(struct domain *d);
> 
>  struct page_info;
> 
> -typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
> +typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt);
> 
>  struct iommu_ops {
>      int (*init)(struct domain *d);
> diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
> index 5f295f3..d34205f 100644
> --- a/xen/include/xen/pci.h
> +++ b/xen/include/xen/pci.h
> @@ -31,6 +31,8 @@
>  #define PCI_DEVFN2(bdf) ((bdf) & 0xff)
>  #define PCI_BDF(b,d,f)  ((((b) & 0xff) << 8) | PCI_DEVFN(d,f))
>  #define PCI_BDF2(b,df)  ((((b) & 0xff) << 8) | ((df) & 0xff))
> +#define PCI_SBDF(s,bdf) (((s & 0xffff) << 16) | (bdf & 0xffff))
> +#define PCI_SBDF2(s,b,df) (((s & 0xffff) << 16) | PCI_BDF2(b,df))
> 
>  struct pci_dev_info {
>      bool_t is_extfn;
> --
> 1.9.1

