
Re: [Xen-devel] [PATCH v2 2/2] x86/io: move the list of guest to machine IO ports out of domain_iommu



> -----Original Message-----
> From: Roger Pau Monne
> Sent: 28 March 2017 14:12
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; Jan Beulich
> <jbeulich@xxxxxxxx>; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul
> Durrant <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH v2 2/2] x86/io: move the list of guest to machine IO ports
> out of domain_iommu
> 
> There's no reason to store that list inside the domain_iommu struct:
> the forwarding of guest IO ports to machine IO ports is not tied to
> the presence of an IOMMU.
> 
> Move it inside the hvm_domain struct instead.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
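
For readers following the change: the list being moved holds the guest
to machine IO port range mappings that g2m_portio_accept() (below)
walks when deciding whether to handle a port access. Here is a minimal
standalone sketch of that walk, in the spirit of the patched code. The
g2m_ioport field names (list, gport, mport, np) match the patch; the
helper g2m_translate() and the bare-bones list_head scaffolding are
illustrative assumptions, not Xen code.

    #include <stdbool.h>

    struct list_head { struct list_head *next, *prev; };

    /* Mirrors Xen's struct g2m_ioport; 'list' is the first member, so
     * a list_head pointer can be cast back to the containing struct. */
    struct g2m_ioport {
        struct list_head list;
        unsigned int gport;  /* first guest port of the range */
        unsigned int mport;  /* first machine port it maps to */
        unsigned int np;     /* number of ports in the range */
    };

    /* Hypothetical helper: if gport falls inside a mapped range, store
     * the translated machine port in *mport and return true. */
    static bool g2m_translate(const struct list_head *head,
                              unsigned int gport, unsigned int *mport)
    {
        const struct list_head *pos;

        for ( pos = head->next; pos != head; pos = pos->next )
        {
            const struct g2m_ioport *io = (const struct g2m_ioport *)pos;

            if ( gport >= io->gport && gport < io->gport + io->np )
            {
                *mport = io->mport + (gport - io->gport);
                return true;
            }
        }
        return false;
    }

The only thing the patch changes is where the list head lives: lookups
now start from d->arch.hvm_domain.g2m_ioport_list instead of
dom_iommu(d)->arch.g2m_ioport_list, so the path no longer touches IOMMU
state.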

> ---
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
>  xen/arch/x86/domctl.c               | 10 +++++-----
>  xen/arch/x86/hvm/hvm.c              | 12 ++++++++++++
>  xen/arch/x86/hvm/io.c               |  4 ++--
>  xen/drivers/passthrough/x86/iommu.c | 11 -----------
>  xen/include/asm-x86/hvm/domain.h    |  3 +++
>  xen/include/asm-x86/iommu.h         |  1 -
>  6 files changed, 22 insertions(+), 19 deletions(-)
> 
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 1220224..b7b8748 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -721,11 +721,11 @@ long arch_do_domctl(
> 
>      case XEN_DOMCTL_ioport_mapping:
>      {
> -        struct domain_iommu *hd;
>          unsigned int fgp = domctl->u.ioport_mapping.first_gport;
>          unsigned int fmp = domctl->u.ioport_mapping.first_mport;
>          unsigned int np = domctl->u.ioport_mapping.nr_ports;
>          unsigned int add = domctl->u.ioport_mapping.add_mapping;
> +        struct hvm_domain *hvm_domain;
>          struct g2m_ioport *g2m_ioport;
>          int found = 0;
> 
> @@ -747,14 +747,14 @@ long arch_do_domctl(
>          if ( ret )
>              break;
> 
> -        hd = dom_iommu(d);
> +        hvm_domain = &d->arch.hvm_domain;
>          if ( add )
>          {
>              printk(XENLOG_G_INFO
>                     "ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
>                     d->domain_id, fgp, fmp, np);
> 
> -            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
> +            list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
>                  if (g2m_ioport->mport == fmp )
>                  {
>                      g2m_ioport->gport = fgp;
> @@ -773,7 +773,7 @@ long arch_do_domctl(
>                  g2m_ioport->gport = fgp;
>                  g2m_ioport->mport = fmp;
>                  g2m_ioport->np = np;
> -                list_add_tail(&g2m_ioport->list, &hd->arch.g2m_ioport_list);
> +                list_add_tail(&g2m_ioport->list, &hvm_domain->g2m_ioport_list);
>              }
>              if ( !ret )
>                  ret = ioports_permit_access(d, fmp, fmp + np - 1);
> @@ -788,7 +788,7 @@ long arch_do_domctl(
>              printk(XENLOG_G_INFO
>                     "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
>                     d->domain_id, fgp, fmp, np);
> -            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
> +            list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
>                  if ( g2m_ioport->mport == fmp )
>                  {
>                      list_del(&g2m_ioport->list);
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 3a2d001..b6c5c9b 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -608,6 +608,7 @@ int hvm_domain_initialise(struct domain *d)
>      spin_lock_init(&d->arch.hvm_domain.uc_lock);
>      spin_lock_init(&d->arch.hvm_domain.write_map.lock);
>      INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
> +    INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list);
> 
>      hvm_init_cacheattr_region_list(d);
> 
> @@ -707,6 +708,9 @@ void hvm_domain_relinquish_resources(struct domain *d)
> 
>  void hvm_domain_destroy(struct domain *d)
>  {
> +    struct list_head *ioport_list, *tmp;
> +    struct g2m_ioport *ioport;
> +
>      xfree(d->arch.hvm_domain.io_handler);
>      d->arch.hvm_domain.io_handler = NULL;
> 
> @@ -722,6 +726,14 @@ void hvm_domain_destroy(struct domain *d)
> 
>      xfree(d->arch.hvm_domain.pl_time);
>      d->arch.hvm_domain.pl_time = NULL;
> +
> +    list_for_each_safe ( ioport_list, tmp,
> +                         &d->arch.hvm_domain.g2m_ioport_list )
> +    {
> +        ioport = list_entry(ioport_list, struct g2m_ioport, list);
> +        list_del(&ioport->list);
> +        xfree(ioport);
> +    }
>  }
> 
>  static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index 3b3a600..9e00409 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -171,12 +171,12 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
>                                  const ioreq_t *p)
>  {
>      struct vcpu *curr = current;
> -    const struct domain_iommu *dio = dom_iommu(curr->domain);
> +    const struct hvm_domain *hvm_domain = &curr->domain->arch.hvm_domain;
>      struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
>      struct g2m_ioport *g2m_ioport;
>      unsigned int start, end;
> 
> -    list_for_each_entry( g2m_ioport, &dio->arch.g2m_ioport_list, list )
> +    list_for_each_entry( g2m_ioport, &hvm_domain->g2m_ioport_list, list )
>      {
>          start = g2m_ioport->gport;
>          end = start + g2m_ioport->np;
> diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
> index 750c663..0253823 100644
> --- a/xen/drivers/passthrough/x86/iommu.c
> +++ b/xen/drivers/passthrough/x86/iommu.c
> @@ -123,7 +123,6 @@ int arch_iommu_domain_init(struct domain *d)
>      struct domain_iommu *hd = dom_iommu(d);
> 
>      spin_lock_init(&hd->arch.mapping_lock);
> -    INIT_LIST_HEAD(&hd->arch.g2m_ioport_list);
>      INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
> 
>      return 0;
> @@ -131,16 +130,6 @@ int arch_iommu_domain_init(struct domain *d)
> 
>  void arch_iommu_domain_destroy(struct domain *d)
>  {
> -    const struct domain_iommu *hd = dom_iommu(d);
> -    struct list_head *ioport_list, *tmp;
> -    struct g2m_ioport *ioport;
> -
> -    list_for_each_safe ( ioport_list, tmp, &hd->arch.g2m_ioport_list )
> -    {
> -        ioport = list_entry(ioport_list, struct g2m_ioport, list);
> -        list_del(&ioport->list);
> -        xfree(ioport);
> -    }
>  }
> 
>  /*
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index 420cbdc..bdfc082 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -180,6 +180,9 @@ struct hvm_domain {
> 
>      unsigned long *io_bitmap;
> 
> +    /* List of guest to machine IO port mappings. */
> +    struct list_head g2m_ioport_list;
> +
>      /* List of permanently write-mapped pages. */
>      struct {
>          spinlock_t lock;
> diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h
> index be95106..0008505 100644
> --- a/xen/include/asm-x86/iommu.h
> +++ b/xen/include/asm-x86/iommu.h
> @@ -34,7 +34,6 @@ struct arch_iommu
>      u64 pgd_maddr;                 /* io page directory machine address */
>      spinlock_t mapping_lock;            /* io page table lock */
>      int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
> -    struct list_head g2m_ioport_list;   /* guest to machine ioport mapping */
>      u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
>      struct list_head mapped_rmrrs;
> 
> --
> 2.10.1 (Apple Git-78)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

