
Re: [Xen-devel] [PATCH v4 2/9] x86/mmcfg: add handlers for the PVH Dom0 MMCFG areas



> -----Original Message-----
> From: Roger Pau Monne [mailto:roger.pau@xxxxxxxxxx]
> Sent: 30 June 2017 16:01
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: boris.ostrovsky@xxxxxxxxxx; julien.grall@xxxxxxx;
> konrad.wilk@xxxxxxxxxx; Roger Pau Monne <roger.pau@xxxxxxxxxx>; Jan
> Beulich <jbeulich@xxxxxxxx>; Andrew Cooper
> <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH v4 2/9] x86/mmcfg: add handlers for the PVH Dom0 MMCFG
> areas
> 
> Introduce a set of handlers for accesses to the MMCFG areas. Those
> areas are set up based on the contents of the hardware MMCFG tables,
> and the list of handled MMCFG areas is stored inside the hvm_domain
> struct.
> 
> Reads and writes are forwarded to the generic vpci handlers once the
> address is decoded, in order to obtain the device and register the
> guest is trying to access.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> ---
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
> Changes since v3:
>  - Propagate changes from previous patches: drop xen_ prefix for vpci
>    functions, pass slot and func instead of devfn and fix the error
>    paths of the MMCFG handlers.
>  - s/ecam/mmcfg/.
>  - Move the destroy code to a separate function, so the hvm_mmcfg
>    struct can be private to hvm/io.c.
>  - Constify the return of vpci_mmcfg_find.
>  - Use d instead of v->domain in vpci_mmcfg_accept.
>  - Allow 8byte accesses to the mmcfg.
> 
> Changes since v1:
>  - Added locking.
> ---
>  xen/arch/x86/hvm/dom0_build.c    |  27 ++++++
>  xen/arch/x86/hvm/hvm.c           |   3 +
>  xen/arch/x86/hvm/io.c            | 188 ++++++++++++++++++++++++++++++++++++++-
>  xen/include/asm-x86/hvm/domain.h |   3 +
>  xen/include/asm-x86/hvm/io.h     |   7 ++
>  5 files changed, 225 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
> index 0e7d06be95..57db8adc8d 100644
> --- a/xen/arch/x86/hvm/dom0_build.c
> +++ b/xen/arch/x86/hvm/dom0_build.c
> @@ -38,6 +38,8 @@
>  #include <public/hvm/hvm_info_table.h>
>  #include <public/hvm/hvm_vcpu.h>
> 
> +#include "../x86_64/mmconfig.h"
> +
>  /*
>   * Have the TSS cover the ISA port range, which makes it
>   * - 104 bytes base structure
> @@ -1041,6 +1043,24 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info)
>      return 0;
>  }
> 
> +int __init pvh_setup_mmcfg(struct domain *d)
> +{
> +    unsigned int i;
> +    int rc;
> +
> +    for ( i = 0; i < pci_mmcfg_config_num; i++ )
> +    {
> +        rc = register_vpci_mmcfg_handler(d, pci_mmcfg_config[i].address,
> +                                         pci_mmcfg_config[i].start_bus_number,
> +                                         pci_mmcfg_config[i].end_bus_number,
> +                                         pci_mmcfg_config[i].pci_segment);
> +        if ( rc )
> +            return rc;
> +    }
> +
> +    return 0;
> +}
> +
>  int __init dom0_construct_pvh(struct domain *d, const module_t *image,
>                                unsigned long image_headroom,
>                                module_t *initrd,
> @@ -1090,6 +1110,13 @@ int __init dom0_construct_pvh(struct domain *d, const module_t *image,
>          return rc;
>      }
> 
> +    rc = pvh_setup_mmcfg(d);
> +    if ( rc )
> +    {
> +        printk("Failed to setup Dom0 PCI MMCFG areas: %d\n", rc);
> +        return rc;
> +    }
> +
>      panic("Building a PVHv2 Dom0 is not yet supported.");
>      return 0;
>  }
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index c4176ee458..f45e2bd23d 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -584,6 +584,7 @@ int hvm_domain_initialise(struct domain *d, unsigned long domcr_flags,
>      spin_lock_init(&d->arch.hvm_domain.write_map.lock);
>      INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
>      INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list);
> +    INIT_LIST_HEAD(&d->arch.hvm_domain.mmcfg_regions);
> 
>      rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);
>      if ( rc )
> @@ -729,6 +730,8 @@ void hvm_domain_destroy(struct domain *d)
>          list_del(&ioport->list);
>          xfree(ioport);
>      }
> +
> +    destroy_vpci_mmcfg(&d->arch.hvm_domain.mmcfg_regions);
>  }
> 
>  static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index 4e91a485cd..bb67f3accc 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -261,11 +261,11 @@ void register_g2m_portio_handler(struct domain *d)
>  static int vpci_access_check(unsigned int reg, unsigned int len)
>  {
>      /* Check access size. */
> -    if ( len != 1 && len != 2 && len != 4 )
> +    if ( len != 1 && len != 2 && len != 4 && len != 8 )
>          return -EINVAL;
> 
> -    /* Check if access crosses a double-word boundary. */
> -    if ( (reg & 3) + len > 4 )
> +    /* Check if access crosses a double-word boundary or it's not aligned. */
> +    if ( (len <= 4 && (reg & 3) + len > 4) || (len == 8 && (reg & 3) != 0) )

Maybe !!(reg & 3) in the second clause to be consistent with the previous 
clause's boolean usage of (reg & 3)?
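
I.e. something like this (untested sketch; only the second clause of the
alignment check changes, the rest is as in the patch):

static int vpci_access_check(unsigned int reg, unsigned int len)
{
    /* Check access size. */
    if ( len != 1 && len != 2 && len != 4 && len != 8 )
        return -EINVAL;

    /* Check if access crosses a double-word boundary or it's not aligned. */
    if ( (len <= 4 && (reg & 3) + len > 4) || (len == 8 && !!(reg & 3)) )
        return -EINVAL;

    return 0;
}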

>          return -EINVAL;
> 
>      return 0;
> @@ -398,6 +398,188 @@ void register_vpci_portio_handler(struct domain *d)
>      handler->ops = &vpci_portio_ops;
>  }
> 
> +struct hvm_mmcfg {
> +    paddr_t addr;
> +    size_t size;
> +    unsigned int bus;
> +    unsigned int segment;
> +    struct list_head next;
> +};
> +
> +/* Handlers to trap PCI ECAM config accesses. */
> +static const struct hvm_mmcfg *vpci_mmcfg_find(struct domain *d,
> +                                               unsigned long addr)
> +{
> +    const struct hvm_mmcfg *mmcfg;
> +
> +    ASSERT(vpci_locked(d));
> +    list_for_each_entry ( mmcfg, &d->arch.hvm_domain.mmcfg_regions, next )
> +        if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
> +            return mmcfg;
> +
> +    return NULL;
> +}
> +
> +static void vpci_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
> +                                   unsigned long addr, unsigned int *bus,
> +                                   unsigned int *slot, unsigned int *func,
> +                                   unsigned int *reg)
> +{
> +    addr -= mmcfg->addr;
> +    *bus = ((addr >> 20) & 0xff) + mmcfg->bus;
> +    *slot = (addr >> 15) & 0x1f;
> +    *func = (addr >> 12) & 0x7;
> +    *reg = addr & 0xfff;

Lots of magic numbers here. Perhaps define some macros analogous to the CF8 
ones?
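
Something along these lines perhaps (untested sketch; the MMCFG_* names are
purely illustrative, no such macros exist today):

/* ECAM layout: bus in bits 27:20, slot in 19:15, func in 14:12, reg in 11:0. */
#define MMCFG_BUS(addr)  (((addr) >> 20) & 0xff)
#define MMCFG_SLOT(addr) (((addr) >> 15) & 0x1f)
#define MMCFG_FUNC(addr) (((addr) >> 12) & 0x7)
#define MMCFG_REG(addr)  ((addr) & 0xfff)

static void vpci_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
                                   unsigned long addr, unsigned int *bus,
                                   unsigned int *slot, unsigned int *func,
                                   unsigned int *reg)
{
    addr -= mmcfg->addr;
    *bus = MMCFG_BUS(addr) + mmcfg->bus;
    *slot = MMCFG_SLOT(addr);
    *func = MMCFG_FUNC(addr);
    *reg = MMCFG_REG(addr);
}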

> +}
> +
> +static int vpci_mmcfg_accept(struct vcpu *v, unsigned long addr)
> +{
> +    struct domain *d = v->domain;
> +    bool found;
> +
> +    vpci_lock(d);
> +    found = vpci_mmcfg_find(d, addr);
> +    vpci_unlock(d);
> +
> +    return found;
> +}
> +
> +static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr,
> +                           unsigned int len, unsigned long *data)
> +{
> +    struct domain *d = v->domain;
> +    const struct hvm_mmcfg *mmcfg;
> +    unsigned int bus, slot, func, reg;
> +
> +    *data = ~(unsigned long)0;
> +
> +    vpci_lock(d);
> +    mmcfg = vpci_mmcfg_find(d, addr);
> +    if ( !mmcfg )
> +    {
> +        vpci_unlock(d);
> +        return X86EMUL_OKAY;
> +    }
> +
> +    vpci_mmcfg_decode_addr(mmcfg, addr, &bus, &slot, &func, &reg);
> +
> +    if ( vpci_access_check(reg, len) )
> +    {
> +        vpci_unlock(d);
> +        return X86EMUL_OKAY;
> +    }
> +
> +    pcidevs_lock();
> +    if ( len == 8 )
> +    {
> +        /*
> +         * According to the PCIe 3.1A specification:
> +         *  - Configuration Reads and Writes must usually be DWORD or smaller
> +         *    in size.
> +         *  - Because Root Complex implementations are not required to support
> +         *    accesses to a RCRB that cross DW boundaries [...] software
> +         *    should take care not to cause the generation of such accesses
> +         *    when accessing a RCRB unless the Root Complex will support the
> +         *    access.
> +         *  Xen however supports 8byte accesses by splitting them into two
> +         *  4byte accesses.
> +         */
> +        *data = vpci_read(mmcfg->segment, bus, slot, func, reg, 4);
> +        *data |= (uint64_t)vpci_read(mmcfg->segment, bus, slot, func,
> +                                     reg + 4, 4) << 32;
> +    }
> +    else
> +        *data = vpci_read(mmcfg->segment, bus, slot, func, reg, len);
> +    pcidevs_unlock();
> +    vpci_unlock(d);
> +
> +    return X86EMUL_OKAY;
> +}
> +
> +static int vpci_mmcfg_write(struct vcpu *v, unsigned long addr,
> +                            unsigned int len, unsigned long data)
> +{
> +    struct domain *d = v->domain;
> +    const struct hvm_mmcfg *mmcfg;
> +    unsigned int bus, slot, func, reg;
> +
> +    vpci_lock(d);
> +    mmcfg = vpci_mmcfg_find(d, addr);
> +    if ( !mmcfg )
> +        return X86EMUL_OKAY;
> +
> +    vpci_mmcfg_decode_addr(mmcfg, addr, &bus, &slot, &func, &reg);
> +
> +    if ( vpci_access_check(reg, len) )
> +        return X86EMUL_OKAY;
> +
> +    pcidevs_lock();
> +    if ( len == 8 )
> +    {
> +        vpci_write(mmcfg->segment, bus, slot, func, reg, 4, data);
> +        vpci_write(mmcfg->segment, bus, slot, func, reg + 4, 4, data >> 32);
> +    }
> +    else
> +        vpci_write(mmcfg->segment, bus, slot, func, reg, len, data);
> +    pcidevs_unlock();
> +    vpci_unlock(d);
> +
> +    return X86EMUL_OKAY;
> +}
> +
> +static const struct hvm_mmio_ops vpci_mmcfg_ops = {
> +    .check = vpci_mmcfg_accept,
> +    .read = vpci_mmcfg_read,
> +    .write = vpci_mmcfg_write,
> +};
> +
> +int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
> +                                unsigned int start_bus, unsigned int end_bus,
> +                                unsigned int seg)
> +{
> +    struct hvm_mmcfg *mmcfg;
> +
> +    ASSERT(is_hardware_domain(d));
> +
> +    vpci_lock(d);
> +    if ( vpci_mmcfg_find(d, addr) )
> +    {
> +        vpci_unlock(d);
> +        return -EEXIST;
> +    }
> +
> +    mmcfg = xmalloc(struct hvm_mmcfg);
> +    if ( !mmcfg )
> +    {
> +        vpci_unlock(d);
> +        return -ENOMEM;
> +    }
> +
> +    if ( list_empty(&d->arch.hvm_domain.mmcfg_regions) )
> +        register_mmio_handler(d, &vpci_mmcfg_ops);
> +
> +    mmcfg->addr = addr + (start_bus << 20);
> +    mmcfg->bus = start_bus;
> +    mmcfg->segment = seg;
> +    mmcfg->size = (end_bus - start_bus + 1) << 20;
> +    list_add(&mmcfg->next, &d->arch.hvm_domain.mmcfg_regions);
> +    vpci_unlock(d);
> +
> +    return 0;
> +}
> +
> +void destroy_vpci_mmcfg(struct list_head *domain_mmcfg)
> +{
> +    while ( !list_empty(domain_mmcfg) )
> +    {
> +        struct hvm_mmcfg *mmcfg = list_first_entry(domain_mmcfg,
> +                                                   struct hvm_mmcfg, next);
> +
> +        list_del(&mmcfg->next);
> +        xfree(mmcfg);
> +    }
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index cbf4170789..7028f93861 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -187,6 +187,9 @@ struct hvm_domain {
>      /* Lock for the PCI emulation layer (vPCI). */
>      spinlock_t vpci_lock;
> 
> +    /* List of ECAM (MMCFG) regions trapped by Xen. */
> +    struct list_head mmcfg_regions;
> +
>      /* List of permanently write-mapped pages. */
>      struct {
>          spinlock_t lock;
> diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
> index 0af1ed14dc..4fe996fe49 100644
> --- a/xen/include/asm-x86/hvm/io.h
> +++ b/xen/include/asm-x86/hvm/io.h
> @@ -163,6 +163,13 @@ void register_g2m_portio_handler(struct domain *d);
>  /* HVM port IO handler for PCI accesses. */
>  void register_vpci_portio_handler(struct domain *d);
> 
> +/* HVM MMIO handler for PCI MMCFG accesses. */
> +int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
> +                                unsigned int start_bus, unsigned int end_bus,
> +                                unsigned int seg);
> +/* Destroy tracked MMCFG areas. */
> +void destroy_vpci_mmcfg(struct list_head *domain_mmcfg);
> +
>  #endif /* __ASM_X86_HVM_IO_H__ */
> 

Rest LGTM.

  Paul

> 
> --
> 2.11.0 (Apple Git-81)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel