[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 2/9] x86/ecam: add handlers for the PVH Dom0 MMCFG areas
Introduce a set of handlers for the accesses to the ECAM areas. Those areas are setup based on the contents of the hardware MMCFG tables, and the list of handled ECAM areas is stored inside of the hvm_domain struct. The read/writes are forwarded to the generic vpci handlers once the address is decoded in order to obtain the device and register the guest is trying to access. Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx> --- Cc: Jan Beulich <jbeulich@xxxxxxxx> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Cc: Paul Durrant <paul.durrant@xxxxxxxxxx> --- Changes since v1: - Added locking. --- xen/arch/x86/hvm/dom0_build.c | 27 ++++++++ xen/arch/x86/hvm/hvm.c | 10 +++ xen/arch/x86/hvm/io.c | 135 +++++++++++++++++++++++++++++++++++++++ xen/include/asm-x86/hvm/domain.h | 10 +++ xen/include/asm-x86/hvm/io.h | 4 ++ 5 files changed, 186 insertions(+) diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c index 020c355faf..ca88c5835e 100644 --- a/xen/arch/x86/hvm/dom0_build.c +++ b/xen/arch/x86/hvm/dom0_build.c @@ -38,6 +38,8 @@ #include <public/hvm/hvm_info_table.h> #include <public/hvm/hvm_vcpu.h> +#include "../x86_64/mmconfig.h" + /* * Have the TSS cover the ISA port range, which makes it * - 104 bytes base structure @@ -1048,6 +1050,24 @@ static int __init pvh_setup_acpi(struct domain *d, paddr_t start_info) return 0; } +int __init pvh_setup_ecam(struct domain *d) +{ + unsigned int i; + int rc; + + for ( i = 0; i < pci_mmcfg_config_num; i++ ) + { + size_t size = (pci_mmcfg_config[i].end_bus_number + 1) << 20; + + rc = register_vpci_ecam_handler(d, pci_mmcfg_config[i].address, size, + pci_mmcfg_config[i].pci_segment); + if ( rc ) + return rc; + } + + return 0; +} + int __init dom0_construct_pvh(struct domain *d, const module_t *image, unsigned long image_headroom, module_t *initrd, @@ -1090,6 +1110,13 @@ int __init dom0_construct_pvh(struct domain *d, const module_t *image, return rc; } + rc = pvh_setup_ecam(d); + if ( rc ) + { + printk("Failed 
to setup Dom0 PCI ECAM areas: %d\n", rc); + return rc; + } + panic("Building a PVHv2 Dom0 is not yet supported."); return 0; } diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 7f3322ede6..ef3ad2a615 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -613,6 +613,7 @@ int hvm_domain_initialise(struct domain *d) spin_lock_init(&d->arch.hvm_domain.write_map.lock); INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list); INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list); + INIT_LIST_HEAD(&d->arch.hvm_domain.ecam_regions); hvm_init_cacheattr_region_list(d); @@ -725,6 +726,7 @@ void hvm_domain_destroy(struct domain *d) { struct list_head *ioport_list, *tmp; struct g2m_ioport *ioport; + struct hvm_ecam *ecam, *etmp; xfree(d->arch.hvm_domain.io_handler); d->arch.hvm_domain.io_handler = NULL; @@ -752,6 +754,14 @@ void hvm_domain_destroy(struct domain *d) list_del(&ioport->list); xfree(ioport); } + + list_for_each_entry_safe ( ecam, etmp, &d->arch.hvm_domain.ecam_regions, + next ) + { + list_del(&ecam->next); + xfree(ecam); + } + } static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h) diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c index 15048da556..319cf9287b 100644 --- a/xen/arch/x86/hvm/io.c +++ b/xen/arch/x86/hvm/io.c @@ -391,6 +391,141 @@ void register_vpci_portio_handler(struct domain *d) handler->ops = &vpci_portio_ops; } +/* Handlers to trap PCI ECAM config accesses. 
*/ +static struct hvm_ecam *vpci_ecam_find(struct domain *d, unsigned long addr) +{ + struct hvm_ecam *ecam = NULL; + + ASSERT(vpci_locked(d)); + list_for_each_entry ( ecam, &d->arch.hvm_domain.ecam_regions, next ) + if ( addr >= ecam->addr && addr < ecam->addr + ecam->size ) + return ecam; + + return NULL; +} + +static void vpci_ecam_decode_addr(unsigned long addr, unsigned int *bus, + unsigned int *devfn, unsigned int *reg) +{ + *bus = (addr >> 20) & 0xff; + *devfn = (addr >> 12) & 0xff; + *reg = addr & 0xfff; +} + +static int vpci_ecam_accept(struct vcpu *v, unsigned long addr) +{ + struct domain *d = v->domain; + int found; + + vpci_lock(d); + found = !!vpci_ecam_find(v->domain, addr); + vpci_unlock(d); + + return found; +} + +static int vpci_ecam_read(struct vcpu *v, unsigned long addr, + unsigned int len, unsigned long *data) +{ + struct domain *d = v->domain; + struct hvm_ecam *ecam; + unsigned int bus, devfn, reg; + uint32_t data32; + int rc; + + vpci_lock(d); + ecam = vpci_ecam_find(d, addr); + if ( !ecam ) + { + vpci_unlock(d); + return X86EMUL_UNHANDLEABLE; + } + + vpci_ecam_decode_addr(addr - ecam->addr, &bus, &devfn, ®); + + if ( vpci_access_check(reg, len) || reg >= 0xfff ) + { + vpci_unlock(d); + return X86EMUL_UNHANDLEABLE; + } + + rc = xen_vpci_read(ecam->segment, bus, devfn, reg, len, &data32); + if ( !rc ) + *data = data32; + vpci_unlock(d); + + return rc ? 
X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; +} + +static int vpci_ecam_write(struct vcpu *v, unsigned long addr, + unsigned int len, unsigned long data) +{ + struct domain *d = v->domain; + struct hvm_ecam *ecam; + unsigned int bus, devfn, reg; + int rc; + + vpci_lock(d); + ecam = vpci_ecam_find(d, addr); + if ( !ecam ) + { + vpci_unlock(d); + return X86EMUL_UNHANDLEABLE; + } + + vpci_ecam_decode_addr(addr - ecam->addr, &bus, &devfn, ®); + + if ( vpci_access_check(reg, len) || reg >= 0xfff ) + { + vpci_unlock(d); + return X86EMUL_UNHANDLEABLE; + } + + rc = xen_vpci_write(ecam->segment, bus, devfn, reg, len, data); + vpci_unlock(d); + + return rc ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; +} + +static const struct hvm_mmio_ops vpci_ecam_ops = { + .check = vpci_ecam_accept, + .read = vpci_ecam_read, + .write = vpci_ecam_write, +}; + +int register_vpci_ecam_handler(struct domain *d, paddr_t addr, size_t size, + unsigned int seg) +{ + struct hvm_ecam *ecam; + + ASSERT(is_hardware_domain(d)); + + vpci_lock(d); + if ( vpci_ecam_find(d, addr) ) + { + vpci_unlock(d); + return -EEXIST; + } + + ecam = xzalloc(struct hvm_ecam); + if ( !ecam ) + { + vpci_unlock(d); + return -ENOMEM; + } + + if ( list_empty(&d->arch.hvm_domain.ecam_regions) ) + register_mmio_handler(d, &vpci_ecam_ops); + + ecam->addr = addr; + ecam->segment = seg; + ecam->size = size; + list_add(&ecam->next, &d->arch.hvm_domain.ecam_regions); + vpci_unlock(d); + + return 0; +} + /* * Local variables: * mode: C diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h index cbf4170789..ce710496c7 100644 --- a/xen/include/asm-x86/hvm/domain.h +++ b/xen/include/asm-x86/hvm/domain.h @@ -100,6 +100,13 @@ struct hvm_pi_ops { void (*do_resume)(struct vcpu *v); }; +struct hvm_ecam { + paddr_t addr; + size_t size; + unsigned int segment; + struct list_head next; +}; + struct hvm_domain { /* Guest page range used for non-default ioreq servers */ struct { @@ -187,6 +194,9 @@ struct hvm_domain { /* Lock for the 
PCI emulation layer (vPCI). */ spinlock_t vpci_lock; + /* List of ECAM (MMCFG) regions trapped by Xen. */ + struct list_head ecam_regions; + /* List of permanently write-mapped pages. */ struct { spinlock_t lock; diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h index 2dbf92f13e..0434aca706 100644 --- a/xen/include/asm-x86/hvm/io.h +++ b/xen/include/asm-x86/hvm/io.h @@ -158,6 +158,10 @@ void register_g2m_portio_handler(struct domain *d); /* HVM port IO handler for PCI accesses. */ void register_vpci_portio_handler(struct domain *d); +/* HVM MMIO handler for PCI ECAM accesses. */ +int register_vpci_ecam_handler(struct domain *d, paddr_t addr, size_t size, + unsigned int seg); + #endif /* __ASM_X86_HVM_IO_H__ */ -- 2.11.0 (Apple Git-81) _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.