[Xen-devel] [PATCH 04/17] x86/hvm: unify dpci portio intercept with standard portio intercept
This patch re-works the dpci portio intercepts so that they can be unified
with standard portio handling thereby removing a substantial amount of
code duplication.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |   2 +
 xen/arch/x86/hvm/intercept.c     |   8 +-
 xen/arch/x86/hvm/io.c            | 232 +++++++++++++-------------------------
 xen/include/asm-x86/hvm/domain.h |   1 +
 xen/include/asm-x86/hvm/io.h     |   3 +
 xen/include/asm-x86/hvm/vcpu.h   |   2 +
 xen/include/xen/iommu.h          |   1 -
 7 files changed, 89 insertions(+), 160 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f4b57a4..1ecb528 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1517,6 +1517,8 @@ int hvm_domain_initialise(struct domain *d)
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
     register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
 
+    register_dpci_portio_handler(d);
+
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
         goto fail6;
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index c03502c..9e20a72 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -329,13 +329,7 @@ int hvm_io_intercept(ioreq_t *p)
     struct vcpu *v = current;
     struct hvm_io_handler *handler;
 
-    if ( p->type == IOREQ_TYPE_PIO )
-    {
-        int rc = dpci_ioport_intercept(p);
-        if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
-            return rc;
-    }
-    else if ( p->type == IOREQ_TYPE_COPY )
+    if ( p->type == IOREQ_TYPE_COPY )
     {
         int rc = stdvga_intercept_mmio(p);
         if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 2ba6272..367d51e 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -208,185 +208,113 @@ void hvm_io_assist(ioreq_t *p)
     }
 }
 
-static int dpci_ioport_read(uint32_t mport, ioreq_t *p)
+static bool_t dpci_portio_accept(struct hvm_io_handler *io_handler,
+                                 struct vcpu *v,
+                                 uint64_t addr,
+                                 uint32_t size)
 {
-    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
-    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
-    uint32_t data = 0;
-
-    for ( i = 0; i < p->count; i++ )
-    {
-        if ( vio->mmio_retrying )
-        {
-            if ( vio->mmio_large_read_bytes != p->size )
-                return X86EMUL_UNHANDLEABLE;
-            memcpy(&data, vio->mmio_large_read, p->size);
-            vio->mmio_large_read_bytes = 0;
-            vio->mmio_retrying = 0;
-        }
-        else switch ( p->size )
-        {
-        case 1:
-            data = inb(mport);
-            break;
-        case 2:
-            data = inw(mport);
-            break;
-        case 4:
-            data = inl(mport);
-            break;
-        default:
-            BUG();
-        }
+    struct hvm_iommu *hd = domain_hvm_iommu(v->domain);
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+    struct g2m_ioport *g2m_ioport;
+    uint32_t start, end;
+    uint32_t gport = addr, mport;
 
-        if ( p->data_is_ptr )
-        {
-            switch ( hvm_copy_to_guest_phys(p->data + step * i,
-                                            &data, p->size) )
-            {
-            case HVMCOPY_okay:
-                break;
-            case HVMCOPY_gfn_paged_out:
-            case HVMCOPY_gfn_shared:
-                rc = X86EMUL_RETRY;
-                break;
-            case HVMCOPY_bad_gfn_to_mfn:
-                /* Drop the write as real hardware would. */
-                continue;
-            case HVMCOPY_bad_gva_to_gfn:
-                ASSERT(0);
-                /* fall through */
-            default:
-                rc = X86EMUL_UNHANDLEABLE;
-                break;
-            }
-            if ( rc != X86EMUL_OKAY)
-                break;
-        }
-        else
-            p->data = data;
-    }
 
-    if ( rc == X86EMUL_RETRY )
+    list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
     {
-        vio->mmio_retry = 1;
-        vio->mmio_large_read_bytes = p->size;
-        memcpy(vio->mmio_large_read, &data, p->size);
+        start = g2m_ioport->gport;
+        end = start + g2m_ioport->np;
+        if ( (gport >= start) && (gport < end) )
+            goto found;
     }
 
-    if ( i != 0 )
+    return 0;
+
+ found:
+    mport = (gport - start) + g2m_ioport->mport;
+
+    if ( !ioports_access_permitted(current->domain, mport,
+                                   mport + size - 1) )
     {
-        p->count = i;
-        rc = X86EMUL_OKAY;
+        gdprintk(XENLOG_ERR, "Error: access to gport=%#x denied!\n",
+                 (uint32_t)addr);
+        return 0;
     }
 
-    return rc;
+    vio->g2m_ioport = g2m_ioport;
+    return 1;
 }
 
-static int dpci_ioport_write(uint32_t mport, ioreq_t *p)
+static int dpci_portio_read(struct hvm_io_handler *io_handler,
+                            struct vcpu *v,
+                            uint64_t addr,
+                            uint32_t size,
+                            uint64_t *data)
 {
-    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
-    uint32_t data;
-
-    for ( i = 0; i < p->count; i++ )
-    {
-        data = p->data;
-        if ( p->data_is_ptr )
-        {
-            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
-                                              p->size) )
-            {
-            case HVMCOPY_okay:
-                break;
-            case HVMCOPY_gfn_paged_out:
-            case HVMCOPY_gfn_shared:
-                rc = X86EMUL_RETRY;
-                break;
-            case HVMCOPY_bad_gfn_to_mfn:
-                data = ~0;
-                break;
-            case HVMCOPY_bad_gva_to_gfn:
-                ASSERT(0);
-                /* fall through */
-            default:
-                rc = X86EMUL_UNHANDLEABLE;
-                break;
-            }
-            if ( rc != X86EMUL_OKAY)
-                break;
-        }
-
-        switch ( p->size )
-        {
-        case 1:
-            outb(data, mport);
-            break;
-        case 2:
-            outw(data, mport);
-            break;
-        case 4:
-            outl(data, mport);
-            break;
-        default:
-            BUG();
-        }
-    }
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+    struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
+    uint32_t mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
 
-    if ( rc == X86EMUL_RETRY )
-        current->arch.hvm_vcpu.hvm_io.mmio_retry = 1;
-
-    if ( i != 0 )
+    switch ( size )
     {
-        p->count = i;
-        rc = X86EMUL_OKAY;
+    case 1:
+        *data = inb(mport);
+        break;
+    case 2:
+        *data = inw(mport);
+        break;
+    case 4:
+        *data = inl(mport);
+        break;
+    default:
+        BUG();
     }
 
-    return rc;
+    return X86EMUL_OKAY;
 }
 
-int dpci_ioport_intercept(ioreq_t *p)
+static int dpci_portio_write(struct hvm_io_handler *io_handler,
+                             struct vcpu *v,
+                             uint64_t addr,
+                             uint32_t size,
+                             uint64_t data)
 {
-    struct domain *d = current->domain;
-    struct hvm_iommu *hd = domain_hvm_iommu(d);
-    struct g2m_ioport *g2m_ioport;
-    unsigned int mport, gport = p->addr;
-    unsigned int s = 0, e = 0;
-    int rc;
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+    struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
+    uint32_t mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
 
-    list_for_each_entry( g2m_ioport, &hd->arch.g2m_ioport_list, list )
+    switch ( size )
     {
-        s = g2m_ioport->gport;
-        e = s + g2m_ioport->np;
-        if ( (gport >= s) && (gport < e) )
-            goto found;
+    case 1:
+        outb(data, mport);
+        break;
+    case 2:
+        outw(data, mport);
+        break;
+    case 4:
+        outl(data, mport);
+        break;
+    default:
+        BUG();
     }
 
-    return X86EMUL_UNHANDLEABLE;
+    return X86EMUL_OKAY;
+}
 
- found:
-    mport = (gport - s) + g2m_ioport->mport;
+static const struct hvm_io_ops dpci_portio_ops = {
+    .accept = dpci_portio_accept,
+    .read = dpci_portio_read,
+    .write = dpci_portio_write
+};
 
-    if ( !ioports_access_permitted(d, mport, mport + p->size - 1) )
-    {
-        gdprintk(XENLOG_ERR, "Error: access to gport=%#x denied!\n",
-                 (uint32_t)p->addr);
-        return X86EMUL_UNHANDLEABLE;
-    }
+void register_dpci_portio_handler(struct domain *d)
+{
+    struct hvm_io_handler *handler = &d->arch.hvm_domain.dpci_handler;
 
-    switch ( p->dir )
-    {
-    case IOREQ_READ:
-        rc = dpci_ioport_read(mport, p);
-        break;
-    case IOREQ_WRITE:
-        rc = dpci_ioport_write(mport, p);
-        break;
-    default:
-        gdprintk(XENLOG_ERR, "Error: couldn't handle p->dir = %d", p->dir);
-        rc = X86EMUL_UNHANDLEABLE;
-    }
+    handler->type = IOREQ_TYPE_PIO;
+    handler->ops = &dpci_portio_ops;
 
-    return rc;
+    hvm_register_io_handler(d, handler, 1);
 }
 
 /*
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index d9c0cfe..5d9c9f5 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -94,6 +94,7 @@ struct hvm_domain {
 
     struct hvm_mmio_handler *mmio_handler;
     struct hvm_portio_handler *portio_handler;
+    struct hvm_io_handler dpci_handler;
 
     struct {
         spinlock_t lock;
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 633a210..c3bab2f 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -137,6 +137,9 @@ int stdvga_intercept_mmio(ioreq_t *p);
 void stdvga_deinit(struct domain *d);
 
 extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
+
+void register_dpci_portio_handler(struct domain *d);
+
 #endif /* __ASM_X86_HVM_IO_H__ */
 
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 3d8f4dc..dd08416 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -77,6 +77,8 @@ struct hvm_vcpu_io {
     bool_t mmio_retry, mmio_retrying;
 
     unsigned long msix_unmask_address;
+
+    struct g2m_ioport *g2m_ioport;
 };
 
 #define VMCX_EADDR (~0ULL)
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index b30bf41..1d00696 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -93,7 +93,6 @@ void pt_pci_init(void);
 
 struct pirq;
 int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
-int dpci_ioport_intercept(ioreq_t *p);
 
 int pt_irq_create_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
 int pt_irq_destroy_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
-- 
1.7.10.4
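
For reference, the shape of the interface this patch moves dpci portio onto
(a per-handler accept/read/write ops structure consulted by a generic
dispatcher) can be illustrated with a small standalone sketch. This is plain
C, not Xen code: the type and function names below are hypothetical
stand-ins for hvm_io_handler / hvm_io_ops, and a single handler stands in
for the per-domain list that Xen actually walks.

/*
 * Standalone illustration (not Xen code) of the accept/read/write
 * handler pattern.  All names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct io_handler;

struct io_ops {
    /* Claim the access if this handler covers [addr, addr + size). */
    bool (*accept)(struct io_handler *h, uint64_t addr, uint32_t size);
    int (*read)(struct io_handler *h, uint64_t addr, uint32_t size,
                uint64_t *data);
    int (*write)(struct io_handler *h, uint64_t addr, uint32_t size,
                 uint64_t data);
};

struct io_handler {
    uint64_t gport;             /* guest port base                */
    uint64_t np;                /* number of ports in the range   */
    uint64_t mport;             /* machine port base it maps to   */
    const struct io_ops *ops;
};

static bool range_accept(struct io_handler *h, uint64_t addr, uint32_t size)
{
    return addr >= h->gport && addr + size <= h->gport + h->np;
}

static int range_read(struct io_handler *h, uint64_t addr, uint32_t size,
                      uint64_t *data)
{
    /* A real handler would issue inb/inw/inl on the translated port. */
    *data = 0xff;
    printf("read  %u bytes @ machine port %#llx\n", (unsigned)size,
           (unsigned long long)(addr - h->gport + h->mport));
    return 0;
}

static int range_write(struct io_handler *h, uint64_t addr, uint32_t size,
                       uint64_t data)
{
    printf("write %u bytes @ machine port %#llx <- %#llx\n", (unsigned)size,
           (unsigned long long)(addr - h->gport + h->mport),
           (unsigned long long)data);
    return 0;
}

static const struct io_ops range_ops = {
    .accept = range_accept,
    .read   = range_read,
    .write  = range_write,
};

int main(void)
{
    /* One registered handler; Xen keeps a per-domain list of these. */
    struct io_handler dpci_like = {
        .gport = 0xc000, .np = 0x40, .mport = 0xe000, .ops = &range_ops,
    };
    uint64_t val;

    /* Dispatcher: a handler whose accept() claims the access gets it. */
    if ( dpci_like.ops->accept(&dpci_like, 0xc010, 4) )
    {
        dpci_like.ops->read(&dpci_like, 0xc010, 4, &val);
        dpci_like.ops->write(&dpci_like, 0xc010, 4, val);
    }

    return 0;
}

The point of the pattern is that accept() performs the range lookup once
(in the patch it also caches the matched g2m_ioport in the vcpu), so the
read() and write() callbacks only translate the offset and issue the
access, which is what lets dpci portio share the common portio path.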