[Xen-devel] [PATCH v2 03/17] x86/hvm: unify internal portio and mmio intercepts
The implementations of the mmio and portio intercepts are unnecessarily
different. This leads to much code duplication. This patch unifies much
of the intercept handling, leaving only distinct handlers for stdvga
mmio and dpci portio. Subsequent patches will unify those handlers.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c                |   11 +-
 xen/arch/x86/hvm/hpet.c                   |    2 +
 xen/arch/x86/hvm/hvm.c                    |   42 ++-
 xen/arch/x86/hvm/intercept.c              |  581 ++++++++++++++++------------
 xen/arch/x86/hvm/stdvga.c                 |   30 +-
 xen/arch/x86/hvm/vioapic.c                |    2 +
 xen/arch/x86/hvm/vlapic.c                 |    3 +
 xen/arch/x86/hvm/vmsi.c                   |    5 +
 xen/drivers/passthrough/amd/iommu_guest.c |   30 +-
 xen/include/asm-x86/hvm/domain.h          |    8 +-
 xen/include/asm-x86/hvm/io.h              |  132 +++----
 11 files changed, 475 insertions(+), 371 deletions(-)
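The pattern at the heart of the unification may be worth spelling out for
review: each type-specific handler (mmio, portio) embeds a generic
struct hvm_io_handler, a single list of those generic handlers is walked
for dispatch, and each callback recovers its containing structure with
container_of(). The following minimal sketch shows the idiom in isolation;
all names and types in it are simplified stand-ins, not the actual Xen
definitions used in the patch below:

/* sketch.c -- the container_of() dispatch idiom, in isolation.
 * All names here are illustrative stand-ins, not the Xen types. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct io_handler;

struct io_ops {
    int (*accept)(struct io_handler *h, uint64_t addr, uint32_t size);
};

/* The generic handler: all that a common dispatch list needs to hold. */
struct io_handler {
    const struct io_ops *ops;
};

/* A type-specific handler embeds the generic one... */
struct portio_handler {
    uint64_t start, end;
    struct io_handler io;
};

/* ...and its callback recovers the container to reach the port range. */
static int portio_accept(struct io_handler *h, uint64_t addr, uint32_t size)
{
    struct portio_handler *p = container_of(h, struct portio_handler, io);

    return addr >= p->start && addr + size <= p->end;
}

static const struct io_ops portio_ops = { .accept = portio_accept };

int main(void)
{
    struct portio_handler ph = {
        .start = 0xe9, .end = 0xea,      /* a one-byte port range */
        .io = { .ops = &portio_ops },
    };
    struct io_handler *h = &ph.io;       /* the generic view of it */

    printf("%d %d\n",
           h->ops->accept(h, 0xe9, 1),   /* 1: inside the range */
           h->ops->accept(h, 0x3c4, 1)); /* 0: outside the range */
    return 0;
}

This is the same container_of() idiom Linux uses around list_head: the
dispatch list stays type-agnostic while each handler keeps its own state
alongside the embedded generic node.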
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index c8da114..7da2d68 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -148,16 +148,7 @@ static int hvmemul_do_io(
         hvmtrace_io_assist(is_mmio, &p);
     }
 
-    if ( is_mmio )
-    {
-        rc = hvm_mmio_intercept(&p);
-        if ( rc == X86EMUL_UNHANDLEABLE )
-            rc = hvm_buffered_io_intercept(&p);
-    }
-    else
-    {
-        rc = hvm_portio_intercept(&p);
-    }
+    rc = hvm_io_intercept(&p);
 
     switch ( rc )
     {
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 9585ca8..24b5ec9 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -659,6 +659,8 @@ void hpet_init(struct domain *d)
         h->hpet.comparator64[i] = ~0ULL;
         h->pt[i].source = PTSRC_isa;
     }
+
+    register_mmio_handler(d, &hpet_mmio_ops);
 }
 
 void hpet_deinit(struct domain *d)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2736802..1943099 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1451,18 +1451,30 @@ int hvm_domain_initialise(struct domain *d)
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
 
+    INIT_LIST_HEAD(&d->arch.hvm_domain.io_handler.list);
+    spin_lock_init(&d->arch.hvm_domain.io_handler.lock);
+
     hvm_init_cacheattr_region_list(d);
 
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
     if ( rc != 0 )
         goto fail0;
 
-    d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
-    d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
     rc = -ENOMEM;
-    if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
+
+    d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
+    if ( !d->arch.hvm_domain.params )
         goto fail1;
-    d->arch.hvm_domain.io_handler->num_slot = 0;
+
+    d->arch.hvm_domain.mmio_handler = xzalloc_array(struct hvm_mmio_handler,
+                                                    NR_MMIO_HANDLERS);
+    if ( !d->arch.hvm_domain.mmio_handler )
+        goto fail2;
+
+    d->arch.hvm_domain.portio_handler = xzalloc_array(struct hvm_portio_handler,
+                                                      NR_PORTIO_HANDLERS);
+    if ( !d->arch.hvm_domain.portio_handler )
+        goto fail3;
 
     /* Set the default IO Bitmap. */
     if ( is_hardware_domain(d) )
@@ -1471,7 +1483,7 @@ int hvm_domain_initialise(struct domain *d)
         if ( d->arch.hvm_domain.io_bitmap == NULL )
         {
             rc = -ENOMEM;
-            goto fail1;
+            goto fail4;
         }
         memset(d->arch.hvm_domain.io_bitmap, ~0, HVM_IOBITMAP_SIZE);
     }
@@ -1493,30 +1505,37 @@
     rc = vioapic_init(d);
     if ( rc != 0 )
-        goto fail1;
+        goto fail5;
 
     stdvga_init(d);
     rtc_init(d);
 
+    msixtbl_init(d);
+
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
     register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
-        goto fail2;
+        goto fail6;
 
     return 0;
 
- fail2:
+ fail6:
     rtc_deinit(d);
     stdvga_deinit(d);
     vioapic_deinit(d);
- fail1:
+ fail5:
     if ( is_hardware_domain(d) )
         xfree(d->arch.hvm_domain.io_bitmap);
-    xfree(d->arch.hvm_domain.io_handler);
+ fail4:
+    xfree(d->arch.hvm_domain.portio_handler);
+ fail3:
+    xfree(d->arch.hvm_domain.mmio_handler);
+ fail2:
     xfree(d->arch.hvm_domain.params);
+ fail1:
 fail0:
     hvm_destroy_cacheattr_region_list(d);
     return rc;
@@ -1545,7 +1564,8 @@ void hvm_domain_relinquish_resources(struct domain *d)
 
 void hvm_domain_destroy(struct domain *d)
 {
-    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.portio_handler);
+    xfree(d->arch.hvm_domain.mmio_handler);
     xfree(d->arch.hvm_domain.params);
 
     hvm_destroy_cacheattr_region_list(d);
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index dc39b1b..08aa4fd 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -32,45 +32,134 @@
 #include <xen/event.h>
 #include <xen/iommu.h>
 
-static const struct hvm_mmio_ops *const
-hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
+static bool_t hvm_mmio_accept(struct hvm_io_handler *io_handler,
+                              struct vcpu *v,
+                              uint64_t addr,
+                              uint32_t size)
 {
-    &hpet_mmio_ops,
-    &vlapic_mmio_ops,
-    &vioapic_mmio_ops,
-    &msixtbl_mmio_ops,
-    &iommu_mmio_ops
+    struct hvm_mmio_handler *mmio_handler;
+    const struct hvm_mmio_ops *ops;
+
+    mmio_handler = container_of(io_handler,
+                                struct hvm_mmio_handler,
+                                io_handler);
+    ops = mmio_handler->ops;
+
+    BUG_ON(ops == NULL);
+
+    return ops->check(v, addr);
+}
+
+static int hvm_mmio_read(struct hvm_io_handler *io_handler,
+                         struct vcpu *v,
+                         uint64_t addr,
+                         uint32_t size,
+                         uint64_t *data)
+{
+    struct hvm_mmio_handler *mmio_handler;
+    const struct hvm_mmio_ops *ops;
+
+    mmio_handler = container_of(io_handler,
+                                struct hvm_mmio_handler,
+                                io_handler);
+    ops = mmio_handler->ops;
+
+    BUG_ON(ops == NULL);
+
+    return ops->read(v, addr, size, data);
+}
+
+static int hvm_mmio_write(struct hvm_io_handler *io_handler,
+                          struct vcpu *v,
+                          uint64_t addr,
+                          uint32_t size,
+                          uint64_t data)
+{
+    struct hvm_mmio_handler *mmio_handler;
+    const struct hvm_mmio_ops *ops;
+
+    mmio_handler = container_of(io_handler,
+                                struct hvm_mmio_handler,
+                                io_handler);
+    ops = mmio_handler->ops;
+
+    BUG_ON(ops == NULL);
+
+    return ops->write(v, addr, size, data);
+}
+
+static const struct hvm_io_ops mmio_ops = {
+    .accept = hvm_mmio_accept,
+    .read = hvm_mmio_read,
+    .write = hvm_mmio_write
+};
+
+static bool_t hvm_portio_accept(struct hvm_io_handler *io_handler,
+                                struct vcpu *v,
+                                uint64_t addr,
+                                uint32_t size)
+{
+    struct hvm_portio_handler *portio_handler;
+
+    portio_handler = container_of(io_handler,
+                                  struct hvm_portio_handler,
+                                  io_handler);
+
+    return (addr >= portio_handler->start) &&
+           ((addr + size) <= portio_handler->end);
+}
+
+static int hvm_portio_read(struct hvm_io_handler *io_handler,
+                           struct vcpu *v,
+                           uint64_t addr,
+                           uint32_t size,
+                           uint64_t *data)
+{
+    struct hvm_portio_handler *portio_handler;
+    uint32_t val;
+    int rc;
+
+    portio_handler = container_of(io_handler,
+                                  struct hvm_portio_handler,
+                                  io_handler);
+
+    rc = portio_handler->action(IOREQ_READ, addr, size, &val);
+    if ( rc == X86EMUL_OKAY )
+        *data = val;
+
+    return rc;
+}
+
+static int hvm_portio_write(struct hvm_io_handler *io_handler,
+                            struct vcpu *v,
+                            uint64_t addr,
+                            uint32_t size,
+                            uint64_t data)
+{
+    struct hvm_portio_handler *portio_handler;
+    uint32_t val = data;
+
+    portio_handler = container_of(io_handler,
+                                  struct hvm_portio_handler,
+                                  io_handler);
+
+    return portio_handler->action(IOREQ_WRITE, addr, size, &val);
+}
+
+static const struct hvm_io_ops portio_ops = {
+    .accept = hvm_portio_accept,
+    .read = hvm_portio_read,
+    .write = hvm_portio_write
 };
 
-static int hvm_mmio_access(struct vcpu *v,
-                           ioreq_t *p,
-                           hvm_mmio_read_t read,
-                           hvm_mmio_write_t write)
+static int process_io_intercept(struct vcpu *v, ioreq_t *p,
+                                struct hvm_io_handler *handler)
 {
     struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
-    unsigned long data;
+    const struct hvm_io_ops *ops = handler->ops;
     int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
-
-    if ( !p->data_is_ptr )
-    {
-        if ( p->dir == IOREQ_READ )
-        {
-            if ( vio->mmio_retrying )
-            {
-                if ( vio->mmio_large_read_bytes != p->size )
-                    return X86EMUL_UNHANDLEABLE;
-                memcpy(&data, vio->mmio_large_read, p->size);
-                vio->mmio_large_read_bytes = 0;
-                vio->mmio_retrying = 0;
-            }
-            else
-                rc = read(v, p->addr, p->size, &data);
-            p->data = data;
-        }
-        else /* p->dir == IOREQ_WRITE */
-            rc = write(v, p->addr, p->size, p->data);
-        return rc;
-    }
+    uint64_t data;
+    uint64_t addr;
 
     if ( p->dir == IOREQ_READ )
     {
@@ -86,31 +175,40 @@ static int hvm_mmio_access(struct vcpu *v,
             }
             else
             {
-                rc = read(v, p->addr + step * i, p->size, &data);
+                addr = (p->type == IOREQ_TYPE_COPY) ?
+                       p->addr + step * i :
+                       p->addr;
+                rc = ops->read(handler, v, addr, p->size, &data);
                 if ( rc != X86EMUL_OKAY )
                     break;
             }
-            switch ( hvm_copy_to_guest_phys(p->data + step * i,
-                                            &data, p->size) )
+
+            if ( p->data_is_ptr )
             {
-            case HVMCOPY_okay:
-                break;
-            case HVMCOPY_gfn_paged_out:
-            case HVMCOPY_gfn_shared:
-                rc = X86EMUL_RETRY;
-                break;
-            case HVMCOPY_bad_gfn_to_mfn:
-                /* Drop the write as real hardware would. */
-                continue;
-            case HVMCOPY_bad_gva_to_gfn:
-                ASSERT(0);
-                /* fall through */
-            default:
-                rc = X86EMUL_UNHANDLEABLE;
-                break;
+                switch ( hvm_copy_to_guest_phys(p->data + step * i,
+                                                &data, p->size) )
+                {
+                case HVMCOPY_okay:
+                    break;
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
+                    rc = X86EMUL_RETRY;
+                    break;
+                case HVMCOPY_bad_gfn_to_mfn:
+                    /* Drop the write as real hardware would. */
+                    continue;
+                case HVMCOPY_bad_gva_to_gfn:
+                    ASSERT(0);
+                    /* fall through */
+                default:
+                    rc = X86EMUL_UNHANDLEABLE;
+                    break;
+                }
+                if ( rc != X86EMUL_OKAY )
+                    break;
             }
-            if ( rc != X86EMUL_OKAY)
-                break;
+            else
+                p->data = data;
         }
 
         if ( rc == X86EMUL_RETRY )
@@ -120,32 +218,41 @@
             memcpy(vio->mmio_large_read, &data, p->size);
         }
     }
-    else
+    else /* p->dir == IOREQ_WRITE */
    {
         for ( i = 0; i < p->count; i++ )
         {
-            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
-                                              p->size) )
+            if ( p->data_is_ptr )
             {
-            case HVMCOPY_okay:
-                break;
-            case HVMCOPY_gfn_paged_out:
-            case HVMCOPY_gfn_shared:
-                rc = X86EMUL_RETRY;
-                break;
-            case HVMCOPY_bad_gfn_to_mfn:
-                data = ~0;
-                break;
-            case HVMCOPY_bad_gva_to_gfn:
-                ASSERT(0);
-                /* fall through */
-            default:
-                rc = X86EMUL_UNHANDLEABLE;
-                break;
+                switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
+                                                  p->size) )
+                {
+                case HVMCOPY_okay:
+                    break;
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
+                    rc = X86EMUL_RETRY;
+                    break;
+                case HVMCOPY_bad_gfn_to_mfn:
+                    data = ~0;
+                    break;
+                case HVMCOPY_bad_gva_to_gfn:
+                    ASSERT(0);
+                    /* fall through */
+                default:
+                    rc = X86EMUL_UNHANDLEABLE;
+                    break;
+                }
+                if ( rc != X86EMUL_OKAY )
+                    break;
             }
-            if ( rc != X86EMUL_OKAY )
-                break;
-            rc = write(v, p->addr + step * i, p->size, data);
+            else
+                data = p->data;
+
+            addr = (p->type == IOREQ_TYPE_COPY) ?
+                   p->addr + step * i :
+                   p->addr;
+            rc = ops->write(handler, v, addr, p->size, data);
             if ( rc != X86EMUL_OKAY )
                 break;
         }
@@ -163,240 +270,182 @@
     return rc;
 }
 
-bool_t hvm_mmio_internal(paddr_t gpa)
-{
-    struct vcpu *curr = current;
-    unsigned int i;
-
-    for ( i = 0; i < HVM_MMIO_HANDLER_NR; ++i )
-        if ( hvm_mmio_handlers[i]->check(curr, gpa) )
-            return 1;
+static DEFINE_RCU_READ_LOCK(intercept_rcu_lock);
 
-    return 0;
-}
-
-int hvm_mmio_intercept(ioreq_t *p)
+struct hvm_io_handler *hvm_find_io_handler(struct vcpu *v,
+                                           ioreq_t *p)
 {
-    struct vcpu *v = current;
-    int i;
+    struct domain *d = v->domain;
+    struct hvm_io_handler *handler;
 
-    for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
-    {
-        hvm_mmio_check_t check =
-            hvm_mmio_handlers[i]->check;
+    rcu_read_lock(&intercept_rcu_lock);
 
-        if ( check(v, p->addr) )
-        {
-            if ( unlikely(p->count > 1) &&
-                 !check(v, unlikely(p->df)
-                        ? p->addr - (p->count - 1L) * p->size
-                        : p->addr + (p->count - 1L) * p->size) )
-                p->count = 1;
-
-            return hvm_mmio_access(
-                v, p,
-                hvm_mmio_handlers[i]->read,
-                hvm_mmio_handlers[i]->write);
-        }
-    }
-
-    return X86EMUL_UNHANDLEABLE;
-}
-
-static int process_portio_intercept(portio_action_t action, ioreq_t *p)
-{
-    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
-    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
-    uint32_t data;
-
-    if ( !p->data_is_ptr )
+    list_for_each_entry( handler,
+                         &d->arch.hvm_domain.io_handler.list,
+                         list_entry )
     {
-        if ( p->dir == IOREQ_READ )
-        {
-            if ( vio->mmio_retrying )
-            {
-                if ( vio->mmio_large_read_bytes != p->size )
-                    return X86EMUL_UNHANDLEABLE;
-                memcpy(&data, vio->mmio_large_read, p->size);
-                vio->mmio_large_read_bytes = 0;
-                vio->mmio_retrying = 0;
-            }
-            else
-                rc = action(IOREQ_READ, p->addr, p->size, &data);
-            p->data = data;
-        }
-        else
-        {
-            data = p->data;
-            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
-        }
-        return rc;
-    }
+        const struct hvm_io_ops *ops = handler->ops;
+        uint64_t start, end;
 
-    if ( p->dir == IOREQ_READ )
-    {
-        for ( i = 0; i < p->count; i++ )
+        if ( handler->type != p->type )
+            continue;
+
+        switch ( handler->type )
         {
-            if ( vio->mmio_retrying )
+        case IOREQ_TYPE_PIO:
+            start = p->addr;
+            end = p->addr + p->size;
+            break;
+        case IOREQ_TYPE_COPY:
+            if ( p->df )
             {
-                if ( vio->mmio_large_read_bytes != p->size )
-                    return X86EMUL_UNHANDLEABLE;
-                memcpy(&data, vio->mmio_large_read, p->size);
-                vio->mmio_large_read_bytes = 0;
-                vio->mmio_retrying = 0;
+                start = (p->addr - (p->count - 1) * p->size);
+                end = p->addr + p->size;
             }
             else
             {
-                rc = action(IOREQ_READ, p->addr, p->size, &data);
-                if ( rc != X86EMUL_OKAY )
-                    break;
-            }
-            switch ( hvm_copy_to_guest_phys(p->data + step * i,
-                                            &data, p->size) )
-            {
-            case HVMCOPY_okay:
-                break;
-            case HVMCOPY_gfn_paged_out:
-            case HVMCOPY_gfn_shared:
-                rc = X86EMUL_RETRY;
-                break;
-            case HVMCOPY_bad_gfn_to_mfn:
-                /* Drop the write as real hardware would. */
-                continue;
-            case HVMCOPY_bad_gva_to_gfn:
-                ASSERT(0);
-                /* fall through */
-            default:
-                rc = X86EMUL_UNHANDLEABLE;
-                break;
+                start = p->addr;
+                end = p->addr + p->count * p->size;
             }
-            if ( rc != X86EMUL_OKAY)
-                break;
+            break;
+        default:
+            BUG();
         }
 
-        if ( rc == X86EMUL_RETRY )
-        {
-            vio->mmio_retry = 1;
-            vio->mmio_large_read_bytes = p->size;
-            memcpy(vio->mmio_large_read, &data, p->size);
-        }
+        if ( ops->accept(handler, v, start, end - start) )
+            goto done;
     }
-    else /* p->dir == IOREQ_WRITE */
-    {
-        for ( i = 0; i < p->count; i++ )
-        {
-            data = 0;
-            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
-                                              p->size) )
-            {
-            case HVMCOPY_okay:
-                break;
-            case HVMCOPY_gfn_paged_out:
-            case HVMCOPY_gfn_shared:
-                rc = X86EMUL_RETRY;
-                break;
-            case HVMCOPY_bad_gfn_to_mfn:
-                data = ~0;
-                break;
-            case HVMCOPY_bad_gva_to_gfn:
-                ASSERT(0);
-                /* fall through */
-            default:
-                rc = X86EMUL_UNHANDLEABLE;
-                break;
-            }
-            if ( rc != X86EMUL_OKAY )
-                break;
-            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
-            if ( rc != X86EMUL_OKAY )
-                break;
-        }
-        if ( rc == X86EMUL_RETRY )
-            vio->mmio_retry = 1;
-    }
+    handler = NULL;
 
-    if ( i != 0 )
-    {
-        p->count = i;
-        rc = X86EMUL_OKAY;
-    }
+done:
+    rcu_read_unlock(&intercept_rcu_lock);
 
-    return rc;
+    return handler;
 }
 
-/*
- * Check if the request is handled inside xen
- * return value: 0 --not handled; 1 --handled
- */
-int hvm_io_intercept(ioreq_t *p, int type)
+int hvm_io_intercept(ioreq_t *p)
 {
     struct vcpu *v = current;
-    struct hvm_io_handler *handler = v->domain->arch.hvm_domain.io_handler;
-    int i;
-    unsigned long addr, size;
+    struct hvm_io_handler *handler;
 
-    if ( type == HVM_PORTIO )
+    if ( p->type == IOREQ_TYPE_PIO )
     {
         int rc = dpci_ioport_intercept(p);
         if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
             return rc;
     }
+    else if ( p->type == IOREQ_TYPE_COPY )
+    {
+        int rc = stdvga_intercept_mmio(p);
+        if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
+            return rc;
+    }
 
-    for ( i = 0; i < handler->num_slot; i++ )
+    handler = hvm_find_io_handler(v, p);
+
+    if ( handler == NULL )
+        return X86EMUL_UNHANDLEABLE;
+
+    return process_io_intercept(v, p, handler);
+}
+
+void hvm_register_io_handler(struct domain *d,
+                             struct hvm_io_handler *handler,
+                             bool_t head)
+{
+    spin_lock(&d->arch.hvm_domain.io_handler.lock);
+    if ( head )
+        list_add_rcu(&handler->list_entry,
+                     &d->arch.hvm_domain.io_handler.list);
+    else
+        list_add_tail_rcu(&handler->list_entry,
+                          &d->arch.hvm_domain.io_handler.list);
+    spin_unlock(&d->arch.hvm_domain.io_handler.lock);
+}
+
+void register_mmio_handler(struct domain *d, const struct hvm_mmio_ops *ops)
+{
+    struct hvm_mmio_handler *mmio_handler;
+    unsigned int i;
+
+    for ( i = 0; i < NR_MMIO_HANDLERS; i++ )
     {
-        if ( type != handler->hdl_list[i].type )
-            continue;
-        addr = handler->hdl_list[i].addr;
-        size = handler->hdl_list[i].size;
-        if ( (p->addr >= addr) &&
-             ((p->addr + p->size) <= (addr + size)) )
-        {
-            if ( type == HVM_PORTIO )
-                return process_portio_intercept(
-                    handler->hdl_list[i].action.portio, p);
+        mmio_handler = &d->arch.hvm_domain.mmio_handler[i];
 
-            if ( unlikely(p->count > 1) &&
-                 (unlikely(p->df)
-                  ? p->addr - (p->count - 1L) * p->size < addr
-                  : p->addr + p->count * 1L * p->size - 1 >= addr + size) )
-                p->count = 1;
+        if ( mmio_handler->io_handler.ops == NULL )
+            break;
+    }
 
-            return handler->hdl_list[i].action.mmio(p);
-        }
+    BUG_ON(i == NR_MMIO_HANDLERS);
+
+    mmio_handler->io_handler.type = IOREQ_TYPE_COPY;
+    mmio_handler->io_handler.ops = &mmio_ops;
+
+    hvm_register_io_handler(d, &mmio_handler->io_handler, 0);
+
+    mmio_handler->ops = ops;
+}
+
+void register_portio_handler(struct domain *d, uint64_t addr,
+                             uint32_t size, portio_action_t action)
+{
+    struct hvm_portio_handler *portio_handler;
+    unsigned int i;
+
+    for ( i = 0; i < NR_PORTIO_HANDLERS; i++ )
+    {
+        portio_handler = &d->arch.hvm_domain.portio_handler[i];
+
+        if ( portio_handler->io_handler.ops == NULL )
+            break;
     }
 
-    return X86EMUL_UNHANDLEABLE;
+    BUG_ON(i == NR_PORTIO_HANDLERS);
+
+    portio_handler->io_handler.type = IOREQ_TYPE_PIO;
+    portio_handler->io_handler.ops = &portio_ops;
+
+    hvm_register_io_handler(d, &portio_handler->io_handler, 0);
+
+    portio_handler->start = addr;
+    portio_handler->end = addr + size;
+    portio_handler->action = action;
 }
 
-void register_io_handler(
-    struct domain *d, unsigned long addr, unsigned long size,
-    void *action, int type)
+void relocate_portio_handler(struct domain *d, uint64_t old_addr,
+                             uint64_t new_addr, uint32_t size)
 {
-    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
-    int num = handler->num_slot;
+    struct hvm_portio_handler *portio_handler;
+    unsigned int i;
+
+    for ( i = 0; i < NR_PORTIO_HANDLERS; i++ )
+    {
+        portio_handler = &d->arch.hvm_domain.portio_handler[i];
+
+        if ( portio_handler->io_handler.ops == NULL )
+            break;
+
+        if ( portio_handler->start == old_addr &&
+             portio_handler->end == old_addr + size )
+            goto found;
+    }
 
-    BUG_ON(num >= MAX_IO_HANDLER);
+    BUG();
 
-    handler->hdl_list[num].addr = addr;
-    handler->hdl_list[num].size = size;
-    handler->hdl_list[num].action.ptr = action;
-    handler->hdl_list[num].type = type;
-    handler->num_slot++;
+ found:
+    portio_handler->start = new_addr;
+    portio_handler->end = new_addr + size;
 }
 
-void relocate_io_handler(
-    struct domain *d, unsigned long old_addr, unsigned long new_addr,
-    unsigned long size, int type)
+bool_t hvm_mmio_internal(paddr_t gpa)
 {
-    struct hvm_io_handler *handler = d->arch.hvm_domain.io_handler;
-    int i;
-
-    for ( i = 0; i < handler->num_slot; i++ )
-        if ( (handler->hdl_list[i].addr == old_addr) &&
-             (handler->hdl_list[i].size == size) &&
-             (handler->hdl_list[i].type == type) )
-            handler->hdl_list[i].addr = new_addr;
+    ioreq_t p = {
+        .type = IOREQ_TYPE_COPY,
+        .addr = gpa
+    };
+
+    return (hvm_find_io_handler(current, &p) != NULL);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 13d1029..34cdd90 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -547,12 +547,27 @@ static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
     return 1;
 }
 
-static int stdvga_intercept_mmio(ioreq_t *p)
+int stdvga_intercept_mmio(ioreq_t *p)
 {
     struct domain *d = current->domain;
     struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
+    uint64_t start, end;
     int buf = 0, rc;
 
+    if ( p->df )
+    {
+        start = (p->addr - (p->count - 1) * p->size);
+        end = p->addr + p->size;
+    }
+    else
+    {
+        start = p->addr;
+        end = p->addr + p->count * p->size;
+    }
+
+    if ( (start < VGA_MEM_BASE) || (end > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+        return X86EMUL_UNHANDLEABLE;
+
     if ( p->size > 8 )
     {
         gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
@@ -619,9 +634,6 @@ void stdvga_init(struct domain *d)
         register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
         /* Graphics registers. */
         register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);
-        /* MMIO. */
-        register_buffered_io_handler(
-            d, VGA_MEM_BASE, VGA_MEM_SIZE, stdvga_intercept_mmio);
     }
 }
 
@@ -638,3 +650,13 @@ void stdvga_deinit(struct domain *d)
         s->vram_page[i] = NULL;
     }
 }
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index e4ab336..df76019 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -456,6 +456,8 @@ int vioapic_init(struct domain *d)
     d->arch.hvm_domain.vioapic->domain = d;
     vioapic_reset(d);
 
+    register_mmio_handler(d, &vioapic_mmio_ops);
+
     return 0;
 }
 
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 56171d6..6916e14 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1463,6 +1463,9 @@ int vlapic_init(struct vcpu *v)
                  vlapic_init_sipi_action,
                  (unsigned long)v);
 
+    if ( v->vcpu_id == 0 )
+        register_mmio_handler(v->domain, &vlapic_mmio_ops);
+
     return 0;
 }
 
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index bee5d03..5245a61 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -531,6 +531,11 @@ found:
     spin_unlock_irq(&irq_desc->lock);
 }
 
+void msixtbl_init(struct domain *d)
+{
+    register_mmio_handler(d, &msixtbl_mmio_ops);
+}
+
 void msixtbl_pt_cleanup(struct domain *d)
 {
     struct msixtbl_entry *entry, *temp;
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 7b0c102..1ac85bd 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -868,6 +868,20 @@ static void guest_iommu_reg_init(struct guest_iommu *iommu)
     iommu->reg_ext_feature.hi = upper;
 }
 
+static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
+{
+    struct guest_iommu *iommu = vcpu_iommu(v);
+
+    return iommu && addr >= iommu->mmio_base &&
+           addr < iommu->mmio_base + IOMMU_MMIO_SIZE;
+}
+
+const struct hvm_mmio_ops iommu_mmio_ops = {
+    .check = guest_iommu_mmio_range,
+    .read = guest_iommu_mmio_read,
+    .write = guest_iommu_mmio_write
+};
+
 /* Domain specific initialization */
 int guest_iommu_init(struct domain* d)
 {
@@ -894,6 +908,8 @@ int guest_iommu_init(struct domain* d)
 
     spin_lock_init(&iommu->lock);
 
+    register_mmio_handler(d, &iommu_mmio_ops);
+
     return 0;
 }
 
@@ -910,17 +926,3 @@ void guest_iommu_destroy(struct domain *d)
 
     domain_hvm_iommu(d)->arch.g_iommu = NULL;
 }
-
-static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
-{
-    struct guest_iommu *iommu = vcpu_iommu(v);
-
-    return iommu && addr >= iommu->mmio_base &&
-           addr < iommu->mmio_base + IOMMU_MMIO_SIZE;
-}
-
-const struct hvm_mmio_ops iommu_mmio_ops = {
-    .check = guest_iommu_mmio_range,
-    .read = guest_iommu_mmio_read,
-    .write = guest_iommu_mmio_write
-};
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index bdab45d..d9c0cfe 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -92,7 +92,13 @@ struct hvm_domain {
 
     struct pl_time         pl_time;
 
-    struct hvm_io_handler *io_handler;
+    struct hvm_mmio_handler   *mmio_handler;
+    struct hvm_portio_handler *portio_handler;
+
+    struct {
+        spinlock_t       lock;
+        struct list_head list;
+    } io_handler;
 
     /* Lock protects access to irq, vpic and vioapic. */
     spinlock_t             irq_lock;
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index f2aaec5..752b6e9 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -25,10 +25,34 @@
 #include <public/hvm/ioreq.h>
 #include <public/event_channel.h>
 
-#define MAX_IO_HANDLER 16
+struct hvm_io_handler;
+
+typedef int (*hvm_io_read_t)(struct hvm_io_handler *handler,
+                             struct vcpu *v,
+                             uint64_t addr,
+                             uint32_t size,
+                             uint64_t *val);
+typedef int (*hvm_io_write_t)(struct hvm_io_handler *handler,
+                              struct vcpu *v,
+                              uint64_t addr,
+                              uint32_t size,
+                              uint64_t val);
+typedef bool_t (*hvm_io_accept_t)(struct hvm_io_handler *handler,
+                                  struct vcpu *v,
+                                  uint64_t addr,
+                                  uint32_t size);
+
+struct hvm_io_ops {
+    hvm_io_accept_t accept;
+    hvm_io_read_t   read;
+    hvm_io_write_t  write;
+};
 
-#define HVM_PORTIO 0
-#define HVM_BUFFERED_IO 2
+struct hvm_io_handler {
+    struct list_head        list_entry;
+    const struct hvm_io_ops *ops;
+    uint8_t                 type;
+};
 
 typedef int (*hvm_mmio_read_t)(struct vcpu *v,
                                unsigned long addr,
@@ -40,81 +64,47 @@ typedef int (*hvm_mmio_write_t)(struct vcpu *v,
                                 unsigned long val);
 typedef int (*hvm_mmio_check_t)(struct vcpu *v, unsigned long addr);
 
-typedef int (*portio_action_t)(
-    int dir, uint32_t port, uint32_t bytes, uint32_t *val);
-typedef int (*mmio_action_t)(ioreq_t *);
-struct io_handler {
-    int                 type;
-    unsigned long       addr;
-    unsigned long       size;
-    union {
-        portio_action_t portio;
-        mmio_action_t   mmio;
-        void           *ptr;
-    } action;
-};
-
-struct hvm_io_handler {
-    int     num_slot;
-    struct  io_handler hdl_list[MAX_IO_HANDLER];
-};
-
 struct hvm_mmio_ops {
     hvm_mmio_check_t check;
     hvm_mmio_read_t  read;
     hvm_mmio_write_t write;
 };
 
-extern const struct hvm_mmio_ops hpet_mmio_ops;
-extern const struct hvm_mmio_ops vlapic_mmio_ops;
-extern const struct hvm_mmio_ops vioapic_mmio_ops;
-extern const struct hvm_mmio_ops msixtbl_mmio_ops;
-extern const struct hvm_mmio_ops iommu_mmio_ops;
+#define NR_MMIO_HANDLERS 5
 
-#define HVM_MMIO_HANDLER_NR 5
+struct hvm_mmio_handler {
+    const struct hvm_mmio_ops *ops;
+    struct hvm_io_handler     io_handler;
+};
 
-int hvm_io_intercept(ioreq_t *p, int type);
-void register_io_handler(
-    struct domain *d, unsigned long addr, unsigned long size,
-    void *action, int type);
-void relocate_io_handler(
-    struct domain *d, unsigned long old_addr, unsigned long new_addr,
-    unsigned long size, int type);
+typedef int (*portio_action_t)(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val);
+
+#define NR_PORTIO_HANDLERS 16
+
+struct hvm_portio_handler {
+    uint64_t        start, end;
+    portio_action_t action;
+    struct hvm_io_handler io_handler;
+};
 
-static inline int hvm_portio_intercept(ioreq_t *p)
-{
-    return hvm_io_intercept(p, HVM_PORTIO);
-}
+void hvm_register_io_handler(struct domain *d,
+                             struct hvm_io_handler *handler,
+                             bool_t head);
+struct hvm_io_handler *hvm_find_io_handler(struct vcpu *v,
+                                           ioreq_t *p);
 
-static inline int hvm_buffered_io_intercept(ioreq_t *p)
-{
-    return hvm_io_intercept(p, HVM_BUFFERED_IO);
-}
+int hvm_io_intercept(ioreq_t *p);
+
+void register_mmio_handler(struct domain *d,
+                           const struct hvm_mmio_ops *ops);
+void register_portio_handler(struct domain *d, uint64_t addr,
+                             uint32_t size, portio_action_t action);
+void relocate_portio_handler(struct domain *d, uint64_t old_addr,
+                             uint64_t new_addr, uint32_t size);
 
 bool_t hvm_mmio_internal(paddr_t gpa);
-int hvm_mmio_intercept(ioreq_t *p);
-int hvm_buffered_io_send(ioreq_t *p);
 
-static inline void register_portio_handler(
-    struct domain *d, unsigned long addr,
-    unsigned long size, portio_action_t action)
-{
-    register_io_handler(d, addr, size, action, HVM_PORTIO);
-}
-
-static inline void relocate_portio_handler(
-    struct domain *d, unsigned long old_addr, unsigned long new_addr,
-    unsigned long size)
-{
-    relocate_io_handler(d, old_addr, new_addr, size, HVM_PORTIO);
-}
-
-static inline void register_buffered_io_handler(
-    struct domain *d, unsigned long addr,
-    unsigned long size, mmio_action_t action)
-{
-    register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
-}
+int hvm_buffered_io_send(ioreq_t *p);
 
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
@@ -127,6 +117,7 @@ void hvm_io_assist(ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   const union vioapic_redir_entry *ent);
 void msix_write_completion(struct vcpu *);
+void msixtbl_init(struct domain *d);
 
 struct hvm_hw_stdvga {
     uint8_t sr_index;
@@ -141,8 +132,19 @@ struct hvm_hw_stdvga {
 };
 
 void stdvga_init(struct domain *d);
+int stdvga_intercept_mmio(ioreq_t *p);
 void stdvga_deinit(struct domain *d);
 
 extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
 
 #endif /* __ASM_X86_HVM_IO_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel