[Xen-devel] [PATCH v7 09/15] x86/hvm: only call hvm_io_assist() from hvm_wait_for_io()
By removing the calls to hvm_io_assist() from hvmemul_do_io() (where it is
replaced by a single assignment) and from hvm_complete_assist_req() (which
is itself replaced by a call to hvm_process_io_intercept() with a suitable
set of ops), hvm_io_assist() can be moved into hvm.c and made static (and
hence be a candidate for inlining).

The calls to msix_write_completion() and vcpu_end_shutdown_deferral() are
also made unconditionally, because the ioreq state will always be
STATE_IOREQ_NONE at the end of hvm_io_assist(), so the test was pointless.
These calls are also only relevant when the emulation has been handled
externally, which is now always the case.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v7:
 - No change

v6:
 - Added Andrew's reviewed-by

v5:
 - Added Jan's acked-by
---
 xen/arch/x86/hvm/emulate.c    | 34 ++++++++++++++++++---
 xen/arch/x86/hvm/hvm.c        | 67 ++++++++++++++++++++++-------------
 xen/arch/x86/hvm/io.c         | 39 ------------------------
 xen/include/asm-x86/hvm/hvm.h |  1 -
 xen/include/asm-x86/hvm/io.h  |  1 -
 5 files changed, 66 insertions(+), 76 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 53ab3d3..54b9430 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -50,6 +50,32 @@ static void hvmtrace_io_assist(const ioreq_t *p)
     trace_var(event, 0/*!cycles*/, size, buffer);
 }
 
+static int null_read(const struct hvm_io_handler *io_handler,
+                     uint64_t addr,
+                     uint32_t size,
+                     uint64_t *data)
+{
+    *data = ~0ul;
+    return X86EMUL_OKAY;
+}
+
+static int null_write(const struct hvm_io_handler *handler,
+                      uint64_t addr,
+                      uint32_t size,
+                      uint64_t data)
+{
+    return X86EMUL_OKAY;
+}
+
+static const struct hvm_io_ops null_ops = {
+    .read = null_read,
+    .write = null_write
+};
+
+static const struct hvm_io_handler null_handler = {
+    .ops = &null_ops
+};
+
 static int hvmemul_do_io(
     bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
     uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
@@ -139,8 +165,7 @@ static int hvmemul_do_io(
         switch ( rc )
         {
         case X86EMUL_OKAY:
-            p.state = STATE_IORESP_READY;
-            hvm_io_assist(&p);
+            vio->io_data = p.data;
             vio->io_state = HVMIO_none;
             break;
         case X86EMUL_UNHANDLEABLE:
@@ -151,8 +176,9 @@ static int hvmemul_do_io(
             /* If there is no suitable backing DM, just ignore accesses */
             if ( !s )
             {
-                hvm_complete_assist_req(&p);
-                rc = X86EMUL_OKAY;
+                rc = hvm_process_io_intercept(&null_handler, &p);
+                if ( rc == X86EMUL_OKAY )
+                    vio->io_data = p.data;
                 vio->io_state = HVMIO_none;
             }
             else
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9bdc1d6..7358acf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -411,6 +411,42 @@ bool_t hvm_io_pending(struct vcpu *v)
     return 0;
 }
 
+static void hvm_io_assist(ioreq_t *p)
+{
+    struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+    enum hvm_io_state io_state;
+
+    p->state = STATE_IOREQ_NONE;
+
+    io_state = vio->io_state;
+    vio->io_state = HVMIO_none;
+
+    switch ( io_state )
+    {
+    case HVMIO_awaiting_completion:
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
+        break;
+    case HVMIO_handle_mmio_awaiting_completion:
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
+        (void)handle_mmio();
+        break;
+    case HVMIO_handle_pio_awaiting_completion:
+        if ( vio->io_size == 4 ) /* Needs zero extension. */
+            guest_cpu_user_regs()->rax = (uint32_t)p->data;
+        else
+            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
+        break;
+    default:
+        break;
+    }
+
+    msix_write_completion(curr);
+    vcpu_end_shutdown_deferral(curr);
+}
+
 static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 {
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
@@ -2663,37 +2699,6 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_complete_assist_req(ioreq_t *p)
-{
-    switch ( p->type )
-    {
-    case IOREQ_TYPE_PCI_CONFIG:
-        ASSERT_UNREACHABLE();
-        break;
-    case IOREQ_TYPE_COPY:
-    case IOREQ_TYPE_PIO:
-        if ( p->dir == IOREQ_READ )
-        {
-            if ( !p->data_is_ptr )
-                p->data = ~0ul;
-            else
-            {
-                int i, step = p->df ? -p->size : p->size;
-                uint32_t data = ~0;
-
-                for ( i = 0; i < p->count; i++ )
-                    hvm_copy_to_guest_phys(p->data + step * i, &data,
-                                           p->size);
-            }
-        }
-        /* FALLTHRU */
-    default:
-        p->state = STATE_IORESP_READY;
-        hvm_io_assist(p);
-        break;
-    }
-}
-
 void hvm_broadcast_assist_req(ioreq_t *p)
 {
     struct domain *d = current->domain;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 2c88ddb..fe099d8 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -169,45 +169,6 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
     return 1;
 }
 
-void hvm_io_assist(ioreq_t *p)
-{
-    struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    enum hvm_io_state io_state;
-
-    p->state = STATE_IOREQ_NONE;
-
-    io_state = vio->io_state;
-    vio->io_state = HVMIO_none;
-
-    switch ( io_state )
-    {
-    case HVMIO_awaiting_completion:
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        break;
-    case HVMIO_handle_mmio_awaiting_completion:
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        (void)handle_mmio();
-        break;
-    case HVMIO_handle_pio_awaiting_completion:
-        if ( vio->io_size == 4 ) /* Needs zero extension. */
-            guest_cpu_user_regs()->rax = (uint32_t)p->data;
-        else
-            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
-        break;
-    default:
-        break;
-    }
-
-    if ( p->state == STATE_IOREQ_NONE )
-    {
-        msix_write_completion(curr);
-        vcpu_end_shutdown_deferral(curr);
-    }
-}
-
 static bool_t dpci_portio_accept(const struct hvm_io_handler *handler,
                                  const ioreq_t *p)
 {
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1d1fd35..efb8e7d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -227,7 +227,6 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p);
 int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
 void hvm_broadcast_assist_req(ioreq_t *p);
-void hvm_complete_assist_req(ioreq_t *p);
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
 int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index d9e2447..508ec52 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -125,7 +125,6 @@ int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
                                  struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   const union vioapic_redir_entry *ent);
 void msix_write_completion(struct vcpu *);
-- 
1.7.10.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
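
As background for the null_handler introduced above: when no ioreq server backs
an access, the patch makes reads complete with all-ones data and makes writes
complete with no effect, by feeding the request through the generic intercept
code with these null ops. The standalone sketch below demonstrates just that
behaviour in plain C; the flattened handler signatures, the X86EMUL_OKAY value
and the port number are simplifications for illustration, not the real
hvm_io_ops interface.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for Xen's X86EMUL_OKAY; the value is illustrative. */
    #define X86EMUL_OKAY 0

    /* Mirrors null_read() in the patch: an unbacked read returns all-ones. */
    static int null_read(uint64_t addr, uint32_t size, uint64_t *data)
    {
        (void)addr;
        (void)size;
        *data = ~0ul;
        return X86EMUL_OKAY;
    }

    /* Mirrors null_write() in the patch: an unbacked write is discarded. */
    static int null_write(uint64_t addr, uint32_t size, uint64_t data)
    {
        (void)addr;
        (void)size;
        (void)data;
        return X86EMUL_OKAY;
    }

    int main(void)
    {
        uint64_t data = 0;

        /* A hypothetical 4-byte port access with no device model behind it. */
        null_read(0x300, 4, &data);
        printf("read  -> %#llx\n", (unsigned long long)data);

        null_write(0x300, 4, 0xdeadbeef);
        printf("write -> accepted and ignored\n");

        return 0;
    }

This mirrors what a read from an unclaimed port returns on real hardware, which
is why routing such accesses through hvm_process_io_intercept() with null ops
can stand in for the open-coded handling removed from hvm_complete_assist_req().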