[Xen-devel] [PATCH v6 09/16] x86/hvm: limit reps to avoid the need to handle retry
By limiting hvmemul_do_io_addr() to reps falling within the page on which a
reference has already been taken, we can guarantee that calls to
hvm_copy_to/from_guest_phys() will not hit the HVMCOPY_gfn_paged_out or
HVMCOPY_gfn_shared cases. Thus we can remove the retry logic (added by
c/s 82ed8716b "fix direct PCI port I/O emulation retry and error handling")
from the intercept code and simplify it significantly.

Normally hvmemul_do_io_addr() will only reference a single page at a time.
It will, however, take an extra page reference for I/O spanning a page
boundary.

It is still important to know, upon returning from x86_emulate(), whether
the number of reps was reduced, so the mmio_retry flag is retained for that
purpose.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c     | 76 ++++++++++++++++++++++++++--------------
 xen/arch/x86/hvm/hvm.c         |  4 +++
 xen/arch/x86/hvm/intercept.c   | 57 +++++++-----------------------
 xen/include/asm-x86/hvm/vcpu.h |  2 +-
 4 files changed, 66 insertions(+), 73 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 1afd626..c80c488 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -51,7 +51,7 @@ static void hvmtrace_io_assist(const ioreq_t *p)
 }
 
 static int hvmemul_do_io(
-    bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
+    bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
     uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
 {
     struct vcpu *curr = current;
@@ -60,6 +60,7 @@ static int hvmemul_do_io(
         .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
         .addr = addr,
         .size = size,
+        .count = reps,
         .dir = dir,
         .df = df,
         .data = data,
@@ -125,15 +126,6 @@ static int hvmemul_do_io(
                        HVMIO_dispatched : HVMIO_awaiting_completion;
     vio->io_size = size;
 
-    /*
-     * When retrying a repeated string instruction, force exit to guest after
-     * completion of the retried iteration to allow handling of interrupts.
-     */
-    if ( vio->mmio_retrying )
-        *reps = 1;
-
-    p.count = *reps;
-
     if ( dir == IOREQ_WRITE )
     {
         if ( !data_is_addr )
@@ -147,17 +139,9 @@ static int hvmemul_do_io(
     switch ( rc )
     {
     case X86EMUL_OKAY:
-    case X86EMUL_RETRY:
-        *reps = p.count;
         p.state = STATE_IORESP_READY;
-        if ( !vio->mmio_retry )
-        {
-            hvm_io_assist(&p);
-            vio->io_state = HVMIO_none;
-        }
-        else
-            /* Defer hvm_io_assist() invocation to hvm_do_resume(). */
-            vio->io_state = HVMIO_handle_mmio_awaiting_completion;
+        hvm_io_assist(&p);
+        vio->io_state = HVMIO_none;
         break;
     case X86EMUL_UNHANDLEABLE:
     {
@@ -235,7 +219,7 @@ static int hvmemul_do_io_buffer(
 
     BUG_ON(buffer == NULL);
 
-    rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 0,
+    rc = hvmemul_do_io(is_mmio, addr, *reps, size, dir, df, 0,
                        (uintptr_t)buffer);
     if ( rc == X86EMUL_UNHANDLEABLE && dir == IOREQ_READ )
         memset(buffer, 0xff, size);
@@ -287,17 +271,56 @@ static int hvmemul_do_io_addr(
     bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
     uint8_t dir, bool_t df, paddr_t ram_gpa)
 {
-    struct page_info *ram_page;
+    struct vcpu *v = current;
+    unsigned long ram_gmfn = paddr_to_pfn(ram_gpa);
+    unsigned int page_off = ram_gpa & (PAGE_SIZE - 1);
+    struct page_info *ram_page[2];
+    int nr_pages = 0;
+    unsigned long count;
     int rc;
 
-    rc = hvmemul_acquire_page(paddr_to_pfn(ram_gpa), &ram_page);
+    rc = hvmemul_acquire_page(ram_gmfn, &ram_page[nr_pages]);
     if ( rc != X86EMUL_OKAY )
-        return rc;
+        goto out;
 
-    rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 1,
+    nr_pages++;
+
+    /* Determine how many reps will fit within this page */
+    count = min_t(unsigned long,
+                  *reps,
+                  df ?
+                  (page_off + size - 1) / size :
+                  (PAGE_SIZE - page_off) / size);
+
+    if ( count == 0 )
+    {
+        /*
+         * This access must span two pages, so grab a reference to
+         * the next page and do a single rep.
+         * It is safe to assume multiple pages are physically
+         * contiguous at this point as hvmemul_linear_to_phys() will
+         * ensure this is the case.
+         */
+        rc = hvmemul_acquire_page(df ? ram_gmfn - 1 : ram_gmfn + 1,
+                                  &ram_page[nr_pages]);
+        if ( rc != X86EMUL_OKAY )
+            goto out;
+
+        nr_pages++;
+        count = 1;
+    }
+
+    rc = hvmemul_do_io(is_mmio, addr, count, size, dir, df, 1,
                        ram_gpa);
 
+    if ( rc == X86EMUL_OKAY )
+    {
+        v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
+        *reps = count;
+    }
-    hvmemul_release_page(ram_page);
+ out:
+    while ( --nr_pages >= 0 )
+        hvmemul_release_page(ram_page[nr_pages]);
 
     return rc;
 }
@@ -1547,7 +1570,6 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
     }
 
     hvmemul_ctxt->exn_pending = 0;
-    vio->mmio_retrying = vio->mmio_retry;
     vio->mmio_retry = 0;
 
     if ( cpu_has_vmx )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c25b001..810e579 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -440,6 +440,7 @@ static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 
 void hvm_do_resume(struct vcpu *v)
 {
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s;
 
@@ -468,6 +469,9 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+    if ( vio->mmio_retry )
+        (void)handle_mmio();
+
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 78e2f57..20bb4ea 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -123,8 +123,6 @@ static const struct hvm_io_ops portio_ops = {
 int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                              ioreq_t *p)
 {
-    struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     const struct hvm_io_ops *ops = handler->ops;
     int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
     uint64_t data;
@@ -134,23 +132,12 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
     {
         for ( i = 0; i < p->count; i++ )
         {
-            if ( vio->mmio_retrying )
-            {
-                if ( vio->mmio_large_read_bytes != p->size )
-                    return X86EMUL_UNHANDLEABLE;
-                memcpy(&data, vio->mmio_large_read, p->size);
-                vio->mmio_large_read_bytes = 0;
-                vio->mmio_retrying = 0;
-            }
-            else
-            {
-                addr = (p->type == IOREQ_TYPE_COPY) ?
-                       p->addr + step * i :
-                       p->addr;
-                rc = ops->read(handler, addr, p->size, &data);
-                if ( rc != X86EMUL_OKAY )
-                    break;
-            }
+            addr = (p->type == IOREQ_TYPE_COPY) ?
+                   p->addr + step * i :
+                   p->addr;
+            rc = ops->read(handler, addr, p->size, &data);
+            if ( rc != X86EMUL_OKAY )
+                break;
 
             if ( p->data_is_ptr )
             {
@@ -159,14 +146,12 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                 {
                 case HVMCOPY_okay:
                     break;
-                case HVMCOPY_gfn_paged_out:
-                case HVMCOPY_gfn_shared:
-                    rc = X86EMUL_RETRY;
-                    break;
                 case HVMCOPY_bad_gfn_to_mfn:
                     /* Drop the write as real hardware would. */
                     continue;
                 case HVMCOPY_bad_gva_to_gfn:
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
@@ -179,13 +164,6 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
             else
                 p->data = data;
         }
-
-        if ( rc == X86EMUL_RETRY )
-        {
-            vio->mmio_retry = 1;
-            vio->mmio_large_read_bytes = p->size;
-            memcpy(vio->mmio_large_read, &data, p->size);
-        }
     }
     else /* p->dir == IOREQ_WRITE */
     {
@@ -198,14 +176,12 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                 {
                 case HVMCOPY_okay:
                     break;
-                case HVMCOPY_gfn_paged_out:
-                case HVMCOPY_gfn_shared:
-                    rc = X86EMUL_RETRY;
-                    break;
                 case HVMCOPY_bad_gfn_to_mfn:
                     data = ~0;
                     break;
                 case HVMCOPY_bad_gva_to_gfn:
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
@@ -225,19 +201,10 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
             if ( rc != X86EMUL_OKAY )
                 break;
         }
-
-        if ( rc == X86EMUL_RETRY )
-            vio->mmio_retry = 1;
     }
 
-    if ( i != 0 )
-    {
-        if ( rc == X86EMUL_UNHANDLEABLE )
-            domain_crash(curr->domain);
-
-        p->count = i;
-        rc = X86EMUL_OKAY;
-    }
+    if ( i != 0 && rc == X86EMUL_UNHANDLEABLE )
+        domain_crash(current->domain);
 
     return rc;
 }
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index b15c1e6..2ed0606 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -74,7 +74,7 @@ struct hvm_vcpu_io {
      * For string instruction emulation we need to be able to signal a
      * necessary retry through other than function return codes.
      */
-    bool_t mmio_retry, mmio_retrying;
+    bool_t mmio_retry;
 
     unsigned long msix_unmask_address;
 
-- 
1.7.10.4
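For readers following the arithmetic, the sketch below isolates the rep-clamping
calculation that the patch adds to hvmemul_do_io_addr(). It is a standalone
illustration, not Xen code: PAGE_SIZE, the clamp_reps() helper and the sample
values in main() are assumptions chosen for demonstration; only the two
df-dependent expressions mirror the patch's min_t() computation.

/*
 * Minimal sketch of the rep clamping done in hvmemul_do_io_addr().
 * PAGE_SIZE, clamp_reps() and the test values are illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL

/*
 * Clamp 'reps' so that every element stays within the page containing 'gpa'.
 * A result of 0 means the first element itself crosses a page boundary; the
 * patch handles that case by referencing the adjacent page and doing one rep.
 */
static unsigned long clamp_reps(unsigned long gpa, unsigned int size,
                                bool df, unsigned long reps)
{
    unsigned long page_off = gpa & (PAGE_SIZE - 1);
    unsigned long fit;

    if ( df )
        /* Decrementing string op: elements walk towards lower addresses. */
        fit = (page_off + size - 1) / size;
    else
        /* Incrementing string op: elements walk towards higher addresses. */
        fit = (PAGE_SIZE - page_off) / size;

    return reps < fit ? reps : fit;
}

int main(void)
{
    /* 4-byte elements starting 8 bytes before the end of a page, forwards:
     * only 2 of the requested 16 reps fit within the page. */
    printf("%lu\n", clamp_reps(0x12000 - 8, 4, false, 16));   /* prints 2 */

    /* Same start, backwards (df set): all 16 reps fit below the offset. */
    printf("%lu\n", clamp_reps(0x12000 - 8, 4, true, 16));    /* prints 16 */

    /* A 4-byte element straddling the page boundary: 0 reps fit, so the
     * adjacent page must be referenced and a single rep performed. */
    printf("%lu\n", clamp_reps(0x12000 - 2, 4, false, 16));   /* prints 0 */

    return 0;
}

When the clamped count is smaller than the requested reps, the patch records
that fact in mmio_retry so that hvm_do_resume() re-enters handle_mmio() to
emulate the remaining iterations.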