diff -r 0f95fb74e19d xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c Tue Oct 25 12:44:49 2011 +0200
+++ b/xen/arch/x86/hvm/emulate.c Tue Oct 25 17:04:05 2011 +0200
@@ -56,6 +56,7 @@ static int hvmemul_do_io(
     paddr_t value = ram_gpa;
     int value_is_ptr = (p_data == NULL);
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio;
     ioreq_t *p = get_ioreq(curr);
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
@@ -91,43 +92,45 @@ static int hvmemul_do_io(
         p_data = NULL;
     }
 
+    vio = &curr->arch.hvm_vcpu.hvm_io;
+
     if ( is_mmio && !value_is_ptr )
     {
         /* Part of a multi-cycle read or write? */
         if ( dir == IOREQ_WRITE )
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
+            paddr_t pa = vio->mmio_large_write_pa;
+            unsigned int bytes = vio->mmio_large_write_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
                 return X86EMUL_OKAY;
         }
         else
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
+            paddr_t pa = vio->mmio_large_read_pa;
+            unsigned int bytes = vio->mmio_large_read_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
             {
-                memcpy(p_data, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
+                memcpy(p_data, &vio->mmio_large_read[addr - pa],
                        size);
                 return X86EMUL_OKAY;
             }
         }
     }
 
-    switch ( curr->arch.hvm_vcpu.io_state )
+    switch ( vio->io_state )
     {
     case HVMIO_none:
         break;
     case HVMIO_completed:
-        curr->arch.hvm_vcpu.io_state = HVMIO_none;
+        vio->io_state = HVMIO_none;
         if ( p_data == NULL )
             return X86EMUL_UNHANDLEABLE;
         goto finish_access;
     case HVMIO_dispatched:
         /* May have to wait for previous cycle of a multi-write to complete. */
         if ( is_mmio && !value_is_ptr && (dir == IOREQ_WRITE) &&
-             (addr == (curr->arch.hvm_vcpu.mmio_large_write_pa +
-                       curr->arch.hvm_vcpu.mmio_large_write_bytes)) )
+             (addr == (vio->mmio_large_write_pa +
+                       vio->mmio_large_write_bytes)) )
             return X86EMUL_RETRY;
     default:
         return X86EMUL_UNHANDLEABLE;
@@ -140,9 +143,9 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    curr->arch.hvm_vcpu.io_state =
+    vio->io_state =
         (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
-    curr->arch.hvm_vcpu.io_size = size;
+    vio->io_size = size;
 
     p->dir = dir;
     p->data_is_ptr = value_is_ptr;
@@ -173,12 +176,12 @@ static int hvmemul_do_io(
         *reps = p->count;
         p->state = STATE_IORESP_READY;
         hvm_io_assist();
-        curr->arch.hvm_vcpu.io_state = HVMIO_none;
+        vio->io_state = HVMIO_none;
         break;
     case X86EMUL_UNHANDLEABLE:
         rc = X86EMUL_RETRY;
         if ( !hvm_send_assist_req(curr) )
-            curr->arch.hvm_vcpu.io_state = HVMIO_none;
+            vio->io_state = HVMIO_none;
         else if ( p_data == NULL )
             rc = X86EMUL_OKAY;
         break;
@@ -191,33 +194,32 @@ static int hvmemul_do_io(
 
  finish_access:
     if ( p_data != NULL )
-        memcpy(p_data, &curr->arch.hvm_vcpu.io_data, size);
+        memcpy(p_data, &vio->io_data, size);
 
     if ( is_mmio && !value_is_ptr )
     {
         /* Part of a multi-cycle read or write? */
         if ( dir == IOREQ_WRITE )
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
+            paddr_t pa = vio->mmio_large_write_pa;
+            unsigned int bytes = vio->mmio_large_write_bytes;
             if ( bytes == 0 )
-                pa = curr->arch.hvm_vcpu.mmio_large_write_pa = addr;
+                pa = vio->mmio_large_write_pa = addr;
             if ( addr == (pa + bytes) )
-                curr->arch.hvm_vcpu.mmio_large_write_bytes += size;
+                vio->mmio_large_write_bytes += size;
         }
         else
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
+            paddr_t pa = vio->mmio_large_read_pa;
+            unsigned int bytes = vio->mmio_large_read_bytes;
             if ( bytes == 0 )
-                pa = curr->arch.hvm_vcpu.mmio_large_read_pa = addr;
+                pa = vio->mmio_large_read_pa = addr;
             if ( (addr == (pa + bytes)) && ((bytes + size) <
-                 sizeof(curr->arch.hvm_vcpu.mmio_large_read)) )
+                 sizeof(vio->mmio_large_read)) )
             {
-                memcpy(&curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
-                       p_data, size);
-                curr->arch.hvm_vcpu.mmio_large_read_bytes += size;
+                memcpy(&vio->mmio_large_read[addr - pa], p_data, size);
+                vio->mmio_large_read_bytes += size;
             }
         }
     }
 
@@ -401,6 +403,7 @@ static int __hvmemul_read(
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
@@ -409,13 +412,12 @@ static int __hvmemul_read(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
-         curr->arch.hvm_vcpu.mmio_gva )
+    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
     {
         unsigned int off = addr & (PAGE_SIZE - 1);
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
-        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         if ( (off + bytes) <= PAGE_SIZE )
             return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0,
                                    p_data);
@@ -500,6 +502,7 @@ static int hvmemul_write(
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
@@ -508,11 +511,10 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
-         curr->arch.hvm_vcpu.mmio_gva )
+    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
     {
         unsigned int off = addr & (PAGE_SIZE - 1);
-        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         if ( (off + bytes) <= PAGE_SIZE )
             return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_WRITE, 0,
                                    p_data);
@@ -530,7 +532,7 @@ static int hvmemul_write(
         return X86EMUL_EXCEPTION;
     case HVMCOPY_unhandleable:
         return X86EMUL_UNHANDLEABLE;
-    case HVMCOPY_bad_gfn_to_mfn: 
+    case HVMCOPY_bad_gfn_to_mfn:
         rc = hvmemul_linear_to_phys(
             addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
@@ -1011,6 +1013,7 @@ int hvm_emulate_one(
     struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
     struct vcpu *curr = current;
    uint32_t new_intr_shadow, pfec = PFEC_page_present;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     unsigned long addr;
     int rc;
 
@@ -1048,8 +1051,7 @@ int hvm_emulate_one(
     rc = x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
 
     if ( rc != X86EMUL_RETRY )
-        curr->arch.hvm_vcpu.mmio_large_read_bytes =
-            curr->arch.hvm_vcpu.mmio_large_write_bytes = 0;
+        vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
 
     if ( rc != X86EMUL_OKAY )
         return rc;
diff -r 0f95fb74e19d xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c Tue Oct 25 12:44:49 2011 +0200
+++ b/xen/arch/x86/hvm/io.c Tue Oct 25 17:04:05 2011 +0200
@@ -170,28 +171,31 @@ int handle_mmio(void)
 {
     struct hvm_emulate_ctxt ctxt;
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     int rc;
 
     hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
 
     rc = hvm_emulate_one(&ctxt);
 
-    if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
-        curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+    if ( vio->io_state == HVMIO_awaiting_completion )
+        vio->io_state = HVMIO_handle_mmio_awaiting_completion;
     else
-        curr->arch.hvm_vcpu.mmio_gva = 0;
+        vio->mmio_gva = 0;
 
     switch ( rc )
     {
     case X86EMUL_UNHANDLEABLE:
         gdprintk(XENLOG_WARNING,
                  "MMIO emulation failed @ %04x:%lx: "
-                 "%02x %02x %02x %02x %02x %02x\n",
+                 "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
                  hvmemul_get_seg_reg(x86_seg_cs, &ctxt)->sel,
                  ctxt.insn_buf_eip,
                  ctxt.insn_buf[0], ctxt.insn_buf[1],
                  ctxt.insn_buf[2], ctxt.insn_buf[3],
-                 ctxt.insn_buf[4], ctxt.insn_buf[5]);
+                 ctxt.insn_buf[4], ctxt.insn_buf[5],
+                 ctxt.insn_buf[6], ctxt.insn_buf[7],
+                 ctxt.insn_buf[8], ctxt.insn_buf[9]);
         return 0;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
@@ -208,14 +212,16 @@ int handle_mmio(void)
 
 int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
 {
-    current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK;
-    current->arch.hvm_vcpu.mmio_gpfn = gpfn;
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+    vio->mmio_gva = gva & PAGE_MASK;
+    vio->mmio_gpfn = gpfn;
     return handle_mmio();
 }
 
 int handle_pio(uint16_t port, int size, int dir)
 {
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     unsigned long data, reps = 1;
     int rc;
 
@@ -228,15 +234,14 @@ int handle_pio(uint16_t port, int size,
     {
     case X86EMUL_OKAY:
         if ( dir == IOREQ_READ )
-            memcpy(&guest_cpu_user_regs()->eax,
-                   &data, curr->arch.hvm_vcpu.io_size);
+            memcpy(&guest_cpu_user_regs()->eax, &data, vio->io_size);
         break;
     case X86EMUL_RETRY:
-        if ( curr->arch.hvm_vcpu.io_state != HVMIO_awaiting_completion )
+        if ( vio->io_state != HVMIO_awaiting_completion )
             return 0;
         /* Completion in hvm_io_assist() with no re-emulation required. */
         ASSERT(dir == IOREQ_READ);
-        curr->arch.hvm_vcpu.io_state = HVMIO_handle_pio_awaiting_completion;
+        vio->io_state = HVMIO_handle_pio_awaiting_completion;
         break;
     default:
         gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
@@ -250,6 +255,7 @@ int handle_pio(uint16_t port, int size,
 void hvm_io_assist(void)
 {
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     ioreq_t *p = get_ioreq(curr);
     enum hvm_io_state io_state;
 
@@ -257,23 +263,23 @@ void hvm_io_assist(void)
 
     p->state = STATE_IOREQ_NONE;
 
-    io_state = curr->arch.hvm_vcpu.io_state;
-    curr->arch.hvm_vcpu.io_state = HVMIO_none;
+    io_state = vio->io_state;
+    vio->io_state = HVMIO_none;
 
     switch ( io_state )
     {
     case HVMIO_awaiting_completion:
-        curr->arch.hvm_vcpu.io_state = HVMIO_completed;
-        curr->arch.hvm_vcpu.io_data = p->data;
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
         break;
     case HVMIO_handle_mmio_awaiting_completion:
-        curr->arch.hvm_vcpu.io_state = HVMIO_completed;
-        curr->arch.hvm_vcpu.io_data = p->data;
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
         (void)handle_mmio();
         break;
     case HVMIO_handle_pio_awaiting_completion:
         memcpy(&guest_cpu_user_regs()->eax,
-               &p->data, curr->arch.hvm_vcpu.io_size);
+               &p->data, vio->io_size);
         break;
     default:
         break;
diff -r 0f95fb74e19d xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c Tue Oct 25 12:44:49 2011 +0200
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c Tue Oct 25 17:04:05 2011 +0200
@@ -1168,7 +1168,7 @@ enum hvm_intblk nsvm_intr_blocked(struct
          * Delay the injection because this would result in delivering
          * an interrupt *within* the execution of an instruction.
          */
-        if ( v->arch.hvm_vcpu.io_state != HVMIO_none )
+        if ( v->arch.hvm_vcpu.hvm_io.io_state != HVMIO_none )
            return hvm_intblk_shadow;
     }
 
diff -r 0f95fb74e19d xen/arch/x86/hvm/vmx/realmode.c
--- a/xen/arch/x86/hvm/vmx/realmode.c Tue Oct 25 12:44:49 2011 +0200
+++ b/xen/arch/x86/hvm/vmx/realmode.c Tue Oct 25 17:04:05 2011 +0200
@@ -172,6 +172,7 @@ void vmx_realmode(struct cpu_user_regs *
     struct vcpu *curr = current;
     struct hvm_emulate_ctxt hvmemul_ctxt;
     struct segment_register *sreg;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     unsigned long intr_info;
     unsigned int emulations = 0;
 
@@ -182,7 +183,7 @@ void vmx_realmode(struct cpu_user_regs *
 
     hvm_emulate_prepare(&hvmemul_ctxt, regs);
 
-    if ( curr->arch.hvm_vcpu.io_state == HVMIO_completed )
+    if ( vio->io_state == HVMIO_completed )
         realmode_emulate_one(&hvmemul_ctxt);
 
     /* Only deliver interrupts into emulated real mode. */
@@ -196,7 +197,7 @@ void vmx_realmode(struct cpu_user_regs *
     curr->arch.hvm_vmx.vmx_emulate = 1;
     while ( curr->arch.hvm_vmx.vmx_emulate &&
             !softirq_pending(smp_processor_id()) &&
-            (curr->arch.hvm_vcpu.io_state == HVMIO_none) )
+            (vio->io_state == HVMIO_none) )
     {
         /*
          * Check for pending interrupts only every 16 instructions, because
@@ -221,7 +222,7 @@ void vmx_realmode(struct cpu_user_regs *
     }
 
     /* Need to emulate next time if we've started an IO operation */
-    if ( curr->arch.hvm_vcpu.io_state != HVMIO_none )
+    if ( vio->io_state != HVMIO_none )
         curr->arch.hvm_vmx.vmx_emulate = 1;
 
     if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
diff -r 0f95fb74e19d xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Tue Oct 25 12:44:49 2011 +0200
+++ b/xen/include/asm-x86/hvm/vcpu.h Tue Oct 25 17:04:05 2011 +0200
@@ -44,6 +44,30 @@ struct hvm_vcpu_asid {
     uint32_t asid;
 };
 
+struct hvm_vcpu_io {
+    /* I/O request in flight to device model. */
+    enum hvm_io_state io_state;
+    unsigned long io_data;
+    int io_size;
+
+    /*
+     * HVM emulation:
+     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+     *  The latter is known to be an MMIO frame (not RAM).
+     *  This translation is only valid if @mmio_gva is non-zero.
+     */
+    unsigned long mmio_gva;
+    unsigned long mmio_gpfn;
+
+    /* We may read up to m128 as a number of device-model transactions. */
+    paddr_t mmio_large_read_pa;
+    uint8_t mmio_large_read[16];
+    unsigned int mmio_large_read_bytes;
+    /* We may write up to m128 as a number of device-model transactions. */
+    paddr_t mmio_large_write_pa;
+    unsigned int mmio_large_write_bytes;
+};
+
 #define VMCX_EADDR (~0ULL)
 
 struct nestedvcpu {
@@ -137,31 +161,11 @@ struct hvm_vcpu {
     /* Which cache mode is this VCPU in (CR0:CD/NW)? */
     u8 cache_mode;
 
-    /* I/O request in flight to device model. */
-    enum hvm_io_state io_state;
-    unsigned long io_data;
-    int io_size;
-
-    /*
-     * HVM emulation:
-     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
-     *  The latter is known to be an MMIO frame (not RAM).
-     *  This translation is only valid if @mmio_gva is non-zero.
-     */
-    unsigned long mmio_gva;
-    unsigned long mmio_gpfn;
+    struct hvm_vcpu_io hvm_io;
 
     /* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
     void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
     void *fpu_exception_callback_arg;
-    /* We may read up to m128 as a number of device-model transactions. */
-    paddr_t mmio_large_read_pa;
-    uint8_t mmio_large_read[16];
-    unsigned int mmio_large_read_bytes;
-    /* We may write up to m128 as a number of device-model transactions. */
-    paddr_t mmio_large_write_pa;
-    unsigned int mmio_large_write_bytes;
-
     /* Pending hw/sw interrupt */
     int inject_trap; /* -1 for nothing to inject */
     int inject_error_code;