[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] x86_emulate: On HVM MMIO emulation, cache the gva->pfn mapping for the
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1206615174 0 # Node ID ea93383019c8cc607fd128b40701e426dd264903 # Parent ed67f68ae2a7f204b6a2d146e6f4037bd2893438 x86_emulate: On HVM MMIO emulation, cache the gva->pfn mapping for the MMIO page. Speeds up Windows installation by about 20 percent. Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx> --- xen/arch/x86/hvm/emulate.c | 27 +++++++++++++++++++++++++-- xen/arch/x86/hvm/io.c | 11 ++++++++++- xen/arch/x86/mm/shadow/multi.c | 6 ++++-- xen/include/asm-x86/hvm/io.h | 1 + xen/include/asm-x86/hvm/vcpu.h | 9 +++++++++ 5 files changed, 49 insertions(+), 5 deletions(-) diff -r ed67f68ae2a7 -r ea93383019c8 xen/arch/x86/hvm/emulate.c --- a/xen/arch/x86/hvm/emulate.c Thu Mar 27 09:12:09 2008 +0000 +++ b/xen/arch/x86/hvm/emulate.c Thu Mar 27 10:52:54 2008 +0000 @@ -214,7 +214,9 @@ static int __hvmemul_read( enum hvm_access_type access_type, struct hvm_emulate_ctxt *hvmemul_ctxt) { + struct vcpu *curr = current; unsigned long addr; + paddr_t gpa; int rc; rc = hvmemul_virtual_to_linear( @@ -223,6 +225,17 @@ static int __hvmemul_read( return rc; *val = 0; + + if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) && + curr->arch.hvm_vcpu.mmio_gva ) + { + unsigned int off = addr & (PAGE_SIZE - 1); + if ( access_type == hvm_access_insn_fetch ) + return X86EMUL_UNHANDLEABLE; + gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off); + if ( (off + bytes) <= PAGE_SIZE ) + return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val); + } rc = ((access_type == hvm_access_insn_fetch) ? 
hvm_fetch_from_guest_virt(val, addr, bytes) : @@ -233,7 +246,6 @@ static int __hvmemul_read( if ( rc == HVMCOPY_bad_gfn_to_mfn ) { unsigned long reps = 1; - paddr_t gpa; if ( access_type == hvm_access_insn_fetch ) return X86EMUL_UNHANDLEABLE; @@ -293,13 +305,25 @@ static int hvmemul_write( { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); + struct vcpu *curr = current; unsigned long addr; + paddr_t gpa; int rc; rc = hvmemul_virtual_to_linear( seg, offset, bytes, hvm_access_write, hvmemul_ctxt, &addr); if ( rc != X86EMUL_OKAY ) return rc; + + if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) && + curr->arch.hvm_vcpu.mmio_gva ) + { + unsigned int off = addr & (PAGE_SIZE - 1); + gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off); + if ( (off + bytes) <= PAGE_SIZE ) + return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE, + 0, 0, NULL); + } rc = hvm_copy_to_guest_virt(addr, &val, bytes); if ( rc == HVMCOPY_bad_gva_to_gfn ) @@ -308,7 +332,6 @@ static int hvmemul_write( if ( rc == HVMCOPY_bad_gfn_to_mfn ) { unsigned long reps = 1; - paddr_t gpa; rc = hvmemul_linear_to_phys( addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt); diff -r ed67f68ae2a7 -r ea93383019c8 xen/arch/x86/hvm/io.c --- a/xen/arch/x86/hvm/io.c Thu Mar 27 09:12:09 2008 +0000 +++ b/xen/arch/x86/hvm/io.c Thu Mar 27 10:52:54 2008 +0000 @@ -183,7 +183,9 @@ int handle_mmio(void) rc = hvm_emulate_one(&ctxt); if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion ) - curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion; + curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion; + else + curr->arch.hvm_vcpu.mmio_gva = 0; switch ( rc ) { @@ -210,6 +212,13 @@ int handle_mmio(void) return 1; } +int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn) +{ + current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK; + current->arch.hvm_vcpu.mmio_gpfn = gpfn; + return handle_mmio(); +} + void 
hvm_io_assist(void) { struct vcpu *v = current; diff -r ed67f68ae2a7 -r ea93383019c8 xen/arch/x86/mm/shadow/multi.c --- a/xen/arch/x86/mm/shadow/multi.c Thu Mar 27 09:12:09 2008 +0000 +++ b/xen/arch/x86/mm/shadow/multi.c Thu Mar 27 10:52:54 2008 +0000 @@ -2881,7 +2881,8 @@ static int sh_page_fault(struct vcpu *v, perfc_incr(shadow_fault_fast_mmio); SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa); reset_early_unshadow(v); - return handle_mmio() ? EXCRET_fault_fixed : 0; + return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT) + ? EXCRET_fault_fixed : 0); } else { @@ -3199,7 +3200,8 @@ static int sh_page_fault(struct vcpu *v, shadow_audit_tables(v); reset_early_unshadow(v); shadow_unlock(d); - return handle_mmio() ? EXCRET_fault_fixed : 0; + return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT) + ? EXCRET_fault_fixed : 0); not_a_shadow_fault: sh_audit_gw(v, &gw); diff -r ed67f68ae2a7 -r ea93383019c8 xen/include/asm-x86/hvm/io.h --- a/xen/include/asm-x86/hvm/io.h Thu Mar 27 09:12:09 2008 +0000 +++ b/xen/include/asm-x86/hvm/io.h Thu Mar 27 10:52:54 2008 +0000 @@ -99,6 +99,7 @@ void send_timeoffset_req(unsigned long t void send_timeoffset_req(unsigned long timeoff); void send_invalidate_req(void); int handle_mmio(void); +int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn); void hvm_interrupt_post(struct vcpu *v, int vector, int type); void hvm_io_assist(void); void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq, diff -r ed67f68ae2a7 -r ea93383019c8 xen/include/asm-x86/hvm/vcpu.h --- a/xen/include/asm-x86/hvm/vcpu.h Thu Mar 27 09:12:09 2008 +0000 +++ b/xen/include/asm-x86/hvm/vcpu.h Thu Mar 27 10:52:54 2008 +0000 @@ -80,6 +80,15 @@ struct hvm_vcpu { /* I/O request in flight to device model. */ enum hvm_io_state io_state; unsigned long io_data; + + /* + * HVM emulation: + * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn. + * The latter is known to be an MMIO frame (not RAM). 
+ * This translation is only valid if @mmio_gva is non-zero. + */ + unsigned long mmio_gva; + unsigned long mmio_gpfn; }; #endif /* __ASM_X86_HVM_VCPU_H__ */ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®. |