
Re: [Xen-devel] [PATCH 5/5] x86/HVM: cache emulated instruction for retry processing



Jan Beulich wrote on 2013-09-30:
> Rather than re-reading the instruction bytes upon retry processing, 
> stash away and re-use what we already read. That way we can be 
> certain that the retry won't do something different from what 
> requested the retry, getting once again closer to real hardware 
> behavior (where what we use retries for is simply a bus operation, not 
> involving redundant decoding of instructions).
> 

This patch doesn't consider the nested case. For example, if the buffer
saved an L2 instruction and a virtual vmexit to L1 happens before the
retry completes, the retried emulation runs in L1 context but replays the
cached L2 bytes, so L1 may use the wrong instruction.
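
To make the failure sequence concrete, here is a minimal host-compilable
sketch (not the real emulator path): the struct only mirrors the
mmio_insn fields added by the patch, fetch() stands in for the
instruction-fetch logic in hvm_emulate_one(), and the byte values are
arbitrary placeholders.

#include <assert.h>
#include <string.h>

struct vio_model {                     /* mirrors the new hvm_vcpu_io fields */
    unsigned int  mmio_insn_bytes;     /* 0 => cache empty */
    unsigned char mmio_insn[16];
};

/* Stand-in for the fetch logic in hvm_emulate_one(): reuse the cache
 * whenever it is non-empty, with no record of which context filled it. */
static void fetch(struct vio_model *vio, const unsigned char *guest_code,
                  unsigned char *buf, unsigned int len)
{
    if ( vio->mmio_insn_bytes )
    {
        memcpy(buf, vio->mmio_insn, vio->mmio_insn_bytes);
        return;
    }
    memcpy(buf, guest_code, len);
    memcpy(vio->mmio_insn, guest_code, len); /* cached as on a RETRY exit */
    vio->mmio_insn_bytes = len;
}

int main(void)
{
    static const unsigned char l2_insn[3] = { 0xa5, 0xa5, 0xa5 }; /* placeholder */
    static const unsigned char l1_insn[3] = { 0x5a, 0x5a, 0x5a }; /* placeholder */
    struct vio_model vio = { 0, { 0 } };
    unsigned char buf[16];

    fetch(&vio, l2_insn, buf, 3);      /* L2 hits MMIO, exits with RETRY */
    /* ... a virtual vmexit switches the vCPU to L1 here ... */
    fetch(&vio, l1_insn, buf, 3);      /* L1 now needs emulation too */
    assert(memcmp(buf, l2_insn, 3) == 0); /* L1 got L2's stale bytes */
    return 0;
}

The final assert passes, i.e. L1 really does see L2's bytes.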

There are two ways to fix it, but I am not sure which one is better.

One is to record the instruction's eip and to check, when reading from the
buffer, that the eip is still the same:

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f39c173..738eaca 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1158,7 +1158,8 @@ int hvm_emulate_one(
         pfec |= PFEC_user_mode;
 
     hvmemul_ctxt->insn_buf_eip = regs->eip;
-    if ( !vio->mmio_insn_bytes )
+    if ( !vio->mmio_insn_bytes || 
+         (vio->mmio_insn_eip != hvmemul_ctxt->insn_buf_eip) )
     {
         hvmemul_ctxt->insn_buf_bytes =
             hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
@@ -1192,12 +1193,14 @@ int hvm_emulate_one(
     {
         vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
         vio->mmio_insn_bytes = 0;
+        vio->mmio_insn_eip = 0;
     }
     else
     {
         BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
         vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
         memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
+        vio->mmio_insn_eip = hvmemul_ctxt->insn_buf_eip;
     }
 
     if ( rc != X86EMUL_OKAY )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 258e5d0..01c5dc4 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -69,6 +69,7 @@ struct hvm_vcpu_io {
     /* For retries we shouldn't re-fetch the instruction. */
     unsigned int mmio_insn_bytes;
     unsigned char mmio_insn[16];
+    unsigned long mmio_insn_eip;
     /*
      * For string instruction emulation we need to be able to signal a
      * necessary retry through other than function return codes.


The other is to clear the buffer when a virtual vmentry or a virtual vmexit happens:

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 8617a7b..822b92f 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1136,12 +1136,14 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     void *vvmcs = nvcpu->nv_vvmcx;
     unsigned long lm_l1, lm_l2;
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
 
     vmx_vmcs_switch(v->arch.hvm_vmx.vmcs, nvcpu->nv_n2vmcx);
 
     nestedhvm_vcpu_enter_guestmode(v);
     nvcpu->nv_vmentry_pending = 0;
     nvcpu->nv_vmswitch_in_progress = 1;
+    vio->mmio_insn_bytes = 0;
 
     /*
      * EFER handling:
@@ -1331,6 +1333,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
     struct vcpu *v = current;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long lm_l1, lm_l2;
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
 
     sync_vvmcs_ro(v);
     sync_vvmcs_guest_state(v, regs);
@@ -1346,6 +1349,8 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
     nvcpu->nv_vmexit_pending = 0;
     nvcpu->nv_vmswitch_in_progress = 1;
 
+    vio->mmio_insn_bytes = 0;
+
     lm_l2 = !!hvm_long_mode_enabled(v);
     lm_l1 = !!(__get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_CONTROLS) &
                            VM_EXIT_IA32E_MODE);
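
As an aside on choosing between the two: the eip check in the first
variant is cheap, but an eip value alone is not unique across the L1 and
L2 address spaces, so in theory stale bytes could still be replayed if
both levels happen to require emulation at the same eip; the second
variant avoids that by construction, at the cost of touching the nested
entry/exit paths. If the second route is taken, the two clear sites could
share a small helper so that any future nested path stays consistent; a
hypothetical sketch (this helper does not exist in the tree):

/* Hypothetical helper, not in the tree: invalidate any cached
 * instruction bytes when the vCPU transitions between L1 and L2, so
 * a pending retry can never replay the other level's bytes. */
static inline void hvmemul_flush_insn_cache(struct vcpu *v)
{
    v->arch.hvm_vcpu.hvm_io.mmio_insn_bytes = 0;
}

virtual_vmentry() and virtual_vmexit() would then call this instead of
open-coding the clear.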

> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -569,9 +569,19 @@ static int hvmemul_insn_fetch(
> 
>      /* Fall back if requested bytes are not in the prefetch cache. */
>      if ( unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
> -        return __hvmemul_read(
> -            seg, offset, p_data, bytes,
> -            hvm_access_insn_fetch, hvmemul_ctxt);
> +    {
> +        int rc = __hvmemul_read(seg, offset, p_data, bytes,
> +                                hvm_access_insn_fetch, hvmemul_ctxt);
> +
> +        if ( rc == X86EMUL_OKAY )
> +        {
> +            ASSERT(insn_off + bytes <= sizeof(hvmemul_ctxt->insn_buf));
> +            memcpy(&hvmemul_ctxt->insn_buf[insn_off], p_data, bytes);
> +            hvmemul_ctxt->insn_buf_bytes = insn_off + bytes;
> +        }
> +
> +        return rc;
> +    }
> 
>      /* Hit the cache. Simple memcpy. */
>      memcpy(p_data, &hvmemul_ctxt->insn_buf[insn_off], bytes);
> @@ -1148,17 +1158,27 @@ int hvm_emulate_one(
>          pfec |= PFEC_user_mode;
> 
>      hvmemul_ctxt->insn_buf_eip = regs->eip;
> -    hvmemul_ctxt->insn_buf_bytes =
> -        hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf)
> -        ? :
> -        (hvm_virtual_to_linear_addr(
> -            x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
> -            regs->eip, sizeof(hvmemul_ctxt->insn_buf),
> -            hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
> -         !hvm_fetch_from_guest_virt_nofault(
> -            hvmemul_ctxt->insn_buf, addr,
> -            sizeof(hvmemul_ctxt->insn_buf), pfec))
> -        ? sizeof(hvmemul_ctxt->insn_buf) : 0;
> +    if ( !vio->mmio_insn_bytes )
> +    {
> +        hvmemul_ctxt->insn_buf_bytes =
> +            hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
> +            (hvm_virtual_to_linear_addr(x86_seg_cs,
> +                                        &hvmemul_ctxt->seg_reg[x86_seg_cs],
> +                                        regs->eip,
> +                                        sizeof(hvmemul_ctxt->insn_buf),
> +                                        hvm_access_insn_fetch,
> +                                        hvmemul_ctxt->ctxt.addr_size,
> +                                        &addr) &&
> +             hvm_fetch_from_guest_virt_nofault(hvmemul_ctxt->insn_buf, addr,
> +                                               sizeof(hvmemul_ctxt->insn_buf),
> +                                               pfec) == HVMCOPY_okay) ?
> +            sizeof(hvmemul_ctxt->insn_buf) : 0;
> +    }
> +    else
> +    {
> +        hvmemul_ctxt->insn_buf_bytes = vio->mmio_insn_bytes;
> +        memcpy(hvmemul_ctxt->insn_buf, vio->mmio_insn, vio->mmio_insn_bytes);
> +    }
> 
>      hvmemul_ctxt->exn_pending = 0;
>      vio->mmio_retrying = vio->mmio_retry;
> @@ -1169,7 +1189,16 @@ int hvm_emulate_one(
>      if ( rc == X86EMUL_OKAY && vio->mmio_retry )
>          rc = X86EMUL_RETRY;
>      if ( rc != X86EMUL_RETRY )
> +    {
>          vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
> +        vio->mmio_insn_bytes = 0;
> +    }
> +    else
> +    {
> +        BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
> +        vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
> +        memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
> +    }
> 
>      if ( rc != X86EMUL_OKAY )
>          return rc;
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -66,6 +66,9 @@ struct hvm_vcpu_io {
>      /* We may write up to m256 as a number of device-model transactions. */
>      unsigned int mmio_large_write_bytes;
>      paddr_t mmio_large_write_pa;
> +    /* For retries we shouldn't re-fetch the instruction. */
> +    unsigned int mmio_insn_bytes;
> +    unsigned char mmio_insn[16];
>      /*
>       * For string instruction emulation we need to be able to signal a
>       * necessary retry through other than function return codes.


Best regards,
Yang


