
Re: [Xen-devel] [PATCH 4/4] SVM: streamline entry.S code



On 23/08/2013 15:04, Jan Beulich wrote:
- move stuff easily/better done in C into C code
- re-arrange code paths so that no redundant GET_CURRENT() would remain
  on the fast paths
- move long latency operations earlier
- slightly defer disabling global interrupts on the VM entry path

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -32,28 +32,34 @@
 #define CLGI   .byte 0x0F,0x01,0xDD
 
 ENTRY(svm_asm_do_resume)
+        GET_CURRENT(%rbx)
+.Lsvm_do_resume:
         call svm_intr_assist
         mov  %rsp,%rdi
         call nsvm_vcpu_switch
         ASSERT_NOT_IN_ATOMIC
 
-        GET_CURRENT(%rbx)
-        CLGI
-
         mov  VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift,%eax
         lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
-        cmpl $0,(%rdx,%rax,1)
+        xor  %ecx,%ecx
+        shl  $IRQSTAT_shift,%eax
+        CLGI
+        cmp  %ecx,(%rdx,%rax,1)
         jne  .Lsvm_process_softirqs
 
-        testb $0, VCPU_nsvm_hap_enabled(%rbx)
-UNLIKELY_START(nz, nsvm_hap)
-        mov  VCPU_nhvm_p2m(%rbx),%rax
-        test %rax,%rax
+        cmp  %cl,VCPU_nsvm_hap_enabled(%rbx)
+UNLIKELY_START(ne, nsvm_hap)
+        cmp  %rcx,VCPU_nhvm_p2m(%rbx)
         sete %al
-        andb VCPU_nhvm_guestmode(%rbx),%al
-        jnz  .Lsvm_nsvm_no_p2m
-UNLIKELY_END(nsvm_hap)
+        test VCPU_nhvm_guestmode(%rbx),%al
+        UNLIKELY_DONE(z, nsvm_hap)
+        /*
+         * Someone shot down our nested p2m table; go round again
+         * and nsvm_vcpu_switch() will fix it for us.
+         */
+        STGI
+        jmp  .Lsvm_do_resume
+__UNLIKELY_END(nsvm_hap)
 
         call svm_asid_handle_vmrun
 
@@ -72,13 +78,12 @@ UNLIKELY_END(svm_trace)
         mov  UREGS_eflags(%rsp),%rax
         mov  %rax,VMCB_rflags(%rcx)
 
-        mov  VCPU_svm_vmcb_pa(%rbx),%rax
-
         pop  %r15
         pop  %r14
         pop  %r13
         pop  %r12
         pop  %rbp
+        mov  VCPU_svm_vmcb_pa(%rbx),%rax
         pop  %rbx
         pop  %r11
         pop  %r10
@@ -92,25 +97,26 @@ UNLIKELY_END(svm_trace)
 
         VMRUN
 
+        GET_CURRENT(%rax)
         push %rdi
         push %rsi
         push %rdx
         push %rcx
+        mov  VCPU_svm_vmcb(%rax),%rcx
         push %rax

Having read the manual several times, I am now more and more confused about this.

My reading of the AMD Programmer's Manual, vol. 3, indicates that %rax after VMRUN completes will be the host %rax, i.e. VCPU_svm_vmcb_pa.

However, I can't find anywhere in the code that overwrites regs->rax from vmcb->rax, and I would have thought that its absence would cause utter devastation in combination with the generic functions working with a cpu_user_regs structure.

The alternative reading is that %rax after VMRUN is actually the guest %rax, in which case the pushes used to do the correct thing, but are now broken by this patch, which clobbers %rax (via the new GET_CURRENT(%rax)) before it is saved.
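
To spell out the two readings side by side, here is a throwaway C model of the %rax flow (everything below is invented for illustration; it is not Xen code):

#include <assert.h>
#include <stdint.h>

/* Invented stand-ins: a model of the two readings, not Xen code. */
struct vmcb_sketch { uint64_t rax; };   /* guest rax as held in the VMCB */
struct regs_sketch { uint64_t rax; };   /* cpu_user_regs.rax analogue    */

int main(void)
{
    const uint64_t vmcb_pa = 0xf000;        /* host %rax before VMRUN    */
    struct vmcb_sketch vmcb = { 0x1111 };   /* the guest's rax           */
    struct regs_sketch regs = { 0 };
    uint64_t rax;

    /* Reading 1: hardware saves/restores the host %rax across
     * VMRUN/#VMEXIT, so afterwards %rax is still the VMCB PA and the
     * guest value exists only in vmcb.rax; software must copy it into
     * regs.rax or the guest's rax is simply lost. */
    rax = vmcb_pa;                          /* %rax after #VMEXIT        */
    regs.rax = vmcb.rax;                    /* the fix-up that is needed */
    assert(rax == vmcb_pa && regs.rax == 0x1111);

    /* Reading 2: %rax after #VMEXIT already holds the guest value, so
     * the old "push %rax" straight after VMRUN was sufficient -- but
     * the GET_CURRENT(%rax) this patch adds before the push would now
     * destroy it. */
    rax = vmcb.rax;                         /* %rax after #VMEXIT        */
    rax = (uint64_t)-1;                     /* GET_CURRENT(%rax) clobber */
    regs.rax = rax;                         /* push %rax saves garbage   */
    assert(regs.rax != 0x1111);

    return 0;
}

Under the first reading a software fix-up from vmcb->rax is mandatory; under the second, the new GET_CURRENT(%rax) is itself the bug.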

Can someone with more knowledge please confirm?  I really hope I have overlooked something in the code.

~Andrew

         push %r8
         push %r9
         push %r10
         push %r11
         push %rbx
+        mov  %rax,%rbx
         push %rbp
         push %r12
         push %r13
         push %r14
         push %r15
 
-        GET_CURRENT(%rbx)
         movb $0,VCPU_svm_vmcb_in_sync(%rbx)
-        mov  VCPU_svm_vmcb(%rbx),%rcx
         mov  VMCB_rax(%rcx),%rax
         mov  %rax,UREGS_rax(%rsp)
         mov  VMCB_rip(%rcx),%rax
@@ -120,33 +126,14 @@ UNLIKELY_END(svm_trace)
         mov  VMCB_rflags(%rcx),%rax
         mov  %rax,UREGS_eflags(%rsp)
 
-#ifndef NDEBUG
-        mov  $0xbeef,%ax
-        mov  %ax,UREGS_error_code(%rsp)
-        mov  %ax,UREGS_entry_vector(%rsp)
-        mov  %ax,UREGS_saved_upcall_mask(%rsp)
-        mov  %ax,UREGS_cs(%rsp)
-        mov  %ax,UREGS_ds(%rsp)
-        mov  %ax,UREGS_es(%rsp)
-        mov  %ax,UREGS_fs(%rsp)
-        mov  %ax,UREGS_gs(%rsp)
-        mov  %ax,UREGS_ss(%rsp)
-#endif
-
         STGI
 .globl svm_stgi_label
 svm_stgi_label:
         mov  %rsp,%rdi
         call svm_vmexit_handler
-        jmp  svm_asm_do_resume
+        jmp  .Lsvm_do_resume
 
 .Lsvm_process_softirqs:
         STGI
         call do_softirq
-        jmp  svm_asm_do_resume
-
-.Lsvm_nsvm_no_p2m:
-        /* Someone shot down our nested p2m table; go round again
-         * and nsvm_vcpu_switch() will fix it for us. */
-        STGI
-        jmp  svm_asm_do_resume
+        jmp  .Lsvm_do_resume
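
Stepping back, the reworked fast path is easier to see in C. Here is a rough, self-contained model of what svm_asm_do_resume now does (the stubs and structure fields are invented stand-ins for the real Xen helpers and VCPU fields; this is a sketch of the asm above, not real code):

#include <stdbool.h>
#include <stddef.h>

/* Stubs standing in for the real Xen helpers and instructions. */
struct cpu_user_regs;
struct vcpu_sketch {
    int processor;
    bool nsvm_hap_enabled, nhvm_guestmode;
    void *nhvm_p2m;
};

static void svm_intr_assist(void) {}
static void nsvm_vcpu_switch(struct cpu_user_regs *regs) { (void)regs; }
static void clgi(void) {}                       /* the CLGI instruction */
static void stgi(void) {}                       /* the STGI instruction */
static bool softirq_pending(int cpu) { (void)cpu; return false; }
static void do_softirq(void) {}
static void svm_asid_handle_vmrun(void) {}
static void vmrun_and_handle_exit(struct cpu_user_regs *regs) { (void)regs; }

static void svm_do_resume_sketch(struct vcpu_sketch *curr,
                                 struct cpu_user_regs *regs)
{
    for ( ;; )                            /* .Lsvm_do_resume */
    {
        svm_intr_assist();
        nsvm_vcpu_switch(regs);

        clgi();                           /* GIF off; checks below are atomic */

        if ( softirq_pending(curr->processor) )
        {
            stgi();                       /* .Lsvm_process_softirqs */
            do_softirq();
            continue;
        }

        if ( curr->nsvm_hap_enabled &&
             curr->nhvm_guestmode && curr->nhvm_p2m == NULL )
        {
            /* Nested p2m was shot down; go round again so that
             * nsvm_vcpu_switch() can rebuild it. */
            stgi();
            continue;
        }

        svm_asid_handle_vmrun();
        vmrun_and_handle_exit(regs);      /* VMRUN .. STGI .. svm_vmexit_handler() */
    }
}

In particular this makes the retry semantics visible: both the softirq path and the shot-down-nested-p2m path re-enable the GIF and go round the whole loop again, so nsvm_vcpu_switch() always runs before the next VMRUN attempt.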
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2069,6 +2069,8 @@ void svm_vmexit_handler(struct cpu_user_
     vintr_t intr;
     bool_t vcpu_guestmode = 0;
 
+    hvm_invalidate_regs_fields(regs);
+
     if ( paging_mode_hap(v->domain) )
         v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
             vmcb_get_cr3(vmcb);
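
For context, hvm_invalidate_regs_fields() is what takes over the #ifndef NDEBUG poisoning deleted from entry.S above. A minimal sketch, reconstructed from the removed asm (field widths are illustrative and the real Xen definition may differ in detail):

#include <stdint.h>

/* Abridged stand-in for Xen's struct cpu_user_regs (illustrative). */
struct cpu_user_regs_sketch {
    uint32_t error_code, entry_vector;
    uint16_t saved_upcall_mask, cs, ds, es, fs, gs, ss;
};

/* Poison the fields not written by the VMEXIT path, so debug builds
 * notice any code wrongly consuming them; the 0xbeef pattern and the
 * field list come from the deleted asm block above. */
static inline void hvm_invalidate_regs_fields(struct cpu_user_regs_sketch *regs)
{
#ifndef NDEBUG
    regs->error_code = 0xbeef;
    regs->entry_vector = 0xbeef;
    regs->saved_upcall_mask = 0xbeef;
    regs->cs = 0xbeef;
    regs->ds = 0xbeef;
    regs->es = 0xbeef;
    regs->fs = 0xbeef;
    regs->gs = 0xbeef;
    regs->ss = 0xbeef;
#else
    (void)regs;
#endif
}

Calling it at the top of svm_vmexit_handler() keeps the debug-only poisoning while shrinking the asm stub, which is the "move stuff easily/better done in C into C code" item from the description.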
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -39,11 +39,17 @@ void ret_from_intr(void);
         .subsection 1;            \
         .Lunlikely.tag:
 
-#define UNLIKELY_END(tag)         \
-        jmp .Llikely.tag;         \
+#define UNLIKELY_DONE(cond, tag)  \
+        j##cond .Llikely.tag
+
+#define __UNLIKELY_END(tag)       \
         .subsection 0;            \
         .Llikely.tag:
 
+#define UNLIKELY_END(tag)         \
+        UNLIKELY_DONE(mp, tag);   \
+        __UNLIKELY_END(tag)
+
 #define STACK_CPUINFO_FIELD(field) (STACK_SIZE-CPUINFO_sizeof+CPUINFO_##field)
 #define GET_STACK_BASE(reg)                       \
         movq $~(STACK_SIZE-1),reg;                \
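
As an aside for anyone not familiar with these helpers: they use the assembler's .subsection directive to move rarely-executed code out of the hot path. With the new split, the nsvm_hap block in entry.S above expands roughly as follows (illustrative; pieced together from the #defines in this hunk, with the UNLIKELY_START() jump inferred):

/*
 * UNLIKELY_START(ne, nsvm_hap)  ->  jne .Lunlikely.nsvm_hap
 *                                   .subsection 1
 *                                   .Lunlikely.nsvm_hap:
 *
 * UNLIKELY_DONE(z, nsvm_hap)    ->  jz .Llikely.nsvm_hap
 *
 * __UNLIKELY_END(nsvm_hap)      ->  .subsection 0
 *                                   .Llikely.nsvm_hap:
 */

Note that UNLIKELY_END(tag) is now just UNLIKELY_DONE(mp, tag) followed by __UNLIKELY_END(tag): token pasting turns j##mp into the unconditional jmp, so existing UNLIKELY_END() users are unchanged, while the nsvm_hap path can leave the unlikely section conditionally and fall through to its STGI-and-retry tail.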




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
