[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/6] mini-os/x86-64 entry: code refactoring; no functional changes



Re-arrange assembly code blocks so that they appear in the order
they are called, rather than jumping around, to enhance readability.
Macros are grouped together as well.

Signed-off-by: Xu Zhang <xzhang@xxxxxxxxxx>
---
 extras/mini-os/arch/x86/x86_64.S |  118 +++++++++++++++++++-------------------
 1 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/extras/mini-os/arch/x86/x86_64.S b/extras/mini-os/arch/x86/x86_64.S
index addb7b1..79e893f 100644
--- a/extras/mini-os/arch/x86/x86_64.S
+++ b/extras/mini-os/arch/x86/x86_64.S
@@ -36,6 +36,22 @@ hypercall_page:
         .org 0x3000
 
 
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
+
+#define XEN_BLOCK_EVENTS(reg)  XEN_GET_VCPU_INFO(reg)                  ; \
+                                       XEN_LOCKED_BLOCK_EVENTS(reg)    ; \
+                                           XEN_PUT_VCPU_INFO(reg)
+
+#define XEN_UNBLOCK_EVENTS(reg)        XEN_GET_VCPU_INFO(reg)                  ; \
+                                               XEN_LOCKED_UNBLOCK_EVENTS(reg)  ; \
+                                       XEN_PUT_VCPU_INFO(reg)
+
+
 /* Offsets into shared_info_t. */                
 #define evtchn_upcall_pending          /* 0 */
 #define evtchn_upcall_mask             1
@@ -46,6 +62,27 @@ NMI_MASK = 0x80000000
 #define ORIG_RAX 120       /* + error_code */ 
 #define EFLAGS 144
 
+
+/* Macros */
+.macro zeroentry sym
+       movq (%rsp),%rcx
+       movq 8(%rsp),%r11
+       addq $0x10,%rsp /* skip rcx and r11 */
+       pushq $0        /* push error code/oldrax */
+       pushq %rax      /* push real oldrax to the rdi slot */
+       leaq  \sym(%rip),%rax
+       jmp error_entry
+.endm
+
+.macro errorentry sym
+       movq (%rsp),%rcx
+       movq 8(%rsp),%r11
+       addq $0x10,%rsp /* rsp points to the error code */
+       pushq %rax
+       leaq  \sym(%rip),%rax
+       jmp error_entry
+.endm
+
 .macro SAVE_REST
        subq $6*8,%rsp
        movq %rbx,5*8(%rsp)
@@ -79,7 +116,6 @@ NMI_MASK = 0x80000000
        addq $9*8+8,%rsp
 .endm  
 
-
 .macro HYPERVISOR_IRET flag
        testl $NMI_MASK,2*8(%rsp)
        jnz   2f
@@ -98,6 +134,8 @@ NMI_MASK = 0x80000000
        jmp  hypercall_page + (__HYPERVISOR_iret * 32)
 .endm
 
+
+
 /*
  * Exception entry point. This expects an error code/orig_rax on the stack
  * and the exception handler in %rax.  
@@ -130,73 +168,24 @@ error_call_handler:
        call *%rax
        jmp error_exit
 
-.macro zeroentry sym
-       movq (%rsp),%rcx
-       movq 8(%rsp),%r11
-       addq $0x10,%rsp /* skip rcx and r11 */
-       pushq $0        /* push error code/oldrax */ 
-       pushq %rax      /* push real oldrax to the rdi slot */ 
-       leaq  \sym(%rip),%rax
-       jmp error_entry
-.endm  
-
-.macro errorentry sym
-       movq (%rsp),%rcx
-       movq 8(%rsp),%r11
-       addq $0x10,%rsp /* rsp points to the error code */
-       pushq %rax
-       leaq  \sym(%rip),%rax
-       jmp error_entry
-.endm
-
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
-#define XEN_PUT_VCPU_INFO(reg)
-#define XEN_PUT_VCPU_INFO_fixup
-#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
-#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
-
-#define XEN_BLOCK_EVENTS(reg)  XEN_GET_VCPU_INFO(reg)                  ; \
-                                       XEN_LOCKED_BLOCK_EVENTS(reg)    ; \
-                                           XEN_PUT_VCPU_INFO(reg)
-
-#define XEN_UNBLOCK_EVENTS(reg)        XEN_GET_VCPU_INFO(reg)                  ; \
-                                               XEN_LOCKED_UNBLOCK_EVENTS(reg)  ; \
-                                       XEN_PUT_VCPU_INFO(reg)
-
 
 
 ENTRY(hypervisor_callback)
        zeroentry hypervisor_callback2
 
 ENTRY(hypervisor_callback2)
-       movq %rdi, %rsp 
+       movq %rdi, %rsp
 11:    movq %gs:8,%rax
        incl %gs:0
        cmovzq %rax,%rsp
        pushq %rdi
-       call do_hypervisor_callback 
+       call do_hypervisor_callback
        popq %rsp
        decl %gs:0
-       jmp error_exit
-
-restore_all_enable_events:  
-       XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
-
-scrit: /**** START OF CRITICAL REGION ****/
-       XEN_TEST_PENDING(%rsi)
-       jnz  14f                        # process more events if necessary...
-       XEN_PUT_VCPU_INFO(%rsi)
-       RESTORE_ALL
-       HYPERVISOR_IRET 0
-
-14:    XEN_LOCKED_BLOCK_EVENTS(%rsi)
-       XEN_PUT_VCPU_INFO(%rsi)
-       SAVE_REST
-       movq %rsp,%rdi                  # set the argument again
-       jmp  11b
-ecrit:  /**** END OF CRITICAL REGION ****/
 
+error_exit:
+       RESTORE_REST
+       XEN_BLOCK_EVENTS(%rsi)
 
 retint_kernel:
 retint_restore_args:
@@ -211,11 +200,22 @@ retint_restore_args:
        RESTORE_ALL
        HYPERVISOR_IRET 0
 
+restore_all_enable_events:
+       XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
 
-error_exit:
-       RESTORE_REST
-       XEN_BLOCK_EVENTS(%rsi)          
-       jmp retint_kernel
+scrit: /**** START OF CRITICAL REGION ****/
+       XEN_TEST_PENDING(%rsi)
+       jnz  14f                        # process more events if necessary...
+       XEN_PUT_VCPU_INFO(%rsi)
+       RESTORE_ALL
+       HYPERVISOR_IRET 0
+
+14:    XEN_LOCKED_BLOCK_EVENTS(%rsi)
+       XEN_PUT_VCPU_INFO(%rsi)
+       SAVE_REST
+       movq %rsp,%rdi                  # set the argument again
+       jmp  11b
+ecrit:  /**** END OF CRITICAL REGION ****/
 
 
 
-- 
1.7.7.6


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.