
[Xen-devel] [PATCH 05/18 V2 RESEND]: PVH xen: more preparatory changes for PVH



This is another preparatory patch for PVH. In this patch, the following
 functions are made non-static:
     vmx_fpu_enter(), get_instruction_length(), update_guest_eip(),
     vmx_dr_access(), vmx_do_extint(), pv_cpuid(), and
     emulate_forced_invalid_op().

There is no functionality change.

Changes in V2:
  - Prepend vmx_ to get_instruction_length() and update_guest_eip().
  - Do not export/use vmr().

Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c         |   74 +++++++++++++++---------------------
 xen/arch/x86/hvm/vmx/vvmx.c        |    2 +-
 xen/arch/x86/traps.c               |    4 +-
 xen/include/asm-x86/hvm/vmx/vmcs.h |    1 +
 xen/include/asm-x86/hvm/vmx/vmx.h  |   16 +++++++-
 xen/include/asm-x86/processor.h    |    2 +
 6 files changed, 52 insertions(+), 47 deletions(-)
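
Note (illustrative only): the symbols exported above are intended to be reused by
the PVH exit path added later in this series. The rough sketch below is a hedged
illustration of such a caller, not code from this series -- the function name
pvh_vmexit_handler and the exact dispatch are assumptions -- but every helper it
invokes is either made non-static by this patch or already part of the VMX
interface:

    /* Hypothetical sketch only: pvh_vmexit_handler is not part of this
     * patch.  It shows how a PVH exit dispatcher could call the helpers
     * made non-static here instead of duplicating them. */
    static void pvh_vmexit_handler(struct cpu_user_regs *regs)
    {
        unsigned int exit_reason = __vmread(VM_EXIT_REASON);

        switch ( exit_reason )
        {
        case EXIT_REASON_EXTERNAL_INTERRUPT:
            /* Exported by this patch; dispatches the external interrupt. */
            vmx_do_extint(regs);
            break;

        case EXIT_REASON_CPUID:
            /* PV cpuid path, now exported from traps.c. */
            pv_cpuid(regs);
            vmx_update_guest_eip();    /* Safe: CPUID */
            break;

        case EXIT_REASON_DR_ACCESS:
            vmx_dr_access(__vmread(EXIT_QUALIFICATION), regs);
            break;

        default:
            gdprintk(XENLOG_WARNING, "Unhandled PVH exit: %u\n", exit_reason);
            break;
        }
    }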

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 04dbefb..e64980f 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -574,7 +574,7 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     return 0;
 }
 
-static void vmx_fpu_enter(struct vcpu *v)
+void vmx_fpu_enter(struct vcpu *v)
 {
     vcpu_restore_fpu_lazy(v);
     v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device);
@@ -1526,24 +1526,12 @@ struct hvm_function_table * __init start_vmx(void)
     return &vmx_function_table;
 }
 
-/*
- * Not all cases receive valid value in the VM-exit instruction length field.
- * Callers must know what they're doing!
- */
-static int get_instruction_length(void)
-{
-    int len;
-    len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
-    BUG_ON((len < 1) || (len > 15));
-    return len;
-}
-
-void update_guest_eip(void)
+void vmx_update_guest_eip(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     unsigned long x;
 
-    regs->eip += get_instruction_length(); /* Safe: callers audited */
+    regs->eip += vmx_get_instruction_length(); /* Safe: callers audited */
     regs->eflags &= ~X86_EFLAGS_RF;
 
     x = __vmread(GUEST_INTERRUPTIBILITY_INFO);
@@ -1616,8 +1604,8 @@ static void vmx_do_cpuid(struct cpu_user_regs *regs)
     regs->edx = edx;
 }
 
-static void vmx_dr_access(unsigned long exit_qualification,
-                          struct cpu_user_regs *regs)
+void vmx_dr_access(unsigned long exit_qualification,
+                   struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
 
@@ -2037,7 +2025,7 @@ gp_fault:
     return X86EMUL_EXCEPTION;
 }
 
-static void vmx_do_extint(struct cpu_user_regs *regs)
+void vmx_do_extint(struct cpu_user_regs *regs)
 {
     unsigned int vector;
 
@@ -2221,7 +2209,7 @@ static int vmx_handle_eoi_write(void)
     if ( (((exit_qualification >> 12) & 0xf) == 1) &&
          ((exit_qualification & 0xfff) == APIC_EOI) )
     {
-        update_guest_eip(); /* Safe: APIC data write */
+        vmx_update_guest_eip(); /* Safe: APIC data write */
         vlapic_EOI_set(vcpu_vlapic(current));
         HVMTRACE_0D(VLAPIC);
         return 1;
@@ -2434,7 +2422,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             HVMTRACE_1D(TRAP, vector);
             if ( v->domain->debugger_attached )
             {
-                update_guest_eip(); /* Safe: INT3 */            
+                vmx_update_guest_eip(); /* Safe: INT3 */            
                 current->arch.gdbsx_vcpu_event = TRAP_int3;
                 domain_pause_for_debugger();
                 break;
@@ -2542,7 +2530,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
          */
         inst_len = ((source != 3) ||        /* CALL, IRET, or JMP? */
                     (idtv_info & (1u<<10))) /* IntrType > 3? */
-            ? get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0;
+            ? vmx_get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0;
         if ( (source == 3) && (idtv_info & INTR_INFO_DELIVER_CODE_MASK) )
             ecode = __vmread(IDT_VECTORING_ERROR_CODE);
         regs->eip += inst_len;
@@ -2550,15 +2538,15 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
     }
     case EXIT_REASON_CPUID:
-        update_guest_eip(); /* Safe: CPUID */
+        vmx_update_guest_eip(); /* Safe: CPUID */
         vmx_do_cpuid(regs);
         break;
     case EXIT_REASON_HLT:
-        update_guest_eip(); /* Safe: HLT */
+        vmx_update_guest_eip(); /* Safe: HLT */
         hvm_hlt(regs->eflags);
         break;
     case EXIT_REASON_INVLPG:
-        update_guest_eip(); /* Safe: INVLPG */
+        vmx_update_guest_eip(); /* Safe: INVLPG */
         exit_qualification = __vmread(EXIT_QUALIFICATION);
         vmx_invlpg_intercept(exit_qualification);
         break;
@@ -2566,7 +2554,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         regs->ecx = hvm_msr_tsc_aux(v);
         /* fall through */
     case EXIT_REASON_RDTSC:
-        update_guest_eip(); /* Safe: RDTSC, RDTSCP */
+        vmx_update_guest_eip(); /* Safe: RDTSC, RDTSCP */
         hvm_rdtsc_intercept(regs);
         break;
     case EXIT_REASON_VMCALL:
@@ -2576,7 +2564,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
-            update_guest_eip(); /* Safe: VMCALL */
+            vmx_update_guest_eip(); /* Safe: VMCALL */
             if ( rc == HVM_HCALL_invalidate )
                 send_invalidate_req();
         }
@@ -2586,7 +2574,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     {
         exit_qualification = __vmread(EXIT_QUALIFICATION);
         if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY )
-            update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
+            vmx_update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
         break;
     }
     case EXIT_REASON_DR_ACCESS:
@@ -2600,7 +2588,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         {
             regs->eax = (uint32_t)msr_content;
             regs->edx = (uint32_t)(msr_content >> 32);
-            update_guest_eip(); /* Safe: RDMSR */
+            vmx_update_guest_eip(); /* Safe: RDMSR */
         }
         break;
     }
@@ -2609,63 +2597,63 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         uint64_t msr_content;
         msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
         if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
-            update_guest_eip(); /* Safe: WRMSR */
+            vmx_update_guest_eip(); /* Safe: WRMSR */
         break;
     }
 
     case EXIT_REASON_VMXOFF:
         if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_VMXON:
         if ( nvmx_handle_vmxon(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_VMCLEAR:
         if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
  
     case EXIT_REASON_VMPTRLD:
         if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_VMPTRST:
         if ( nvmx_handle_vmptrst(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_VMREAD:
         if ( nvmx_handle_vmread(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
  
     case EXIT_REASON_VMWRITE:
         if ( nvmx_handle_vmwrite(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_VMLAUNCH:
         if ( nvmx_handle_vmlaunch(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_VMRESUME:
         if ( nvmx_handle_vmresume(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_INVEPT:
         if ( nvmx_handle_invept(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_INVVPID:
         if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY )
-            update_guest_eip();
+            vmx_update_guest_eip();
         break;
 
     case EXIT_REASON_MWAIT_INSTRUCTION:
@@ -2713,14 +2701,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             int bytes = (exit_qualification & 0x07) + 1;
             int dir = (exit_qualification & 0x08) ? IOREQ_READ : IOREQ_WRITE;
             if ( handle_pio(port, bytes, dir) )
-                update_guest_eip(); /* Safe: IN, OUT */
+                vmx_update_guest_eip(); /* Safe: IN, OUT */
         }
         break;
 
     case EXIT_REASON_INVD:
     case EXIT_REASON_WBINVD:
     {
-        update_guest_eip(); /* Safe: INVD, WBINVD */
+        vmx_update_guest_eip(); /* Safe: INVD, WBINVD */
         vmx_wbinvd_intercept();
         break;
     }
@@ -2753,7 +2741,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     {
         u64 new_bv = (((u64)regs->edx) << 32) | regs->eax;
         if ( hvm_handle_xsetbv(new_bv) == 0 )
-            update_guest_eip(); /* Safe: XSETBV */
+            vmx_update_guest_eip(); /* Safe: XSETBV */
         break;
     }
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index bb7688f..225de9f 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2136,7 +2136,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
             tsc += __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET);
             regs->eax = (uint32_t)tsc;
             regs->edx = (uint32_t)(tsc >> 32);
-            update_guest_eip();
+            vmx_update_guest_eip();
 
             return 1;
         }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index ab3e814..ab54f82 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -728,7 +728,7 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
     return 1;
 }
 
-static void pv_cpuid(struct cpu_user_regs *regs)
+void pv_cpuid(struct cpu_user_regs *regs)
 {
     uint32_t a, b, c, d;
 
@@ -905,7 +905,7 @@ static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
     return EXCRET_fault_fixed;
 }
 
-static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
+unsigned long emulate_forced_invalid_op(struct cpu_user_regs *regs)
 {
     char sig[5], instr[2];
     unsigned long eip, rc;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 37e6734..11b09ef 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -461,6 +461,7 @@ void vmx_vmcs_switch(struct vmcs_struct *from, struct vmcs_struct *to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 int vmx_check_msr_bitmap(unsigned long *msr_bitmap, u32 msr, int access_type);
+void vmx_fpu_enter(struct vcpu *v);
 void virtual_vmcs_enter(void *vvmcs);
 void virtual_vmcs_exit(void *vvmcs);
 u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index d4d6feb..a742e16 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -420,6 +420,18 @@ static inline int __vmxon(u64 addr)
     return rc;
 }
 
+/*
+ * Not all cases receive valid value in the VM-exit instruction length field.
+ * Callers must know what they're doing!
+ */
+static inline int vmx_get_instruction_length(void)
+{
+    int len;
+    len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
+    BUG_ON((len < 1) || (len > 15));
+    return len;
+}
+
 void vmx_get_segment_register(struct vcpu *, enum x86_segment,
                               struct segment_register *);
 void vmx_inject_extint(int trap);
@@ -431,7 +443,9 @@ void ept_p2m_uninit(struct p2m_domain *p2m);
 void ept_walk_table(struct domain *d, unsigned long gfn);
 void setup_ept_dump(void);
 
-void update_guest_eip(void);
+void vmx_update_guest_eip(void);
+void vmx_dr_access(unsigned long exit_qualification, struct cpu_user_regs *regs);
+void vmx_do_extint(struct cpu_user_regs *regs);
 
 int alloc_p2m_hap_data(struct p2m_domain *p2m);
 void free_p2m_hap_data(struct p2m_domain *p2m);
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 5cdacc7..096cdc9 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -566,6 +566,8 @@ void microcode_set_module(unsigned int);
 int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
 int microcode_resume_cpu(int cpu);
 
+void pv_cpuid(struct cpu_user_regs *regs);
+unsigned long emulate_forced_invalid_op(struct cpu_user_regs *regs);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_X86_PROCESSOR_H */
-- 
1.7.2.3
