
[Xen-devel] [PATCH v5 05/10] nEPT: Try to enable EPT paging for L2 guest.



From: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>

Once EPT is found to be enabled by the L1 VMM, enable nested EPT
support for the L2 guest.

Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c         |   22 ++++++++++++++--
 xen/arch/x86/hvm/vmx/vvmx.c        |   48 +++++++++++++++++++++++++++--------
 xen/include/asm-x86/hvm/vmx/vvmx.h |    7 ++++-
 3 files changed, 62 insertions(+), 15 deletions(-)
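
A minimal standalone sketch of the tri-state return contract that the new
switch in ept_handle_violation() depends on is included below for reference.
It is not Xen code: handle_violation() is a hypothetical stand-in for
hvm_hap_nested_page_fault(), and the GPA-based classification is invented
purely to exercise the three cases.

#include <stdio.h>

/* Hypothetical stand-in for hvm_hap_nested_page_fault(). */
static int handle_violation(unsigned long gpa)
{
    if ( gpa < 0x1000 )     /* pretend low GPAs are fixed up locally */
        return 1;
    if ( gpa & 1 )          /* pretend odd GPAs belong to the L1 VMM */
        return -1;
    return 0;               /* everything else is unhandled */
}

static void dispatch(unsigned long gpa)
{
    switch ( handle_violation(gpa) )
    {
    case 1:     /* handled completely: refresh the shadow EPT pointer */
        printf("gpa %#lx: handled, re-write shadow EPTP\n", gpa);
        break;
    case -1:    /* reflect the violation into L1 as a nested vmexit */
        printf("gpa %#lx: inject EPT-violation vmexit into L1\n", gpa);
        break;
    default:    /* unhandled: fall through to the error path */
        printf("gpa %#lx: unhandled, error path\n", gpa);
        break;
    }
}

int main(void)
{
    dispatch(0x0800);   /* handled */
    dispatch(0x2001);   /* reflected to L1 */
    dispatch(0x2000);   /* unhandled */
    return 0;
}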

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d74aae0..712fcec 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1461,6 +1461,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_vcpu_guestcr3   = nvmx_vcpu_guestcr3,
     .nhvm_vcpu_p2m_base   = nvmx_vcpu_eptp_base,
     .nhvm_vcpu_asid       = nvmx_vcpu_asid,
+    .nhvm_vmcx_hap_enabled = nvmx_ept_enabled,
     .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
     .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
     .nhvm_intr_blocked    = nvmx_intr_blocked,
@@ -2003,6 +2004,7 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
     unsigned long gla, gfn = gpa >> PAGE_SHIFT;
     mfn_t mfn;
     p2m_type_t p2mt;
+    int ret;
     struct domain *d = current->domain;
 
     if ( tb_init_done )
@@ -2017,18 +2019,32 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
         _d.gpa = gpa;
         _d.qualification = qualification;
         _d.mfn = mfn_x(get_gfn_query_unlocked(d, gfn, &_d.p2mt));
-        
+
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
-    if ( hvm_hap_nested_page_fault(gpa,
+    ret = hvm_hap_nested_page_fault(gpa,
                                    qualification & EPT_GLA_VALID       ? 1 : 0,
                                    qualification & EPT_GLA_VALID
                                      ? __vmread(GUEST_LINEAR_ADDRESS) : ~0ull,
                                    qualification & EPT_READ_VIOLATION  ? 1 : 0,
                                    qualification & EPT_WRITE_VIOLATION ? 1 : 0,
-                                   qualification & EPT_EXEC_VIOLATION  ? 1 : 0) )
+                                   qualification & EPT_EXEC_VIOLATION  ? 1 : 0);
+    switch ( ret ) {
+    case 0:         // Unhandled L1 EPT violation
+        break;
+    case 1:         // This violation is handled completely
+        /* The current nested EPT may have been flushed by other vcpus, so
+         * we need to re-set its shadow EPTP pointer.
+         */
+        if ( nestedhvm_vcpu_in_guestmode(current) &&
+             nestedhvm_paging_mode_hap(current) )
+            __vmwrite(EPT_POINTER, get_shadow_eptp(current));
+        return;
+    case -1:        // This violation should be injected into the L1 VMM
+        vcpu_nestedhvm(current).nv_vmexit_pending = 1;
         return;
+    }
 
     /* Everything else is an error. */
     mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 1d3090d..3bf23bb 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -41,6 +41,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
        goto out;
     }
+    nvmx->ept.enabled = 0;
     nvmx->vmxon_region_pa = 0;
     nvcpu->nv_vvmcx = NULL;
     nvcpu->nv_vvmcxaddr = VMCX_EADDR;
@@ -96,9 +97,11 @@ uint64_t nvmx_vcpu_guestcr3(struct vcpu *v)
 
 uint64_t nvmx_vcpu_eptp_base(struct vcpu *v)
 {
-    /* TODO */
-    ASSERT(0);
-    return 0;
+    uint64_t eptp_base;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    eptp_base = __get_vvmcs(nvcpu->nv_vvmcx, EPT_POINTER);
+    return eptp_base & PAGE_MASK;
 }
 
 uint32_t nvmx_vcpu_asid(struct vcpu *v)
@@ -108,6 +111,13 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v)
     return 0;
 }
 
+bool_t nvmx_ept_enabled(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+
+    return !!(nvmx->ept.enabled);
+}
+
 static const enum x86_segment sreg_to_index[] = {
     [VMX_SREG_ES] = x86_seg_es,
     [VMX_SREG_CS] = x86_seg_cs,
@@ -502,14 +512,16 @@ void nvmx_update_exec_control(struct vcpu *v, u32 host_cntrl)
 }
 
 void nvmx_update_secondary_exec_control(struct vcpu *v,
-                                            unsigned long value)
+                                            unsigned long host_cntrl)
 {
     u32 shadow_cntrl;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
     shadow_cntrl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL);
-    shadow_cntrl |= value;
-    set_shadow_control(v, SECONDARY_VM_EXEC_CONTROL, shadow_cntrl);
+    nvmx->ept.enabled = !!(shadow_cntrl & SECONDARY_EXEC_ENABLE_EPT);
+    shadow_cntrl |= host_cntrl;
+    __vmwrite(SECONDARY_VM_EXEC_CONTROL, shadow_cntrl);
 }
 
 static void nvmx_update_pin_control(struct vcpu *v, unsigned long host_cntrl)
@@ -851,6 +863,17 @@ static void load_shadow_guest_state(struct vcpu *v)
     /* TODO: CR3 target control */
 }
 
+
+uint64_t get_shadow_eptp(struct vcpu *v)
+{
+    uint64_t np2m_base = nvmx_vcpu_eptp_base(v);
+    struct p2m_domain *p2m = p2m_get_nestedp2m(v, np2m_base);
+    struct ept_data *ept = &p2m->ept;
+
+    ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+    return ept_get_eptp(ept);
+}
+
 static void virtual_vmentry(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -895,7 +918,10 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
     /* updating host cr0 to sync TS bit */
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
 
-    /* TODO: EPT_POINTER */
+    /* Set up the virtual EPT for the L2 guest */
+    if ( nestedhvm_paging_mode_hap(v) )
+        __vmwrite(EPT_POINTER, get_shadow_eptp(v));
+
 }
 
 static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
@@ -948,8 +974,8 @@ static void sync_vvmcs_ro(struct vcpu *v)
     /* Adjust exit_reason/exit_qualifciation for violation case */
     if ( __get_vvmcs(vvmcs, VM_EXIT_REASON) == EXIT_REASON_EPT_VIOLATION )
     {
-        __set_vvmcs(vvmcs, EXIT_QUALIFICATION, nvmx->ept_exit.exit_qual);
-        __set_vvmcs(vvmcs, VM_EXIT_REASON, nvmx->ept_exit.exit_reason);
+        __set_vvmcs(vvmcs, EXIT_QUALIFICATION, nvmx->ept.exit_qual);
+        __set_vvmcs(vvmcs, VM_EXIT_REASON, nvmx->ept.exit_reason);
     }
 }
 
@@ -1515,8 +1541,8 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
     case EPT_TRANSLATE_VIOLATION:
     case EPT_TRANSLATE_MISCONFIG:
         rc = NESTEDHVM_PAGEFAULT_INJECT;
-        nvmx->ept_exit.exit_reason = exit_reason;
-        nvmx->ept_exit.exit_qual = exit_qual;
+        nvmx->ept.exit_reason = exit_reason;
+        nvmx->ept.exit_qual = exit_qual;
         break;
     case EPT_TRANSLATE_RETRY:
         rc = NESTEDHVM_PAGEFAULT_RETRY;
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 97554bf..09d3fa4 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -33,9 +33,10 @@ struct nestedvmx {
         u32           error_code;
     } intr;
     struct {
+        bool_t   enabled;
         uint32_t exit_reason;
         uint32_t exit_qual;
-    } ept_exit;
+    } ept;
 };
 
 #define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
@@ -110,6 +111,8 @@ int nvmx_intercepts_exception(struct vcpu *v,
                               unsigned int trap, int error_code);
 void nvmx_domain_relinquish_resources(struct domain *d);
 
+bool_t nvmx_ept_enabled(struct vcpu *v);
+
 int nvmx_handle_vmxon(struct cpu_user_regs *regs);
 int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
 
@@ -178,6 +181,8 @@ enum vvmcs_encoding_type {
 u64 __get_vvmcs(void *vvmcs, u32 vmcs_encoding);
 void __set_vvmcs(void *vvmcs, u32 vmcs_encoding, u64 val);
 
+uint64_t get_shadow_eptp(struct vcpu *v);
+
 void nvmx_destroy_vmcs(struct vcpu *v);
 int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
 int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
-- 
1.7.1

