
[Xen-devel] [PATCH v4 07/10] nEPT: Use minimal permission for nested p2m.



From: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>

Emulate the permission check for the nested p2m. The current solution is to
use the minimal permission: once a permission violation is met in L0,
determine whether it was caused by the guest EPT or by the host EPT.

Signed-off-by: Zhang Xiantao <xiantao.zhang@xxxxxxxxx>
---
 xen/arch/x86/hvm/svm/nestedsvm.c        |    2 +-
 xen/arch/x86/hvm/vmx/vvmx.c             |    4 +-
 xen/arch/x86/mm/hap/nested_ept.c        |    5 ++-
 xen/arch/x86/mm/hap/nested_hap.c        |   39 +++++++++++++++++++++++-------
 xen/include/asm-x86/hvm/hvm.h           |    2 +-
 xen/include/asm-x86/hvm/svm/nestedsvm.h |    2 +-
 xen/include/asm-x86/hvm/vmx/vvmx.h      |    6 ++--
 7 files changed, 41 insertions(+), 19 deletions(-)
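
The short standalone C sketch below (illustrative only, not part of the patch)
shows the idea behind the minimal-permission combination used in the
nested_hap.c hunk: the special L0 access types are first folded back to plain
r/w/x, then the L1->L0 and L2->L1 rights are intersected bit-wise, which is
what the later p2ma_10 &= p2ma_21 relies on. The enum values and the helper
name nested_min_access() are assumptions made for this example, not the actual
Xen definitions.

/*
 * Illustrative sketch only, not part of the patch.  It models how the
 * minimal permission for the L2->L0 (nested) p2m is derived, assuming the
 * usual encoding where the low three bits of a plain access value are
 * r (bit 0), w (bit 1) and x (bit 2); the enum below and the helper
 * nested_min_access() are assumptions made for this example.
 */
#include <stdio.h>
#include <stdint.h>

typedef enum {
    p2m_access_n   = 0, /* --- */
    p2m_access_r   = 1, /* r-- */
    p2m_access_w   = 2, /* -w- */
    p2m_access_rw  = 3, /* rw- */
    p2m_access_x   = 4, /* --x */
    p2m_access_rx  = 5, /* r-x */
    p2m_access_wx  = 6, /* -wx */
    p2m_access_rwx = 7, /* rwx */
    p2m_access_rx2rw,   /* special: r-x until the first write */
    p2m_access_n2rwx,   /* special: --- until the first fault */
} p2m_access_t;

/* p2ma_10: L1->L0 access from the host p2m walk (may be a special type).
 * p2ma_21: L2->L1 rights from the guest EPT walk, already plain r/w/x. */
static p2m_access_t nested_min_access(p2m_access_t p2ma_10, uint8_t p2ma_21)
{
    /* Fold the special host types back to their current plain rights. */
    if ( p2ma_10 == p2m_access_rx2rw )
        p2ma_10 = p2m_access_rx;
    else if ( p2ma_10 > p2m_access_rwx )
        p2ma_10 = p2m_access_n;   /* n2rwx or unknown: no rights for now */

    /* Minimal permission = bit-wise intersection of the two rights sets. */
    return (p2m_access_t)(p2ma_10 & p2ma_21);
}

int main(void)
{
    /* Host allows rw-, guest EPT allows r-x => nested p2m entry gets r--. */
    printf("combined access = %d\n",
           nested_min_access(p2m_access_rw, p2m_access_rx));
    return 0;
}

With the intersected access in hand, the nested_hap.c hunk can pass it to
nestedhap_fix_p2m() and drop the old p2m_access_rwx FIXME.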

diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index c1c6fa7..b8a93f4 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1177,7 +1177,7 @@ nsvm_vmcb_hap_enabled(struct vcpu *v)
  */
 int
 nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
+                      unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x)
 {
     uint32_t pfec;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 7b48436..f1f6af2 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1528,7 +1528,7 @@ int nvmx_msr_write_intercept(unsigned int msr, u64 msr_content)
  */
 int
 nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
+                      unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x)
 {
     int rc;
@@ -1538,7 +1538,7 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
     uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
-    rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn,
+    rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc,
                                 &exit_qual, &exit_reason);
     switch ( rc )
     {
diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
index 1463d81..4393065 100644
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -224,8 +224,8 @@ out:
 
 int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
                         unsigned int *page_order, uint32_t rwx_acc,
-                        unsigned long *l1gfn, uint64_t *exit_qual,
-                        uint32_t *exit_reason)
+                        unsigned long *l1gfn, uint8_t *p2m_acc,
+                        uint64_t *exit_qual, uint32_t *exit_reason)
 {
     uint32_t rc, rwx_bits = 0;
     ept_walk_t gw;
@@ -262,6 +262,7 @@ int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
         if ( nept_permission_check(rwx_acc, rwx_bits) )
         {
             *l1gfn = gw.lxe[0].mfn;
+            *p2m_acc = (uint8_t)rwx_bits;
             break;
         }
         rc = EPT_TRANSLATE_VIOLATION;
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 6d1264b..7722a2a 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -142,12 +142,12 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
  */
 static int
 nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
+                      unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x)
 {
     ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m);
 
-    return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order,
+    return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order, p2m_acc,
         access_r, access_w, access_x);
 }
 
@@ -158,16 +158,15 @@ nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
  */
 static int
 nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
-                      p2m_type_t *p2mt,
+                      p2m_type_t *p2mt, p2m_access_t *p2ma,
                       unsigned int *page_order,
                       bool_t access_r, bool_t access_w, bool_t access_x)
 {
     mfn_t mfn;
-    p2m_access_t p2ma;
     int rc;
 
     /* walk L0 P2M table */
-    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, &p2ma, 
+    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, p2ma, 
                               0, page_order);
 
     rc = NESTEDHVM_PAGEFAULT_MMIO;
@@ -206,12 +205,14 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     struct p2m_domain *p2m, *nested_p2m;
     unsigned int page_order_21, page_order_10, page_order_20;
     p2m_type_t p2mt_10;
+    p2m_access_t p2ma_10 = p2m_access_rwx;
+    uint8_t p2ma_21 = p2m_access_rwx;
 
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21,
+    rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
         access_r, access_w, access_x);
 
     /* let caller to handle these two cases */
@@ -229,7 +230,7 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
 
     /* ==> we have to walk L0 P2M */
     rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa,
-        &p2mt_10, &page_order_10,
+        &p2mt_10, &p2ma_10, &page_order_10,
         access_r, access_w, access_x);
 
     /* let upper level caller to handle these two cases */
@@ -250,10 +251,30 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
 
     page_order_20 = min(page_order_21, page_order_10);
 
+    ASSERT(p2ma_10 <= p2m_access_n2rwx);
+    /* NOTE: if this assert fails, a new access type needs handling here. */
+
+    switch ( p2ma_10 )
+    {
+    case p2m_access_n ... p2m_access_rwx:
+        break;
+    case p2m_access_rx2rw:
+        p2ma_10 = p2m_access_rx;
+        break;
+    case p2m_access_n2rwx:
+        p2ma_10 = p2m_access_n;
+        break;
+    default:
+        gdprintk(XENLOG_ERR, "Unhandled p2m access type: %d\n", p2ma_10);
+        /* For safety, remove all permissions. */
+        p2ma_10 = p2m_access_n;
+    }
+    /* Use minimal permission for nested p2m. */
+    p2ma_10 &= (p2m_access_t)p2ma_21;
+
     /* fix p2m_get_pagetable(nested_p2m) */
     nestedhap_fix_p2m(v, nested_p2m, *L2_gpa, L0_gpa, page_order_20,
-        p2mt_10,
-        p2m_access_rwx /* FIXME: Should use minimum permission. */);
+        p2mt_10, p2ma_10);
 
     return NESTEDHVM_PAGEFAULT_DONE;
 }
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 80f07e9..889e3c9 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -186,7 +186,7 @@ struct hvm_function_table {
 
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
+                      unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x);
 };
 
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index 0c90f30..748cc04 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -134,7 +134,7 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
 void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
 bool_t nestedsvm_gif_isset(struct vcpu *v);
 int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
+                      unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x);
 
 #define NSVM_INTR_NOTHANDLED     3
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index e3d1a22..1da0e77 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -123,7 +123,7 @@ int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
 
 int
 nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order,
+                      unsigned int *page_order, uint8_t *p2m_acc,
                       bool_t access_r, bool_t access_w, bool_t access_x);
 /*
  * Virtual VMCS layout
@@ -206,7 +206,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
 
 int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga, 
                         unsigned int *page_order, uint32_t rwx_acc,
-                        unsigned long *l1gfn, uint64_t *exit_qual,
-                        uint32_t *exit_reason);
+                        unsigned long *l1gfn, uint8_t *p2m_acc,
+                        uint64_t *exit_qual, uint32_t *exit_reason);
 #endif /* __ASM_X86_HVM_VVMX_H__ */
 
-- 
1.7.1

