
[Xen-devel] [PATCH RFC 03/14] xen: vmx: Introduce the SPPTP and SPP page table.



From: Zhang Yi Z <yi.z.zhang@xxxxxxxxxxxxxxx>

The SPPT has a 4-level paging structure similar to EPT, except for
its L1 entries.
The sub-page permission table is referenced via a 64-bit control
field called the Sub-Page Permission Table Pointer (SPPTP), which
contains a 4KB-aligned physical address. The encoding of this new
VMCS field is defined as 0x2030 at this time.

The format of the SPPTP is shown in the table below:

|--------------------------------------------------------------|
| Bit    | Contents                                            |
|--------------------------------------------------------------|
| 11:0   | Reserved (0)                                        |
| N-1:12 | Physical address of 4KB-aligned SPPT L4E table      |
| 51:N   | Reserved (0)                                        |
| 63:52  | Reserved (0)                                        |
|--------------------------------------------------------------|

Note: N is the physical address width supported by the processor.
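
For illustration only (not part of this patch), here is a minimal C
sketch of how an SPPTP value could be composed from the MFN of the
4KB-aligned SPPT root table, following the layout above.  The helper
name make_spptp() and its argument are made up for this example, and
masking of the reserved bits above the physical address width is
omitted:

    #include <stdint.h>

    /*
     * Hypothetical helper: compose an SPPTP value from the MFN of the
     * 4KB-aligned SPPT L4E table.  Bits 11:0 remain zero (reserved);
     * bits N-1:12 carry the physical address of the table.
     */
    static inline uint64_t make_spptp(uint64_t sppt_mfn)
    {
        return sppt_mfn << 12;    /* i.e. sppt_mfn << PAGE_SHIFT */
    }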

This patch introduces the SPP paging structures: the root page is
allocated in p2m_alloc_table() and freed in p2m_teardown().
As with the EPT page table, we initialize the SPPT and write the
SPPT pointer into the new VMCS field.

Signed-off-by: Zhang Yi Z <yi.z.zhang@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        |  6 ++++++
 xen/arch/x86/mm/p2m.c              | 12 +++++++++++-
 xen/include/asm-x86/hvm/vmx/vmcs.h | 11 +++++++++++
 xen/include/asm-x86/p2m.h          |  8 +++++++-
 4 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index bee5d74..e2a1f1f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1273,6 +1273,12 @@ static int construct_vmcs(struct vcpu *v)
 
         ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
         __vmwrite(EPT_POINTER, ept->eptp);
+
+        if ( cpu_has_vmx_ept_spp ) {
+            struct spp_data *spp = &p2m->spptp;
+            spp->mfn = pagetable_get_pfn(p2m_get_spp_pagetable(p2m));
+            __vmwrite(SPPT_POINT, spp->sppt_point);
+        }
     }
 
     if ( paging_mode_hap(d) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e8a57d1..3d618e9 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -609,7 +609,7 @@ void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
  */
 int p2m_alloc_table(struct p2m_domain *p2m)
 {
-    struct page_info *p2m_top;
+    struct page_info *p2m_top, *p2m_spp;
     struct domain *d = p2m->domain;
     int rc = 0;
 
@@ -639,8 +639,17 @@ int p2m_alloc_table(struct p2m_domain *p2m)
         return -ENOMEM;
     }
 
+    p2m_spp = p2m_alloc_ptp(p2m, PGT_l4_page_table);
+    if ( p2m_spp == NULL )
+    {
+        p2m_unlock(p2m);
+        return -ENOMEM;
+    }
+
     p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
 
+    p2m->spp_phys_table = pagetable_from_mfn(page_to_mfn(p2m_spp));
+
     if ( hap_enabled(d) )
         iommu_share_p2m_table(d);
 
@@ -678,6 +687,7 @@ void p2m_teardown(struct p2m_domain *p2m)
     p2m_lock(p2m);
     ASSERT(atomic_read(&d->shr_pages) == 0);
     p2m->phys_table = pagetable_null();
+    p2m->spp_phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         d->arch.paging.free_page(d, pg);
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 139f590..4843bc4 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -56,6 +56,16 @@ struct ept_data {
     cpumask_var_t invalidate;
 };
 
+struct spp_data {
+    union {
+        struct {
+            u64 reserved:12;
+            u64 mfn:52;
+        };
+        u64 sppt_point;
+    };
+};
+
 #define _VMX_DOMAIN_PML_ENABLED    0
 #define VMX_DOMAIN_PML_ENABLED     (1ul << _VMX_DOMAIN_PML_ENABLED)
 struct vmx_domain {
@@ -391,6 +401,7 @@ enum vmcs_field {
     VMWRITE_BITMAP                  = 0x00002028,
     VIRT_EXCEPTION_INFO             = 0x0000202a,
     XSS_EXIT_BITMAP                 = 0x0000202c,
+    SPPT_POINT                      = 0x00002030,
     TSC_MULTIPLIER                  = 0x00002032,
     GUEST_PHYSICAL_ADDRESS          = 0x00002400,
     VMCS_LINK_POINTER               = 0x00002800,
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6395e8f..0561643 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -193,6 +193,8 @@ struct p2m_domain {
     /* Shadow translated domain: p2m mapping */
     pagetable_t        phys_table;
 
+    pagetable_t        spp_phys_table;
+
     /* Same as domain_dirty_cpumask but limited to
      * this p2m and those physical cpus whose vcpu's are in
      * guestmode.
@@ -339,6 +341,9 @@ struct p2m_domain {
         struct ept_data ept;
         /* NPT-equivalent structure could be added here. */
     };
+    union {
+        struct spp_data spptp;
+    };
 
      struct {
          spinlock_t lock;
@@ -385,7 +390,8 @@ static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m)
     return p2m->p2m_class == p2m_alternate;
 }
 
-#define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
+#define p2m_get_pagetable(p2m)      ((p2m)->phys_table)
+#define p2m_get_spp_pagetable(p2m)  ((p2m)->spp_phys_table)
 
 /*
  * Ensure any deferred p2m TLB flush has been completed on all VCPUs.
-- 
2.7.4

