[Xen-devel] [PATCH RFC 08/14] xen: vmx: Add SPP page structure setup
From: Zhang Yi Z <yi.z.zhang@xxxxxxxxxxxxxxx>
The hardware uses the guest-physical address, specifically bits 11:7 of
the address accessed, to look up the SPPT and fetch a write permission
bit for the 128-byte sub-page region being accessed within the 4K
guest-physical page. If the sub-page region's write permission bit is
set, the write is allowed; otherwise the write is disallowed and
results in an EPT violation.
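As an illustration only (not part of this patch), the check the
hardware performs amounts to the sketch below: the sub-page index is
bits 11:7 of the accessed address, and the write-permission bit for
sub-page i sits at bit 2*i of the 64-bit SPPT leaf entry (matching
format_spp_spte() in the diff below):

    /* Illustrative sketch, not in this patch: the hardware's check. */
    static bool subpage_write_allowed(u64 sppt_leaf, u64 gpa)
    {
        unsigned int idx = (gpa >> 7) & 0x1f;   /* bits 11:7 -> 0..31 */

        /* The write-permission bit for sub-page idx is at bit 2*idx. */
        return sppt_leaf & (1ULL << (idx * 2));
    }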
Guest-physical pages mapped via leaf EPT paging structures for which
the accumulated write-access bit and the SPP bit are both clear (0)
generate EPT violations on memory write accesses. Guest-physical pages
mapped via EPT paging structures for which the accumulated write-access
bit is set (1) allow writes, effectively ignoring the SPP bit on the
leaf EPT paging structure.
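In other words, the SPPT is consulted only when the EPT walk yields
W=0 with the SPP bit set. A sketch of the combined decision (again
purely illustrative, not part of this patch):

    /* Illustrative sketch of the write-permission combination above. */
    static bool write_permitted(bool ept_w, bool spp, bool subpage_w)
    {
        if ( ept_w )
            return true;     /* W=1: writes allowed, SPP bit ignored */
        if ( !spp )
            return false;    /* W=0, SPP=0: writes fault */
        return subpage_w;    /* W=0, SPP=1: consult the SPPT bitmap */
    }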
Software sets up SPP page table levels 4, 3 and 2 in the same way as
the EPT page structures, and fills each level-1 entry from the 32-bit
write-protect bitmap supplied for a single 4K page, dividing the page
into 32 sub-pages of 128 bytes each.
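Sub-page i thus covers bytes 128*i to 128*i+127 of the page. A caller
of p2m_set_spp_page_st() below could build the 32-bit map as in this
hypothetical helper (not part of this patch):

    /* Hypothetical: permit writes only to the 128-byte sub-page
     * containing byte 'offset' (0-4095) of the 4K page. */
    static uint32_t spp_bitmap_allow_one(unsigned int offset)
    {
        return 1u << (offset >> 7);   /* sub-page index = offset / 128 */
    }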
Signed-off-by: Zhang Yi Z <yi.z.zhang@xxxxxxxxxxxxxxx>
---
xen/arch/x86/mm/mem_access.c | 35 +++++++++++++++
xen/arch/x86/mm/p2m-ept.c | 94 +++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/hvm/vmx/vmx.h | 10 +++++
xen/include/asm-x86/p2m.h | 3 ++
4 files changed, 142 insertions(+)
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index a471c74..1b97469 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -490,6 +490,41 @@ unlock_exit:
return rc;
}
+static u64 format_spp_spte(u32 spp_wp_bitmap)
+{
+    u64 new_spte = 0;
+    unsigned int i;
+
+    /*
+     * One 4K page contains 32 sub-pages.  In an SPPT leaf entry the
+     * odd bits are reserved, so transfer the u32 sub-page write-protect
+     * bitmap into the u64 leaf-entry format, placing bitmap bit i at
+     * entry bit 2*i.
+     */
+    for ( i = 0; i < 32; i++ )
+    {
+        if ( spp_wp_bitmap & (1ULL << i) )
+            new_spte |= 1ULL << (i * 2);
+    }
+
+    return new_spte;
+}
+
+int p2m_set_spp_page_st(struct domain *d, gfn_t gfn, uint32_t access_map)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    u64 access = format_spp_spte(access_map);
+    unsigned long gfn_l = gfn_x(gfn);
+    int ret = -1;
+
+    p2m_lock(p2m);
+    if ( p2m->spp_set_entry )
+        ret = p2m->spp_set_entry(p2m, gfn_l, access);
+    p2m_unlock(p2m);
+
+    return ret;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index c249286..c9dc29c 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -38,6 +38,8 @@
#define is_epte_present(ept_entry) ((ept_entry)->epte & 0x7)
#define is_epte_superpage(ept_entry) ((ept_entry)->sp)
+#define is_sppt_present(spp_entry) ((spp_entry)->spp & 0x1)
+
static inline bool_t is_epte_valid(ept_entry_t *e)
{
/* suppress_ve alone is not considered valid, so mask it off */
@@ -253,6 +255,22 @@ static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
     return 1;
 }
+static int spp_set_middle_entry(struct p2m_domain *p2m, spp_entry_t *spp_entry)
+{
+    struct page_info *pg;
+
+    pg = p2m_alloc_ptp(p2m, 0);
+    if ( pg == NULL )
+        return 0;
+
+    spp_entry->spp = 0;
+    spp_entry->mfn = page_to_mfn(pg);
+
+    spp_entry->present = 1;
+
+    return 1;
+}
+
/* free ept sub tree behind an entry */
 static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int level)
{
@@ -323,6 +341,44 @@ static bool_t ept_split_super_page(struct p2m_domain *p2m,
return rv;
}
+static int spp_next_level(struct p2m_domain *p2m,
+                          spp_entry_t **table, unsigned long *gfn_remainder,
+                          int next_level)
+{
+    unsigned long mfn;
+    spp_entry_t *spp_entry, e;
+    u32 shift, index;
+
+    shift = next_level * EPT_TABLE_ORDER;
+
+    index = *gfn_remainder >> shift;
+
+    /* The index must fall within the page. */
+    ASSERT(index < EPT_PAGETABLE_ENTRIES);
+
+    spp_entry = (*table) + index;
+
+    /* Like ept_next_level(), this may be called without a lock.  Read
+     * the entry once, and act on the "cached" entry after that to
+     * avoid races. */
+    e.spp = read_atomic(&(spp_entry->spp));
+
+    if ( !is_sppt_present(&e) )
+    {
+        if ( !spp_set_middle_entry(p2m, spp_entry) )
+            return GUEST_TABLE_MAP_FAILED;
+        else
+            e.spp = read_atomic(&(spp_entry->spp)); /* Refresh */
+    }
+
+    mfn = e.mfn;
+    unmap_domain_page(*table);
+    *table = map_domain_page(_mfn(mfn));
+    *gfn_remainder &= (1UL << shift) - 1;
+    return GUEST_TABLE_NORMAL_PAGE;
+}
+
+
/* Take the currently mapped table, find the corresponding gfn entry,
* and map the next table, if available. If the entry is empty
* and read_only is set,
@@ -709,6 +765,43 @@ out:
return rc;
}
+static int
+spp_set_entry(struct p2m_domain *p2m, unsigned long gfn, u64 access)
+{
+    struct spp_data *spp = &p2m->spptp;
+    unsigned long gfn_remainder = gfn;
+    spp_entry_t *table;
+    u64 *pspp_bitmap;
+    u64 old_spp_bitmap;
+    unsigned int i;
+    int ret, rc = 0;
+
+    ASSERT(spp);
+    table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_spp_pagetable(p2m))));
+
+    for ( i = 3; i > 0; i-- )
+    {
+        ret = spp_next_level(p2m, &table, &gfn_remainder, i);
+        if ( ret != GUEST_TABLE_NORMAL_PAGE )
+        {
+            gdprintk(XENLOG_ERR, "SPPT walk failed, ret = %d\n", ret);
+            rc = -1;
+            goto out;
+        }
+    }
+
+    pspp_bitmap = (u64 *)(table + gfn_remainder);
+    old_spp_bitmap = read_atomic(pspp_bitmap);
+    if ( old_spp_bitmap != access )
+    {
+        write_atomic(pspp_bitmap, access);
+    }
+
+out:
+    unmap_domain_page(table);
+    return rc;
+}
+
/*
* ept_set_entry() computes 'need_modify_vtd_table' for itself,
* by observing whether any gfn->mfn translations are modified.
@@ -1309,6 +1402,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
     if ( cpu_has_vmx_ept_spp )
     {
         p2m->update_ept_spp_wp = ept_spp_update_wp;
+        p2m->spp_set_entry = spp_set_entry;
     }
     if ( !zalloc_cpumask_var(&ept->invalidate) )
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 18383b8..655ce80 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -52,6 +52,16 @@ typedef union {
u64 epte;
} ept_entry_t;
+typedef union {
+    struct {
+        u64 present   :  1,  /* bit 0      - SPP middle table is present */
+            reserved  : 11,  /* bits 1:11  - reserved */
+            mfn       : 40,  /* bits 12:51 - machine frame number */
+            reserved2 : 12;  /* bits 52:63 - reserved */
+    };
+    u64 spp;
+} spp_entry_t;
+
typedef struct {
/*use lxe[0] to save result */
ept_entry_t lxe[5];
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index adbc1c6..b94ebb2 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -268,6 +268,9 @@ struct p2m_domain {
     long (*audit_p2m)(struct p2m_domain *p2m);
     int (*update_ept_spp_wp)(struct p2m_domain *p2m,
                              unsigned long gfn);
+    int (*spp_set_entry)(struct p2m_domain *p2m,
+                         unsigned long gfn,
+                         u64 access);
/*
* P2M updates may require TLBs to be flushed (invalidated).
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel