|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 03/15] VMX: implement suppress #VE.
In preparation for selectively enabling #VE in a later patch, set
suppress #VE on all EPTE's.
Suppress #VE should always be the default condition for two reasons:
it is generally not safe to deliver #VE into a guest unless that guest
has been modified to receive it; and even then for most EPT violations only
the hypervisor is able to handle the violation.
Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
---
xen/arch/x86/mm/p2m-ept.c | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index a6c9adf..4111795 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -41,7 +41,8 @@
#define is_epte_superpage(ept_entry) ((ept_entry)->sp)
static inline bool_t is_epte_valid(ept_entry_t *e)
{
- return (e->epte != 0 && e->sa_p2mt != p2m_invalid);
+ /* suppress_ve alone is not considered valid, so mask it off */
+ return ((e->epte & ~(1ul << 63)) != 0 && e->sa_p2mt != p2m_invalid);
}
/* returns : 0 for success, -errno otherwise */
@@ -219,6 +220,8 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
{
struct page_info *pg;
+ ept_entry_t *table;
+ unsigned int i;
pg = p2m_alloc_ptp(p2m, 0);
if ( pg == NULL )
@@ -232,6 +235,15 @@ static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
/* Manually set A bit to avoid overhead of MMU having to write it later. */
ept_entry->a = 1;
+ ept_entry->suppress_ve = 1;
+
+ table = __map_domain_page(pg);
+
+ for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+ table[i].suppress_ve = 1;
+
+ unmap_domain_page(table);
+
return 1;
}
@@ -281,6 +293,7 @@ static int ept_split_super_page(struct p2m_domain *p2m, ept_entry_t *ept_entry,
epte->sp = (level > 1);
epte->mfn += i * trunk;
epte->snp = (iommu_enabled && iommu_snoop);
+ epte->suppress_ve = 1;
ept_p2m_type_to_flags(p2m, epte, epte->sa_p2mt, epte->access);
@@ -790,6 +803,8 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
ept_p2m_type_to_flags(p2m, &new_entry, p2mt, p2ma);
}
+ new_entry.suppress_ve = 1;
+
rc = atomic_write_ept_entry(ept_entry, new_entry, target);
if ( unlikely(rc) )
old_entry.epte = 0;
@@ -1111,6 +1126,8 @@ static void ept_flush_pml_buffers(struct p2m_domain *p2m)
int ept_p2m_init(struct p2m_domain *p2m)
{
struct ept_data *ept = &p2m->ept;
+ ept_entry_t *table;
+ unsigned int i;
p2m->set_entry = ept_set_entry;
p2m->get_entry = ept_get_entry;
@@ -1134,6 +1151,13 @@ int ept_p2m_init(struct p2m_domain *p2m)
p2m->flush_hardware_cached_dirty = ept_flush_pml_buffers;
}
+ table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+
+ for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+ table[i].suppress_ve = 1;
+
+ unmap_domain_page(table);
+
if ( !zalloc_cpumask_var(&ept->synced_mask) )
return -ENOMEM;
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |