
[Xen-devel] [PATCH v2 6/7] x86/hvm: Add SMAP support to HVM guest



Intel's new processors support SMAP (Supervisor Mode Access Prevention).
SMAP prevents supervisor-mode data accesses to any linear address with
a valid translation for which the U/S flag (bit 2) is 1 in every
paging-structure entry controlling the translation for that linear
address.
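
The access check this adds to guest_walk_tables() boils down to the
predicate sketched below.  This is an illustration only, not code from
the patch: the function and parameter names are hypothetical, and
X86_EFLAGS_AC is spelled out so the sketch stands alone.

    #include <stdbool.h>

    #define X86_EFLAGS_AC (1u << 18)  /* EFLAGS Alignment Check / Access Control bit */

    /* A supervisor-mode data access to a user-accessible page is a SMAP
     * violation when CR4.SMAP is set and either CPL == 3 (an implicit
     * supervisor access) or EFLAGS.AC is clear. */
    static bool is_smap_violation(bool cr4_smap, bool user_page,
                                  bool supervisor_access,
                                  unsigned int cpl, unsigned long eflags)
    {
        if ( !cr4_smap || !user_page || !supervisor_access )
            return false;
        return (cpl == 3) || !(eflags & X86_EFLAGS_AC);
    }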

Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        |  3 +++
 xen/arch/x86/hvm/vmx/vmx.c    |  3 +++
 xen/arch/x86/mm/guest_walk.c  | 37 +++++++++++++++++++++++++++----------
 xen/include/asm-x86/hvm/hvm.h | 12 ++++++++++++
 4 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b0da8e7..b52476d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3036,6 +3036,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         if ( (count == 0) && !cpu_has_smep )
             *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
 
+        if ( (count == 0) && !cpu_has_smap )
+            *ebx &= ~cpufeat_mask(X86_FEATURE_SMAP);
+
         /* Don't expose MPX to hvm when VMX support is not available */
         if ( (count == 0) &&
              (!(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 79dd272..b0f983e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2536,12 +2536,15 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
     unsigned int vector = 0;
     struct vcpu *v = current;
+    unsigned long sel;
 
     __vmread(GUEST_RIP,    &regs->rip);
     __vmread(GUEST_RSP,    &regs->rsp);
     __vmread(GUEST_RFLAGS, &regs->rflags);
+    __vmread(GUEST_CS_SELECTOR, &sel);
 
     hvm_invalidate_regs_fields(regs);
+    regs->cs = sel;
 
     if ( paging_mode_hap(v->domain) )
     {
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 70460b6..2c93bd3 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -144,7 +144,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     guest_l4e_t *l4p;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
-    int smep;
+    bool_t smep = 0, smap = 0;
     bool_t pse1G = 0, pse2M = 0;
     p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
 
@@ -159,13 +159,30 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
 
-    /* SMEP: kernel-mode instruction fetches from user-mode mappings
-     * should fault.  Unlike NX or invalid bits, we're looking for _all_
-     * entries in the walk to have _PAGE_USER set, so we need to do the
-     * whole walk as if it were a user-mode one and then invert the answer. */
-    smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v) 
-            && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
-    if ( smep )
+    if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
+    {
+        struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+        /* SMEP: kernel-mode instruction fetches from user-mode mappings
+         * should fault.  Unlike NX or invalid bits, we're looking for _all_
+         * entries in the walk to have _PAGE_USER set, so we need to do the
+         * whole walk as if it were a user-mode one and then invert the answer. */
+        smep = hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
+
+        /*
+         * SMAP: kernel-mode data accesses from user-mode mappings should
+         * fault.  A fault is treated as a SMAP violation if all of the
+         * following conditions are met:
+         *   - X86_CR4_SMAP is set in CR4
+         *   - A user page is accessed
+         *   - CPL = 3 or X86_EFLAGS_AC is clear
+         *   - Page fault in kernel mode
+         */
+        smap = hvm_smap_enabled(v) &&
+               !(!ring_3(regs) && (regs->eflags & X86_EFLAGS_AC));
+    }
+
+    if ( smep || smap )
         mflags |= _PAGE_USER;
 
 #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
@@ -338,8 +355,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
 set_ad:
 #endif
-    /* Now re-invert the user-mode requirement for SMEP. */
-    if ( smep ) 
+    /* Now re-invert the user-mode requirement for SMEP and SMAP */
+    if ( smep || smap )
         rc ^= _PAGE_USER;
 
     /* Go back and set accessed and dirty bits only if the walk was a
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 74a09ef..8dbb3f0 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -257,6 +257,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
 #define hvm_smep_enabled(v) \
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
+#define hvm_smap_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
@@ -360,6 +362,15 @@ static inline bool_t hvm_vcpu_has_smep(void)
     return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
 }
 
+static inline bool_t hvm_vcpu_has_smap(void)
+{
+    unsigned int ebx = 0, leaf = 0x7;
+
+    hvm_cpuid(leaf, NULL, &ebx, NULL, NULL);
+
+    return !!(ebx & cpufeat_mask(X86_FEATURE_SMAP));
+}
+
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
 #define HVM_CR0_GUEST_RESERVED_BITS             \
     (~((unsigned long)                          \
@@ -380,6 +391,7 @@ static inline bool_t hvm_vcpu_has_smep(void)
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
         (hvm_vcpu_has_smep() ? X86_CR4_SMEP : 0) |      \
+        (hvm_vcpu_has_smap() ? X86_CR4_SMAP : 0) |      \
         (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) |     \
         ((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
                       ? X86_CR4_VMXE : 0)  |             \
-- 
1.8.3.1

