[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH for-4.12 V3] x86/altp2m: fix HVMOP_altp2m_set_domain_state race



HVMOP_altp2m_set_domain_state does not domain_pause(), presumably
on purpose (as it was originally supposed to cater to an in-guest
agent, and a domain pausing itself is not a good idea).

This can lead to domain crashes in the vmx_vmexit_handler() code
that checks if the guest has the ability to switch EPTP without an
exit. That code can __vmread() the host p2m's EPT_POINTER
(before HVMOP_altp2m_set_domain_state "for_each_vcpu()" has a
chance to run altp2m_vcpu_initialise(), but after
d->arch.altp2m_active is set).

This patch reorganizes the code so that d->arch.altp2m_active
is set to true only after all the init work has been done, and
to false before the uninit work begins. This required adding
a new bool parameter to altp2m_vcpu_update_p2m(), which relied
on d->arch.altp2m_active being set before it was called.

p2m_get_altp2m() now returns NULL if !altp2m_active(), to
prevent it from returning a "valid" altp2m pointer between
the moment where d->arch.altp2m_active = false and the
point when v->p2midx actually becomes INVALID_ALTP2M.

While at it, I've changed a couple of bool_t's to bool's.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>

---
Changes since V2:
 - Removed leftover asm-x86/altp2m.h changes.
 - nstate = !!a.u.domain_state.state; becomes
   nstate = a.u.domain_state.state;
 - Removed the if() and else in do_altp2m_op() as
   recommended by Jan.
 - Using true explicitly instead of altp2m_active(d) for
   altp2m_vcpu_update_p2m() in p2m_switch_vcpu_altp2m_by_id()
   and p2m_switch_domain_altp2m_by_id().
 - Updated patch description.
 - Modified p2m_get_altp2m() to return NULL if !altp2m_active().
---
 xen/arch/x86/hvm/hvm.c        | 16 +++++++++++++---
 xen/arch/x86/hvm/vmx/vmx.c    |  4 ++--
 xen/arch/x86/mm/altp2m.c      |  4 ++--
 xen/arch/x86/mm/p2m.c         |  4 ++--
 xen/include/asm-x86/domain.h  |  2 +-
 xen/include/asm-x86/hvm/hvm.h |  6 +++---
 xen/include/asm-x86/p2m.h     |  8 +++++++-
 7 files changed, 30 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 21944e9..50d896d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4525,7 +4525,7 @@ static int do_altp2m_op(
     case HVMOP_altp2m_set_domain_state:
     {
         struct vcpu *v;
-        bool_t ostate;
+        bool ostate, nstate;
 
         if ( nestedhvm_enabled(d) )
         {
@@ -4534,12 +4534,15 @@ static int do_altp2m_op(
         }
 
         ostate = d->arch.altp2m_active;
-        d->arch.altp2m_active = !!a.u.domain_state.state;
+        nstate = a.u.domain_state.state;
 
         /* If the alternate p2m state has changed, handle appropriately */
-        if ( d->arch.altp2m_active != ostate &&
+        if ( nstate != ostate &&
              (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
         {
+            /* First mark altp2m as disabled, then altp2m_vcpu_destroy(). */
+            d->arch.altp2m_active = false;
+
             for_each_vcpu( d, v )
             {
                 if ( !ostate )
@@ -4550,7 +4553,14 @@ static int do_altp2m_op(
 
             if ( ostate )
                 p2m_flush_altp2m(d);
+
+            /*
+             * Wait until altp2m_vcpu_initialise() is done before marking
+             * altp2m as being enabled for the domain.
+             */
+            d->arch.altp2m_active = nstate;
         }
+
         break;
     }
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 64af8bf..e542568 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2150,13 +2150,13 @@ static bool_t vmx_is_singlestep_supported(void)
     return !!cpu_has_monitor_trap_flag;
 }
 
-static void vmx_vcpu_update_eptp(struct vcpu *v)
+static void vmx_vcpu_update_eptp(struct vcpu *v, bool altp2m_enabled)
 {
     struct domain *d = v->domain;
     struct p2m_domain *p2m = NULL;
     struct ept_data *ept;
 
-    if ( altp2m_active(d) )
+    if ( altp2m_enabled )
         p2m = p2m_get_altp2m(v);
     if ( !p2m )
         p2m = p2m_get_hostp2m(d);
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 930bdc2..c51303b 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -39,7 +39,7 @@ altp2m_vcpu_initialise(struct vcpu *v)
     vcpu_altp2m(v).p2midx = 0;
     atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
 
-    altp2m_vcpu_update_p2m(v);
+    altp2m_vcpu_update_p2m(v, true);
 
     if ( v != current )
         vcpu_unpause(v);
@@ -58,7 +58,7 @@ altp2m_vcpu_destroy(struct vcpu *v)
 
     altp2m_vcpu_reset(v);
 
-    altp2m_vcpu_update_p2m(v);
+    altp2m_vcpu_update_p2m(v, false);
     altp2m_vcpu_update_vmfunc_ve(v);
 
     if ( v != current )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index d14ce57..6f991f8 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2332,7 +2332,7 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, 
unsigned int idx)
             atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
             vcpu_altp2m(v).p2midx = idx;
             atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
-            altp2m_vcpu_update_p2m(v);
+            altp2m_vcpu_update_p2m(v, true);
         }
         rc = 1;
     }
@@ -2573,7 +2573,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, 
unsigned int idx)
                 atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
                 vcpu_altp2m(v).p2midx = idx;
                 atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
-                altp2m_vcpu_update_p2m(v);
+                altp2m_vcpu_update_p2m(v, true);
             }
 
         rc = 0;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 277f99f..a4e8f5a 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -337,7 +337,7 @@ struct arch_domain
     mm_lock_t nested_p2m_lock;
 
     /* altp2m: allow multiple copies of host p2m */
-    bool_t altp2m_active;
+    bool altp2m_active;
     struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
     mm_lock_t altp2m_list_lock;
     uint64_t *altp2m_eptp;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0a10b51..de0969b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -214,7 +214,7 @@ struct hvm_function_table {
     bool_t (*is_singlestep_supported)(void);
 
     /* Alternate p2m */
-    void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
+    void (*altp2m_vcpu_update_p2m)(struct vcpu *v, bool altp2m_enabled);
     void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
     bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);
@@ -639,10 +639,10 @@ static inline bool hvm_altp2m_supported(void)
 }
 
 /* updates the current hardware p2m */
-static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
+static inline void altp2m_vcpu_update_p2m(struct vcpu *v, bool altp2m_enabled)
 {
     if ( hvm_funcs.altp2m_vcpu_update_p2m )
-        hvm_funcs.altp2m_vcpu_update_p2m(v);
+        hvm_funcs.altp2m_vcpu_update_p2m(v, altp2m_enabled);
 }
 
 /* updates VMCS fields related to VMFUNC and #VE */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 2095076..a683d20 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -28,6 +28,7 @@
 
 #include <xen/paging.h>
 #include <xen/mem_access.h>
+#include <asm/altp2m.h>
 #include <asm/mem_sharing.h>
 #include <asm/page.h>    /* for pagetable_t */
 
@@ -847,7 +848,12 @@ void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, 
unsigned long gfn,
 /* get current alternate p2m table */
 static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
 {
-    unsigned int index = vcpu_altp2m(v).p2midx;
+    unsigned int index;
+
+    if ( !altp2m_active(v->domain) )
+        return NULL;
+
+    index = vcpu_altp2m(v).p2midx;
 
     if ( index == INVALID_ALTP2M )
         return NULL;
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.