[Xen-devel] [PATCH v5] altp2m: Allow shared entries to be copied to altp2m views during lazycopy

Move the sharing locks above altp2m in mm-locks.h to avoid a locking order
violation, which crashes the hypervisor during unsharing operations when
altp2m is active. The enforced lock order follows the order in which the
locks are declared in mm-locks.h, so this part of the fix is purely a
reordering of declarations (a minimal sketch of the enforcement mechanism
follows the changelog below).

Applying mem_access settings or remapping gfns in altp2m views will
automatically unshare the page if it was previously shared. Also,
disallow nominating pages for sharing when pre-existing altp2m
mem_access settings or remappings are present for them. However, allow
lazycopy to populate altp2m views with shared entries, as unsharing
automatically propagates the change to those entries in the altp2m
views as well (a usage sketch of the resulting behaviour follows the
diffstat below).

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

v5: Allow only lazycopy to copy shared entries to altp2m views
    Use get_gfn_type_access for unsharing, as get_entry does not unshare
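
For context only, not part of the patch: mm-locks.h derives each lock's
ordering level from the line number of its declaration, so the enforced
locking order is simply the order of declarations in the file; that is
why moving the sharing-lock block above altp2m is sufficient. Below is a
minimal sketch of that enforcement idea. The names (cur_lock_level,
enforce_lock_level, restore_lock_level) are illustrative, not Xen's
identifiers, and the real code tracks the level per-CPU rather than
per-thread:

    #include <assert.h>

    /* Per-thread here for simplicity; Xen tracks the level per-CPU. */
    static __thread int cur_lock_level;

    /* "level" is the declaring line number in the header, so a lock
     * declared earlier in the file has a lower level and must be taken
     * first. Taking a lock with a level below the current one (outside
     * of recursive re-acquisition) is an ordering violation. */
    static void enforce_lock_level(int level)
    {
        assert(cur_lock_level <= level);
        cur_lock_level = level;
    }

    /* On release, the caller restores the level it saved beforehand. */
    static void restore_lock_level(int saved_level)
    {
        cur_lock_level = saved_level;
    }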
---
 xen/arch/x86/mm/mem_sharing.c | 25 ++++++++++++++++++++++++-
 xen/arch/x86/mm/mm-locks.h    | 30 +++++++++++++++---------------
 xen/arch/x86/mm/p2m.c         | 12 +++++++-----
 3 files changed, 46 insertions(+), 21 deletions(-)
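
For reviewers, a usage sketch (not part of the patch) of the behaviour
this version settles on, written against the existing libxc calls. It
assumes an HVM guest with altp2m enabled and memory sharing already
enabled on the domain (e.g. via xc_memshr_control); error handling is
elided:

    #include <xenctrl.h>

    void sketch(xc_interface *xch, uint32_t domid, xen_pfn_t gfn)
    {
        uint16_t view_id;
        uint64_t handle;

        xc_altp2m_set_domain_state(xch, domid, 1);
        xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view_id);

        /* Restricting access in an altp2m view unshares gfn first if
         * it was previously shared. */
        xc_altp2m_set_mem_access(xch, domid, view_id, gfn, XENMEM_access_r);

        /* Nominating gfn for sharing now fails: the altp2m view holds
         * a mem_access setting for it, so the hypervisor returns
         * -EINVAL. */
        if ( xc_memshr_nominate_gfn(xch, domid, gfn, &handle) < 0 )
            /* expected with this patch applied */;
    }

Shared entries are still copied into altp2m views by lazycopy on a
vmexit, which is safe because a later unshare updates every view that
holds the shared entry.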

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a522423..3939cd0 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -33,6 +33,7 @@
 #include <asm/page.h>
 #include <asm/string.h>
 #include <asm/p2m.h>
+#include <asm/altp2m.h>
 #include <asm/atomic.h>
 #include <asm/event.h>
 #include <xsm/xsm.h>
@@ -828,20 +829,42 @@ int mem_sharing_nominate_page(struct domain *d,
                               int expected_refcnt,
                               shr_handle_t *phandle)
 {
+    struct p2m_domain *hp2m = p2m_get_hostp2m(d);
     p2m_type_t p2mt;
+    p2m_access_t p2ma;
     mfn_t mfn;
     struct page_info *page = NULL; /* gcc... */
     int ret;
 
     *phandle = 0UL;
 
-    mfn = get_gfn(d, gfn, &p2mt);
+    mfn = get_gfn_type_access(hp2m, gfn, &p2mt, &p2ma, 0, NULL);
 
     /* Check if mfn is valid */
     ret = -EINVAL;
     if ( !mfn_valid(mfn) )
         goto out;
 
+    /* Check if there are mem_access/remapped altp2m entries for this page */
+    if ( altp2m_active(d) )
+    {
+        unsigned int i;
+        struct p2m_domain *ap2m;
+        mfn_t amfn;
+        p2m_access_t ap2ma;
+
+        for ( i = 0; i < MAX_ALTP2M; i++ )
+        {
+            ap2m = d->arch.altp2m_p2m[i];
+            if ( !ap2m )
+                continue;
+
+            amfn = get_gfn_type_access(ap2m, gfn, NULL, &ap2ma, 0, NULL);
+            if ( mfn_valid(amfn) && (mfn_x(amfn) != mfn_x(mfn) || ap2ma != p2ma) )
+                goto out;
+        }
+    }
+
     /* Return the handle if the page is already shared */
     if ( p2m_is_shared(p2mt) ) {
         struct page_info *pg = __grab_shared_page(mfn);
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..74fdfc1 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -242,6 +242,21 @@ declare_mm_lock(nestedp2m)
 
 declare_mm_rwlock(p2m);
 
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+        mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
+
 /* Alternate P2M list lock (per-domain)
  *
  * A per-domain lock that protects the list of alternate p2m's.
@@ -287,21 +302,6 @@ declare_mm_rwlock(altp2m);
 #define p2m_locked_by_me(p)   mm_write_locked_by_me(&(p)->lock)
 #define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
 
-/* Sharing per page lock
- *
- * This is an external lock, not represented by an mm_lock_t. The memory
- * sharing lock uses it to protect addition and removal of (gfn,domain)
- * tuples to a shared page. We enforce order here against the p2m lock,
- * which is taken after the page_lock to change the gfn's p2m entry.
- *
- * The lock is recursive because during share we lock two pages. */
-
-declare_mm_order_constraint(per_page_sharing)
-#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
-#define page_sharing_mm_post_lock(l, r) \
-        mm_enforce_order_lock_post_per_page_sharing((l), (r))
-#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
-
 /* PoD lock (per-p2m-table)
  * 
  * Protects private PoD data structs: entry and cache
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ff0cce8..812dbf6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1786,8 +1786,9 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
     /* Check host p2m if no valid entry in alternate */
     if ( !mfn_valid(mfn) )
     {
-        mfn = hp2m->get_entry(hp2m, gfn_l, &t, &old_a,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
+
+        mfn = get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
+                                  P2M_ALLOC | P2M_UNSHARE, &page_order);
 
         rc = -ESRCH;
         if ( !mfn_valid(mfn) || t != p2m_ram_rw )
@@ -2363,7 +2364,7 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
         return 0;
 
     mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order);
+                              P2M_ALLOC, &page_order);
     __put_gfn(hp2m, gfn_x(gfn));
 
     if ( mfn_eq(mfn, INVALID_MFN) )
@@ -2562,8 +2563,8 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     /* Check host p2m if no valid entry in alternate */
     if ( !mfn_valid(mfn) )
     {
-        mfn = hp2m->get_entry(hp2m, gfn_x(old_gfn), &t, &a,
-                              P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
+        mfn = get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a,
+                                  P2M_ALLOC | P2M_UNSHARE, &page_order);
 
         if ( !mfn_valid(mfn) || t != p2m_ram_rw )
             goto out;
@@ -2588,6 +2589,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     if ( !mfn_valid(mfn) )
         mfn = hp2m->get_entry(hp2m, gfn_x(new_gfn), &t, &a, 0, NULL, NULL);
 
+    /* Note: currently it is not safe to remap to a shared entry */
     if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
         goto out;
 
-- 
2.8.1