
[Xen-devel] [PATCH 08 of 12] x86/mm: use RCU in mem sharing audit list, eliminate global lock completely

With sharing now protected by fine-grained per-page locks, the global
sharing lock survives only to guard the audit list of shared pages.
Replace it: writers to the list serialize on a private spinlock, the
audit walk traverses the list under RCU, and entries are freed via
call_rcu() once all concurrent readers are done. The audit walk
revalidates each entry by taking the per-page lock, which is now used
in both the audit and non-audit configurations, so the global lock can
go away entirely.



 xen/arch/x86/mm/mem_sharing.c     |  94 +++++++++++++++++---------------------
 xen/arch/x86/mm/mm-locks.h        |  18 -------
 xen/include/asm-x86/mem_sharing.h |   1 +
 3 files changed, 43 insertions(+), 70 deletions(-)


Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
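
For reference, here is the shape of the new discipline in isolation: a
minimal, self-contained userspace sketch written against liburcu (the
userspace RCU library) rather than Xen's RCU API. All names here
(audit_entry, audit_add, audit_del, audit_walk) are illustrative, not
Xen's. Writers serialize on a mutex standing in for shr_audit_lock, the
walker holds only rcu_read_lock(), and unlinked entries are handed to
call_rcu() so a concurrent walker never touches freed memory. Compile
with -lurcu.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <urcu.h>            /* default flavor: rcu_read_lock(), call_rcu() */
#include <urcu/rculist.h>    /* cds_list_add_rcu(), cds_list_for_each_entry_rcu() */
#include <urcu/compiler.h>   /* caa_container_of() */

struct audit_entry {
    struct cds_list_head list;
    struct rcu_head rcu_head;
    unsigned long mfn;
};

static CDS_LIST_HEAD(audit_list);
static pthread_mutex_t audit_lock = PTHREAD_MUTEX_INITIALIZER;

/* RCU callback: runs only after every reader that could still see the
 * unlinked entry has left its read-side critical section. */
static void free_entry(struct rcu_head *head)
{
    free(caa_container_of(head, struct audit_entry, rcu_head));
}

static void audit_add(struct audit_entry *e)
{
    pthread_mutex_lock(&audit_lock);        /* writers serialize... */
    cds_list_add_rcu(&e->list, &audit_list);
    pthread_mutex_unlock(&audit_lock);
}

static void audit_del(struct audit_entry *e)
{
    pthread_mutex_lock(&audit_lock);
    cds_list_del_rcu(&e->list);             /* unlink; readers may still hold it */
    pthread_mutex_unlock(&audit_lock);
    call_rcu(&e->rcu_head, free_entry);     /* free deferred past all readers */
}

static unsigned long audit_walk(void)
{
    struct audit_entry *e;
    unsigned long count = 0;

    rcu_read_lock();                        /* ...readers take no lock at all */
    cds_list_for_each_entry_rcu(e, &audit_list, list)
        count++;
    rcu_read_unlock();
    return count;
}

int main(void)
{
    struct audit_entry *e = calloc(1, sizeof(*e));

    rcu_register_thread();            /* each RCU-using thread must register */
    e->mfn = 0x1000;
    audit_add(e);
    printf("entries: %lu\n", audit_walk());
    audit_del(e);
    rcu_barrier();                    /* wait for pending call_rcu callbacks */
    rcu_unregister_thread();
    return 0;
}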

diff -r 98cc0713476f -r 288ac8cfea7f xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -32,6 +32,7 @@
 #include <asm/p2m.h>
 #include <asm/mem_event.h>
 #include <asm/atomic.h>
+#include <xen/rcupdate.h>
 
 #include "mm-locks.h"
 
@@ -47,52 +48,52 @@ typedef struct pg_lock_data {
 
 #if MEM_SHARING_AUDIT
 
-static mm_lock_t shr_lock;
-
-#define shr_lock()          _shr_lock()
-#define shr_unlock()        _shr_unlock()
-#define shr_locked_by_me()  _shr_locked_by_me()
-
 static int mem_sharing_audit(void);
 
 #define MEM_SHARING_DEBUG(_f, _a...)                                  \
     debugtrace_printk("mem_sharing_debug: %s(): " _f, __func__, ##_a)
 
 static struct list_head shr_audit_list;
+static spinlock_t shr_audit_lock;
+DEFINE_RCU_READ_LOCK(shr_audit_read_lock);
+
+/* RCU delayed free of audit list entry */
+static void _free_pg_shared_info(struct rcu_head *head)
+{
+    xfree(container_of(head, struct page_sharing_info, rcu_head));
+}
 
 static inline void audit_add_list(struct page_info *page)
 {
     INIT_LIST_HEAD(&page->shared_info->entry);
-    list_add(&page->shared_info->entry, &shr_audit_list);
+    spin_lock(&shr_audit_lock);
+    list_add_rcu(&page->shared_info->entry, &shr_audit_list);
+    spin_unlock(&shr_audit_lock);
 }
 
 static inline void audit_del_list(struct page_info *page)
 {
-    list_del(&page->shared_info->entry);
+    spin_lock(&shr_audit_lock);
+    list_del_rcu(&page->shared_info->entry);
+    spin_unlock(&shr_audit_lock);
+    INIT_RCU_HEAD(&page->shared_info->rcu_head);
+    call_rcu(&page->shared_info->rcu_head, _free_pg_shared_info);
 }
 
-static inline int mem_sharing_page_lock(struct page_info *p, 
-                                        pg_lock_data_t *l)
-{
-    return 1;
-}
-#define mem_sharing_page_unlock(p, l)   ((void)0)
-
-#define get_next_handle()   next_handle++;
 #else
 
-#define shr_lock()          ((void)0)
-#define shr_unlock()        ((void)0)
-/* Only used inside audit code */
-//#define shr_locked_by_me()  ((void)0)
-
 static inline int mem_sharing_audit(void)
 {
     return 0;
 }
 
 #define audit_add_list(p)  ((void)0)
-#define audit_del_list(p)  ((void)0)
+static inline void audit_del_list(struct page_info *page)
+{
+    xfree(page->shared_info);
+}
+
+#endif /* MEM_SHARING_AUDIT */
 
 static inline int mem_sharing_page_lock(struct page_info *pg,
                                         pg_lock_data_t *pld)
@@ -128,7 +129,6 @@ static inline shr_handle_t get_next_hand
     while ( (y = cmpxchg(&next_handle, x, x + 1)) != x );
     return x + 1;
 }
-#endif /* MEM_SHARING_AUDIT */
 
 #define mem_sharing_enabled(d) \
     (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
@@ -215,11 +215,13 @@ static int mem_sharing_audit(void)
     unsigned long count_expected;
     unsigned long count_found = 0;
     struct list_head *ae;
+    DECLARE_PG_LOCK_DATA(pld);
 
-    ASSERT(shr_locked_by_me());
     count_expected = atomic_read(&nr_shared_mfns);
 
-    list_for_each(ae, &shr_audit_list)
+    rcu_read_lock(&shr_audit_read_lock);
+
+    list_for_each_rcu(ae, &shr_audit_list)
     {
         struct page_sharing_info *shared_info;
         unsigned long nr_gfns = 0;
@@ -231,6 +233,15 @@ static int mem_sharing_audit(void)
         pg = shared_info->pg;
         mfn = page_to_mfn(pg);
 
+        /* If we can't lock it, it's definitely not a shared page */
+        if ( !mem_sharing_page_lock(pg, &pld) )
+        {
+           MEM_SHARING_DEBUG("mfn %lx in audit list, but cannot be locked (%lx)!\n",
+                              mfn_x(mfn), pg->u.inuse.type_info);
+           errors++;
+           continue;
+        }
+
         /* Check if the MFN has correct type, owner and handle. */ 
         if ( !(pg->u.inuse.type_info & PGT_shared_page) )
         {
@@ -311,8 +322,12 @@ static int mem_sharing_audit(void)
                               (pg->u.inuse.type_info & PGT_count_mask));
             errors++;
         }
+
+        mem_sharing_page_unlock(pg, &pld);
     }
 
+    rcu_read_unlock(&shr_audit_read_lock);
+
     if ( count_found != count_expected )
     {
         MEM_SHARING_DEBUG("Expected %ld shared mfns, found %ld.",
@@ -538,14 +553,10 @@ static int page_make_private(struct doma
     spin_lock(&d->page_alloc_lock);
 
     /* We can only change the type if count is one */
-    /* If we are locking pages individually, then we need to drop
+    /* Because we are locking pages individually, we need to drop
      * the lock here, while the page is typed. We cannot risk the 
      * race of page_unlock and then put_page_type. */
-#if MEM_SHARING_AUDIT
-    expected_type = (PGT_shared_page | PGT_validated | 1);
-#else
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
-#endif
     if ( page->u.inuse.type_info != expected_type )
     {
         put_page(page);
@@ -556,12 +567,8 @@ static int page_make_private(struct doma
     /* Drop the final typecount */
     put_page_and_type(page);
 
-#if MEM_SHARING_AUDIT
-    (void)pld;
-#else
     /* Now that we've dropped the type, we can unlock */
     mem_sharing_page_unlock(page, pld);
-#endif
 
     /* Change the owner */
     ASSERT(page_get_owner(page) == dom_cow);
@@ -613,7 +620,6 @@ int mem_sharing_nominate_page(struct dom
 
     *phandle = 0UL;
 
-    shr_lock(); 
     mfn = get_gfn(d, gfn, &p2mt);
 
     /* Check if mfn is valid */
@@ -705,7 +711,6 @@ int mem_sharing_nominate_page(struct dom
 
 out:
     put_gfn(d, gfn);
-    shr_unlock();
     return ret;
 }
 
@@ -721,8 +726,6 @@ int mem_sharing_share_pages(struct domai
     p2m_type_t smfn_type, cmfn_type;
     DECLARE_PG_LOCK_DATA(pld);
 
-    shr_lock();
-
     /* XXX if sd == cd handle potential deadlock by ordering
      * the get_ and put_gfn's */
     smfn = get_gfn(sd, sgfn, &smfn_type);
@@ -808,7 +811,6 @@ int mem_sharing_share_pages(struct domai
 
     /* Clear the rest of the shared state */
     audit_del_list(cpage);
-    xfree(cpage->shared_info);
     cpage->shared_info = NULL;
 
     mem_sharing_page_unlock(secondpg, &pld);
@@ -826,7 +828,6 @@ int mem_sharing_share_pages(struct domai
 err_out:
     put_gfn(cd, cgfn);
     put_gfn(sd, sgfn);
-    shr_unlock();
     return ret;
 }
 
@@ -911,17 +912,12 @@ int mem_sharing_unshare_page(struct doma
     struct list_head *le;
     DECLARE_PG_LOCK_DATA(pld);
    
-    /* This is one of the reasons why we can't enforce ordering
-     * between shr_lock and p2m fine-grained locks in mm-lock. 
-     * Callers may walk in here already holding the lock for this gfn */
-    shr_lock();
     mem_sharing_audit();
     mfn = get_gfn(d, gfn, &p2mt);
     
     /* Has someone already unshared it? */
     if ( !p2m_is_shared(p2mt) ) {
         put_gfn(d, gfn);
-        shr_unlock();
         return 0;
     }
 
@@ -952,7 +948,6 @@ gfn_found:
     {
         /* Clean up shared state */
         audit_del_list(page);
-        xfree(page->shared_info);
         page->shared_info = NULL;
         atomic_dec(&nr_shared_mfns);
     }
@@ -968,7 +963,6 @@ gfn_found:
             test_and_clear_bit(_PGC_allocated, &page->count_info) ) 
             put_page(page);
         put_gfn(d, gfn);
-        shr_unlock();
 
         return 0;
     }
@@ -987,7 +981,6 @@ gfn_found:
         mem_sharing_page_unlock(old_page, &pld);
         put_gfn(d, gfn);
         mem_sharing_notify_helper(d, gfn);
-        shr_unlock();
         return -ENOMEM;
     }
 
@@ -1018,7 +1011,6 @@ private_page_found:
     paging_mark_dirty(d, mfn_x(page_to_mfn(page)));
     /* We do not need to unlock a private page */
     put_gfn(d, gfn);
-    shr_unlock();
     return 0;
 }
 
@@ -1191,9 +1183,7 @@ int mem_sharing_domctl(struct domain *d,
             break;
     }
 
-    shr_lock();
     mem_sharing_audit();
-    shr_unlock();
 
     return rc;
 }
@@ -1202,7 +1192,7 @@ void __init mem_sharing_init(void)
 {
     printk("Initing memory sharing.\n");
 #if MEM_SHARING_AUDIT
-    mm_lock_init(&shr_lock);
+    spin_lock_init(&shr_audit_lock);
     INIT_LIST_HEAD(&shr_audit_list);
 #endif
 }
diff -r 98cc0713476f -r 288ac8cfea7f xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -143,19 +143,6 @@ static inline void mm_enforce_order_unlo
  *                                                                      *
  ************************************************************************/
 
-#if MEM_SHARING_AUDIT
-/* Page-sharing lock (global) 
- *
- * A single global lock that protects the memory-sharing code's
- * hash tables. */
-
-declare_mm_lock(shr)
-#define _shr_lock()         mm_lock(shr, &shr_lock)
-#define _shr_unlock()       mm_unlock(&shr_lock)
-#define _shr_locked_by_me() mm_locked_by_me(&shr_lock)
-
-#else
-
 /* Sharing per page lock
  *
  * This is an external lock, not represented by an mm_lock_t. The memory
@@ -163,9 +150,6 @@ declare_mm_lock(shr)
  * tuples to a shared page. We enforce order here against the p2m lock,
  * which is taken after the page_lock to change the gfn's p2m entry.
  *
- * Note that in sharing audit mode, we use the global page lock above, 
- * instead.
- *
  * The lock is recursive because during share we lock two pages. */
 
 declare_mm_order_constraint(per_page_sharing)
@@ -174,8 +158,6 @@ declare_mm_order_constraint(per_page_sha
         mm_enforce_order_lock_post_per_page_sharing((l), (r))
 #define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
 
-#endif /* MEM_SHARING_AUDIT */
-
 /* Nested P2M lock (per-domain)
  *
  * A per-domain lock that protects the mapping from nested-CR3 to 
diff -r 98cc0713476f -r 288ac8cfea7f xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -35,6 +35,7 @@ struct page_sharing_info
     shr_handle_t handle;    /* Globally unique version / handle. */
 #if MEM_SHARING_AUDIT
     struct list_head entry; /* List of all shared pages (entry). */
+    struct rcu_head rcu_head; /* For safe cleanup with delayed freeing. */
 #endif
     struct list_head gfns;  /* List of domains and gfns for this page (head). */
 };
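
A side note on handle generation: with the audit-only
"#define get_next_handle()   next_handle++" shortcut removed above, the
lock-free cmpxchg loop in get_next_handle() becomes the single
implementation in both configurations. In portable C11 terms that loop
amounts to an atomic fetch-and-add, roughly as follows (a standalone
illustration, not Xen's code):

#include <stdint.h>
#include <stdatomic.h>

static _Atomic uint64_t next_handle = 1;

/* Equivalent of the cmpxchg loop: atomically bump the global counter
 * and return the new value as the handle. */
static uint64_t get_next_handle(void)
{
    return atomic_fetch_add(&next_handle, 1) + 1;
}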

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel