[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 13/15] tmem: refactor function tmem_ensure_avail_pages()



tmem_ensure_avail_pages() doesn't return a value, which is incorrect because
the caller needs to confirm whether there is enough memory.

Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
---
 xen/common/tmem.c          |   32 ++++++++++++++++++++------------
 xen/include/xen/tmem_xen.h |    6 ------
 2 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 6296869..5c96ef4 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1292,22 +1292,28 @@ static unsigned long tmem_relinquish_npages(unsigned 
long n)
     return avail_pages;
 }
 
-/* Under certain conditions (e.g. if each client is putting pages for exactly
+/*
+ * Under certain conditions (e.g. if each client is putting pages for exactly
  * one object), once locks are held, freeing up memory may
  * result in livelocks and very long "put" times, so we try to ensure there
  * is a minimum amount of memory (1MB) available BEFORE any data structure
- * locks are held */
-static inline void tmem_ensure_avail_pages(void)
+ * locks are held.
+ */
+static inline bool_t tmem_ensure_avail_pages(void)
 {
     int failed_evict = 10;
+    unsigned long free_mem;
 
-    while ( !tmem_free_mb() )
-    {
-        if ( tmem_evict() )
-            continue;
-        else if ( failed_evict-- <= 0 )
-            break;
-    }
+    do {
+        free_mem = (tmem_page_list_pages + total_free_pages())
+                        >> (20 - PAGE_SHIFT);
+        if ( free_mem )
+            return 1;
+        if ( !tmem_evict() )
+            failed_evict--;
+    } while ( failed_evict > 0 );
+
+    return 0;
 }
 
 /************ TMEM CORE OPERATIONS ************************************/
@@ -2351,9 +2357,11 @@ long do_tmem_op(tmem_cli_op_t uops)
                               op.u.creat.uuid[0], op.u.creat.uuid[1]);
         break;
     case TMEM_PUT_PAGE:
-        tmem_ensure_avail_pages();
-        rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
+        if (tmem_ensure_avail_pages())
+            rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
                         tmem_cli_buf_null);
+        else
+            rc = -ENOMEM;
         break;
     case TMEM_GET_PAGE:
         rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 6cd4370..65566f9 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -164,13 +164,7 @@ static inline void __tmem_free_page(struct page_info *pi)
     atomic_dec(&freeable_page_count);
 }
 
-static inline unsigned long tmem_free_mb(void)
-{
-    return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
-}
-
 /*  "Client" (==domain) abstraction */
-
 static inline struct client *tmem_client_from_cli_id(domid_t cli_id)
 {
     struct client *c;
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.