[Xen-devel] [PATCH v2 1/3] xen: delay page scrubbing to allocation path



Because of page scrubbing, destroying a domain with a large amount of memory is
very slow: it takes around 10 minutes to destroy a guest with nearly 1 TB of memory.

This patch introduces a "PGC_need_scrub" flag; pages carrying this flag must be
scrubbed before they are used again. During domain destruction, pages are merely
marked "PGC_need_scrub" and returned to the free list, so that xl can return
quickly; the actual scrubbing is deferred to the allocation path.

Note: PGC_need_scrub pages and normal (clean) pages are not mergeable; a chunk is
only merged with buddies in the same scrub state.
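
To make the intent easier to review, here is a minimal standalone sketch of the
free-path idea. It is not the actual Xen code: struct fake_page, PGC_NEED_SCRUB,
fake_free_pages() and fake_can_merge() are illustrative stand-ins for page_info,
PGC_need_scrub, free_heap_pages() and the buddy-merge checks in the patch.

#include <stdbool.h>
#include <stdio.h>

#define PGC_NEED_SCRUB  (1UL << 0)           /* stand-in for PGC_need_scrub */

struct fake_page {
    unsigned long count_info;
};

/* Free 2^order pages; when need_scrub is set, only flag them (no memset). */
static void fake_free_pages(struct fake_page *pg, unsigned int order,
                            bool need_scrub)
{
    unsigned int i;

    if ( need_scrub )
        for ( i = 0; i < (1u << order); i++ )
            pg[i].count_info |= PGC_NEED_SCRUB;

    /* Buddy merging elided: see fake_can_merge() below. */
}

/* A dirty chunk may only merge with a dirty buddy, a clean one with a clean buddy. */
static bool fake_can_merge(const struct fake_page *a, const struct fake_page *b)
{
    return (a->count_info & PGC_NEED_SCRUB) == (b->count_info & PGC_NEED_SCRUB);
}

int main(void)
{
    struct fake_page pool[4] = { { 0 } };
    struct fake_page clean = { 0 };

    fake_free_pages(pool, 2, true);          /* dying domain: flag only, return fast */
    printf("merge dirty/clean: %d\n", fake_can_merge(&pool[0], &clean));   /* 0 */
    printf("merge dirty/dirty: %d\n", fake_can_merge(&pool[0], &pool[1])); /* 1 */
    return 0;
}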

v2:
 * Fix issue: avoid scrubbing a whole 4 TB chunk when such a chunk is found for a
   request of a single page (see the allocation-path sketch after this list).
 * Replace more scrub_one_page() call sites with setting "need_scrub".
 * No longer use an extra _scrub[] array.
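
The first v2 item is easiest to see on the allocation side: only the 2^order
pages actually handed out are scrubbed, so an order-0 request satisfied from a
huge dirty chunk scrubs one page, not the whole chunk. Again a simplified,
self-contained sketch, not the real alloc_heap_pages()/scrub_one_page();
fake_alloc_pages() and fake_scrub_one_page() are illustrative names only.

#include <stdio.h>
#include <string.h>

#define FAKE_PAGE_SIZE  4096
#define PGC_NEED_SCRUB  (1UL << 0)           /* stand-in for PGC_need_scrub */

struct fake_page {
    unsigned long count_info;
    unsigned char data[FAKE_PAGE_SIZE];      /* pretend page contents */
};

static void fake_scrub_one_page(struct fake_page *pg)
{
    memset(pg->data, 0, sizeof(pg->data));
}

/* Hand out 2^order pages; scrub only the ones actually being allocated. */
static void fake_alloc_pages(struct fake_page *pg, unsigned int order)
{
    unsigned int i;

    for ( i = 0; i < (1u << order); i++ )
    {
        if ( pg[i].count_info & PGC_NEED_SCRUB )
        {
            fake_scrub_one_page(&pg[i]);
            pg[i].count_info &= ~PGC_NEED_SCRUB;
        }
    }
}

int main(void)
{
    static struct fake_page chunk[8];        /* imagine this chunk were 4 TB */
    unsigned int i;

    for ( i = 0; i < 8; i++ )
        chunk[i].count_info = PGC_NEED_SCRUB;

    fake_alloc_pages(chunk, 0);              /* order-0 request: scrub 1 page */
    printf("page 0 still dirty: %lu\n", chunk[0].count_info & PGC_NEED_SCRUB);
    printf("page 1 still dirty: %lu\n", chunk[1].count_info & PGC_NEED_SCRUB);
    return 0;
}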

Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
---
 xen/common/page_alloc.c  |   63 ++++++++++++++++++++++++++++++++++------------
 xen/include/asm-arm/mm.h |    5 +++-
 xen/include/asm-x86/mm.h |    5 +++-
 3 files changed, 55 insertions(+), 18 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 58677d0..c184f86 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -711,6 +711,12 @@ static struct page_info *alloc_heap_pages(
 
     for ( i = 0; i < (1 << order); i++ )
     {
+        if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
+        {
+            scrub_one_page(&pg[i]);
+            pg[i].count_info &= ~PGC_need_scrub;
+        }
+
         /* Reference count must continuously be zero for free pages. */
         BUG_ON(pg[i].count_info != PGC_state_free);
         pg[i].count_info = PGC_state_inuse;
@@ -827,7 +833,7 @@ static int reserve_offlined_page(struct page_info *head)
 
 /* Free 2^@order set of pages. */
 static void free_heap_pages(
-    struct page_info *pg, unsigned int order)
+    struct page_info *pg, unsigned int order, bool_t need_scrub)
 {
     unsigned long mask, mfn = page_to_mfn(pg);
     unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
@@ -876,6 +882,15 @@ static void free_heap_pages(
         midsize_alloc_zone_pages = max(
             midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
 
+    if ( need_scrub )
+    {
+        if ( !tainted )
+        {
+            for ( i = 0; i < (1 << order); i++ )
+                pg[i].count_info |= PGC_need_scrub;
+        }
+    }
+
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
     {
@@ -889,6 +904,17 @@ static void free_heap_pages(
                  (PFN_ORDER(pg-mask) != order) ||
                  (phys_to_nid(page_to_maddr(pg-mask)) != node) )
                 break;
+            /* If we need scrub, only merge with PGC_need_scrub pages */
+            if ( need_scrub )
+            {
+                if ( !test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
+                    break;
+            }
+            else
+            {
+                if ( test_bit(_PGC_need_scrub, &(pg-mask)->count_info) )
+                    break;
+            }
             pg -= mask;
             page_list_del(pg, &heap(node, zone, order));
         }
@@ -900,6 +926,16 @@ static void free_heap_pages(
                  (PFN_ORDER(pg+mask) != order) ||
                  (phys_to_nid(page_to_maddr(pg+mask)) != node) )
                 break;
+            if ( need_scrub )
+            {
+                if ( !test_bit(_PGC_need_scrub, &(pg+mask)->count_info) )
+                    break;
+            }
+            else
+            {
+                if ( test_bit(_PGC_need_scrub, &(pg+mask)->count_info) )
+                    break;
+            }
             page_list_del(pg + mask, &heap(node, zone, order));
         }
 
@@ -1132,7 +1168,7 @@ unsigned int online_page(unsigned long mfn, uint32_t *status)
     spin_unlock(&heap_lock);
 
     if ( (y & PGC_state) == PGC_state_offlined )
-        free_heap_pages(pg, 0);
+        free_heap_pages(pg, 0, 0);
 
     return ret;
 }
@@ -1201,7 +1237,7 @@ static void init_heap_pages(
             nr_pages -= n;
         }
 
-        free_heap_pages(pg+i, 0);
+        free_heap_pages(pg+i, 0, 0);
     }
 }
 
@@ -1535,7 +1571,7 @@ void free_xenheap_pages(void *v, unsigned int order)
 
     memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
 
-    free_heap_pages(virt_to_page(v), order);
+    free_heap_pages(virt_to_page(v), order, 1);
 }
 
 #else
@@ -1588,11 +1624,10 @@ void free_xenheap_pages(void *v, unsigned int order)
 
     for ( i = 0; i < (1u << order); i++ )
     {
-        scrub_one_page(&pg[i]);
         pg[i].count_info &= ~PGC_xen_heap;
     }
 
-    free_heap_pages(pg, order);
+    free_heap_pages(pg, order, 1);
 }
 
 #endif
@@ -1696,7 +1731,7 @@ struct page_info *alloc_domheap_pages(
 
     if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
     {
-        free_heap_pages(pg, order);
+        free_heap_pages(pg, order, 0);
         return NULL;
     }
     
@@ -1745,24 +1780,20 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
          * domain has died we assume responsibility for erasure.
          */
         if ( unlikely(d->is_dying) )
-            for ( i = 0; i < (1 << order); i++ )
-                scrub_one_page(&pg[i]);
-
-        free_heap_pages(pg, order);
+            free_heap_pages(pg, order, 1);
+        else
+            free_heap_pages(pg, order, 0);
     }
     else if ( unlikely(d == dom_cow) )
     {
         ASSERT(order == 0); 
-        scrub_one_page(pg);
-        free_heap_pages(pg, 0);
+        free_heap_pages(pg, 0, 1);
         drop_dom_ref = 0;
     }
     else
     {
         /* Freeing anonymous domain-heap pages. */
-        for ( i = 0; i < (1 << order); i++ )
-            scrub_one_page(&pg[i]);
-        free_heap_pages(pg, order);
+        free_heap_pages(pg, order, 1);
         drop_dom_ref = 0;
     }
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 2552d34..e8913a8 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -103,9 +103,12 @@ struct page_info
 #define PGC_state_offlined PG_mask(2, 9)
 #define PGC_state_free    PG_mask(3, 9)
 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+/* Page need to be scrubbed */
+#define _PGC_need_scrub   PG_shift(10)
+#define PGC_need_scrub    PG_mask(1, 10)
 
 /* Count of references to this frame. */
-#define PGC_count_width   PG_shift(9)
+#define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
 extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index d253117..35746ab 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -223,9 +223,12 @@ struct page_info
 #define PGC_state_offlined PG_mask(2, 9)
 #define PGC_state_free    PG_mask(3, 9)
 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+/* Page need to be scrubbed */
+#define _PGC_need_scrub   PG_shift(10)
+#define PGC_need_scrub    PG_mask(1, 10)
 
  /* Count of references to this frame. */
-#define PGC_count_width   PG_shift(9)
+#define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
 struct spage_info
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

