[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH] Optimise restore memory allocation



Try to allocate larger order pages.
With a test program stressing the TLB (many small random
memory accesses) you can get a 15% performance improvement.
On the first memory iteration the sender currently sends
memory in 4MB-aligned chunks, which allows the receiver to
allocate most pages as 2MB superpages instead of single 4KB pages.

Signed-off-by: Frediano Ziglio <frediano.ziglio@xxxxxxxxx>
---
 tools/libs/guest/xg_sr_restore.c | 39 ++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)

diff --git a/tools/libs/guest/xg_sr_restore.c b/tools/libs/guest/xg_sr_restore.c
index 06231ca826..8dcb1b19c5 100644
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -129,6 +129,8 @@ static int pfn_set_populated(struct xc_sr_context *ctx, 
xen_pfn_t pfn)
     return 0;
 }
 
+#define IS_POWER_OF_2(n) (((n) & ((n) - 1)) == 0)
+
 /*
  * Given a set of pfns, obtain memory from Xen to fill the physmap for the
  * unpopulated subset.  If types is NULL, no page type checking is performed
@@ -141,6 +143,7 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned int 
count,
     xen_pfn_t *mfns = malloc(count * sizeof(*mfns)),
         *pfns = malloc(count * sizeof(*pfns));
     unsigned int i, nr_pfns = 0;
+    bool contiguous = true;
     int rc = -1;
 
     if ( !mfns || !pfns )
@@ -159,18 +162,46 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned int 
count,
             if ( rc )
                 goto err;
             pfns[nr_pfns] = mfns[nr_pfns] = original_pfns[i];
+            if ( pfns[nr_pfns] != pfns[0] + nr_pfns )
+                contiguous = false;
             ++nr_pfns;
         }
     }
 
     if ( nr_pfns )
     {
-        rc = xc_domain_populate_physmap_exact(
-            xch, ctx->domid, nr_pfns, 0, 0, mfns);
+        /* try optimizing using larger order */
+        rc = -1;
+        /*
+         * The "nr_pfns <= (1 << 18)" check is mainly paranoia; it should
+         * never trigger, as the sender would have to send a really large
+         * packet.
+         */
+        if ( contiguous && nr_pfns <= (1 << 18) &&
+             IS_POWER_OF_2(nr_pfns) && (pfns[0] & (nr_pfns - 1)) == 0 )
+        {
+            const unsigned int extent_order = __builtin_ffs(nr_pfns) - 1;
+
+            rc = xc_domain_populate_physmap_exact(
+                xch, ctx->domid, 1, extent_order, 0, mfns);
+            if ( rc )
+                mfns[0] = pfns[0];
+            else
+            {
+                for ( i = 1; i < nr_pfns; ++i )
+                    mfns[i] = mfns[0] + i;
+            }
+        }
+
+        /* if using larger order fails fall back to single pages */
         if ( rc )
         {
-            PERROR("Failed to populate physmap");
-            goto err;
+            rc = xc_domain_populate_physmap_exact(
+                xch, ctx->domid, nr_pfns, 0, 0, mfns);
+            if ( rc )
+            {
+                PERROR("Failed to populate physmap");
+                goto err;
+            }
         }
 
         for ( i = 0; i < nr_pfns; ++i )
-- 
2.43.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.