
[PATCH v20210713 15/31] tools: restore: preallocate types array

Remove a repeated allocation from the migration loop. An incoming
batch never contains more than MAX_BATCH_SIZE pages, so allocate the
types array once in setup() and free it in cleanup(), instead of
allocating and freeing it for every incoming batch.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
---
 tools/libs/guest/xg_sr_common.h  |  1 +
 tools/libs/guest/xg_sr_restore.c | 22 +++++++---------------
 2 files changed, 8 insertions(+), 15 deletions(-)
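
Not part of the patch, just an illustration for review: a minimal,
self-contained sketch of the allocate-once pattern applied here. All
names in it (batch_ctx, MAX_BATCH, handle_batch) are invented for the
example; only the shape, allocate in setup(), reuse for every batch,
free in cleanup(), mirrors the change.

/* Illustrative sketch only; names are not from the Xen tree. */
#include <stdint.h>
#include <stdlib.h>

#define MAX_BATCH 1024           /* stand-in for MAX_BATCH_SIZE */

struct batch_ctx {
    uint64_t *pfns;              /* allocated once in setup() */
    uint32_t *types;             /* allocated once in setup() */
};

static int setup(struct batch_ctx *ctx)
{
    /* One allocation up front; no batch ever exceeds MAX_BATCH entries. */
    ctx->pfns = malloc(MAX_BATCH * sizeof(*ctx->pfns));
    ctx->types = malloc(MAX_BATCH * sizeof(*ctx->types));

    return ( !ctx->pfns || !ctx->types ) ? -1 : 0;
}

static void handle_batch(struct batch_ctx *ctx, unsigned int count)
{
    /* Hot path: reuse the preallocated buffers, no malloc()/free(). */
    for ( unsigned int i = 0; i < count; ++i )
    {
        ctx->pfns[i] = i;        /* stand-in for real pfn/type decoding */
        ctx->types[i] = 0;
    }
}

static void cleanup(struct batch_ctx *ctx)
{
    /* free(NULL) is a no-op, so this is safe even after a failed setup(). */
    free(ctx->types);
    free(ctx->pfns);
}

int main(void)
{
    struct batch_ctx ctx = { NULL, NULL };
    int rc = setup(&ctx);

    if ( !rc )
        for ( int batch = 0; batch < 3; ++batch )  /* many batches... */
            handle_batch(&ctx, MAX_BATCH);         /* ...one allocation */

    cleanup(&ctx);

    return rc ? 1 : 0;
}

Besides dropping the per-batch malloc()/free() pair, this also takes
the allocation-failure error path out of the hot loop.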

diff --git a/tools/libs/guest/xg_sr_common.h b/tools/libs/guest/xg_sr_common.h
index d32b4c46f3..bd2027ba5d 100644
--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -257,6 +257,7 @@ struct xc_sr_context
             struct xc_sr_restore_ops ops;
             struct restore_callbacks *callbacks;
             xen_pfn_t *pfns;
+            uint32_t *types;
 
             int send_back_fd;
             unsigned long p2m_size;
diff --git a/tools/libs/guest/xg_sr_restore.c b/tools/libs/guest/xg_sr_restore.c
index e812f65f99..bbb55b6844 100644
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -315,7 +315,7 @@ static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
     int rc = -1;
 
     xen_pfn_t pfn;
-    uint32_t *types = NULL, type;
+    uint32_t type;
 
     /*
      * v2 compatibility only exists for x86 streams.  This is a bit of a
@@ -362,14 +362,6 @@ static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
         goto err;
     }
 
-    types = malloc(pages->count * sizeof(*types));
-    if ( !types )
-    {
-        ERROR("Unable to allocate enough memory for %u pfns",
-              pages->count);
-        goto err;
-    }
-
     for ( i = 0; i < pages->count; ++i )
     {
         pfn = pages->pfn[i] & PAGE_DATA_PFN_MASK;
@@ -393,7 +385,7 @@ static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
             pages_of_data++;
 
         ctx->restore.pfns[i] = pfn;
-        types[i] = type;
+        ctx->restore.types[i] = type;
     }
 
     if ( rec->length != (sizeof(*pages) +
@@ -406,11 +398,9 @@ static int handle_page_data(struct xc_sr_context *ctx, struct xc_sr_record *rec)
         goto err;
     }
 
-    rc = process_page_data(ctx, pages->count, ctx->restore.pfns, types,
-                           &pages->pfn[pages->count]);
+    rc = process_page_data(ctx, pages->count, ctx->restore.pfns,
+                           ctx->restore.types, &pages->pfn[pages->count]);
  err:
-    free(types);
-
     return rc;
 }
 
@@ -727,7 +717,8 @@ static int setup(struct xc_sr_context *ctx)
     }
 
     ctx->restore.pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pfns));
-    if ( !ctx->restore.pfns )
+    ctx->restore.types = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.types));
+    if ( !ctx->restore.pfns || !ctx->restore.types )
     {
         ERROR("Unable to allocate memory");
         rc = -1;
@@ -764,6 +755,7 @@ static void cleanup(struct xc_sr_context *ctx)
 
     free(ctx->restore.buffered_records);
     free(ctx->restore.populated_pfns);
+    free(ctx->restore.types);
     free(ctx->restore.pfns);
 
     if ( ctx->restore.ops.cleanup(ctx) )