|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 4/4] tools/libs/guest: allocate various migration arrays just once
From: Edwin Török <edwin.torok@xxxxxxxxxx>
Allocate these arrays just once at the start of migration,
using the maximum batch size, and free them at the end.
Signed-off-by: Edwin Török <edwin.torok@xxxxxxxxxx>
Signed-off-by: Frediano Ziglio <frediano.ziglio@xxxxxxxxxx>
---
tools/libs/guest/xg_sr_common.h | 13 +++++++
tools/libs/guest/xg_sr_save.c | 66 +++++++++++++--------------------
2 files changed, 39 insertions(+), 40 deletions(-)
diff --git a/tools/libs/guest/xg_sr_common.h b/tools/libs/guest/xg_sr_common.h
index f1573aefcb..77312bae9c 100644
--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -209,6 +209,18 @@ static inline int update_blob(struct xc_sr_blob *blob,
return 0;
}
+struct xc_sr_context_save_buffers
+{
+ xen_pfn_t batch_pfns[MAX_BATCH_SIZE];
+ xen_pfn_t mfns[MAX_BATCH_SIZE];
+ xen_pfn_t types[MAX_BATCH_SIZE];
+ int errors[MAX_BATCH_SIZE];
+ void *guest_data[MAX_BATCH_SIZE];
+ void *local_pages[MAX_BATCH_SIZE];
+ struct iovec iov[MAX_BATCH_SIZE + 2]; /* headers + data */
+ uint64_t rec_pfns[MAX_BATCH_SIZE];
+};
+
struct xc_sr_context
{
xc_interface *xch;
@@ -244,6 +256,7 @@ struct xc_sr_context
unsigned long *deferred_pages;
unsigned long nr_deferred_pages;
xc_hypercall_buffer_t dirty_bitmap_hbuf;
+ struct xc_sr_context_save_buffers *buffers;
} save;
struct /* Restore data. */
diff --git a/tools/libs/guest/xg_sr_save.c b/tools/libs/guest/xg_sr_save.c
index 1700d81905..64014dcdbd 100644
--- a/tools/libs/guest/xg_sr_save.c
+++ b/tools/libs/guest/xg_sr_save.c
@@ -87,16 +87,16 @@ static int write_checkpoint_record(struct xc_sr_context
*ctx)
static int write_batch(struct xc_sr_context *ctx)
{
xc_interface *xch = ctx->xch;
- xen_pfn_t *mfns = NULL, *types = NULL;
+ xen_pfn_t *mfns, *types;
void *guest_mapping = NULL;
- void **guest_data = NULL;
- void **local_pages = NULL;
- int *errors = NULL, rc = -1;
+ void **guest_data;
+ void **local_pages;
+ int *errors, rc = -1;
unsigned int i, p, nr_pages = 0, nr_pages_mapped = 0;
unsigned int nr_pfns = ctx->save.nr_batch_pfns;
void *page, *orig_page;
- uint64_t *rec_pfns = NULL;
- struct iovec *iov = NULL; int iovcnt = 0;
+ uint64_t *rec_pfns;
+ struct iovec *iov; int iovcnt = 0;
struct {
struct xc_sr_rhdr rec;
struct xc_sr_rec_page_data_header page_data;
@@ -106,26 +106,24 @@ static int write_batch(struct xc_sr_context *ctx)
};
assert(nr_pfns != 0);
+ assert(nr_pfns <= MAX_BATCH_SIZE);
+ assert(ctx->save.buffers);
/* Mfns of the batch pfns. */
- mfns = malloc(nr_pfns * sizeof(*mfns));
+ mfns = ctx->save.buffers->mfns;
/* Types of the batch pfns. */
- types = malloc(nr_pfns * sizeof(*types));
+ types = ctx->save.buffers->types;
/* Errors from attempting to map the gfns. */
- errors = malloc(nr_pfns * sizeof(*errors));
+ errors = ctx->save.buffers->errors;
/* Pointers to page data to send. Mapped gfns or local allocations. */
- guest_data = calloc(nr_pfns, sizeof(*guest_data));
+ guest_data = ctx->save.buffers->guest_data;
+ memset(guest_data, 0, sizeof(*guest_data) * nr_pfns);
/* Pointers to locally allocated pages. Need freeing. */
- local_pages = calloc(nr_pfns, sizeof(*local_pages));
+ local_pages = ctx->save.buffers->local_pages;
+ memset(local_pages, 0, sizeof(*local_pages) * nr_pfns);
/* iovec[] for writev(). */
- iov = malloc((nr_pfns + 2) * sizeof(*iov));
-
- if ( !mfns || !types || !errors || !guest_data || !local_pages || !iov )
- {
- ERROR("Unable to allocate arrays for a batch of %u pages",
- nr_pfns);
- goto err;
- }
+ iov = ctx->save.buffers->iov;
+ rec_pfns = ctx->save.buffers->rec_pfns;
for ( i = 0; i < nr_pfns; ++i )
{
@@ -211,14 +209,6 @@ static int write_batch(struct xc_sr_context *ctx)
}
}
- rec_pfns = malloc(nr_pfns * sizeof(*rec_pfns));
- if ( !rec_pfns )
- {
- ERROR("Unable to allocate %zu bytes of memory for page data pfn list",
- nr_pfns * sizeof(*rec_pfns));
- goto err;
- }
-
hdrs.rec.length = sizeof(hdrs.page_data);
hdrs.rec.length += nr_pfns * sizeof(*rec_pfns);
hdrs.rec.length += nr_pages * PAGE_SIZE;
@@ -261,17 +251,13 @@ static int write_batch(struct xc_sr_context *ctx)
rc = ctx->save.nr_batch_pfns = 0;
err:
- free(rec_pfns);
if ( guest_mapping )
xenforeignmemory_unmap(xch->fmem, guest_mapping, nr_pages_mapped);
for ( i = 0; local_pages && i < nr_pfns; ++i )
+ {
free(local_pages[i]);
- free(iov);
- free(local_pages);
- free(guest_data);
- free(errors);
- free(types);
- free(mfns);
+ local_pages[i] = NULL;
+ }
return rc;
}
@@ -799,18 +785,18 @@ static int setup(struct xc_sr_context *ctx)
dirty_bitmap = xc_hypercall_buffer_alloc_pages(
xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size)));
- ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE *
- sizeof(*ctx->save.batch_pfns));
ctx->save.deferred_pages = bitmap_alloc(ctx->save.p2m_size);
+ ctx->save.buffers = calloc(1, sizeof(*ctx->save.buffers));
- if ( !ctx->save.batch_pfns || !dirty_bitmap || !ctx->save.deferred_pages )
+ if ( !dirty_bitmap || !ctx->save.deferred_pages || !ctx->save.buffers)
{
- ERROR("Unable to allocate memory for dirty bitmaps, batch pfns and"
- " deferred pages");
+ ERROR("Unable to allocate memory for dirty bitmaps, deferred pages"
+ " and various batch buffers");
rc = -1;
errno = ENOMEM;
goto err;
}
+ ctx->save.batch_pfns = ctx->save.buffers->batch_pfns;
rc = 0;
@@ -834,7 +820,7 @@ static void cleanup(struct xc_sr_context *ctx)
xc_hypercall_buffer_free_pages(xch, dirty_bitmap,
NRPAGES(bitmap_size(ctx->save.p2m_size)));
free(ctx->save.deferred_pages);
- free(ctx->save.batch_pfns);
+ free(ctx->save.buffers);
}
/*
--
2.43.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |