[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH] libxc: try to find last used pfn when migrating
For migration the last used pfn of a guest is needed to size the logdirty bitmap and as an upper bound of the page loop. Unfortunately there are pv-kernels advertising a much higher maximum pfn than they are really using in order to support memory hotplug. This will lead to allocation of much more memory in Xen tools during migration than really needed. Try to find the last used guest pfn of a pv-domu by scanning the p2m tree from the last entry towards its start, searching for an entry not being invalid. Normally the mid pages of the p2m tree containing all invalid entries are being reused, so we can just scan the top page for identical entries and skip all but the first one. Signed-off-by: Juergen Gross <jgross@xxxxxxxx> --- tools/libxc/xc_sr_save.c | 8 ++++---- tools/libxc/xc_sr_save_x86_pv.c | 34 +++++++++++++++++++++++++++++++--- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c index 0c12e56..22b3f18 100644 --- a/tools/libxc/xc_sr_save.c +++ b/tools/libxc/xc_sr_save.c @@ -677,6 +677,10 @@ static int setup(struct xc_sr_context *ctx) DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap, &ctx->save.dirty_bitmap_hbuf); + rc = ctx->save.ops.setup(ctx); + if ( rc ) + goto err; + dirty_bitmap = xc_hypercall_buffer_alloc_pages( xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size))); ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE * @@ -692,10 +696,6 @@ static int setup(struct xc_sr_context *ctx) goto err; } - rc = ctx->save.ops.setup(ctx); - if ( rc ) - goto err; - rc = 0; err: diff --git a/tools/libxc/xc_sr_save_x86_pv.c b/tools/libxc/xc_sr_save_x86_pv.c index f63f40b..6189fda 100644 --- a/tools/libxc/xc_sr_save_x86_pv.c +++ b/tools/libxc/xc_sr_save_x86_pv.c @@ -83,8 +83,8 @@ static int map_p2m(struct xc_sr_context *ctx) */ xc_interface *xch = ctx->xch; int rc = -1; - unsigned x, fpp, fll_entries, fl_entries; - xen_pfn_t fll_mfn; + unsigned x, saved_x, fpp, fll_entries, fl_entries; + xen_pfn_t 
fll_mfn, saved_mfn, max_pfn; xen_pfn_t *local_fll = NULL; void *guest_fll = NULL; @@ -96,7 +96,6 @@ static int map_p2m(struct xc_sr_context *ctx) fpp = PAGE_SIZE / ctx->x86_pv.width; fll_entries = (ctx->x86_pv.max_pfn / (fpp * fpp)) + 1; - fl_entries = (ctx->x86_pv.max_pfn / fpp) + 1; fll_mfn = GET_FIELD(ctx->x86_pv.shinfo, arch.pfn_to_mfn_frame_list_list, ctx->x86_pv.width); @@ -131,6 +130,8 @@ static int map_p2m(struct xc_sr_context *ctx) } /* Check for bad mfns in frame list list. */ + saved_mfn = 0; + saved_x = 0; for ( x = 0; x < fll_entries; ++x ) { if ( local_fll[x] == 0 || local_fll[x] > ctx->x86_pv.max_mfn ) @@ -139,8 +140,35 @@ static int map_p2m(struct xc_sr_context *ctx) local_fll[x], x, fll_entries); goto err; } + if ( local_fll[x] != saved_mfn ) + { + saved_mfn = local_fll[x]; + saved_x = x; + } } + /* + * Check for actual lower max_pfn: + * If the trailing entries of the frame list list were all the same we can + * assume they all reference mid pages all referencing p2m pages with all + * invalid entries. Otherwise there would be multiple pfns referencing all + * the same mfn which can't work across migration, as this sharing would be + * broken by the migration process. + * Adjust max_pfn if possible to avoid allocating much larger areas as + * needed for p2m and logdirty map. + */ + max_pfn = (saved_x + 1) * fpp * fpp - 1; + if ( max_pfn < ctx->x86_pv.max_pfn ) + { + ctx->x86_pv.max_pfn = max_pfn; + ctx->x86_pv.p2m_frames = (ctx->x86_pv.max_pfn + fpp) / fpp; + ctx->save.p2m_size = max_pfn + 1; + fll_entries = (ctx->x86_pv.max_pfn / (fpp * fpp)) + 1; + DPRINTF("lowering max_pfn to %#lx, p2m_frames %d", max_pfn, + ctx->x86_pv.p2m_frames); + } + fl_entries = (ctx->x86_pv.max_pfn / fpp) + 1; + /* Map the guest mid p2m frames. */ guest_fl = xc_map_foreign_pages(xch, ctx->domid, PROT_READ, local_fll, fll_entries); -- 2.6.2 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |