[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC XEN v1 11/14] tools: migrate: refactor selection of save/restore ops to be arch specific
I wasn't sure of the best way to achieve this, but a pair of per-arch hooks seemed to be preferable to ifdeffery. I also wasn't sure about the change to guest_type for save. The restore half of the ctxt already has such a field but since the save side treats it as an input to the process as opposed to the restore side which determines it from the stream it seemed like keeping them separate was best. Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx> Cc: andyhhp --- tools/libxc/xc_sr_common.h | 15 +++++++++++++++ tools/libxc/xc_sr_common_x86.c | 22 ++++++++++++++++++++++ tools/libxc/xc_sr_restore.c | 15 +++------------ tools/libxc/xc_sr_save.c | 22 +++++++--------------- 4 files changed, 47 insertions(+), 27 deletions(-) diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h index 64f6082..0d36c8d 100644 --- a/tools/libxc/xc_sr_common.h +++ b/tools/libxc/xc_sr_common.h @@ -174,6 +174,9 @@ struct xc_sr_context struct xc_sr_save_ops ops; struct save_callbacks *callbacks; + /* For Domain Header */ + uint32_t guest_type; + /* Live migrate vs non live suspend. */ bool live; @@ -317,9 +320,21 @@ struct xc_sr_context extern struct xc_sr_save_ops save_ops_x86_pv; extern struct xc_sr_save_ops save_ops_x86_hvm; +extern struct xc_sr_save_ops save_ops_arm; extern struct xc_sr_restore_ops restore_ops_x86_pv; extern struct xc_sr_restore_ops restore_ops_x86_hvm; +extern struct xc_sr_restore_ops restore_ops_arm; + +/* + * Arch function to select the correct ctx.{save,restore}.ops + * implementation for the guest. Will update the appropriate ops + * pointer. + * + * _save must also set ctx->save.guest_type. 
+ */ +void xc_sr_select_restore_ops(struct xc_sr_context *ctx); +void xc_sr_select_save_ops(struct xc_sr_context *ctx); struct xc_sr_record { diff --git a/tools/libxc/xc_sr_common_x86.c b/tools/libxc/xc_sr_common_x86.c index 98f1cef..151bb0a 100644 --- a/tools/libxc/xc_sr_common_x86.c +++ b/tools/libxc/xc_sr_common_x86.c @@ -43,6 +43,28 @@ int handle_tsc_info(struct xc_sr_context *ctx, struct xc_sr_record *rec) return 0; } +void xc_sr_select_save_ops(struct xc_sr_context *ctx) +{ + if ( ctx->dominfo.hvm ) + { + ctx->save.guest_type = DHDR_TYPE_X86_HVM; + ctx->save.ops = save_ops_x86_hvm; + } + else + { + ctx->save.guest_type = DHDR_TYPE_X86_PV; + ctx->save.ops = save_ops_x86_pv; + } +} + +void xc_sr_select_restore_ops(struct xc_sr_context *ctx) +{ + if ( ctx->dominfo.hvm ) + ctx->restore.ops = restore_ops_x86_hvm; + else + ctx->restore.ops = restore_ops_x86_pv; +} + /* * Local variables: * mode: C diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c index 05159bb..80f6bc5 100644 --- a/tools/libxc/xc_sr_restore.c +++ b/tools/libxc/xc_sr_restore.c @@ -763,18 +763,9 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom, if ( read_headers(&ctx) ) return -1; - if ( ctx.dominfo.hvm ) - { - ctx.restore.ops = restore_ops_x86_hvm; - if ( restore(&ctx) ) - return -1; - } - else - { - ctx.restore.ops = restore_ops_x86_pv; - if ( restore(&ctx) ) - return -1; - } + xc_sr_select_restore_ops(&ctx); + if ( restore(&ctx) ) + return -1; IPRINTF("XenStore: mfn %#"PRIpfn", dom %d, evt %u", ctx.restore.xenstore_gfn, diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c index 0c12e56..e6e659d 100644 --- a/tools/libxc/xc_sr_save.c +++ b/tools/libxc/xc_sr_save.c @@ -6,7 +6,7 @@ /* * Writes an Image header and Domain header into the stream. 
*/ -static int write_headers(struct xc_sr_context *ctx, uint16_t guest_type) +static int write_headers(struct xc_sr_context *ctx) { xc_interface *xch = ctx->xch; int32_t xen_version = xc_version(xch, XENVER_version, NULL); @@ -19,7 +19,7 @@ static int write_headers(struct xc_sr_context *ctx, uint16_t guest_type) }; struct xc_sr_dhdr dhdr = { - .type = guest_type, + .type = ctx->save.guest_type, .page_shift = XC_PAGE_SHIFT, .xen_major = (xen_version >> 16) & 0xffff, .xen_minor = (xen_version) & 0xffff, @@ -724,13 +724,13 @@ static void cleanup(struct xc_sr_context *ctx) /* * Save a domain. */ -static int save(struct xc_sr_context *ctx, uint16_t guest_type) +static int save(struct xc_sr_context *ctx) { xc_interface *xch = ctx->xch; int rc, saved_rc = 0, saved_errno = 0; IPRINTF("Saving domain %d, type %s", - ctx->domid, dhdr_type_to_str(guest_type)); + ctx->domid, dhdr_type_to_str(ctx->save.guest_type)); rc = setup(ctx); if ( rc ) @@ -738,7 +738,7 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type) xc_report_progress_single(xch, "Start of stream"); - rc = write_headers(ctx, guest_type); + rc = write_headers(ctx); if ( rc ) goto err; @@ -884,16 +884,8 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, return -1; } - if ( ctx.dominfo.hvm ) - { - ctx.save.ops = save_ops_x86_hvm; - return save(&ctx, DHDR_TYPE_X86_HVM); - } - else - { - ctx.save.ops = save_ops_x86_pv; - return save(&ctx, DHDR_TYPE_X86_PV); - } + xc_sr_select_save_ops(&ctx); + return save(&ctx); } /* -- 2.6.1 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |