[Xen-devel] [PATCH 2 of 2] xl: migrate: destroy disks on source before giving destination the "go"
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1344330518 -3600
# Node ID 773de9b98cd183ee2b595f1856fb9768709e20f7
# Parent  0a67d3147a174a26dda2e268ca801e56a9c5b711
xl: migrate: destroy disks on source before giving destination the "go"

25733:353bc0801b11 "libxl: support custom block hotplug scripts" had the
(intended) side effect of re-enabling the hotplug script's device sharing
checks, which in turn has exposed the fact that during migration xl
currently has both devices in existence (but thankfully not active).

Fix this by destroying the disk backends before sending the GO message to
the destination end (and recreating them on failure).

This is a bit of an ad-hoc solution for 4.2; we should revisit the
sequencing of the operations during migration for 4.3.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

diff -r 0a67d3147a17 -r 773de9b98cd1 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Tue Aug 07 08:59:34 2012 +0100
+++ b/tools/libxl/xl_cmdimpl.c  Tue Aug 07 10:08:38 2012 +0100
@@ -3021,6 +3021,8 @@ static void migrate_domain(const char *d
     char rc_buf;
     uint8_t *config_data;
     int config_len;
+    libxl_device_disk *disks;
+    int i, nr_disks = 0;
 
     save_domain_core_begin(domain_spec, override_config_file,
                            &config_data, &config_len);
@@ -3072,6 +3074,18 @@ static void migrate_domain(const char *d
         if (rc) goto failed_resume;
     }
 
+    /* In order to avoid having the same disk active twice
+     * simultaneously on a migration, which will trigger the hotplug
+     * script sharing detection, we must first remove the disks from
+     * the source domain. */
+    disks = libxl_device_disk_list(ctx, domid, &nr_disks);
+    if (!disks) goto failed_badly;
+
+    for (i = 0 ; i < nr_disks ; i++) {
+        rc = libxl_device_disk_destroy(ctx, domid, &disks[i], NULL);
+        if (rc) goto failed_badly;
+    }
+
     /* point of no return - as soon as we have tried to say
      * "go" to the receiver, it's not safe to carry on. We leave
      * the domain renamed to %s--migratedaway in case that's helpful.
@@ -3107,6 +3121,13 @@ static void migrate_domain(const char *d
 
         fprintf(stderr, "migration sender: Trying to resume at our end.\n");
 
+        /* Re-add disks removed above. */
+        for (i = 0 ; i < nr_disks ; i++) {
+            rc = libxl_device_disk_add(ctx, domid, &disks[i], NULL);
+            if (rc) goto failed_badly;
+        }
+        libxl_device_disk_list_free(disks, nr_disks);
+
         if (common_domname) {
             libxl_domain_rename(ctx, domid, away_domname, common_domname);
         }
@@ -3120,6 +3141,8 @@ static void migrate_domain(const char *d
     fprintf(stderr, "migration sender: Target reports successful startup.\n");
     libxl_domain_destroy(ctx, domid, 0); /* bang! */
     fprintf(stderr, "Migration successful.\n");
+
+    libxl_device_disk_list_free(disks, nr_disks);
     exit(0);
 
  failed_suspend:
@@ -3132,6 +3155,7 @@ static void migrate_domain(const char *d
     close(send_fd);
     migration_child_report(recv_fd);
     fprintf(stderr, "Migration failed, resuming at sender.\n");
+    assert(!nr_disks); /* This failure path is always before disk shutdown */
    libxl_domain_resume(ctx, domid, 0);
     exit(-ERROR_FAIL);
 
@@ -3146,6 +3170,7 @@ static void migrate_domain(const char *d
 
     close(send_fd);
     migration_child_report(recv_fd);
+    libxl_device_disk_list_free(disks, nr_disks);
     exit(-ERROR_BADFAIL);
 }
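[Editorial note] For readers following the control flow, below is a condensed, standalone
sketch of the ordering this patch introduces: list and destroy the source's disk
backends, then tell the destination to go; if the handover fails before the point of
no return, re-add the disks and resume at the sender. It reuses the libxl calls that
appear in the diff above, but send_go_to_destination() and resume_at_sender() are
hypothetical placeholders for the surrounding migrate_domain() plumbing, and the error
handling is simplified relative to the real failed_resume/failed_badly paths.

/* Sketch only -- not the actual xl code. Assumes a libxl_ctx *ctx and a
 * domid as in migrate_domain(); the two helpers below are hypothetical
 * stand-ins for the existing migration stream handling. */
#include <stdint.h>
#include <libxl.h>

int send_go_to_destination(void);   /* hypothetical: send the "go" message */
int resume_at_sender(void);         /* hypothetical: rename back and resume */

static int migrate_handover_disks(libxl_ctx *ctx, uint32_t domid)
{
    libxl_device_disk *disks;
    int i, rc, nr_disks = 0;

    /* Remove the disks from the source first, so the hotplug scripts never
     * see the same disk attached on both hosts at once. */
    disks = libxl_device_disk_list(ctx, domid, &nr_disks);
    if (!disks) return ERROR_FAIL;

    for (i = 0; i < nr_disks; i++) {
        rc = libxl_device_disk_destroy(ctx, domid, &disks[i], NULL);
        if (rc) goto out;
    }

    rc = send_go_to_destination();
    if (rc) {
        /* The destination never took over: put the disks back and resume
         * the domain at our end, mirroring the patch's failure path. */
        for (i = 0; i < nr_disks; i++) {
            rc = libxl_device_disk_add(ctx, domid, &disks[i], NULL);
            if (rc) goto out;
        }
        rc = resume_at_sender();
    }

 out:
    libxl_device_disk_list_free(disks, nr_disks);
    return rc;
}

In the real patch the re-add happens on the "Trying to resume at our end" path and the
final libxl_device_disk_list_free() runs on the success and failed_badly paths instead,
but the ordering is the same: the source's disk backends are gone before the receiver
is told to start.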