[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 26/28] libxl: spawns two QEMUs for HVM guests
From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> We actually need to spawn a second QEMU if we want to run qemu as non-root, because the pv backends ought to run as root (or device hotplug may not work). So in this case, start a second QEMU to provide PV backends in userspace to HVM guests. Use both dcs->dmss.pvqemu and dcs->dmss.dm to keep track of the starting QEMUs. Only proceed when both QEMUs have started. And, we only default to running QEMU as non-root if we are going to be able to run split qemus. In particular, it is not safe to run split qemus if they don't support the emulator_id option, because we need to split the xenstore paths too. Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx> --- v6: Split only if trying to run a qemu as non-root Reorganise changes to dm callbacks No more dcs in dmss Use min() to calculate worst rc Explicitly set unused fields of dmss.pvqemu to 0 Change error handling v3: use dcs->dmss.pvqemu to spawn the second QEMU keep track of the rc of both QEMUs before proceeding --- tools/libxl/libxl_create.c | 72 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c index e67e402..59dfcd67 100644 --- a/tools/libxl/libxl_create.c +++ b/tools/libxl/libxl_create.c @@ -431,6 +431,8 @@ static int domcreate_setdefault_dm_user(libxl__gc *gc, int rc; const char *user; + const char *dm = libxl__domain_device_model(gc, b_info); + if (b_info->device_model_user) /* already set, good-oh */ return 0; @@ -440,6 +442,15 @@ static int domcreate_setdefault_dm_user(libxl__gc *gc, /* we're not going to run it anyway */ return 0; + if (!libxl__dm_supported(gc, dm, libxl__dm_support_check__emulator_id)) { + /* we don't want to run the pv backends as non-root because + * device hotplug will no longer work. 
*/ + LOG(WARN, + "Device model does not support split PV backends, running it as root"); + user = "root"; + goto found; + } + user = GCSPRINTF("%s%d", LIBXL_QEMU_USER_BASE, domid); rc = dm_runas_helper(gc, user); @@ -802,6 +813,14 @@ static void remus_checkpoint_stream_done( /* Event callbacks, in this order: */ static void domcreate_dm_support_checked(libxl__egc *egc, libxl__dm_support_check_state *checking, int rc); + +static void domcreate_dm_local_split_pv_cb(libxl__egc *egc, + libxl__dm_spawn_state *dmss); +static void domcreate_dm_local_split_dm_cb(libxl__egc *egc, + libxl__dm_spawn_state *dmss); +static void domcreate_dm_local_split_cb(libxl__egc *egc, + libxl__domain_create_state *dcs); + static void domcreate_devmodel_started(libxl__egc *egc, libxl__dm_spawn_state *dmss); static void domcreate_bootloader_console_available(libxl__egc *egc, @@ -1044,6 +1063,9 @@ static void domcreate_dm_support_checked(libxl__egc *egc, /* convenience aliases */ libxl_domain_config *const d_config = dcs->guest_config; + const libxl_domain_build_info *b_info = &d_config->b_info; + libxl__domain_build_state *const b_state = &dcs->build_state; + const uint32_t domid = dcs->guest_domid; const int restore_fd = dcs->restore_fd; if (rc) goto out; @@ -1053,6 +1075,15 @@ static void domcreate_dm_support_checked(libxl__egc *egc, rc = domcreate_setdefault_dm_user(gc, dcs); if (rc) goto out; + /* run two qemus? */ + if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM && + !libxl_defbool_val(d_config->b_info.device_model_stubdomain) && + b_info->device_model_user && + strcmp(b_info->device_model_user, "root")) { + rc = libxl__dm_emuidmap_add(gc, domid, b_state, EMUID_SPLIT); + if (rc) goto out; + } + dcs->bl.ao = ao; libxl_device_disk *bootdisk = d_config->num_disks > 0 ? 
&d_config->disks[0] : NULL; @@ -1130,6 +1161,11 @@ static void domcreate_bootloader_done(libxl__egc *egc, dcs->dmss.dm.callback = domcreate_devmodel_started; dcs->dmss.callback = domcreate_devmodel_started; + dcs->dmss.pvqemu.spawn.ao = ao; + dcs->dmss.pvqemu.guest_domid = domid; + dcs->dmss.pvqemu.guest_config = 0; + dcs->dmss.pvqemu.build_state = 0; + if (restore_fd < 0 && dcs->domid_soft_reset == INVALID_DOMID) { rc = libxl__domain_build(gc, d_config, domid, state); domcreate_rebuild_done(egc, dcs, rc); @@ -1398,6 +1434,16 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev, if (libxl_defbool_val(d_config->b_info.device_model_stubdomain)) { libxl__spawn_stub_dm(egc, &dcs->dmss, EMUID_DM); } else { + if (state->emuidmap & (1u << EMUID_SPLIT)) { + dcs->dmss.dm.rc = 1; + dcs->dmss.dm.callback = domcreate_dm_local_split_dm_cb; + + dcs->dmss.pvqemu.rc = 1; /* +ve means in progress */ + dcs->dmss.pvqemu.callback = domcreate_dm_local_split_pv_cb; + + libxl__spawn_qdisk_backend(egc, &dcs->dmss.pvqemu); + } + libxl__spawn_local_dm(egc, &dcs->dmss.dm, EMUID_DM); } @@ -1455,6 +1501,32 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev, domcreate_complete(egc, dcs, ret); } +static void domcreate_dm_local_split_pv_cb(libxl__egc *egc, + libxl__dm_spawn_state *dmss) +{ + libxl__domain_create_state *dcs = CONTAINER_OF(dmss, *dcs, dmss.pvqemu); + domcreate_dm_local_split_cb(egc, dcs); +} + +static void domcreate_dm_local_split_dm_cb(libxl__egc *egc, + libxl__dm_spawn_state *dmss) +{ + libxl__domain_create_state *dcs = CONTAINER_OF(dmss, *dcs, dmss.dm); + domcreate_dm_local_split_cb(egc, dcs); +} + +static void domcreate_dm_local_split_cb(libxl__egc *egc, + libxl__domain_create_state *dcs) +{ + if (dcs->dmss.dm.rc > 0 || + dcs->dmss.pvqemu.rc > 0) + /* something is still in progress */ + return; + + dcs->dmss.dm.rc = min(dcs->dmss.dm.rc, dcs->dmss.pvqemu.rc); + domcreate_devmodel_started(egc, &dcs->dmss.dm); +} + static void 
domcreate_devmodel_started(libxl__egc *egc, libxl__dm_spawn_state *dmss) { -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.