[Xen-devel] [PATCH v3 29/31] libxl_disk: Have libxl_cdrom_insert use libxl__ev_qmp
So when QEMU is involved, the operation will be asynchronous and will
finish later.
Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
---
tools/libxl/libxl_disk.c | 55 +++++++++++++++++++++++++++++++++++-----
1 file changed, 49 insertions(+), 6 deletions(-)
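
For reference, the two new *_qmp_cb callbacks below receive only the
libxl__ev_qmp pointer and recover the whole operation state from it.
Here is a minimal, self-contained sketch of that embedded-event /
CONTAINER_OF pattern; the CONTAINER_OF and ev_qmp definitions are
simplified stand-ins, not libxl's real ones (those live in
libxl_internal.h), and it needs GNU C typeof (gcc/clang):

/* Recover the enclosing state struct from a pointer to an embedded
 * member.  Simplified; libxl's version also type-checks the member. */
#include <stddef.h>
#include <stdio.h>

#define CONTAINER_OF(inner, outer, member) \
    ((typeof(&(outer)))((char *)(inner) - offsetof(typeof(outer), member)))

typedef struct { int dummy; } ev_qmp;      /* stand-in for libxl__ev_qmp */

typedef struct {
    int domid;
    ev_qmp ev;                             /* embedded, like cis->ev */
} cdrom_insert_state;

/* Shaped like cdrom_insert_ejected_qmp_cb(): only the ev pointer comes
 * in; the state struct is recovered with CONTAINER_OF. */
static void qmp_cb(ev_qmp *ev)
{
    cdrom_insert_state *cis = CONTAINER_OF(ev, *cis, ev);
    printf("QMP reply for domid %d\n", cis->domid);
}

int main(void)
{
    cdrom_insert_state cis = { .domid = 7 };
    qmp_cb(&cis.ev);                       /* prints: QMP reply for domid 7 */
    return 0;
}

This is why &cis->ev can be handed to libxl__qmp_insert_cdrom_ev()
without also passing cis: the callback gets it back for free.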
diff --git a/tools/libxl/libxl_disk.c b/tools/libxl/libxl_disk.c
index a3bf974fe3..9808a53c1b 100644
--- a/tools/libxl/libxl_disk.c
+++ b/tools/libxl/libxl_disk.c
@@ -663,6 +663,7 @@ int libxl_device_disk_getinfo(libxl_ctx *ctx, uint32_t domid,
 
 typedef struct {
     libxl__ao *ao;
+    libxl__ev_qmp ev;
     libxl_domain_config d_config;
     const char *be_path;
     const char *libxl_path;
@@ -675,8 +676,14 @@ typedef struct {
 } libxl__cdrom_insert_state;
 static void cdrom_insert_ejected(libxl__egc *egc,
                                  libxl__cdrom_insert_state *cis);
+static void cdrom_insert_ejected_qmp_cb(libxl__egc *egc, libxl__ev_qmp *ev,
+                                        const libxl__json_object *response,
+                                        libxl__qmp_error_class error);
 static void cdrom_insert_inserted(libxl__egc *egc,
                                   libxl__cdrom_insert_state *cis);
+static void cdrom_insert_inserted_qmp_cb(libxl__egc *egc, libxl__ev_qmp *ev,
+                                         const libxl__json_object *response,
+                                         libxl__qmp_error_class error);
 static void cdrom_insert_done(libxl__egc *egc,
                               libxl__cdrom_insert_state *cis,
                               int rc);
@@ -694,6 +701,7 @@ int libxl_cdrom_insert(libxl_ctx *ctx, uint32_t domid, libxl_device_disk *disk,
     GCNEW(cis);
     cis->ao = ao;
     cis->domid = domid;
+    libxl__ev_qmp_init(&cis->ev);
     // XXX: can I do that? is disk going to exist until the AO is over?
     cis->disk = disk;
 
@@ -778,12 +786,14 @@ int libxl_cdrom_insert(libxl_ctx *ctx, uint32_t domid, libxl_device_disk *disk,
      * by inserting empty media. JSON is not updated.
      */
     if (cis->dm_ver == LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN) {
-        rc = libxl__qmp_insert_cdrom(gc, domid, disk_empty);
+        rc = libxl__qmp_insert_cdrom_ev(gc, domid, &cis->ev,
+                                        cdrom_insert_ejected_qmp_cb,
+                                        disk_empty);
         if (rc) goto out;
+    } else {
+        cdrom_insert_ejected(egc, cis);
     }
 
-    cdrom_insert_ejected(egc, cis);
-
     return AO_INPROGRESS;
 
 out:
@@ -794,6 +804,21 @@ out:
     return AO_INPROGRESS;
 }
 
+static void cdrom_insert_ejected_qmp_cb(libxl__egc *egc, libxl__ev_qmp *ev,
+                                        const libxl__json_object *response,
+                                        libxl__qmp_error_class error)
+{
+    EGC_GC;
+    libxl__cdrom_insert_state *cis = CONTAINER_OF(ev, *cis, ev);
+
+    if (error) {
+        cdrom_insert_done(egc, cis, ERROR_FAIL);
+    } else {
+        libxl__ev_qmp_deregister(gc, ev);
+        cdrom_insert_ejected(egc, cis);
+    }
+}
+
 static void cdrom_insert_ejected(libxl__egc *egc,
                                  libxl__cdrom_insert_state *cis)
 {
@@ -852,12 +877,14 @@ static void cdrom_insert_ejected(libxl__egc *egc,
     if (rc) goto out;
 
     if (cis->dm_ver == LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN) {
-        rc = libxl__qmp_insert_cdrom(gc, domid, disk);
+        rc = libxl__qmp_insert_cdrom_ev(gc, domid, &cis->ev,
+                                        cdrom_insert_inserted_qmp_cb,
+                                        disk);
         if (rc) goto out;
+    } else {
+        cdrom_insert_inserted(egc, cis);
     }
 
-    cdrom_insert_inserted(egc, cis);
-
     return;
 
 out:
@@ -865,6 +892,21 @@ out:
     cdrom_insert_done(egc, cis, rc);
 }
 
+static void cdrom_insert_inserted_qmp_cb(libxl__egc *egc, libxl__ev_qmp *ev,
+                                         const libxl__json_object *response,
+                                         libxl__qmp_error_class error)
+{
+    EGC_GC;
+    libxl__cdrom_insert_state *cis = CONTAINER_OF(ev, *cis, ev);
+
+    if (error) {
+        cdrom_insert_done(egc, cis, ERROR_FAIL);
+    } else {
+        libxl__ev_qmp_deregister(gc, ev);
+        cdrom_insert_inserted(egc, cis);
+    }
+}
+
 static void cdrom_insert_inserted(libxl__egc *egc,
                                   libxl__cdrom_insert_state *cis)
 {
@@ -934,6 +976,7 @@ static void cdrom_insert_done(libxl__egc *egc,
 {
     STATE_AO_GC(cis->ao);
 
+    libxl__ev_qmp_deregister(gc, &cis->ev);
     libxl_domain_config_dispose(&cis->d_config);
     libxl_device_disk_dispose(&cis->disk_empty);
     libxl_device_disk_dispose(&cis->disk_saved);
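
For context, a sketch of the caller-side view (not part of this patch;
change_cd and the elided disk setup are made up for illustration).
libxl_cdrom_insert() already takes a libxl_asyncop_how, so external
callers keep the same API; with ao_how == NULL libxl still waits for
the whole operation, which now includes the asynchronous QMP exchange:

#include <libxl.h>

int change_cd(libxl_ctx *ctx, uint32_t domid)
{
    int rc;
    libxl_device_disk disk;

    libxl_device_disk_init(&disk);
    /* ... fill in disk.pdev_path, disk.vdev, etc. for the new medium ... */

    /* ao_how == NULL: block until the AO, including the QMP
     * eject/insert round trips, has completed. */
    rc = libxl_cdrom_insert(ctx, domid, &disk, NULL);

    libxl_device_disk_dispose(&disk);
    return rc;
}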
--
Anthony PERARD
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel