Re: [Xen-devel] [PATCH 3/3] libxl: Domain destroy: fork



Ian Campbell wrote:
> On Tue, 2015-03-17 at 09:30 -0600, Jim Fehlig wrote:
>   
>> diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
>> index b6541d4..b43db1a 100644
>> --- a/tools/libxl/libxl.c
>> +++ b/tools/libxl/libxl.c
>> @@ -1481,6 +1481,10 @@ static void domain_destroy_callback(libxl__egc *egc,
>>  static void destroy_finish_check(libxl__egc *egc,
>>                                   libxl__domain_destroy_state *dds);
>>  
>> +static void domain_destroy_domid_cb(libxl__egc *egc,
>> +                                    libxl__ev_child *destroyer,
>> +                                    pid_t pid, int status);
>>     
>
> This seems misplaced, doesn't it want to go before libxl__destroy_domid
> just after the prototype of devices_destroy_cb, which is where it lives
> in the sequence?
>   

Yes, I think you are right. Here's an adjusted patch.

Regards,
Jim

From 0c6fef6926580938b0fb8750c009af5151a056dc Mon Sep 17 00:00:00 2001
From: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Date: Thu, 5 Mar 2015 16:28:04 +0000
Subject: [PATCH 3/3] libxl: Domain destroy: fork

Call xc_domain_destroy in a subprocess.  That allows us to do so
asynchronously, rather than blocking the whole process calling libxl.
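
For context, a minimal standalone sketch of the pattern (plain POSIX,
not libxl code; slow_destroy() is a made-up stand-in for the blocking
xc_domain_destroy() call, and the parent polls with WNOHANG where libxl
would use its libxl__ev_child / SIGCHLD machinery):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static int slow_destroy(void)    /* placeholder for the blocking call */
    {
        sleep(1);
        return 0;                    /* 0 on success, -1 with errno set */
    }

    int main(void)
    {
        pid_t pid = fork();
        if (pid < 0) { perror("fork"); return 1; }

        if (pid == 0) {              /* child: do the blocking work */
            if (slow_destroy() < 0) {
                /* an exit status only carries 0..255; pass errno if it fits */
                if (errno > 0 && errno < 126)
                    _exit(errno);
                _exit(126);          /* marker: errno didn't fit */
            }
            _exit(0);
        }

        /* parent: stays responsive; poll the child rather than block in wait() */
        int status;
        for (;;) {
            pid_t r = waitpid(pid, &status, WNOHANG);
            if (r == pid) break;
            if (r < 0) { perror("waitpid"); return 1; }
            usleep(100000);          /* a real caller returns to its event loop */
        }

        if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
            printf("destroy completed\n");
        else if (WIFEXITED(status))
            fprintf(stderr, "destroy failed: %s\n",
                    strerror(WEXITSTATUS(status)));
        else
            fprintf(stderr, "destroy child died abnormally\n");
        return 0;
    }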

The changes in detail:

 * Provide a libxl__ev_child in libxl__destroy_domid_state, and
   initialise it in libxl__destroy_domid.  There is no possibility
   to `clean up' a libxl__ev_child, but there is no need to, as the
   control flow ensures that we only continue after the child has
   exited.

 * Call libxl__ev_child_fork at the right point and put the call to
   xc_domain_destroy and associated logging in the child.  (The child
   opens a new xenctrl handle because we mustn't use the parent's.)

 * Consequently, the success return path from devices_destroy_cb
   no longer calls dis->callback.  Instead it simply returns, and
   dis->callback is invoked later from domain_destroy_domid_cb.

 * We plumb the errno value through the child's exit status, if it
   fits (see the sketch after this list).  This means we normally do
   the logging only in the parent.

 * Incidentally, we fix the bug that we were treating the return value
   from xc_domain_destroy as an errno value when in fact it is a
   return value from do_domctl (in this case, 0 or -1 setting errno).
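
The parent-side decoding of that exit status, again only as a sketch
(plain C, not the libxl code itself; report_destroy_status() is a
hypothetical helper), looks roughly like this:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Exit statuses 1..125 carry an errno back from the child; anything
     * else is a `difficult' status (killed by a signal, or an errno too
     * large to encode). */
    static void report_destroy_status(pid_t pid, int status)
    {
        if (status == 0)
            printf("destroy succeeded\n");
        else if (WIFEXITED(status) && WEXITSTATUS(status) < 126)
            fprintf(stderr, "destroy failed: %s\n",
                    strerror(WEXITSTATUS(status)));
        else
            fprintf(stderr, "destroy child (pid %ld) failed, wait status 0x%x\n",
                    (long)pid, (unsigned)status);
    }

In the patch itself this corresponds to domain_destroy_domid_cb(), which
uses LOGEV for the errno case and libxl_report_child_exitstatus()
otherwise.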

Signed-off-by: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: Jim Fehlig <jfehlig@xxxxxxxx>
Tested-by: Jim Fehlig <jfehlig@xxxxxxxx>
---
 tools/libxl/libxl.c          | 57 ++++++++++++++++++++++++++++++++++++++++----
 tools/libxl/libxl_internal.h |  1 +
 2 files changed, 53 insertions(+), 5 deletions(-)

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index b6541d4..b53a183 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -1558,6 +1558,10 @@ static void devices_destroy_cb(libxl__egc *egc,
                                libxl__devices_remove_state *drs,
                                int rc);
 
+static void domain_destroy_domid_cb(libxl__egc *egc,
+                                    libxl__ev_child *destroyer,
+                                    pid_t pid, int status);
+
 void libxl__destroy_domid(libxl__egc *egc, libxl__destroy_domid_state *dis)
 {
     STATE_AO_GC(dis->ao);
@@ -1567,6 +1571,8 @@ void libxl__destroy_domid(libxl__egc *egc, libxl__destroy_domid_state *dis)
     char *pid;
     int rc, dm_present;
 
+    libxl__ev_child_init(&dis->destroyer);
+
     rc = libxl_domain_info(ctx, NULL, domid);
     switch(rc) {
     case 0:
@@ -1672,17 +1678,58 @@ static void devices_destroy_cb(libxl__egc *egc,
 
     libxl__unlock_domain_userdata(lock);
 
-    rc = xc_domain_destroy(ctx->xch, domid);
-    if (rc < 0) {
-        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc, "xc_domain_destroy failed for %d", domid);
+    rc = libxl__ev_child_fork(gc, &dis->destroyer, domain_destroy_domid_cb);
+    if (rc < 0) goto out;
+    if (!rc) { /* child */
+        ctx->xch = xc_interface_open(ctx->lg,0,0);
+        if (!ctx->xch) goto badchild;
+
+        rc = xc_domain_destroy(ctx->xch, domid);
+        if (rc < 0) goto badchild;
+        _exit(0);
+
+    badchild:
+        if (errno > 0  && errno < 126) {
+            _exit(errno);
+        } else {
+            LOGE(ERROR,
+ "xc_domain_destroy failed for %d (with difficult errno value %d)",
+                 domid, errno);
+            _exit(-1);
+        }
+    }
+    LOG(INFO, "forked pid %ld for destroy of domain %d", (long)rc, domid);
+
+    return;
+
+out:
+    dis->callback(egc, dis, rc);
+    return;
+}
+
+static void domain_destroy_domid_cb(libxl__egc *egc,
+                                    libxl__ev_child *destroyer,
+                                    pid_t pid, int status)
+{
+    libxl__destroy_domid_state *dis = CONTAINER_OF(destroyer, *dis, destroyer);
+    STATE_AO_GC(dis->ao);
+    int rc;
+
+    if (status) {
+        if (WIFEXITED(status) && WEXITSTATUS(status)<126) {
+            LOGEV(ERROR, WEXITSTATUS(status),
+                  "xc_domain_destroy failed for %"PRIu32"", dis->domid);
+        } else {
+            libxl_report_child_exitstatus(CTX, XTL_ERROR,
+                                          "async domain destroy", pid, status);
+        }
         rc = ERROR_FAIL;
         goto out;
     }
     rc = 0;
 
-out:
+ out:
     dis->callback(egc, dis, rc);
-    return;
 }
 
 int libxl_console_exec(libxl_ctx *ctx, uint32_t domid, int cons_num,
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 934465a..28d32ef 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -2957,6 +2957,7 @@ struct libxl__destroy_domid_state {
     libxl__domid_destroy_cb *callback;
     /* private to implementation */
     libxl__devices_remove_state drs;
+    libxl__ev_child destroyer;
 };
 
 struct libxl__domain_destroy_state {
-- 
1.8.0.1
