[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH] libxenlight: fix multiple xenstore watches problem



On Thu, 3 Dec 2009, Vincent Hanquez wrote:
> On Thu, Dec 03, 2009 at 03:39:57PM +0000, Stefano Stabellini wrote:
> > You are right about that, but Andres found the bug and fixed it with his
> > "clone context to avoid GC corruption" patch.
> 
> that's not about whether or not it can work. I think it's just confusing to
> clone and share a context in a mono thread where the only thing you want out
> of it, is a xenstore handle. You can simply clone a new xs handle, and worst
> case scenario, you can extend the xl context to store 2 xs connections.
> 
> In the end, the approach is overkill, and complex solutions often lead to
> unforeseen bugs (as was the case here)
> 

All right then, I am OK with not cloning the whole context but just the
xenstore connection, however it would also be nice to be able to wrap
the connection opening in a nice function like Andres did with the
context.

What about the following as a fix to the cloning context problem, and as
an alternative to Andres' 0/7 patch:

---


diff -r 0f35fb4f73d6 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Thu Dec 03 13:52:02 2009 +0000
+++ b/tools/libxl/libxl.c       Thu Dec 03 18:17:13 2009 +0000
@@ -654,22 +654,20 @@
 void dm_xenstore_record_pid(struct libxl_ctx *ctx, void *for_spawn,
                             pid_t innerchild) {
     struct libxl_device_model_starting *starting = for_spawn;
-    struct libxl_ctx clone;
     char *kvs[3];
     int rc;
 
-    clone = *ctx;
-    clone.xsh = xs_daemon_open();
+    libxl_ctx_open_xstemp(ctx);
     /* we mustn't use the parent's handle in the child */
 
     kvs[0] = "image/device-model-pid";
-    kvs[1] = libxl_sprintf(&clone, "%d", innerchild);
+    kvs[1] = libxl_sprintf(ctx, "%d", innerchild);
     kvs[2] = NULL;
-    rc = libxl_xs_writev(&clone, XBT_NULL, starting->dom_path, kvs);
-    if (rc) XL_LOG_ERRNO(&clone, XL_LOG_ERROR,
+    rc = libxl_xs_writev(ctx, XBT_NULL, starting->dom_path, kvs);
+    if (rc) XL_LOG_ERRNO(ctx, XL_LOG_ERROR,
                          "Couldn't record device model pid %ld at %s/%s",
                          (unsigned long)innerchild, starting->dom_path, kvs);
-    xs_daemon_close(clone.xsh);
+    libxl_ctx_close_xstemp(ctx);
 }
 
 static int libxl_vfb_and_vkb_from_device_model_info(struct libxl_ctx *ctx,
diff -r 0f35fb4f73d6 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Thu Dec 03 13:52:02 2009 +0000
+++ b/tools/libxl/libxl.h       Thu Dec 03 18:17:13 2009 +0000
@@ -35,6 +35,8 @@
 struct libxl_ctx {
     int xch;
     struct xs_handle *xsh;
+    struct xs_handle *xsh_temp;
+
     /* errors/debug buf */
     void *log_userdata;
     libxl_log_callback log_callback;
diff -r 0f35fb4f73d6 tools/libxl/libxl_device.c
--- a/tools/libxl/libxl_device.c        Thu Dec 03 13:52:02 2009 +0000
+++ b/tools/libxl/libxl_device.c        Thu Dec 03 18:17:13 2009 +0000
@@ -212,50 +212,49 @@
     fd_set rfds;
     struct timeval tv;
     flexarray_t *toremove;
-    struct libxl_ctx clone = *ctx;
 
-    clone.xsh = xs_daemon_open();
+    libxl_ctx_open_xstemp(ctx);
     toremove = flexarray_make(16, 1);
-    path = libxl_sprintf(&clone, "/local/domain/%d/device", domid);
-    l1 = libxl_xs_directory(&clone, XBT_NULL, path, &num1);
+    path = libxl_sprintf(ctx, "/local/domain/%d/device", domid);
+    l1 = libxl_xs_directory(ctx, XBT_NULL, path, &num1);
     if (!l1) {
-        XL_LOG(&clone, XL_LOG_ERROR, "%s is empty", path);
-        xs_daemon_close(clone.xsh);
+        XL_LOG(ctx, XL_LOG_ERROR, "%s is empty", path);
+        libxl_ctx_close_xstemp(ctx);
         return -1;
     }
     for (i = 0; i < num1; i++) {
-        path = libxl_sprintf(&clone, "/local/domain/%d/device/%s", domid, 
l1[i]);
-        l2 = libxl_xs_directory(&clone, XBT_NULL, path, &num2);
+        path = libxl_sprintf(ctx, "/local/domain/%d/device/%s", domid, l1[i]);
+        l2 = libxl_xs_directory(ctx, XBT_NULL, path, &num2);
         if (!l2)
             continue;
         for (j = 0; j < num2; j++) {
-            fe_path = libxl_sprintf(&clone, "/local/domain/%d/device/%s/%s", 
domid, l1[i], l2[j]);
-            be_path = libxl_xs_read(&clone, XBT_NULL, libxl_sprintf(&clone, 
"%s/backend", fe_path));
+            fe_path = libxl_sprintf(ctx, "/local/domain/%d/device/%s/%s", 
domid, l1[i], l2[j]);
+            be_path = libxl_xs_read(ctx, XBT_NULL, libxl_sprintf(ctx, 
"%s/backend", fe_path));
             if (be_path != NULL) {
-                if (libxl_device_destroy(&clone, be_path, force) > 0)
+                if (libxl_device_destroy(ctx, be_path, force) > 0)
                     n_watches++;
-                flexarray_set(toremove, n++, libxl_dirname(&clone, be_path));
+                flexarray_set(toremove, n++, libxl_dirname(ctx, be_path));
             } else {
-                xs_rm(clone.xsh, XBT_NULL, path);
+                xs_rm(ctx->xsh, XBT_NULL, path);
             }
         }
     }
     if (!force) {
-        nfds = xs_fileno(clone.xsh) + 1;
+        nfds = xs_fileno(ctx->xsh) + 1;
         /* Linux-ism */
         tv.tv_sec = LIBXL_DESTROY_TIMEOUT;
         tv.tv_usec = 0;
         while (n_watches > 0 && tv.tv_sec > 0) {
             FD_ZERO(&rfds);
-            FD_SET(xs_fileno(clone.xsh), &rfds);
+            FD_SET(xs_fileno(ctx->xsh), &rfds);
             if (select(nfds, &rfds, NULL, NULL, &tv) > 0) {
-                l1 = xs_read_watch(clone.xsh, &num1);
+                l1 = xs_read_watch(ctx->xsh, &num1);
                 if (l1 != NULL) {
-                    char *state = libxl_xs_read(&clone, XBT_NULL, l1[0]);
+                    char *state = libxl_xs_read(ctx, XBT_NULL, l1[0]);
                     if (!state || atoi(state) == 6) {
-                        xs_unwatch(clone.xsh, l1[0], l1[1]);
-                        xs_rm(clone.xsh, XBT_NULL, l1[1]);
-                        XL_LOG(&clone, XL_LOG_DEBUG, "Destroyed device backend 
at %s", l1[1]);
+                        xs_unwatch(ctx->xsh, l1[0], l1[1]);
+                        xs_rm(ctx->xsh, XBT_NULL, l1[1]);
+                        XL_LOG(ctx, XL_LOG_DEBUG, "Destroyed device backend at 
%s", l1[1]);
                         n_watches--;
                     }
                     free(l1);
@@ -266,10 +265,10 @@
     }
     for (i = 0; i < n; i++) {
         flexarray_get(toremove, i, (void**) &path);
-        xs_rm(clone.xsh, XBT_NULL, path);
+        xs_rm(ctx->xsh, XBT_NULL, path);
     }
     flexarray_free(toremove);
-    xs_daemon_close(clone.xsh);
+    libxl_ctx_close_xstemp(ctx);
     return 0;
 }
 
diff -r 0f35fb4f73d6 tools/libxl/libxl_internal.c
--- a/tools/libxl/libxl_internal.c      Thu Dec 03 13:52:02 2009 +0000
+++ b/tools/libxl/libxl_internal.c      Thu Dec 03 18:17:13 2009 +0000
@@ -26,6 +26,24 @@
 int libxl_error_set(struct libxl_ctx *ctx, int code)
 {
     return 0;
+}
+
+int libxl_ctx_open_xstemp(struct libxl_ctx *ctx)
+{
+    if (ctx->xsh_temp)
+        return -1;
+    ctx->xsh_temp = ctx->xsh;
+    ctx->xsh = xs_daemon_open();
+    return 0;
+}
+
+int libxl_ctx_close_xstemp(struct libxl_ctx *ctx)
+{
+    if (!ctx->xsh_temp)
+        return -1;
+    xs_daemon_close(ctx->xsh);
+    ctx->xsh = ctx->xsh_temp;
+    ctx->xsh_temp = NULL;
 }
 
 int libxl_ptr_add(struct libxl_ctx *ctx, void *ptr)
diff -r 0f35fb4f73d6 tools/libxl/libxl_internal.h
--- a/tools/libxl/libxl_internal.h      Thu Dec 03 13:52:02 2009 +0000
+++ b/tools/libxl/libxl_internal.h      Thu Dec 03 18:17:13 2009 +0000
@@ -61,6 +61,10 @@
 #define PCI_BAR_IO             0x01
 
 #define PRINTF_ATTRIBUTE(x, y) __attribute__((format(printf, x, y)))
+
+/* open/close a secondary xenstore connection */
+int libxl_ctx_open_xstemp(struct libxl_ctx *ctx);
+int libxl_ctx_close_xstemp(struct libxl_ctx *ctx);
 
 /* memory allocation tracking/helpers */
 int libxl_ptr_add(struct libxl_ctx *ctx, void *ptr);

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.