
[Xen-devel] [PATCH V5] libxenstore: filter watch events in libxenstore when we unwatch



libxenstore queues watch events via a thread and notifies the user.
Sometimes xs_unwatch is called before all related messages have been read.
The use case is a non-threaded libevent loop with two events A and B:
    - Event A destroys something and calls xs_unwatch;
    - Event B notifies that a node has changed in XenStore.
As events are handled one by one, event A can be handled before event B.
So on the next xs_read_watch the user could retrieve an event for the
unwatched token, and a segfault occurs if the token stores a pointer to
the destroyed structure (e.g. "backend:0xcafe").

To avoid problems for existing applications using libxenstore, this
behaviour is only enabled if XS_UNWATCH_FILTER is passed to xs_open.
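
With the patch applied, applications opt in at open time, e.g.:

    struct xs_handle *xsh = xs_open(XS_UNWATCH_FILTER);

Callers that do not pass the flag keep the previous semantics.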

Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
---

  Modifications between V4 and V5:
    - Use tab instead of space for the indentation.

  Modifications between V3 and V4:
    - Rename XS_UNWATCH_SAFE to XS_UNWATCH_FILTER;
    - Improve documentation;
    - Fix sub-path checking in xs_unwatch.

  Modifications between V2 and V3:
    - Add XS_UNWATCH_SAFE;
    - Rename xs_clear_watch_pipe to xs_maybe_clear_watch_pipe.

  Modifications between V1 and V2:
    - Add xs_clear_watch_pipe to avoid code duplication;
    - Modify commit message by Ian Jackson;
    - Rework list filtering.

 tools/xenstore/xenstore.h |   21 ++++++++++++
 tools/xenstore/xs.c       |   84 ++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 97 insertions(+), 8 deletions(-)

diff --git a/tools/xenstore/xenstore.h b/tools/xenstore/xenstore.h
index 7259e49..fdf5e76 100644
--- a/tools/xenstore/xenstore.h
+++ b/tools/xenstore/xenstore.h
@@ -27,6 +27,27 @@
 #define XS_OPEN_READONLY       1UL<<0
 #define XS_OPEN_SOCKETONLY      1UL<<1
 
+/*
+ * Setting XS_UNWATCH_FILTER arranges that after xs_unwatch, no
+ * related watch events will be delivered via xs_read_watch.  But
+ * this relies on each (token, subpath) pair being unique.
+ *
+ * XS_UNWATCH_FILTER clear          XS_UNWATCH_FILTER set
+ *
+ * Even after xs_unwatch, "stale"   After xs_unwatch returns, no
+ * instances of the watch event     watch events with the same
+ * may be delivered.                token and with the same subpath
+ *                                  will be delivered.
+ *
+ * A path and a subpath can be      The application must avoid
+ * registered with the same token.  registering a path (/foo/) and
+ *                                  a subpath (/foo/bar) with the
+ *                                  same token until a successful
+ *                                  xs_unwatch for the first watch
+ *                                  has returned.
+ */
+#define XS_UNWATCH_FILTER     1UL<<2
+
 struct xs_handle;
 typedef uint32_t xs_transaction_t;
 
diff --git a/tools/xenstore/xs.c b/tools/xenstore/xs.c
index b951015..86ef6c7 100644
--- a/tools/xenstore/xs.c
+++ b/tools/xenstore/xs.c
@@ -67,6 +67,8 @@ struct xs_handle {
 
        /* Clients can select() on this pipe to wait for a watch to fire. */
        int watch_pipe[2];
+       /* Filter watch events in xs_unwatch? */
+       bool unwatch_filter;
 
        /*
          * A list of replies. Currently only one will ever be outstanding
@@ -125,6 +127,8 @@ struct xs_handle {
        struct list_head watch_list;
        /* Clients can select() on this pipe to wait for a watch to fire. */
        int watch_pipe[2];
+       /* Filter watch events in xs_unwatch? */
+       bool unwatch_filter;
 };
 
 #define mutex_lock(m)          ((void)0)
@@ -247,6 +251,8 @@ static struct xs_handle *get_handle(const char *connect_to)
        /* Watch pipe is allocated on demand in xs_fileno(). */
        h->watch_pipe[0] = h->watch_pipe[1] = -1;
 
+       h->unwatch_filter = false;
+
 #ifdef USE_PTHREAD
        pthread_mutex_init(&h->watch_mutex, NULL);
        pthread_cond_init(&h->watch_condvar, NULL);
@@ -287,6 +293,9 @@ struct xs_handle *xs_open(unsigned long flags)
        if (!xsh && !(flags & XS_OPEN_SOCKETONLY))
                xsh = get_handle(xs_domain_dev());
 
+       if (xsh && (flags & XS_UNWATCH_FILTER))
+               xsh->unwatch_filter = true;
+
        return xsh;
 }
 
@@ -753,6 +762,19 @@ bool xs_watch(struct xs_handle *h, const char *path, const char *token)
                                ARRAY_SIZE(iov), NULL));
 }
 
+
+/* Clear the pipe token if there are no more pending watches.
+ * We assume the watch_mutex is already held.
+ */
+static void xs_maybe_clear_watch_pipe(struct xs_handle *h)
+{
+       char c;
+
+       if (list_empty(&h->watch_list) && (h->watch_pipe[0] != -1))
+               while (read(h->watch_pipe[0], &c, 1) != 1)
+                       continue;
+}
+
 /* Find out what node change was on (will block if nothing pending).
  * Returns array of two pointers: path and token, or NULL.
  * Call free() after use.
@@ -761,7 +783,7 @@ static char **read_watch_internal(struct xs_handle *h, unsigned int *num,
                                  int nonblocking)
 {
        struct xs_stored_msg *msg;
-       char **ret, *strings, c = 0;
+       char **ret, *strings;
        unsigned int num_strings, i;
 
        mutex_lock(&h->watch_mutex);
@@ -798,11 +820,7 @@ static char **read_watch_internal(struct xs_handle *h, unsigned int *num,
        msg = list_top(&h->watch_list, struct xs_stored_msg, list);
        list_del(&msg->list);
 
-       /* Clear the pipe token if there are no more pending watches. */
-       if (list_empty(&h->watch_list) && (h->watch_pipe[0] != -1))
-               while (read(h->watch_pipe[0], &c, 1) != 1)
-                       continue;
-
+       xs_maybe_clear_watch_pipe(h);
        mutex_unlock(&h->watch_mutex);
 
        assert(msg->hdr.type == XS_WATCH_EVENT);
@@ -855,14 +873,64 @@ char **xs_read_watch(struct xs_handle *h, unsigned int *num)
 bool xs_unwatch(struct xs_handle *h, const char *path, const char *token)
 {
        struct iovec iov[2];
+       struct xs_stored_msg *msg, *tmsg;
+       bool res;
+       char *s, *p;
+       unsigned int i;
+       char *l_token, *l_path;
 
        iov[0].iov_base = (char *)path;
        iov[0].iov_len = strlen(path) + 1;
        iov[1].iov_base = (char *)token;
        iov[1].iov_len = strlen(token) + 1;
 
-       return xs_bool(xs_talkv(h, XBT_NULL, XS_UNWATCH, iov,
-                               ARRAY_SIZE(iov), NULL));
+       res = xs_bool(xs_talkv(h, XBT_NULL, XS_UNWATCH, iov,
+                              ARRAY_SIZE(iov), NULL));
+
+       if (!h->unwatch_filter) /* Don't filter the watch list */
+               return res;
+
+
+       /* Filter the watch list to remove potentially stale messages */
+       mutex_lock(&h->watch_mutex);
+
+       if (list_empty(&h->watch_list)) {
+               mutex_unlock(&h->watch_mutex);
+               return res;
+       }
+
+       list_for_each_entry_safe(msg, tmsg, &h->watch_list, list) {
+               assert(msg->hdr.type == XS_WATCH_EVENT);
+
+               s = msg->body;
+
+               l_token = NULL;
+               l_path = NULL;
+
+               for (p = s, i = 0; p < msg->body + msg->hdr.len; p++) {
+                       if (*p == '\0')
+                       {
+                               if (i == XS_WATCH_TOKEN)
+                                       l_token = s;
+                               else if (i == XS_WATCH_PATH)
+                                       l_path = s;
+                               i++;
+                               s = p + 1;
+                       }
+               }
+
+               if (l_token && !strcmp(token, l_token) &&
+                   l_path && xs_path_is_subpath(path, l_path)) {
+                       list_del(&msg->list);
+                       free(msg);
+               }
+       }
+
+       xs_maybe_clear_watch_pipe(h);
+
+       mutex_unlock(&h->watch_mutex);
+
+       return res;
 }
 
 /* Start a transaction: changes by others will not be seen during this
-- 
Julien Grall

