[Xen-devel] [PATCH 09/11] mini-os/xenbus: Sort out request and watch locking
Make the xenbus_req_lock public, and lock it everywhere it is needed.
It needs to protect not just the xenbus request ring itself, but also
a number of internal data structures.
Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
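(Not part of the patch, but for illustration: a minimal sketch of what
the new rule means for code that drains one of its own queues directly
rather than through the public functions.  drain_events() below is a
hypothetical helper, not a mini-os API, and the actual event handling
is elided.)

    #include <mini-os/xenbus.h>
    #include <mini-os/xmalloc.h>

    static void drain_events(struct xenbus_event_queue *queue)
    {
        struct xenbus_event *event;

        /* Any access to a live queue must hold xenbus_req_lock. */
        spin_lock(&xenbus_req_lock);
        while ((event = MINIOS_STAILQ_FIRST(&queue->events)) != NULL) {
            MINIOS_STAILQ_REMOVE_HEAD(&queue->events, entry);
            /* Drop the lock while doing real work on the event. */
            spin_unlock(&xenbus_req_lock);
            /* ... handle the event here ... */
            free(event);
            spin_lock(&xenbus_req_lock);
        }
        spin_unlock(&xenbus_req_lock);
    }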
 include/mini-os/xenbus.h |    7 +++++++
 xen/xenbus/xenbus.c      |   42 ++++++++++++++++++++++++++++++++++++------
 2 files changed, 43 insertions(+), 6 deletions(-)
diff --git a/include/mini-os/xenbus.h b/include/mini-os/xenbus.h
index b8d152d..a811c19 100644
--- a/include/mini-os/xenbus.h
+++ b/include/mini-os/xenbus.h
@@ -5,6 +5,7 @@
#include <mini-os/sched.h>
#include <mini-os/waittypes.h>
#include <mini-os/queue.h>
+#include <mini-os/spinlock.h>
typedef unsigned long xenbus_transaction_t;
#define XBT_NIL ((xenbus_transaction_t)0)
@@ -23,6 +24,12 @@ static inline void init_xenbus(void)
set to a malloc'd copy of the value. */
char *xenbus_read(xenbus_transaction_t xbt, const char *path, char **value);
+/* All accesses to an active xenbus_event_queue must occur with this
+ * lock held. The public functions here will do that for you, but
+ * your own accesses to the queue (including the contained waitq)
+ * must be protected by the lock. */
+extern spinlock_t xenbus_req_lock;
+
/* Queue for events (watches or async request replies - see below) */
struct xenbus_event {
union {
diff --git a/xen/xenbus/xenbus.c b/xen/xenbus/xenbus.c
index bf4bb45..7b391c5 100644
--- a/xen/xenbus/xenbus.c
+++ b/xen/xenbus/xenbus.c
@@ -56,6 +56,17 @@ struct xenbus_req_info
};
+spinlock_t xenbus_req_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * This lock protects:
+ * the xenbus request ring
+ * req_info[]
+ * all live struct xenbus_event_queue (including xenbus_default_watch_queue)
+ * nr_live_reqs
+ * req_wq
+ * watches
+ */
+
void xenbus_event_queue_init(struct xenbus_event_queue *queue)
{
MINIOS_STAILQ_INIT(&queue->events);
@@ -64,6 +75,7 @@ void xenbus_event_queue_init(struct xenbus_event_queue *queue)
static struct xenbus_event *remove_event(struct xenbus_event_queue *queue)
{
+ /* Called with lock held */
struct xenbus_event *event;
event = MINIOS_STAILQ_FIRST(&queue->events);
@@ -78,6 +90,7 @@ static struct xenbus_event *remove_event(struct xenbus_event_queue *queue)
static void queue_event(struct xenbus_event_queue *queue,
struct xenbus_event *event)
{
+ /* Called with lock held */
MINIOS_STAILQ_INSERT_TAIL(&queue->events, event, entry);
wake_up(&queue->waitq);
}
@@ -86,11 +99,15 @@ static struct xenbus_event *await_event(struct xenbus_event_queue *queue)
{
struct xenbus_event *event;
DEFINE_WAIT(w);
+ spin_lock(&xenbus_req_lock);
while (!(event = remove_event(queue))) {
add_waiter(w, queue->waitq);
+ spin_unlock(&xenbus_req_lock);
schedule();
+ spin_lock(&xenbus_req_lock);
}
remove_waiter(w, queue->waitq);
+ spin_unlock(&xenbus_req_lock);
return event;
}
@@ -269,6 +286,8 @@ static void xenbus_thread_func(void *ign)
xenstore_buf->rsp_cons += msg.len + sizeof(msg);
+ spin_lock(&xenbus_req_lock);
+
MINIOS_LIST_FOREACH(watch, &watches, entry)
if (!strcmp(watch->token, event->token)) {
event->watch = watch;
@@ -282,6 +301,8 @@ static void xenbus_thread_func(void *ign)
printk("unexpected watch token %s\n", event->token);
free(event);
}
+
+ spin_unlock(&xenbus_req_lock);
}
else
@@ -293,8 +314,10 @@ static void xenbus_thread_func(void *ign)
MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
msg.len + sizeof(msg));
xenstore_buf->rsp_cons += msg.len + sizeof(msg);
+ spin_lock(&xenbus_req_lock);
queue_event(req_info[msg.req_id].reply_queue,
req_info[msg.req_id].for_queue);
+ spin_unlock(&xenbus_req_lock);
}
}
}
@@ -307,19 +330,18 @@ static void xenbus_evtchn_handler(evtchn_port_t port, struct pt_regs *regs,
}
static int nr_live_reqs;
-static spinlock_t req_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_WAIT_QUEUE_HEAD(req_wq);
/* Release a xenbus identifier */
void xenbus_id_release(int id)
{
BUG_ON(!req_info[id].reply_queue);
- spin_lock(&req_lock);
+ spin_lock(&xenbus_req_lock);
req_info[id].reply_queue = 0;
nr_live_reqs--;
if (nr_live_reqs == NR_REQS - 1)
wake_up(&req_wq);
- spin_unlock(&req_lock);
+ spin_unlock(&xenbus_req_lock);
}
int xenbus_id_allocate(struct xenbus_event_queue *reply_queue,
@@ -330,10 +352,10 @@ int xenbus_id_allocate(struct xenbus_event_queue *reply_queue,
while (1)
{
- spin_lock(&req_lock);
+ spin_lock(&xenbus_req_lock);
if (nr_live_reqs < NR_REQS)
break;
- spin_unlock(&req_lock);
+ spin_unlock(&xenbus_req_lock);
wait_event(req_wq, (nr_live_reqs < NR_REQS));
}
@@ -349,7 +371,7 @@ int xenbus_id_allocate(struct xenbus_event_queue *reply_queue,
req_info[o_probe].reply_queue = reply_queue;
req_info[o_probe].for_queue = for_queue;
probe = (o_probe + 1) % NR_REQS;
- spin_unlock(&req_lock);
+ spin_unlock(&xenbus_req_lock);
return o_probe;
}
@@ -366,14 +388,18 @@ void xenbus_watch_prepare(struct xenbus_watch *watch)
watch->token = malloc(size);
int r = snprintf(watch->token,size,"*%p",(void*)watch);
BUG_ON(!(r > 0 && r < size));
+ spin_lock(&xenbus_req_lock);
MINIOS_LIST_INSERT_HEAD(&watches, watch, entry);
+ spin_unlock(&xenbus_req_lock);
}
void xenbus_watch_release(struct xenbus_watch *watch)
{
if (!watch->token)
return;
+ spin_lock(&xenbus_req_lock);
MINIOS_LIST_REMOVE(watch, entry);
+ spin_unlock(&xenbus_req_lock);
free(watch->token);
watch->token = 0;
}
@@ -624,7 +650,9 @@ char* xenbus_watch_path_token( xenbus_transaction_t xbt, const char *path, const
watch->token = strdup(token);
watch->events = events;
+ spin_lock(&xenbus_req_lock);
MINIOS_LIST_INSERT_HEAD(&watches, watch, entry);
+ spin_unlock(&xenbus_req_lock);
rep = xenbus_msg_reply(XS_WATCH, xbt, req, ARRAY_SIZE(req));
@@ -654,6 +682,7 @@ char* xenbus_unwatch_path_token( xenbus_transaction_t xbt, const char *path, con
if (msg) return msg;
free(rep);
+ spin_lock(&xenbus_req_lock);
MINIOS_LIST_FOREACH(watch, &watches, entry)
if (!strcmp(watch->token, token)) {
free(watch->token);
@@ -661,6 +690,7 @@ char* xenbus_unwatch_path_token( xenbus_transaction_t xbt, const char *path, con
free(watch);
break;
}
+ spin_unlock(&xenbus_req_lock);
return NULL;
}
--
1.7.10.4