[Xen-devel] [PATCH v2 09/18] xen/pvcalls: implement bind command
Allocate a socket. Track the allocated passive sockets with a new data
structure named sockpass_mapping. It contains an unbound workqueue to
schedule delayed work for the accept and poll commands. It also has a
reqcopy field to be used to store a copy of a request for delayed work.
Reads/writes to it are protected by a lock (the "copy_lock" spinlock).

Initialize the workqueue in pvcalls_back_bind.

Implement the bind command with inet_bind. The pass_sk_data_ready event
handler will be added later.

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
 drivers/xen/pvcalls-back.c | 87 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 86 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 65fbc39..d3278bd 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -82,6 +82,18 @@ struct sock_mapping {
 	struct pvcalls_ioworker ioworker;
 };
 
+struct sockpass_mapping {
+	struct list_head list;
+	struct pvcalls_back_priv *priv;
+	struct socket *sock;
+	uint64_t id;
+	struct xen_pvcalls_request reqcopy;
+	spinlock_t copy_lock;
+	struct workqueue_struct *wq;
+	struct work_struct register_work;
+	void (*saved_data_ready)(struct sock *sk);
+};
+
 static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
 static int pvcalls_back_release_active(struct xenbus_device *dev,
 				       struct pvcalls_back_priv *priv,
@@ -253,10 +265,83 @@ static int pvcalls_back_release(struct xenbus_device *dev,
 	return 0;
 }
 
+static void __pvcalls_back_accept(struct work_struct *work)
+{
+}
+
+static void pvcalls_pass_sk_data_ready(struct sock *sock)
+{
+}
+
 static int pvcalls_back_bind(struct xenbus_device *dev,
 			     struct xen_pvcalls_request *req)
 {
-	return 0;
+	struct pvcalls_back_priv *priv;
+	int ret, err;
+	struct socket *sock;
+	struct sockpass_mapping *map = NULL;
+	struct xen_pvcalls_response *rsp;
+
+	priv = dev_get_drvdata(&dev->dev);
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_WORK(&map->register_work, __pvcalls_back_accept);
+	spin_lock_init(&map->copy_lock);
+	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
+	if (!map->wq) {
+		ret = -ENOMEM;
+		kfree(map);
+		goto out;
+	}
+
+	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
+	if (ret < 0) {
+		destroy_workqueue(map->wq);
+		kfree(map);
+		goto out;
+	}
+
+	ret = inet_bind(sock, (struct sockaddr *)&req->u.bind.addr,
+			req->u.bind.len);
+	if (ret < 0) {
+		destroy_workqueue(map->wq);
+		kfree(map);
+		goto out;
+	}
+
+	map->priv = priv;
+	map->sock = sock;
+	map->id = req->u.bind.id;
+
+	down(&priv->socket_lock);
+	err = radix_tree_insert(&priv->socketpass_mappings, map->id,
+				map);
+	up(&priv->socket_lock);
+	if (err) {
+		ret = err;
+		destroy_workqueue(map->wq);
+		kfree(map);
+		goto out;
+	}
+
+	write_lock_bh(&sock->sk->sk_callback_lock);
+	map->saved_data_ready = sock->sk->sk_data_ready;
+	sock->sk->sk_user_data = map;
+	sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
+	write_unlock_bh(&sock->sk->sk_callback_lock);
+
+out:
+	rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.bind.id = req->u.bind.id;
+	rsp->ret = ret;
+	return ret;
 }
 
 static int pvcalls_back_listen(struct xenbus_device *dev,
-- 
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
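For context, a minimal sketch of how a deferred worker might consume the
reqcopy field under copy_lock, as described in the commit message above.
This is not part of the patch: the real accept/poll handling arrives later
in the series, and every name below other than the sockpass_mapping fields
introduced here is an assumption.

/*
 * Illustrative sketch only, not from this patch: a deferred worker that
 * takes a private snapshot of the stashed request while holding
 * copy_lock, then acts on it with the lock dropped.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <xen/interface/io/pvcalls.h>

static void example_deferred_worker(struct work_struct *work)
{
	struct sockpass_mapping *mappass =
		container_of(work, struct sockpass_mapping, register_work);
	struct xen_pvcalls_request req;

	/* Copy the stashed request while holding copy_lock ... */
	spin_lock(&mappass->copy_lock);
	req = mappass->reqcopy;
	/* ... and clear the slot so the next command can be stashed
	 * (assumed convention: cmd == 0 means "slot free"). */
	mappass->reqcopy.cmd = 0;
	spin_unlock(&mappass->copy_lock);

	/* Act on the private copy (accept or poll) outside the lock,
	 * so the ring request handler is never held up by slow work. */
}

The point of the pattern is that the command handler only stashes the
request and queues work on the unbound workqueue, while the potentially
blocking part runs later against a private copy, with copy_lock held only
for the copy itself.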