
[Xen-devel] [PATCH v4 06/13] xen/pvcalls: implement bind command

Send PVCALLS_BIND to the backend. Introduce a new structure, part of
struct sock_mapping, to store information specific to passive sockets.

Introduce a status field to keep track of the state of the passive
socket.

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
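Not part of the commit message: the response-slot handshake at the end
of pvcalls_front_bind() is easier to follow in isolation, so here is a
minimal standalone sketch of the same pattern. It uses plain C11
atomics in place of the kernel's READ_ONCE/WRITE_ONCE and smp_mb(),
and every name in it is illustrative rather than taken from the
pvcalls code:

    #include <stdatomic.h>
    #include <stdint.h>

    #define INVALID_ID UINT32_MAX

    struct rsp_slot {
            _Atomic uint32_t req_id; /* INVALID_ID while the slot is free */
            int32_t ret;             /* backend writes this before req_id */
    };

    static int wait_for_response(struct rsp_slot *slot, uint32_t req_id)
    {
            int ret;

            /* Poll until the backend echoes our req_id (the wait_event()
             * in the patch); the acquire load pairs with the backend's
             * release store of req_id.
             */
            while (atomic_load_explicit(&slot->req_id,
                                        memory_order_acquire) != req_id)
                    ;
            /* Read ret first ... */
            ret = slot->ret;
            /* ... then mark the slot free for reuse, mirroring the
             * smp_mb() + WRITE_ONCE(req_id, PVCALLS_INVALID_ID) pair
             * in the patch below.
             */
            atomic_store_explicit(&slot->req_id, INVALID_ID,
                                  memory_order_release);
            return ret;
    }
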
 drivers/xen/pvcalls-front.c | 69 +++++++++++++++++++++++++++++++++++++++++++++
 drivers/xen/pvcalls-front.h |  3 ++
 2 files changed, 72 insertions(+)

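For context, the status field introduced here is meant to gate state
transitions on the passive socket. Below is a rough sketch of the kind
of check a later listen implementation can build on; the defines
mirror the ones added in this patch, everything else is hypothetical:

    #include <errno.h>
    #include <stdint.h>

    #define PVCALLS_STATUS_UNINITIALIZED 0
    #define PVCALLS_STATUS_BIND          1
    #define PVCALLS_STATUS_LISTEN        2

    /* listen() is only valid on a passive socket that has been bound */
    static int passive_to_listen(uint8_t *status)
    {
            if (*status != PVCALLS_STATUS_BIND)
                    return -EOPNOTSUPP;
            *status = PVCALLS_STATUS_LISTEN;
            return 0;
    }
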
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index ef511b6..50b6588 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -72,6 +72,14 @@ struct sock_mapping {
 
                        wait_queue_head_t inflight_conn_req;
                } active;
+               struct {
+               /* Socket status */
+#define PVCALLS_STATUS_UNINITIALIZED 0
+#define PVCALLS_STATUS_BIND          1
+#define PVCALLS_STATUS_LISTEN        2
+                       uint8_t status;
+                       wait_queue_head_t inflight_accept_req;
+               } passive;
        };
 };
 
@@ -346,6 +354,67 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
        return ret;
 }
 
+int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+       struct pvcalls_bedata *bedata;
+       struct sock_mapping *map = NULL;
+       struct xen_pvcalls_request *req;
+       int notify, req_id, ret;
+
+       pvcalls_enter();
+       if (!pvcalls_front_dev) {
+               pvcalls_exit();
+               return -ENOTCONN;
+       }
+       if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) {
+               pvcalls_exit();
+               return -ENOTSUPP;
+       }
+       bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+       map = (struct sock_mapping *) sock->sk->sk_send_head;
+       if (map == NULL) {
+               pvcalls_exit();
+               return -ENOTSOCK;
+       }
+
+       spin_lock(&bedata->socket_lock);
+       ret = get_request(bedata, &req_id);
+       if (ret < 0) {
+               spin_unlock(&bedata->socket_lock);
+               pvcalls_exit();
+               return ret;
+       }
+       req = RING_GET_REQUEST(&bedata->ring, req_id);
+       req->req_id = req_id;
+       map->sock = sock;
+       req->cmd = PVCALLS_BIND;
+       req->u.bind.id = (uint64_t) map;
+       memcpy(req->u.bind.addr, addr, sizeof(*addr));
+       req->u.bind.len = addr_len;
+
+       init_waitqueue_head(&map->passive.inflight_accept_req);
+
+       map->active_socket = false;
+
+       bedata->ring.req_prod_pvt++;
+       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+       spin_unlock(&bedata->socket_lock);
+       if (notify)
+               notify_remote_via_irq(bedata->irq);
+
+       wait_event(bedata->inflight_req,
+                  READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+       map->passive.status = PVCALLS_STATUS_BIND;
+       ret = bedata->rsp[req_id].ret;
+       /* read ret, then set this rsp slot to be reused */
+       smp_mb();
+       WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+       pvcalls_exit();
+       return ret;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
        { "pvcalls" },
        { "" }
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
index 63b0417..8b0a274 100644
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -6,5 +6,8 @@
 int pvcalls_front_socket(struct socket *sock);
 int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
                          int addr_len, int flags);
+int pvcalls_front_bind(struct socket *sock,
+                      struct sockaddr *addr,
+                      int addr_len);
 
 #endif
-- 
1.9.1