[Xen-devel] [PATCH 13/18] xen/pvcalls: implement release command



Release both active and passive sockets. For active sockets, make sure
to avoid possible conflicts with the ioworker reading and writing to
those sockets concurrently: set map->release to let the ioworker know
atomically that the socket is about to be released, then wait until the
ioworker has removed the socket from its list.

Unmap the indexes page and the data rings.
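
For context, the ioworker is expected to acknowledge the release
roughly as sketched below. This is an illustration only: the real loop
lives in the ioworker patch earlier in this series, and the helper name
and the iow->live_maps list head name are made up here.

  static void pvcalls_back_ioworker_reap(struct pvcalls_ioworker *iow)
  {
          struct sock_mapping *map, *n;
          unsigned long flags;

          spin_lock_irqsave(&iow->lock, flags);
          list_for_each_entry_safe(map, n, &iow->live_maps, queue) {
                  if (atomic_read(&map->release) > 0) {
                          /* stop servicing this socket... */
                          list_del_init(&map->queue);
                          /* ...and unblock pvcalls_back_release_active() */
                          atomic_set(&map->release, 0);
                  }
          }
          spin_unlock_irqrestore(&iow->lock, flags);
  }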

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
 drivers/xen/pvcalls-back.c | 94 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 93 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index d5b7412..22c6426 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -253,13 +253,105 @@ static int pvcalls_back_release_active(struct xenbus_device *dev,
                                       struct pvcalls_back_priv *priv,
                                       struct sock_mapping *map)
 {
+       struct pvcalls_ioworker *iow =
+           &pvcalls_back_global.ioworkers[map->data_worker];
+       unsigned long flags;
+       bool in_loop = false;
+
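+       /*
+        * Stop new work first: silence the event channel irq and restore
+        * the saved sk_data_ready so no further reads or writes get
+        * queued for this socket.
+        */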
+       disable_irq(map->irq);
+       if (map->sock->sk != NULL) {
+               lock_sock(map->sock->sk);
+               map->sock->sk->sk_user_data = NULL;
+               map->sock->sk->sk_data_ready = map->saved_data_ready;
+               release_sock(map->sock->sk);
+       }
+
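+       /* Atomically flag the socket for release; the ioworker checks this. */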
+       atomic_set(&map->release, 1);
+
+       /*
+        * To avoid concurrency problems with ioworker, check if the socket
+        * has any outstanding io requests. If so, wait until the ioworker
+        * removes it from the list before proceeding.
+        */
+       spin_lock_irqsave(&iow->lock, flags);
+       in_loop = !list_empty(&map->queue);
+       spin_unlock_irqrestore(&iow->lock, flags);
+
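+       /*
+        * The ioworker still has this socket queued: kick it and wait for
+        * it to drop the socket from its list, which it signals by
+        * resetting map->release to 0.
+        */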
+       if (in_loop) {
+               atomic_inc(&iow->io);
+               queue_work_on(map->data_worker, pvcalls_back_global.wq,
+                             &iow->register_work);
+               while (atomic_read(&map->release) > 0)
+                       cond_resched();
+       }
+
+       down_write(&priv->pvcallss_lock);
+       list_del(&map->list);
+       up_write(&priv->pvcallss_lock);
+
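+       /*
+        * Nothing can reach the map any more: unmap the indexes page and
+        * the data rings, tear down the irq and free everything.
+        */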
+       xenbus_unmap_ring_vfree(dev, (void *)map->bytes);
+       xenbus_unmap_ring_vfree(dev, (void *)map->ring);
+       unbind_from_irqhandler(map->irq, map);
+
+       sock_release(map->sock);
+       kfree(map);
+
+       return 0;
+}
+
+static int pvcalls_back_release_passive(struct xenbus_device *dev,
+                                       struct pvcalls_back_priv *priv,
+                                       struct sockpass_mapping *mappass)
+{
+       if (mappass->sock->sk != NULL) {
+               lock_sock(mappass->sock->sk);
+               mappass->sock->sk->sk_user_data = NULL;
+               mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
+               release_sock(mappass->sock->sk);
+       }
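+
+       /*
+        * Remove the mapping under the lock, but only flush and destroy
+        * its workqueue after dropping it: the accept work queued on
+        * mappass->wq takes the same lock.
+        */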
+       down_write(&priv->pvcallss_lock);
+       radix_tree_delete(&priv->socketpass_mappings, mappass->id);
+       sock_release(mappass->sock);
+       up_write(&priv->pvcallss_lock);
+
+       flush_workqueue(mappass->wq);
+       destroy_workqueue(mappass->wq);
+       kfree(mappass);
+
        return 0;
 }
 
 static int pvcalls_back_release(struct xenbus_device *dev,
                                struct xen_pvcalls_request *req)
 {
-       return 0;
+       struct pvcalls_back_priv *priv;
+       struct sock_mapping *map, *n;
+       struct sockpass_mapping *mappass;
+       int ret = 0;
+       struct xen_pvcalls_response *rsp;
+
+       priv = dev_get_drvdata(&dev->dev);
+
+       list_for_each_entry_safe(map, n, &priv->socket_mappings, list) {
+               if (map->id == req->u.release.id) {
+                       ret = pvcalls_back_release_active(dev, priv, map);
+                       goto out;
+               }
+       }
+       mappass = radix_tree_lookup(&priv->socketpass_mappings,
+                                   req->u.release.id);
+       if (mappass != NULL)
+               ret = pvcalls_back_release_passive(dev, priv, mappass);
+       else
+               ret = -ENOTSOCK;
+
+out:
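+       /* Always send a response back to the frontend. */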
+       rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++);
+       rsp->req_id = req->req_id;
+       rsp->u.release.id = req->u.release.id;
+       rsp->cmd = req->cmd;
+       rsp->ret = ret;
+       return 1;
 }
 
 static void __pvcalls_back_accept(struct work_struct *work)
-- 
1.9.1

