[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 05/18] xen/pvcalls: connect to a frontend
On 05/15/2017 04:35 PM, Stefano Stabellini wrote: Introduce a per-frontend data structure named pvcalls_back_priv. It contains pointers to the command ring, its event channel, a list of active sockets and a tree of passive sockets (passing sockets need to be looked up from the id on listen, accept and poll commands, while active sockets only on release). It would be useful to put this into a comment in pvcalls_back_priv definition. It also has an unbound workqueue to schedule the work of parsing and executing commands on the command ring. pvcallss_lock protects the two lists. In pvcalls_back_global, keep a list of connected frontends. Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx> CC: boris.ostrovsky@xxxxxxxxxx CC: jgross@xxxxxxxx --- drivers/xen/pvcalls-back.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 86eca19..876e577 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -44,13 +44,100 @@ struct pvcalls_back_global { struct rw_semaphore privs_lock; } pvcalls_back_global; +struct pvcalls_back_priv { + struct list_head list; + struct xenbus_device *dev; + struct xen_pvcalls_sring *sring; + struct xen_pvcalls_back_ring ring; + int irq; + struct list_head socket_mappings; + struct radix_tree_root socketpass_mappings; + struct rw_semaphore pvcallss_lock; Same question as before regarding using rw semaphore --- I only see down/up_writes. And what does the name (pvcallss) stand for? 
+ atomic_t work; + struct workqueue_struct *wq; + struct work_struct register_work; +}; + static void pvcalls_back_ioworker(struct work_struct *work) { } +static void pvcalls_back_work(struct work_struct *work) +{ +} + +static irqreturn_t pvcalls_back_event(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + static int backend_connect(struct xenbus_device *dev) { + int err, evtchn; + grant_ref_t ring_ref; + void *addr = NULL; + struct pvcalls_back_priv *priv = NULL; + + priv = kzalloc(sizeof(struct pvcalls_back_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u", + &evtchn); + if (err != 1) { + err = -EINVAL; + xenbus_dev_fatal(dev, err, "reading %s/event-channel", + dev->otherend); + goto error; + } + + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref); + if (err != 1) { + err = -EINVAL; + xenbus_dev_fatal(dev, err, "reading %s/ring-ref", + dev->otherend); + goto error; + } + + err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &addr); + if (err < 0) + goto error; I'd move this closer to first use, below. 
-boris + + err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id, evtchn, + pvcalls_back_event, 0, + "pvcalls-backend", dev); + if (err < 0) + goto error; + + priv->wq = alloc_workqueue("pvcalls_back_wq", WQ_UNBOUND, 1); + if (!priv->wq) { + err = -ENOMEM; + goto error; + } + INIT_WORK(&priv->register_work, pvcalls_back_work); + priv->dev = dev; + priv->sring = addr; + BACK_RING_INIT(&priv->ring, priv->sring, XEN_PAGE_SIZE * 1); + priv->irq = err; + INIT_LIST_HEAD(&priv->socket_mappings); + INIT_RADIX_TREE(&priv->socketpass_mappings, GFP_KERNEL); + init_rwsem(&priv->pvcallss_lock); + dev_set_drvdata(&dev->dev, priv); + down_write(&pvcalls_back_global.privs_lock); + list_add_tail(&priv->list, &pvcalls_back_global.privs); + up_write(&pvcalls_back_global.privs_lock); + queue_work(priv->wq, &priv->register_work); + return 0; + + error: + if (addr != NULL) + xenbus_unmap_ring_vfree(dev, addr); + if (priv->wq) + destroy_workqueue(priv->wq); + unbind_from_irqhandler(priv->irq, dev); + kfree(priv); + return err; } static int backend_disconnect(struct xenbus_device *dev) _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.