
Re: [Xen-devel] [PATCH 5/7] xen/9pfs: send requests to the backend



On Tue, 7 Mar 2017, Boris Ostrovsky wrote:
> On 03/06/2017 03:01 PM, Stefano Stabellini wrote:
> > Implement struct p9_trans_module create and close functions by looking
> > at the available Xen 9pfs frontend-backend connections. We don't expect
> > many frontend-backend connections, thus walking a list is OK.
> > 
> > Send requests to the backend by copying each request to one of the
> > available rings (each frontend-backend connection comes with multiple
> > rings). Handle the ring and notifications following the 9pfs
> > specification. If there are not enough free bytes on the ring for the
> > request, wait on the wait_queue: the backend will send a notification
> > after consuming more requests.
> > 
> > Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> > CC: boris.ostrovsky@xxxxxxxxxx
> > CC: jgross@xxxxxxxx
> > CC: Eric Van Hensbergen <ericvh@xxxxxxxxx>
> > CC: Ron Minnich <rminnich@xxxxxxxxxx>
> > CC: Latchesar Ionkov <lucho@xxxxxxxxxx>
> > CC: v9fs-developer@xxxxxxxxxxxxxxxxxxxxx
> > ---
> >  net/9p/trans_xen.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 82 insertions(+), 1 deletion(-)
> > 
> > diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
> > index 9f6cf8d..4e26556 100644
> > --- a/net/9p/trans_xen.c
> > +++ b/net/9p/trans_xen.c
> > @@ -47,22 +47,103 @@ struct xen_9pfs_front_priv {
> >  };
> >  static LIST_HEAD(xen_9pfs_devs);
> >  
> > +/* We don't currently allow canceling of requests */
> >  static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
> >  {
> > -   return 0;
> > +   return 1;
> >  }
> >  
> >  static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
> >  {
> > +   struct xen_9pfs_front_priv *priv = NULL;
> > +
> > +   list_for_each_entry(priv, &xen_9pfs_devs, list) {
> > +           if (!strcmp(priv->tag, addr))
> > +                   break;
> > +   }
> 
> 
> You could simplify this (and p9_xen_close()) by assigning client and
> returning from inside the 'if' statement.

I'll do that.


> I am also not sure you need to initialize priv.
 
With the new changes, I won't need to.
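
Something like this (sketch only, untested):

    static int p9_xen_create(struct p9_client *client, const char *addr,
                             char *args)
    {
            struct xen_9pfs_front_priv *priv;

            list_for_each_entry(priv, &xen_9pfs_devs, list) {
                    if (!strcmp(priv->tag, addr)) {
                            /* Claim the matching connection and return
                             * from inside the loop, as suggested; priv
                             * no longer needs an initializer. */
                            priv->client = client;
                            return 0;
                    }
            }
            /* No frontend-backend connection with this tag. */
            return -EINVAL;
    }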

 
> > +   if (!priv || strcmp(priv->tag, addr))
> > +           return -EINVAL;
> > +
> > +   priv->client = client; 
> >     return 0;
> >  }
> >  
> >  static void p9_xen_close(struct p9_client *client)
> >  {
> > +   struct xen_9pfs_front_priv *priv = NULL;
> > +
> > +   list_for_each_entry(priv, &xen_9pfs_devs, list) {
> > +           if (priv->client == client)
> > +                   break;
> > +   }
> > +   if (!priv || priv->client != client)
> > +           return;
> > +
> > +   priv->client = NULL; 
> > +   return;
> > +}
> > +
> > +static int p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
> > +{
> > +   RING_IDX cons, prod;
> > +
> > +   cons = ring->intf->out_cons;
> > +   prod = ring->intf->out_prod;
> > +   mb();
> > +
> > +   if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size)
> > +           return 1;
> > +   else
> > +           return 0;
> >  }
> >  
> >  static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
> >  {
> > +   struct xen_9pfs_front_priv *priv = NULL;
> > +   RING_IDX cons, prod, masked_cons, masked_prod;
> > +   unsigned long flags;
> > +   uint32_t size = p9_req->tc->size;
> > +   struct xen_9pfs_dataring *ring;
> > +   int num;
> > +
> > +   list_for_each_entry(priv, &xen_9pfs_devs, list) {
> > +           if (priv->client == client)
> > +                   break;
> > +   }
> > +   if (priv == NULL || priv->client != client)
> > +           return -EINVAL;
> > +
> > +   num = p9_req->tc->tag % priv->num_rings;
> > +   ring = &priv->rings[num];
> > +
> > +again:
> > +   while (wait_event_interruptible(ring->wq,
> > +                           p9_xen_write_todo(ring, size) > 0) != 0);
> > +
> > +   spin_lock_irqsave(&ring->lock, flags);
> > +   cons = ring->intf->out_cons;
> > +   prod = ring->intf->out_prod;
> > +   mb();
> > +
> > +   if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < size) {
> 
> 
> This looks like p9_xen_write_todo().

p9_xen_write_todo is just a wrapper around xen_9pfs_queued to provide
a return value that works well with wait_event_interruptible.

I would prefer not to call p9_xen_write_todo here, because it's simpler
if we don't read prod and cons twice.
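
To make that explicit, the wrapper is essentially this (sketch):

    /* Condition for wait_event_interruptible(): nonzero iff the out
     * ring has at least 'size' free bytes. Note it reads out_cons and
     * out_prod itself, which is the second read I'd like to avoid in
     * the locked region. */
    static int p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
    {
            RING_IDX cons = ring->intf->out_cons;
            RING_IDX prod = ring->intf->out_prod;

            mb();   /* read the indices before computing free space */
            return XEN_9PFS_RING_SIZE -
                    xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;
    }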


> BTW, where is xen_9pfs_queued()
> defined? I couldn't find it. Same for xen_9pfs_mask() and
> xen_9pfs_write_packet().

They are provided by the new ring macros, see
include/xen/interface/io/ring.h (the first patch).
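
Roughly, DEFINE_XEN_FLEX_RING(xen_9pfs) generates them. Paraphrasing
the header (see ring.h in the first patch for the exact definitions),
the two used in the free-space check look more or less like this,
assuming a power-of-two ring size:

    static inline RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size)
    {
            return idx & (ring_size - 1);
    }

    /* Bytes queued between cons and prod; the indices are free-running,
     * so equal indices mean empty, while masked-equal but unequal
     * indices mean full. */
    static inline RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons,
                                           RING_IDX ring_size)
    {
            RING_IDX size;

            if (prod == cons)
                    return 0;

            prod = xen_9pfs_mask(prod, ring_size);
            cons = xen_9pfs_mask(cons, ring_size);

            if (prod == cons)
                    return ring_size;

            if (prod > cons)
                    size = prod - cons;
            else
                    size = ring_size - (cons - prod);
            return size;
    }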


> > +           spin_unlock_irqrestore(&ring->lock, flags);
> > +           goto again;
> > +   }
> > +
> > +   masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> > +   masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> > +
> > +   xen_9pfs_write_packet(ring->ring.out,
> > +                           &masked_prod, masked_cons,
> > +                           XEN_9PFS_RING_SIZE, p9_req->tc->sdata, size);
> > +
> > +   p9_req->status = REQ_STATUS_SENT;
> > +   wmb();                  /* write ring before updating pointer */
> > +   prod += size;
> > +   ring->intf->out_prod = prod;
> > +   spin_unlock_irqrestore(&ring->lock, flags);
> > +   notify_remote_via_irq(ring->irq);
> > +
> >     return 0;
> >  }
> >  
> > 
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

