
Re: [Xen-devel] [PATCH v1 08/13] xen/pvcalls: implement sendmsg



On 22/07/17 02:11, Stefano Stabellini wrote:
> Send data to an active socket by copying data to the "out" ring. Take
> the active socket's out_mutex so that only one caller can access the
> ring at any given time.
> 
> If not enough room is available on the ring, rather than returning
> immediately or sleep-waiting, spin for up to 5000 cycles. This small
> optimization turns out to improve performance significantly.
> 
> Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
> CC: boris.ostrovsky@xxxxxxxxxx
> CC: jgross@xxxxxxxx
> ---
>  drivers/xen/pvcalls-front.c | 109 ++++++++++++++++++++++++++++++++++++++++++++
>  drivers/xen/pvcalls-front.h |   3 ++
>  2 files changed, 112 insertions(+)
> 
> diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> index f3a04a2..bf29f40 100644
> --- a/drivers/xen/pvcalls-front.c
> +++ b/drivers/xen/pvcalls-front.c
> @@ -27,6 +27,7 @@
>  #define PVCALLS_INVALID_ID (UINT_MAX)
>  #define RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
>  #define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
> +#define PVCALLS_FRON_MAX_SPIN 5000

Any reason not to name it PVCALLS_FRONT_MAX_SPIN? I first thought you
meant FROM instead.


Juergen

>  
>  struct pvcalls_bedata {
>       struct xen_pvcalls_front_ring ring;
> @@ -77,6 +78,22 @@ struct sock_mapping {
>       };
>  };
>  
> +static int pvcalls_front_write_todo(struct sock_mapping *map)
> +{
> +     struct pvcalls_data_intf *intf = map->active.ring;
> +     RING_IDX cons, prod, size = XEN_FLEX_RING_SIZE(intf->ring_order);
> +     int32_t error;
> +
> +     cons = intf->out_cons;
> +     prod = intf->out_prod;
> +     error = intf->out_error;
> +     if (error == -ENOTCONN)
> +             return 0;
> +     if (error != 0)
> +             return error;
> +     return size - pvcalls_queued(prod, cons, size);
> +}
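
A quick note on the arithmetic here: write_todo() returns the free space
on the "out" ring, computed from free-running producer/consumer indices.
The two helpers are assumed to behave roughly like this (hypothetical
stand-ins for illustration, not the actual definitions from this series):

    typedef unsigned int RING_IDX;

    /* Mask a free-running index into [0, ring_size);
     * ring_size is a power of two. */
    static RING_IDX pvcalls_mask(RING_IDX idx, RING_IDX ring_size)
    {
            return idx & (ring_size - 1);
    }

    /* Bytes currently queued between cons and prod; unsigned
     * subtraction handles index wrap-around. */
    static RING_IDX pvcalls_queued(RING_IDX prod, RING_IDX cons,
                                   RING_IDX ring_size)
    {
            (void)ring_size; /* unused in this simplified version */
            return prod - cons;
    }

With that, size - pvcalls_queued(prod, cons, size) is the number of bytes
the frontend may still write before the ring is full.
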
> +
>  static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
>  {
>       struct xenbus_device *dev = dev_id;
> @@ -304,6 +321,98 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
>       return ret;
>  }
>  
> +static int __write_ring(struct pvcalls_data_intf *intf,
> +                     struct pvcalls_data *data,
> +                     struct iov_iter *msg_iter,
> +                     size_t len)
> +{
> +     RING_IDX cons, prod, size, masked_prod, masked_cons;
> +     RING_IDX array_size = XEN_FLEX_RING_SIZE(intf->ring_order);
> +     int32_t error;
> +
> +     cons = intf->out_cons;
> +     prod = intf->out_prod;
> +     error = intf->out_error;
> +     /* read indexes before continuing */
> +     virt_mb();
> +
> +     if (error < 0)
> +             return error;
> +
> +     size = pvcalls_queued(prod, cons, array_size);
> +     if (size >= array_size)
> +             return 0;
> +     if (len > array_size - size)
> +             len = array_size - size;
> +
> +     masked_prod = pvcalls_mask(prod, array_size);
> +     masked_cons = pvcalls_mask(cons, array_size);
> +
> +     if (masked_prod < masked_cons) {
> +             copy_from_iter(data->out + masked_prod, len, msg_iter);
> +     } else {
> +             if (len > array_size - masked_prod) {
> +                     copy_from_iter(data->out + masked_prod,
> +                                    array_size - masked_prod, msg_iter);
> +                     copy_from_iter(data->out,
> +                                    len - (array_size - masked_prod),
> +                                    msg_iter);
> +             } else {
> +                     copy_from_iter(data->out + masked_prod, len, msg_iter);
> +             }
> +     }
> +     /* write to ring before updating pointer */
> +     virt_wmb();
> +     intf->out_prod += len;
> +
> +     return len;
> +}
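
The if/else above is the standard circular-buffer write: when the copy
would run past the end of the ring, it is split in two, and the virt_wmb()
makes the payload visible before out_prod advances (pairing with the
virt_mb() on the read side). Stripped of the iov_iter machinery, the split
amounts to this (a standalone sketch with plain memcpy and hypothetical
names, not the patch code):

    #include <string.h>

    /* Copy len bytes of src into a circular buffer of ring_size bytes,
     * starting at the masked producer offset, wrapping once if needed. */
    static void ring_write(char *ring, unsigned int ring_size,
                           unsigned int masked_prod,
                           const char *src, unsigned int len)
    {
            if (len > ring_size - masked_prod) {
                    /* First chunk: up to the end of the buffer. */
                    memcpy(ring + masked_prod, src,
                           ring_size - masked_prod);
                    /* Second chunk: the remainder wraps to offset 0. */
                    memcpy(ring, src + (ring_size - masked_prod),
                           len - (ring_size - masked_prod));
            } else {
                    memcpy(ring + masked_prod, src, len);
            }
    }

Note that the masked_prod < masked_cons branch in the patch never needs to
split: the free region is contiguous in that case, so one copy suffices.
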
> +
> +int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
> +                       size_t len)
> +{
> +     struct pvcalls_bedata *bedata;
> +     struct sock_mapping *map;
> +     int sent = 0, tot_sent = 0;
> +     int count = 0, flags;
> +
> +     if (!pvcalls_front_dev)
> +             return -ENOTCONN;
> +     bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
> +
> +     map = (struct sock_mapping *) READ_ONCE(sock->sk->sk_send_head);
> +     if (!map)
> +             return -ENOTSOCK;
> +
> +     flags = msg->msg_flags;
> +     if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
> +             return -EOPNOTSUPP;
> +
> +     mutex_lock(&map->active.out_mutex);
> +     if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
> +             mutex_unlock(&map->active.out_mutex);
> +             return -EAGAIN;
> +     }
> +
> +again:
> +     count++;
> +     sent = __write_ring(map->active.ring,
> +                         &map->active.data, &msg->msg_iter,
> +                         len);
> +     if (sent > 0) {
> +             len -= sent;
> +             tot_sent += sent;
> +             notify_remote_via_irq(map->active.irq);
> +     }
> +     if (sent >= 0 && len > 0 && count < PVCALLS_FRON_MAX_SPIN)
> +             goto again;
> +     if (sent < 0)
> +             tot_sent = sent;
> +
> +     mutex_unlock(&map->active.out_mutex);
> +     return tot_sent;
> +}
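
On the spin the commit message mentions: rather than sleeping on a wait
queue when the ring is full, sendmsg retries the write up to
PVCALLS_FRON_MAX_SPIN (5000) times and then returns a short count. The
shape of the loop, reduced to a self-contained toy (assumed names and
sizes, illustrative only):

    #include <stddef.h>
    #include <string.h>

    #define RING_SIZE 4096          /* toy stand-in for the flex ring */
    #define MAX_SPIN  5000          /* mirrors the patch's spin bound */

    static char ring[RING_SIZE];
    static unsigned int prod, cons; /* free-running indices */

    /* Single-shot writer: copies what fits without wrapping, returns the
     * number of bytes written (0 when the ring is momentarily full). */
    static int try_write(const char *buf, size_t len)
    {
            unsigned int space = RING_SIZE - (prod - cons);
            unsigned int masked = prod & (RING_SIZE - 1);

            if (len > space)
                    len = space;
            if (len > RING_SIZE - masked)
                    len = RING_SIZE - masked; /* next call wraps */
            memcpy(ring + masked, buf, len);
            prod += len;
            return (int)len;
    }

    /* Bounded busy-retry: keep trying while data remains, give up after
     * MAX_SPIN attempts instead of blocking. */
    static int spin_send(const char *buf, size_t len)
    {
            int sent, total = 0, count = 0;

            while (len > 0 && count++ < MAX_SPIN) {
                    sent = try_write(buf + total, len);
                    if (sent < 0)
                            return sent; /* mirrors the patch's sent < 0
                                          * handling; this toy writer
                                          * never actually fails */
                    total += sent;
                    len -= sent;
            }
            return total;
    }
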
> +
>  int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
>  {
>       struct pvcalls_bedata *bedata;
> diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
> index ab4f1da..d937c24 100644
> --- a/drivers/xen/pvcalls-front.h
> +++ b/drivers/xen/pvcalls-front.h
> @@ -13,5 +13,8 @@ int pvcalls_front_bind(struct socket *sock,
>  int pvcalls_front_accept(struct socket *sock,
>                        struct socket *newsock,
>                        int flags);
> +int pvcalls_front_sendmsg(struct socket *sock,
> +                       struct msghdr *msg,
> +                       size_t len);
>  
>  #endif
> 

