
Re: [Xen-devel] [PATCH v3 08/12] x86/hvm/ioreq: maintain an array of ioreq servers rather than a list



On Thu, Aug 31, 2017 at 10:36:01AM +0100, Paul Durrant wrote:
> A subsequent patch will remove the current implicit limitation on creation
> of ioreq servers which is due to the allocation of gfns for the ioreq
> structures and buffered ioreq ring.
> 
> It will therefore be necessary to introduce an explicit limit and, since
> this limit should be small, it simplifies the code to maintain an array of
> that size rather than using a list.
> 
> Also, by reserving an array slot for the default server and populating
> array slots early in create, the need to pass an 'is_default' boolean
> to sub-functions can be avoided.
> 
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> 
> v3:
>  - New patch (replacing "move is_default into struct hvm_ioreq_server") in
>    response to review comments.
> ---
>  xen/arch/x86/hvm/ioreq.c         | 507 +++++++++++++++++++--------------------
>  xen/include/asm-x86/hvm/domain.h |  11 +-
>  2 files changed, 247 insertions(+), 271 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 5e01e1a6d2..0e92763384 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -46,14 +46,18 @@ static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
>  bool hvm_io_pending(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
> -    struct hvm_ioreq_server *s;
> +    unsigned int id;
>  
> -    list_for_each_entry ( s,
> -                          &d->arch.hvm_domain.ioreq_server.list,
> -                          list_entry )
> +    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
>      {
> +        struct hvm_ioreq_server *s;
>          struct hvm_ioreq_vcpu *sv;
>  
> +        s = d->arch.hvm_domain.ioreq_server.server[id];
> +

No need for the extra newline IMHO (here and below). You could also
do the initialization together with the definition, but I guess that's
going to exceed the line char limit?

Or you could even do something like this, AFAICT:

for ( id = 0, s = d->arch.hvm_domain.ioreq_server.server[0];
      id < MAX_NR_IOREQ_SERVERS;
      id++, s = id < MAX_NR_IOREQ_SERVERS
                ? d->arch.hvm_domain.ioreq_server.server[id] : NULL )
{
     ....

I would make this a macro (FOREACH_IOREQ_SERVER or similar), since the
pattern seems to be repeated in quite a lot of places.
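
Just a rough sketch of what I mean (the name is only the suggestion
above, and whether the macro should skip empty slots is of course up
to you):

#define FOREACH_IOREQ_SERVER(d, id, s)                                 \
    for ( (id) = 0; (id) < MAX_NR_IOREQ_SERVERS; (id)++ )              \
        if ( !((s) = (d)->arch.hvm_domain.ioreq_server.server[(id)]) ) \
            continue;                                                  \
        else

so that callers become:

FOREACH_IOREQ_SERVER(d, id, s)
{
    ...
}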

> +#define IS_DEFAULT(s) \
> +    (s == s->domain->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID])

Parentheses around the instances of s, please.
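
I.e. the same macro, just with the parentheses added:

#define IS_DEFAULT(s) \
    ((s) == \
     (s)->domain->arch.hvm_domain.ioreq_server.server[DEFAULT_IOSERVID])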

>  static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
> -                                            bool is_default)
> +                                            ioservid_t id)

You could get the id by doing some arithmetic with the array and s,
but I don't think that's worth it.

>  int hvm_create_ioreq_server(struct domain *d, domid_t domid,
> @@ -685,52 +667,66 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
>                              ioservid_t *id)
>  {
>      struct hvm_ioreq_server *s;
> +    unsigned int i;
>      int rc;
>  
>      if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
>          return -EINVAL;
>  
> -    rc = -ENOMEM;
>      s = xzalloc(struct hvm_ioreq_server);
>      if ( !s )
> -        goto fail1;
> +        return -ENOMEM;
>  
>      domain_pause(d);
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>  
> -    rc = -EEXIST;
> -    if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
> -        goto fail2;
> -
> -    rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
> -                               next_ioservid(d));
> -    if ( rc )
> -        goto fail3;
> -
> -    list_add(&s->list_entry,
> -             &d->arch.hvm_domain.ioreq_server.list);
> -
>      if ( is_default )
>      {
> -        d->arch.hvm_domain.default_ioreq_server = s;
> -        hvm_ioreq_server_enable(s, true);
> +        i = DEFAULT_IOSERVID;
> +
> +        rc = -EEXIST;
> +        if ( d->arch.hvm_domain.ioreq_server.server[i] )
> +            goto fail;
> +    }
> +    else
> +    {
> +        for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
> +        {
> +            if ( i != DEFAULT_IOSERVID &&
> +                 !d->arch.hvm_domain.ioreq_server.server[i] )
> +                break;
> +        }
> +
> +        rc = -ENOSPC;
> +        if ( i >= MAX_NR_IOREQ_SERVERS )
> +            goto fail;
>      }
>  
> +    d->arch.hvm_domain.ioreq_server.server[i] = s;
> +
> +    rc = hvm_ioreq_server_init(s, d, domid, bufioreq_handling, i);
> +    if ( rc )
> +        goto fail;
> +
> +    if ( IS_DEFAULT(s) )
> +        hvm_ioreq_server_enable(s);
> +
>      if ( id )
> -        *id = s->id;
> +        *id = i;
> +
> +    d->arch.hvm_domain.ioreq_server.count++;
>  
>      spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>      domain_unpause(d);
>  
>      return 0;
>  
> - fail3:
> - fail2:
> + fail:
>      spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>      domain_unpause(d);
>  
> +    d->arch.hvm_domain.ioreq_server.server[i] = NULL;

Shouldn't this be done while holding the ioreq_server lock?
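
E.g. you could split the error label (label names here are only
illustrative) so that the slot is cleared before dropping the lock,
and only on the path where it has actually been populated:

 fail2:
    d->arch.hvm_domain.ioreq_server.server[i] = NULL;

 fail1:
    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    domain_unpause(d);

    xfree(s);
    return rc;

with the -EEXIST/-ENOSPC paths using fail1 and the
hvm_ioreq_server_init() failure using fail2.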

>      xfree(s);
> - fail1:
>      return rc;
>  }
>  
> @@ -741,35 +737,30 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
>  
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>  
> -    rc = -ENOENT;
> -    list_for_each_entry ( s,
> -                          &d->arch.hvm_domain.ioreq_server.list,
> -                          list_entry )
> -    {
> -        if ( s == d->arch.hvm_domain.default_ioreq_server )
> -            continue;
> +    s = d->arch.hvm_domain.ioreq_server.server[id];
>  
> -        if ( s->id != id )
> -            continue;
> -
> -        domain_pause(d);
> +    rc = -ENOENT;
> +    if ( id >= MAX_NR_IOREQ_SERVERS || !s || IS_DEFAULT(s) )

The id >= MAX_NR_IOREQ_SERVERS check should be done before fetching the
array element IMHO, even before taking the lock.
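
Something like this, maybe (only a sketch, the 'out' label is
illustrative):

    if ( id >= MAX_NR_IOREQ_SERVERS )
        return -ENOENT;

    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);

    s = d->arch.hvm_domain.ioreq_server.server[id];

    rc = -ENOENT;
    if ( !s || IS_DEFAULT(s) )
        goto out;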

Also, I don't like the:

rc = ...
if ( ... )
    goto error

construct; I think it's easy to make a mistake and end up returning an
error code in the successful path (or forgetting to set an error when
needed). This is, however, widely used in Xen, so I'm not going to
complain any more.

>  void hvm_destroy_all_ioreq_servers(struct domain *d)
>  {
> -    struct hvm_ioreq_server *s, *next;
> +    unsigned int id;
>  
>      spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
>  
>      /* No need to domain_pause() as the domain is being torn down */
>  
> -    list_for_each_entry_safe ( s,
> -                               next,
> -                               &d->arch.hvm_domain.ioreq_server.list,
> -                               list_entry )
> +    for ( id = 0; id < MAX_NR_IOREQ_SERVERS; id++ )
>      {
> -        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
> +        struct hvm_ioreq_server *s;
>  
> -        hvm_ioreq_server_disable(s, is_default);
> +        s = d->arch.hvm_domain.ioreq_server.server[id];
>  
> -        if ( is_default )
> -            d->arch.hvm_domain.default_ioreq_server = NULL;
> +        if ( !s )
> +            continue;
>  
> -        list_del(&s->list_entry);
> +        hvm_ioreq_server_disable(s);
> +        hvm_ioreq_server_deinit(s);
>  
> -        hvm_ioreq_server_deinit(s, is_default);
> +        ASSERT(d->arch.hvm_domain.ioreq_server.count);
> +        --d->arch.hvm_domain.ioreq_server.count;

It seems more common to use d->arch.hvm_domain.ioreq_server.count--,
unless there's a reason for preferring the prefix decrement.
  
> +#define MAX_NR_IOREQ_SERVERS 8
> +#define DEFAULT_IOSERVID 0

I would rather write it as DEFAULT_IOREQ_ID or DEFAULT_IOSERVER_ID; I
don't think there's any need to shorten SERVER here (especially when
it's not shortened in MAX_NR_IOREQ_SERVERS).

Thanks, Roger.
