
Re: [Xen-devel] [XenARM] XEN tools for ARM with Virtualization Extensions - Patch #2



On 12 July 2013 20:27, Eric Trudeau <etrudeau@xxxxxxxxxxxx> wrote:
> Second patch submitted with changes based on comments on first patch.
>
>> > From 551f174c9653def01890fadd2dd769ab8a9acde7 Mon Sep 17 00:00:00 2001
>> > From: Eric Trudeau <etrudeau@xxxxxxxxxxxx>
>> > Date: Thu, 11 Jul 2013 20:03:51 -0400
>> > Subject: [PATCH] Add support for Guest physdev irqs
>> >
>> > ---
>> >  xen/arch/arm/domain.c  | 16 ++++++++++++++++
>> >  xen/arch/arm/gic.c     | 15 ++++++++++-----
>> >  xen/arch/arm/physdev.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
>> >  xen/arch/arm/vgic.c    |  5 +----
>> >  4 files changed, 73 insertions(+), 11 deletions(-)
>> >
>> > diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
>> > index 4c434a1..52d3429 100644
>> > --- a/xen/arch/arm/domain.c
>> > +++ b/xen/arch/arm/domain.c
>> > @@ -31,6 +31,8 @@
>> >  #include <asm/gic.h>
>> >  #include "vtimer.h"
>> >  #include "vpl011.h"
>> > +#include <xen/iocap.h>
>> > +#include <xen/irq.h>
>> >
>> >  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
>> >
>> > @@ -513,8 +515,22 @@ fail:
>> >      return rc;
>> >  }
>> >
>> > +static int release_domain_irqs(struct domain *d)
>> > +{
>> > +    int i;
>> > +    for (i = 0; i <= d->nr_pirqs; i++) {
>> > +        if (irq_access_permitted(d, i)) {
>> > +            release_irq(i);
>> > +        }
>> > +    }
>> > +    return 0;
>> > +}
>>
>> As you may know, release_irq will spin until the flag IRQ_INPROGRESS
>> is unset. This is done by the maintenance handler.
>>
>> Now, let's say the domain has crashed and the IRQ is not yet EOIed; then you
>> will spin forever.
>>
>
> I put a timeout in the IRQ_INPROGRESS check in release_irq() of 100 attempts
> with a 1 msec delay per loop iteration.

If we plan to only use release_irq when a domain is destroyed, this
check is useless, so it should be removed.

An IRQ keeps the IRQ_INPROGRESS flag until Xen EOIs it.
If the domain has crashed or received a hard shutdown (i.e. xl
destroy), the IRQ will remain "inflight" and can never fire again.

You need to check whether the IRQ is still inflight and, if so, EOI it.
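
For illustration only, here is a rough sketch of what that teardown path
could look like. The IRQ_INPROGRESS handling and the desc->handler->end()
hook are assumptions about this tree, not part of the submitted patch:

    /* Sketch: on domain destruction, do not spin waiting for the guest to
     * EOI the IRQ -- a crashed guest never will.  If the IRQ is still
     * marked in progress, deactivate it from Xen before freeing the action. */
    desc = irq_to_desc(irq);
    spin_lock_irqsave(&desc->lock, flags);
    if ( desc->status & IRQ_INPROGRESS )
    {
        desc->handler->end(desc);        /* assumed EOI/deactivate hook */
        desc->status &= ~IRQ_INPROGRESS;
    }
    spin_unlock_irqrestore(&desc->lock, flags);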

>> > +
>> >  void arch_domain_destroy(struct domain *d)
>> >  {
>> > +    if (d->irq_caps != NULL)
>> You don't need this check.
>> During the domain create, Xen ensures that irq_caps is not NULL.
>>
>> > +        release_domain_irqs(d);
>> >      p2m_teardown(d);
>> >      domain_vgic_free(d);
>> >      domain_uart0_free(d);
>> > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
>> > index cafb681..1f576d1 100644
>> > --- a/xen/arch/arm/gic.c
>> > +++ b/xen/arch/arm/gic.c
>> > @@ -510,7 +510,7 @@ void gic_route_spis(void)
>> >      }
>> >  }
>> >
>> > -void __init release_irq(unsigned int irq)
>> > +void release_irq(unsigned int irq)
>> >  {
>> >      struct irq_desc *desc;
>> >      unsigned long flags;
>> > @@ -522,6 +522,7 @@ void __init release_irq(unsigned int irq)
>> >      action = desc->action;
>> >      desc->action  = NULL;
>> >      desc->status |= IRQ_DISABLED;
>> > +    desc->status &= ~IRQ_GUEST;
>> >
>> >      spin_lock(&gic.lock);
>> >      desc->handler->shutdown(desc);
>> > @@ -707,6 +708,12 @@ int gic_route_irq_to_guest(struct domain *d, const struct dt_irq *irq,
>> >      spin_lock_irqsave(&desc->lock, flags);
>> >      spin_lock(&gic.lock);
>> >
>> > +    if ( desc->action != NULL )
>> > +    {
>> > +        retval = -EBUSY;
>> > +        goto out;
>> > +    }
>> > +
>> >      desc->handler = &gic_guest_irq_type;
>> >      desc->status |= IRQ_GUEST;
>> >
>> > @@ -715,12 +722,10 @@ int gic_route_irq_to_guest(struct domain *d, const struct dt_irq *irq,
>> >      gic_set_irq_properties(irq->irq, level, 1u << smp_processor_id(), 0xa0);
>> >
>> >      retval = __setup_irq(desc, irq->irq, action);
>> > -    if (retval) {
>> > -        xfree(action);
>> > -        goto out;
>> > -    }
>> >
>> >  out:
>> > +    if (retval)
>> > +        xfree(action);
>> >      spin_unlock(&gic.lock);
>> >      spin_unlock_irqrestore(&desc->lock, flags);
>> >      return retval;
>> > diff --git a/xen/arch/arm/physdev.c b/xen/arch/arm/physdev.c
>> > index 61b4a18..8a5f770 100644
>> > --- a/xen/arch/arm/physdev.c
>> > +++ b/xen/arch/arm/physdev.c
>> > @@ -9,12 +9,56 @@
>> >  #include <xen/lib.h>
>> >  #include <xen/errno.h>
>> >  #include <asm/hypercall.h>
>> > +#include <public/physdev.h>
>> > +#include <xen/guest_access.h>
>> > +#include <xen/irq.h>
>> > +#include <xen/sched.h>
>> > +#include <asm/gic.h>
>> >
>> >
>> >  int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>> >  {
>> > -    printk("%s %d cmd=%d: not implemented yet\n", __func__, __LINE__, cmd);
>> > -    return -ENOSYS;
>> > +    int ret;
>> > +
>> > +    switch ( cmd )
>> > +    {
>> > +    case PHYSDEVOP_map_pirq: {
>> > +        physdev_map_pirq_t map;
>> > +        struct dt_irq irq;
>> > +        struct domain *d;
>> > +
>> > +        ret = -EFAULT;
>> > +        if ( copy_from_guest(&map, arg, 1) != 0 )
>> > +            break;
>> > +
>> > +        d = rcu_lock_domain_by_any_id(map.domid);
>> > +        if ( d == NULL ) {
>> > +            ret = -ESRCH;
>> > +            break;
>> > +        }
>>
>> Missing sanity check on the map.pirq value.
>>
>
> I added a check for (map.pirq < gic_number_lines()).  This is the sanity check
> that is done in gic_route_irq().  Should this be less than or equal or are the
> device tree SPI irq numbers 0-based before they are offset by 32?

Your sanity check looks good to me. gic_number_lines returns the exact
number of IRQs (SPIs + SGIs + PPIs).
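
Put differently, the strict less-than is correct: IRQ numbers are 0-based,
IDs 0-31 are SGIs/PPIs and SPIs start at 32, so a valid physical SPI
satisfies 32 <= pirq < gic_number_lines(). A hedged sketch of the check
(the lower bound is an extra assumption on my part, not something patch #2
below does):

    /* Sketch only: reject SGIs/PPIs (0-31) and anything past the last SPI. */
    if ( map.pirq < 32 || map.pirq >= gic_number_lines() )
    {
        ret = -EINVAL;
        break;
    }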

>> > +        irq.irq = map.pirq;
>> > +        irq.type = DT_IRQ_TYPE_LEVEL_MASK;
>> > +
>> > +        ret = gic_route_irq_to_guest(d, &irq, "GuestIRQ");
>>
>> Do you plan to handle non 1:1 IRQ mapping?
>> How does work your the IRQ mapping if the IRQ is already mapped to dom0?
>>
>
> See comment below about sticking with 1:1 irq mapping.

I think you didn't answer the second question. How do you tell Xen:
"Don't map this IRQ in dom0"? Do you manually remove the node from the
dom0 DTS, or do you have an automatic way?

>> > +        if (!ret)
>> > +            printk("%s gic_route_irq_to_guest added IRQ %d to dom%d\n",
>> > +                   __FUNCTION__, irq.irq, d->domain_id);
>> > +
>> > +        rcu_unlock_domain(d);
>> > +
>> > +        if (!ret && __copy_to_guest(arg, &map, 1) )
>> > +            ret = -EFAULT;
>> > +        break;
>> > +    }
>> > +
>> > +    default:
>> > +        printk("%s %d cmd=%d: not implemented yet\n", __func__, __LINE__, cmd);
>> > +        ret = -ENOSYS;
>> > +        break;
>> > +    }
>> > +
>> > +    return ret;
>> >  }
>> >
>> >  /*
>> > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>> > index 2e4b11f..0ebcdac 100644
>> > --- a/xen/arch/arm/vgic.c
>> > +++ b/xen/arch/arm/vgic.c
>> > @@ -82,10 +82,7 @@ int domain_vgic_init(struct domain *d)
>> >      /* Currently nr_lines in vgic and gic doesn't have the same meanings
>> >       * Here nr_lines = number of SPIs
>> >       */
>> > -    if ( d->domain_id == 0 )
>> > -        d->arch.vgic.nr_lines = gic_number_lines() - 32;
>> > -    else
>> > -        d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
>> > +    d->arch.vgic.nr_lines = d->nr_pirqs - 32;
>>
>> If you want to stick with the 1:1 mapping, the best solution
>> is to set nr_lines to "gic_number_lines() - 32" for all domains.
>>
>
> I will stick with 1:1 mapping for guest IRQs and use gic_number_lines() - 32.
>
>> --
>> Julien Grall
>
> Patch #2 begins here:
> --------------------------------------------
>
> From 924441c3eddb1765cb4fb0e94f6b62177195837d Mon Sep 17 00:00:00 2001
> From: Eric Trudeau <etrudeau@xxxxxxxxxxxx>
> Date: Fri, 12 Jul 2013 14:54:24 -0400
> Subject: [PATCH] xen/arm: Add support for 1:1 IRQ mapping into guest domains
>  on ARM
>
> ARM guests will now have the ability to access 1:1 mapped IRQs.
>
> The irqs list in the domain configuration file is used.  A SPI irq
> number must include the offset of 32.  For example, if the device
> tree irq number is 76 for a SPI, then the irqs field in the dom.cfg
> file would be:
>    irqs = [ 108 ]
>
> Only level-triggered IRQs are supported at this time.
>
> When an IRQ is released on destruction of the guest, any in-progress
> handlers are given at least 100 msec to complete.
>
> Signed-off-by: Eric Trudeau <etrudeau@xxxxxxxxxxxx>
> ---
>  xen/arch/arm/domain.c  | 15 ++++++++++++++
>  xen/arch/arm/gic.c     | 29 ++++++++++++++++++--------
>  xen/arch/arm/physdev.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++--
>  xen/arch/arm/vgic.c    |  5 +----
>  4 files changed, 91 insertions(+), 14 deletions(-)
>
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 4c434a1..f15ff06 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -31,6 +31,8 @@
>  #include <asm/gic.h>
>  #include "vtimer.h"
>  #include "vpl011.h"
> +#include <xen/iocap.h>
> +#include <xen/irq.h>
>
>  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
>
> @@ -513,8 +515,21 @@ fail:
>      return rc;
>  }
>
> +static int release_domain_irqs(struct domain *d)
> +{
> +    int i;
> +    for (i = 0; i <= d->nr_pirqs; i++) {
> +        if (irq_access_permitted(d, i)) {
> +            release_irq(i);
> +        }
> +    }
> +    return 0;
> +}
> +
> +
>  void arch_domain_destroy(struct domain *d)
>  {
> +    release_domain_irqs(d);
>      p2m_teardown(d);
>      domain_vgic_free(d);
>      domain_uart0_free(d);
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index ccce565..ed15ec3 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -30,6 +30,7 @@
>  #include <xen/device_tree.h>
>  #include <asm/p2m.h>
>  #include <asm/domain.h>
> +#include <xen/delay.h>
>
>  #include <asm/gic.h>
>
> @@ -510,11 +511,12 @@ void gic_route_spis(void)
>      }
>  }
>
> -void __init release_irq(unsigned int irq)
> +void release_irq(unsigned int irq)
>  {
>      struct irq_desc *desc;
>      unsigned long flags;
> -   struct irqaction *action;
> +    struct irqaction *action;
> +    int    inprogresschecks;
>
>      desc = irq_to_desc(irq);
>
> @@ -530,8 +532,15 @@ void __init release_irq(unsigned int irq)
>
>      spin_unlock_irqrestore(&desc->lock,flags);
>
> -    /* Wait to make sure it's not being used on another CPU */
> -    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
> +    /* Wait a little while to make sure it's not being used on another CPU
> +     * But not indefinitely, because a guest may have crashed */
> +    for (inprogresschecks = 0; (inprogresschecks < 100); inprogresschecks++) {
> +        smp_mb();
> +        if ( desc->status & IRQ_INPROGRESS )
> +            mdelay(1);
> +        else
> +            break;
> +    }
>
>      if (action && action->free_on_release)
>          xfree(action);
> @@ -708,6 +717,12 @@ int gic_route_irq_to_guest(struct domain *d, const struct dt_irq *irq,
>      spin_lock_irqsave(&desc->lock, flags);
>      spin_lock(&gic.lock);
>
> +    if ( desc->action != NULL )
> +    {
> +        retval = -EBUSY;
> +        goto out;
> +    }
> +
>      desc->handler = &gic_guest_irq_type;
>      desc->status |= IRQ_GUEST;
>
> @@ -716,12 +731,10 @@ int gic_route_irq_to_guest(struct domain *d, const struct dt_irq *irq,
>      gic_set_irq_properties(irq->irq, level, 1u << smp_processor_id(), 0xa0);
>
>      retval = __setup_irq(desc, irq->irq, action);
> -    if (retval) {
> -        xfree(action);
> -        goto out;
> -    }
>
>  out:
> +    if (retval)
> +        xfree(action);
>      spin_unlock(&gic.lock);
>      spin_unlock_irqrestore(&desc->lock, flags);
>      return retval;
> diff --git a/xen/arch/arm/physdev.c b/xen/arch/arm/physdev.c
> index 61b4a18..8d76d11 100644
> --- a/xen/arch/arm/physdev.c
> +++ b/xen/arch/arm/physdev.c
> @@ -9,12 +9,64 @@
>  #include <xen/lib.h>
>  #include <xen/errno.h>
>  #include <asm/hypercall.h>
> +#include <public/physdev.h>
> +#include <xen/guest_access.h>
> +#include <xen/irq.h>
> +#include <xen/sched.h>
> +#include <asm/gic.h>
> +#include <xsm/xsm.h>
>
>
>  int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>  {
> -    printk("%s %d cmd=%d: not implemented yet\n", __func__, __LINE__, cmd);
> -    return -ENOSYS;
> +    int ret;
> +
> +    switch ( cmd )
> +    {
> +    case PHYSDEVOP_map_pirq: {
> +        physdev_map_pirq_t map;
> +        struct dt_irq irq;
> +        struct domain *d;
> +
> +        ret = -EFAULT;
> +        if ( copy_from_guest(&map, arg, 1) != 0 )
> +            break;
> +
> +        d = rcu_lock_domain_by_any_id(map.domid);
> +        if ( d == NULL ) {
> +            ret = -ESRCH;
> +            break;
> +        }
> +
> +        ret = xsm_map_domain_pirq(XSM_TARGET, d);
> +
> +        if (!ret && (map.pirq >= gic_number_lines()))
> +            ret = -EINVAL;
> +
> +        if (!ret) {
> +            irq.irq = map.pirq;
> +            irq.type = DT_IRQ_TYPE_LEVEL_MASK;
> +
> +            ret = gic_route_irq_to_guest(d, &irq, "GuestIRQ");
> +            if (!ret)
> +                dprintk(XENLOG_G_INFO, "dom%d: gic_route_irq_to_guest mapped in IRQ %d\n",
> +                        d->domain_id, irq.irq);
> +        }
> +
> +        rcu_unlock_domain(d);
> +
> +        if (!ret && __copy_to_guest(arg, &map, 1) )
> +            ret = -EFAULT;
> +        break;
> +    }
> +
> +    default:
> +        printk("%s %d cmd=%d: not implemented yet\n", __func__, __LINE__, cmd);
> +        ret = -ENOSYS;
> +        break;
> +    }
> +
> +    return ret;
>  }
>
>  /*
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 2e4b11f..9c95f67 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -82,10 +82,7 @@ int domain_vgic_init(struct domain *d)
>      /* Currently nr_lines in vgic and gic doesn't have the same meanings
>       * Here nr_lines = number of SPIs
>       */
> -    if ( d->domain_id == 0 )
> -        d->arch.vgic.nr_lines = gic_number_lines() - 32;
> -    else
> -        d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
> +    d->arch.vgic.nr_lines = gic_number_lines() - 32;
>
>      d->arch.vgic.shared_irqs =
>          xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
> --
> 1.8.1.2



--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

