
Re: [Xen-devel] [PATCH v2 10/21] xen/arm: Implement hypercall PHYSDEVOP_{, un}map_pirq



On Thu, 31 Jul 2014, Julien Grall wrote:
> The physdev sub-hypercalls PHYSDEVOP_{,un}map_pirq allow the toolstack to
> assign/deassign a physical IRQ to the guest (via the "irqs" config option
> for xl).
> For now, we allow only SPIs to be mapped to the guest.
> The type MAP_PIRQ_TYPE_GSI is used for this purpose.
> 
> The virtual IRQ number is allocated by Xen. The toolstack has to specify
> the number of SPIs handled by the vGIC via a hypercall.
> 
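For reference, a minimal toolstack-side sketch (not part of this patch, and
assuming ARM can simply reuse the existing libxc wrappers
xc_physdev_map_pirq()/xc_physdev_unmap_pirq(); error handling trimmed) of how
an SPI listed in the xl "irqs" option would be mapped and later torn down:

    #include <xenctrl.h>

    static int assign_spi(xc_interface *xch, int domid, int spi)
    {
        /* On ARM the virtual IRQ is allocated by Xen, so pass -1 in. */
        int pirq = -1;
        int rc = xc_physdev_map_pirq(xch, domid, spi, &pirq);

        if ( rc )
            return rc;

        /* ... the guest is handed "pirq" as its virtual SPI number ... */

        /* On teardown, release the route and the allocated virq. */
        return xc_physdev_unmap_pirq(xch, domid, pirq);
    }
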
> Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>

Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>



> ---
>     I'm wondering if we should introduce an alias of MAP_PIRQ_TYPE_GSI
>     for ARM. It would be less confusing for the user.

Just improve the error message below, maybe:

"wrong map_pirq type 0x%x, only MAP_PIRQ_TYPE_GSI is supported"


>     Changes in v2:
>         - Add PHYSDEVOP_unmap_pirq
>         - Rework commit message
>         - Add functions to allocate/release a VIRQ
>         - is_routable_irq has been renamed to is_assignable_irq
> ---
>  xen/arch/arm/physdev.c       |  120 +++++++++++++++++++++++++++++++++++++++++-
>  xen/arch/arm/vgic.c          |   51 ++++++++++++++++++
>  xen/include/asm-arm/domain.h |    1 +
>  xen/include/asm-arm/vgic.h   |    5 ++
>  4 files changed, 175 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/arch/arm/physdev.c b/xen/arch/arm/physdev.c
> index 61b4a18..9333aa0 100644
> --- a/xen/arch/arm/physdev.c
> +++ b/xen/arch/arm/physdev.c
> @@ -8,13 +8,129 @@
>  #include <xen/types.h>
>  #include <xen/lib.h>
>  #include <xen/errno.h>
> +#include <xen/iocap.h>
> +#include <xen/guest_access.h>
> +#include <xsm/xsm.h>
> +#include <asm/current.h>
>  #include <asm/hypercall.h>
> +#include <public/physdev.h>
>  
> +static int physdev_map_pirq(domid_t domid, int type, int index, int *pirq_p)
> +{
> +    struct domain *d;
> +    int ret;
> +    int irq = index;
> +    int virq = 0;
> +
> +    d = rcu_lock_domain_by_any_id(domid);
> +    if ( d == NULL )
> +        return -ESRCH;
> +
> +    ret = xsm_map_domain_pirq(XSM_TARGET, d);
> +    if ( ret )
> +        goto free_domain;
> +
> +    /* For now we only support GSI */
> +    if ( type != MAP_PIRQ_TYPE_GSI )
> +    {
> +        ret = -EINVAL;
> +        dprintk(XENLOG_G_ERR, "dom%u: wrong map_pirq type 0x%x\n",
> +                d->domain_id, type);
> +        goto free_domain;
> +    }
> +
> +    if ( !is_assignable_irq(irq) )
> +    {
> +        ret = -EINVAL;
> +        dprintk(XENLOG_G_ERR, "IRQ%u is not routable to a guest\n", irq);
> +        goto free_domain;
> +    }
> +
> +    ret = -EPERM;
> +    if ( !irq_access_permitted(current->domain, irq) )
> +        goto free_domain;
> +
> +    virq = vgic_allocate_virq(d, irq);
> +    ret = -EMFILE;
> +    if ( virq == -1 )
> +        goto free_domain;
> +
> +    ret = route_irq_to_guest(d, virq, irq, "routed IRQ");
> +
> +    if ( !ret )
> +        *pirq_p = virq;
> +    else
> +        vgic_free_virq(d, virq);
> +
> +free_domain:
> +    rcu_unlock_domain(d);
> +
> +    return ret;
> +}
> +
> +int physdev_unmap_pirq(domid_t domid, int pirq)
> +{
> +    struct domain *d;
> +    int ret;
> +
> +    d = rcu_lock_domain_by_any_id(domid);
> +    if ( d == NULL )
> +        return -ESRCH;
> +
> +    ret = xsm_unmap_domain_pirq(XSM_TARGET, d);
> +    if ( ret )
> +        goto free_domain;
> +
> +    ret = release_guest_irq(d, pirq);
> +    if ( ret )
> +        goto free_domain;
> +
> +    vgic_free_virq(d, pirq);
> +
> +free_domain:
> +    rcu_unlock_domain(d);
> +
> +    return ret;
> +}
>  
>  int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>  {
> -    printk("%s %d cmd=%d: not implemented yet\n", __func__, __LINE__, cmd);
> -    return -ENOSYS;
> +    int ret;
> +
> +    switch ( cmd )
> +    {
> +    case PHYSDEVOP_map_pirq:
> +        {
> +            physdev_map_pirq_t map;
> +
> +            ret = -EFAULT;
> +            if ( copy_from_guest(&map, arg, 1) != 0 )
> +                break;
> +
> +            ret = physdev_map_pirq(map.domid, map.type, map.index, &map.pirq);
> +
> +            if ( __copy_to_guest(arg, &map, 1) )
> +                ret = -EFAULT;
> +        }
> +        break;
> +
> +    case PHYSDEVOP_unmap_pirq:
> +        {
> +            physdev_unmap_pirq_t unmap;
> +
> +            ret = -EFAULT;
> +            if ( copy_from_guest(&unmap, arg, 1) != 0 )
> +                break;
> +
> +            ret = physdev_unmap_pirq(unmap.domid, unmap.pirq);
> +        }
> +        break;
> +
> +    default:
> +        ret = -ENOSYS;
> +        break;
> +    }
> +
> +    return ret;
>  }
>  
>  /*
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 2a5fc18..644742e 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -81,6 +81,8 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis)
>          return -ENODEV;
>      }
>  
> +    spin_lock_init(&d->arch.vgic.lock);
> +
>      d->arch.vgic.shared_irqs =
>          xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
>      if ( d->arch.vgic.shared_irqs == NULL )
> @@ -108,6 +110,11 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis)
>  
>      d->arch.vgic.handler->domain_init(d);
>  
> +    d->arch.vgic.allocated_spis =
> +        xzalloc_array(unsigned long, BITS_TO_LONGS(d->arch.vgic.nr_spis));
> +    if ( !d->arch.vgic.allocated_spis )
> +        return -ENOMEM;
> +
>      return 0;
>  }
>  
> @@ -444,6 +451,50 @@ void arch_evtchn_inject(struct vcpu *v)
>      vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
>  }
>  
> +int vgic_allocate_virq(struct domain *d, unsigned int irq)
> +{
> +    unsigned int spi;
> +    int virq = -1;
> +
> +    /* Hardware domain has IRQ mapped 1:1 */
> +    if ( is_hardware_domain(d) )
> +        return irq;
> +
> +    spin_lock(&d->arch.vgic.lock);
> +
> +    spi = find_first_zero_bit(d->arch.vgic.allocated_spis,
> +                              d->arch.vgic.nr_spis);
> +
> +    if ( spi >= d->arch.vgic.nr_spis )
> +        goto unlock;
> +
> +    set_bit(spi, d->arch.vgic.allocated_spis);
> +
> +    virq = 32 + spi;
> +
> +unlock:
> +    spin_unlock(&d->arch.vgic.lock);
> +
> +    return virq;
> +}
> +
> +void vgic_free_virq(struct domain *d, unsigned int virq)
> +{
> +    unsigned int spi;
> +
> +    if ( is_hardware_domain(d) )
> +        return;
> +
> +    if ( virq < 32 || virq >= vgic_num_irqs(d) )
> +        return;
> +
> +    spi = virq - 32;
> +
> +    spin_lock(&d->arch.vgic.lock);
> +    clear_bit(spi, d->arch.vgic.allocated_spis);
> +    spin_unlock(&d->arch.vgic.lock);
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 44727b2..a4039c1 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -94,6 +94,7 @@ struct arch_domain
>          spinlock_t lock;
>          int ctlr;
>          int nr_spis; /* Number of SPIs */
> +        unsigned long *allocated_spis; /* bitmap of SPIs allocated */
>          struct vgic_irq_rank *shared_irqs;
>          /*
>           * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 84ae441..c5d8b2e 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -180,6 +180,11 @@ extern int vgic_to_sgi(struct vcpu *v, register_t sgir,
>                         enum gic_sgi_mode irqmode, int virq,
>                         unsigned long vcpu_mask);
> +extern void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
> +
> +/* Allocate a VIRQ number for a guest SPI */
> +extern int vgic_allocate_virq(struct domain *d, unsigned int irq);
> +extern void vgic_free_virq(struct domain *d, unsigned int irq);
> +
>  #endif /* __ASM_ARM_VGIC_H__ */
>  
>  /*
