Re: [Xen-devel] [PATCH v4 2/4] x86/physdev: factor out the code to allocate and map a pirq



On 01/06/17 12:49, Roger Pau Monne wrote:
> Move the code to allocate and map a domain pirq (either GSI or MSI)
> into the x86 irq code base, so that it can be used outside of the
> physdev ops.
>
> This change shouldn't affect the functionality of the already existing
> physdev ops.
>
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

As you are moving code, please could you make some style fixes (which
can be done on commit if there are no other problems).
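
For context, once this lives in irq.c the allocation path can be driven
from outside the physdev ops.  A minimal sketch of a hypothetical GSI
caller (the function name and error handling are illustrative only, not
part of this patch):

    /* Hypothetical caller outside physdev, for illustration only. */
    static int assign_gsi_to_domain(struct domain *d, unsigned int gsi)
    {
        int pirq = -1; /* < 0 asks allocate_pirq() to pick a free pirq */
        int rc = allocate_and_map_gsi_pirq(d, gsi, &pirq);

        if ( rc )
            return rc;

        /* On success, pirq is the domain pirq now bound to the GSI. */
        return pirq;
    }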

> ---
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> Changes since v3:
>  - Pass the index parameter to the allocate_pirq function, so that it
>    can be printed in case of error.
>  - Remove pointless elses in allocate_pirq.
>  - Return directly in the last check of allocate_pirq (instead of
>    storing the error code in the pirq variable).
>  - allocate_and_map_{gsi/msi}_pirq don't need index to be a pointer.
>  - Fix the last parameter of the allocate_pirq call in
>    allocate_and_map_gsi_pirq to use NULL (instead of 0).
>  - Add an ASSERT_UNREACHABLE if the interrupt type passed to
>    allocate_and_map_msi_pirq is not of the MSI kind.
>  - Restore newlines in the physdev_map_pirq switch case.
>
> Changes since v2:
>  - Factor out the code to allocate the pirq.
>  - Fix coding style issues.
>  - Do not take the pci lock to bind a GSI.
>  - Pass a type parameter to the MSI bind function.
>
> Changes since v1:
>  - New in this version.
> ---
>  xen/arch/x86/irq.c        | 159 ++++++++++++++++++++++++++++++++++++++++++++++
>  xen/arch/x86/physdev.c    | 122 ++---------------------------------
>  xen/include/asm-x86/irq.h |   5 ++
>  3 files changed, 169 insertions(+), 117 deletions(-)
>
> diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
> index 676ba5216f..5184b6144e 100644
> --- a/xen/arch/x86/irq.c
> +++ b/xen/arch/x86/irq.c
> @@ -2537,3 +2537,162 @@ bool_t hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
>      return is_hvm_domain(d) && pirq &&
>             pirq->arch.hvm.emuirq != IRQ_UNBOUND; 
>  }
> +
> +static int allocate_pirq(struct domain *d, int index, int pirq, int irq,
> +                         int type, int *nr)
> +{
> +    int current_pirq;
> +
> +    ASSERT(spin_is_locked(&d->event_lock));
> +    current_pirq = domain_irq_to_pirq(d, irq);
> +    if ( pirq < 0 )
> +    {
> +        if ( current_pirq )
> +        {
> +            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
> +                    d->domain_id, index, pirq, current_pirq);
> +            if ( current_pirq < 0 )
> +                return -EBUSY;
> +        }
> +        else if ( type == MAP_PIRQ_TYPE_MULTI_MSI )
> +        {
> +            if ( *nr <= 0 || *nr > 32 )
> +                return -EDOM;
> +            if ( *nr != 1 && !iommu_intremap )
> +                return -EOPNOTSUPP;
> +
> +            while ( *nr & (*nr - 1) )
> +                *nr += *nr & -*nr;
> +            pirq = get_free_pirqs(d, *nr);
> +            if ( pirq < 0 )
> +            {
> +                while ( (*nr >>= 1) > 1 )
> +                    if ( get_free_pirqs(d, *nr) > 0 )
> +                        break;
> +                dprintk(XENLOG_G_ERR, "dom%d: no block of %d free pirqs\n",
> +                        d->domain_id, *nr << 1);
> +            }
> +        }
> +        else
> +        {
> +            pirq = get_free_pirq(d, type);
> +            if ( pirq < 0 )
> +                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
> +        }
> +    }
> +    else if ( current_pirq && pirq != current_pirq )
> +    {
> +        dprintk(XENLOG_G_ERR, "dom%d: irq %d already mapped to pirq %d\n",
> +                d->domain_id, irq, current_pirq);
> +        return -EEXIST;
> +    }
> +
> +    return pirq;
> +}
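
An aside on the MULTI_MSI branch, for readers following along: MSI only
supports power-of-two vector counts, so the first while loop rounds *nr
up accordingly.  *nr & -*nr isolates the lowest set bit; adding it back
repeatedly carries upward until a single bit remains.  The same idiom in
isolation:

    /* Round nr up to the next power of two (the idiom used above). */
    static unsigned int round_up_pow2(unsigned int nr)
    {
        while ( nr & (nr - 1) )    /* more than one bit set? */
            nr += nr & -nr;        /* add the lowest set bit */
        return nr;                 /* e.g. 5 -> 6 -> 8 */
    }

The second while loop runs only on failure, narrowing *nr so the error
message can report a meaningful block size.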
> +
> +int allocate_and_map_gsi_pirq(struct domain *d, int index, int *pirq_p)
> +{
> +    int irq, pirq, ret;
> +
> +    if ( index < 0 || index >= nr_irqs_gsi )
> +    {
> +        dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n", d->domain_id,
> +                index);
> +        return -EINVAL;
> +    }
> +
> +    irq = domain_pirq_to_irq(current->domain, index);
> +    if ( irq <= 0 )
> +    {
> +        if ( is_hardware_domain(current->domain) )
> +            irq = index;
> +        else
> +        {
> +            dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
> +                    d->domain_id);
> +            return -EINVAL;
> +        }
> +    }
> +
> +    /* Verify or get pirq. */
> +    spin_lock(&d->event_lock);
> +    pirq = allocate_pirq(d, index, *pirq_p, irq, MAP_PIRQ_TYPE_GSI, NULL);
> +    if ( pirq < 0 )
> +    {
> +        ret = pirq;
> +        goto done;
> +    }
> +
> +    ret = map_domain_pirq(d, pirq, irq, MAP_PIRQ_TYPE_GSI, NULL);
> +    if ( ret == 0 )
> +        *pirq_p = pirq;
> +
> + done:
> +    spin_unlock(&d->event_lock);
> +
> +    return ret;
> +}
> +
> +int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p,
> +                              int type, struct msi_info *msi)
> +{
> +    int irq, pirq, ret;
> +
> +    switch ( type )
> +    {
> +    case MAP_PIRQ_TYPE_MSI:
> +        if ( !msi->table_base )
> +            msi->entry_nr = 1;
> +        irq = index;
> +        if ( irq == -1 )

{

> +    case MAP_PIRQ_TYPE_MULTI_MSI:
> +            irq = create_irq(NUMA_NO_NODE);

}
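
(The unquoted braces above are one of the style fixes being asked for:
with a case label inside the if body, the body wants bracketing so the
unusual construct stands out.)  For anyone puzzling over the control
flow itself: MAP_PIRQ_TYPE_MULTI_MSI jumps straight to the create_irq()
call, while MAP_PIRQ_TYPE_MSI reaches it only when the caller passed
index == -1.  Written straight-line, the irq selection is roughly
equivalent to:

    /* Straight-line equivalent of the fall-through above (sketch). */
    if ( type == MAP_PIRQ_TYPE_MULTI_MSI ||
         (type == MAP_PIRQ_TYPE_MSI && index == -1) )
        irq = create_irq(NUMA_NO_NODE);
    else
        irq = index; /* MAP_PIRQ_TYPE_MSI with a caller-chosen irq */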

> +
> +        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
> +        {
> +            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
> +                    d->domain_id);
> +            return -EINVAL;
> +        }
> +
> +        msi->irq = irq;
> +        break;
> +
> +    default:
> +        dprintk(XENLOG_G_ERR, "dom%d: wrong pirq type %x\n",
> +                d->domain_id, type);
> +        ASSERT_UNREACHABLE();
> +        return -EINVAL;
> +    }
> +
> +    msi->irq = irq;
> +
> +    pcidevs_lock();
> +    /* Verify or get pirq. */
> +    spin_lock(&d->event_lock);
> +    pirq = allocate_pirq(d, index, *pirq_p, irq, type, &msi->entry_nr);
> +    if ( pirq < 0 )
> +    {
> +        ret = pirq;
> +        goto done;
> +    }
> +
> +    ret = map_domain_pirq(d, pirq, irq, type, msi);
> +    if ( ret == 0 )
> +        *pirq_p = pirq;
> +
> + done:
> +    spin_unlock(&d->event_lock);
> +    pcidevs_unlock();
> +    if ( ret != 0 )

{

> +        switch ( type )
> +        {
> +        case MAP_PIRQ_TYPE_MSI:
> +            if ( index == -1 )
> +        case MAP_PIRQ_TYPE_MULTI_MSI:
> +                destroy_irq(irq);
> +            break;
> +        }

}

> +
> +    return ret;
> +}
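
To round things out, an MSI counterpart to the GSI sketch above, again
purely illustrative (the function, the field values and the surrounding
device lookup are assumed; struct msi_info as in asm-x86/msi.h):

    /* Hypothetical MSI caller, for illustration only. */
    static int assign_msi_to_domain(struct domain *d, u16 seg, u8 bus,
                                    u8 devfn)
    {
        struct msi_info msi = {
            .seg = seg,
            .bus = bus,
            .devfn = devfn,
            .entry_nr = 1,    /* single-vector MSI */
        };
        int pirq = -1, rc;

        /* index == -1 makes the helper create a fresh irq. */
        rc = allocate_and_map_msi_pirq(d, -1, &pirq,
                                       MAP_PIRQ_TYPE_MSI, &msi);

        return rc ?: pirq;
    }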

~Andrew
