Re: [Xen-devel] [PATCH v3 for-next 1/3] x86/physdev: factor out the code to allocate and map a pirq
>>> On 17.05.17 at 17:15, <roger.pau@xxxxxxxxxx> wrote:
> --- a/xen/arch/x86/irq.c
> +++ b/xen/arch/x86/irq.c
> @@ -2537,3 +2537,162 @@ bool_t hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
>      return is_hvm_domain(d) && pirq &&
>             pirq->arch.hvm.emuirq != IRQ_UNBOUND;
>  }
> +
> +static int allocate_pirq(struct domain *d, int pirq, int irq, int type,
> +                         int *nr)
> +{
> +    int current_pirq;
> +
> +    ASSERT(spin_is_locked(&d->event_lock));
> +    current_pirq = domain_irq_to_pirq(d, irq);
> +    if ( pirq < 0 )
> +    {
> +        if ( current_pirq )
> +        {
> +            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
> +                    d->domain_id, irq, pirq, current_pirq);

Instead of "irq" the old code passed "*index", i.e. a Dom0-kernel-specified
value (which is going to be more useful, as any problem here will need to be
diagnosed in the Dom0 kernel). The two values are identical only if, in the
GSI case, domain_pirq_to_irq(current->domain, *index) in
allocate_and_map_gsi_pirq() returned a negative value.

> +            if ( current_pirq < 0 )
> +                return -EBUSY;
> +        }
> +        else if ( type == MAP_PIRQ_TYPE_MULTI_MSI )
> +        {
> +            if ( *nr <= 0 || *nr > 32 )
> +                return -EDOM;
> +            else if ( *nr != 1 && !iommu_intremap )
> +                return -EOPNOTSUPP;
> +            else

Pointless "else" (twice).

> +            {
> +                while ( *nr & (*nr - 1) )
> +                    *nr += *nr & -*nr;
> +                pirq = get_free_pirqs(d, *nr);
> +                if ( pirq < 0 )
> +                {
> +                    while ( (*nr >>= 1) > 1 )
> +                        if ( get_free_pirqs(d, *nr) > 0 )
> +                            break;
> +                    dprintk(XENLOG_G_ERR, "dom%d: no block of %d free pirqs\n",
> +                            d->domain_id, *nr << 1);
> +                }
> +            }
> +        }
> +        else
> +        {
> +            pirq = get_free_pirq(d, type);
> +            if ( pirq < 0 )
> +                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
> +        }
> +    }
> +    else if ( current_pirq && pirq != current_pirq )
> +    {
> +        dprintk(XENLOG_G_ERR, "dom%d: irq %d already mapped to pirq %d\n",
> +                d->domain_id, irq, current_pirq);
> +        pirq = -EEXIST;

"return -EEXIST" please, to match the direct returns you use further up.

> +    }
> +
> +    return pirq;
> +}
> +
> +int allocate_and_map_gsi_pirq(struct domain *d, int *index, int *pirq_p)

Neither here nor in the MSI sibling do you ever write to *index, so there's
no need for the parameter to have pointer type.

> +{
> +    int irq, pirq, ret;
> +
> +    if ( *index < 0 || *index >= nr_irqs_gsi )
> +    {
> +        dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n", d->domain_id,
> +                *index);
> +        return -EINVAL;
> +    }
> +
> +    irq = domain_pirq_to_irq(current->domain, *index);
> +    if ( irq <= 0 )
> +    {
> +        if ( is_hardware_domain(current->domain) )
> +            irq = *index;
> +        else
> +        {
> +            dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
> +                    d->domain_id);
> +            return -EINVAL;
> +        }
> +    }
> +
> +    /* Verify or get pirq. */
> +    spin_lock(&d->event_lock);
> +    pirq = allocate_pirq(d, *pirq_p, irq, MAP_PIRQ_TYPE_GSI, 0);

The last parameter of allocate_pirq() is a pointer, so you really mean NULL
here.

> +    if ( pirq < 0 )
> +    {
> +        ret = pirq;
> +        goto done;
> +    }
> +
> +    ret = map_domain_pirq(d, pirq, irq, MAP_PIRQ_TYPE_GSI, NULL);
> +    if ( ret == 0 )
> +        *pirq_p = pirq;
> +
> + done:
> +    spin_unlock(&d->event_lock);
> +    return ret;
> +}

Blank line before the final return statement, please.
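As an aside for anyone puzzled by the bit manipulation in the MULTI_MSI
branch quoted further up: the loop rounds the requested vector count up to
the next power of two by repeatedly adding the lowest set bit until only a
single bit remains. A minimal standalone sketch of the same idiom (plain C,
not Xen code; the names are illustrative only):

    #include <stdio.h>

    /* Round n up to the next power of two by adding its lowest set bit
     * until only one bit is left - the same loop shape as in the quoted
     * MULTI_MSI branch. */
    static int round_up_pow2(int n)
    {
        while ( n & (n - 1) )   /* more than one bit set? */
            n += n & -n;        /* add the lowest set bit */
        return n;
    }

    int main(void)
    {
        for ( int n = 1; n <= 9; n++ )
            printf("%d -> %d\n", n, round_up_pow2(n));  /* e.g. 3 -> 4, 5 -> 8 */
        return 0;
    }

Counts that are already powers of two are left unchanged; anything else is
rounded up (3 -> 4, 5 -> 8, and so on).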
> +int allocate_and_map_msi_pirq(struct domain *d, int *index, int *pirq_p,
> +                              int type, struct msi_info *msi)
> +{
> +    int irq, pirq, ret;
> +
> +    switch ( type )
> +    {
> +    case MAP_PIRQ_TYPE_MSI:
> +        if ( !msi->table_base )
> +            msi->entry_nr = 1;
> +        irq = *index;
> +        if ( irq == -1 )
> +    case MAP_PIRQ_TYPE_MULTI_MSI:
> +            irq = create_irq(NUMA_NO_NODE);
> +
> +        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
> +        {
> +            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
> +                    d->domain_id);
> +            return -EINVAL;
> +        }
> +
> +        msi->irq = irq;
> +        break;
> +
> +    default:
> +        dprintk(XENLOG_G_ERR, "dom%d: wrong pirq type %x\n",
> +                d->domain_id, type);
> +        return -EINVAL;

This really should be an ASSERT_UNREACHABLE() (with the return statement
kept).

> +    }
> +
> +    msi->irq = irq;
> +
> +    pcidevs_lock();
> +    /* Verify or get pirq. */
> +    spin_lock(&d->event_lock);
> +    pirq = allocate_pirq(d, *pirq_p, irq, type, &msi->entry_nr);
> +    if ( pirq < 0 )
> +    {
> +        ret = pirq;
> +        goto done;
> +    }
> +
> +    ret = map_domain_pirq(d, pirq, irq, type, msi);
> +    if ( ret == 0 )
> +        *pirq_p = pirq;
> +
> + done:
> +    spin_unlock(&d->event_lock);
> +    pcidevs_unlock();
> +    if ( ret != 0 )
> +        switch ( type )
> +        {
> +        case MAP_PIRQ_TYPE_MSI:
> +            if ( *index == -1 )
> +        case MAP_PIRQ_TYPE_MULTI_MSI:
> +                destroy_irq(irq);
> +            break;
> +        }
> +    return ret;
> +}
> +

Please don't end the file with a blank line.

> diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
> index eec4a41231..e99fd9a35f 100644
> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -92,8 +92,7 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
>                       struct msi_info *msi)
>  {
>      struct domain *d = current->domain;
> -    int pirq, irq, ret = 0;
> -    void *map_data = NULL;
> +    int ret;
>
>      if ( domid == DOMID_SELF && is_hvm_domain(d) && has_pirq(d) )
>      {
> @@ -119,135 +118,22 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
>                       struct msi_info *msi)
>
>      switch ( type )
>      {
>      case MAP_PIRQ_TYPE_GSI:
> -        if ( *index < 0 || *index >= nr_irqs_gsi )
> -        {
> -            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
> -                    d->domain_id, *index);
> -            ret = -EINVAL;
> -            goto free_domain;
> -        }
> -
> -        irq = domain_pirq_to_irq(current->domain, *index);
> -        if ( irq <= 0 )
> -        {
> -            if ( is_hardware_domain(current->domain) )
> -                irq = *index;
> -            else {
> -                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
> -                        d->domain_id);
> -                ret = -EINVAL;
> -                goto free_domain;
> -            }
> -        }
> +        ret = allocate_and_map_gsi_pirq(d, index, pirq_p);
>          break;
> -
>      case MAP_PIRQ_TYPE_MSI:

Please don't delete the blank lines between case blocks, even if the blocks
are much smaller now.

Jan
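For completeness, the construct quoted above in allocate_and_map_msi_pirq()
(and again in its error path around destroy_irq()) -- a case label placed
inside the body of an if, within the enclosing switch -- is legal C: entering
the switch at MAP_PIRQ_TYPE_MULTI_MSI jumps straight to the create_irq()
call, while MAP_PIRQ_TYPE_MSI only reaches it when the caller passed -1. A
minimal standalone sketch of that control flow (plain C, not Xen code; all
names are illustrative only):

    #include <stdio.h>

    enum { SINGLE, MULTI };

    /* A case label attached to the if's body: MULTI jumps straight to the
     * assignment, SINGLE executes it only when no value was supplied. */
    static int demo(int type, int value)
    {
        switch ( type )
        {
        case SINGLE:
            if ( value == -1 )
        case MULTI:
                value = 42;           /* stands in for create_irq() */
            break;
        }
        return value;
    }

    int main(void)
    {
        printf("%d\n", demo(SINGLE, 5));    /* caller-supplied value kept */
        printf("%d\n", demo(SINGLE, -1));   /* allocated because of -1    */
        printf("%d\n", demo(MULTI, 7));     /* always allocated for MULTI */
        return 0;
    }

Compiled with a C compiler, the three calls print 5, 42 and 42 respectively.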