Re: [Xen-devel] [PATCH v2 3/3] xen: arm: handle PCI DT node ranges and interrupt-map properties



Hi Ian,

On 12/03/15 17:17, Ian Campbell wrote:
> These properties are defined in ePAPR (2.3.8 and 2.4.3.1 respectively)
> and the OpenFirmware PCI Bus Binding Specification (IEEE Std 1275-1994).
> 
> This replaces the xgene specific mapping. Tested on Mustang and on a
> model with a PCI virtio controller.
> 
> TODO: Use a helper iterator (e.g. dt_for_each_range) for the ranges
> property, like is already done for interrupts using
> dt_for_each_irq_map.
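
For the record, I'd expect such an iterator to mirror dt_for_each_irq_map.
A hypothetical prototype (names made up):

    /* Hypothetical: invoke cb once per translated entry of the "ranges"
     * property of dev, by analogy with dt_for_each_irq_map(). */
    int dt_for_each_range(const struct dt_device_node *dev,
                          int (*cb)(const struct dt_device_node *dev,
                                    u64 addr, u64 length, void *data),
                          void *data);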
> 
> TODO: Should we stop calling map_device for nodes beneath this one
> (since we should have mapped everything underneath)? I think this is
> complex for cases which map interrupt but not ranges or vice versa,
> perhaps meaning we need to recurse on them separately. Maybe we can
> continue to descend and the mappings may just be harmlessly
> instantiated twice.

There is no harm in routing the same IRQ twice, although it's pointless
to continue.
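
If we do decide to stop, something like this in the code walking the
children (hypothetical, assuming the recursion happens in map_device's
caller) should be enough:

    /* Everything below a PCI node should already be covered by its
     * ranges and interrupt-map, so don't descend any further. */
    if ( dt_device_type_is_equal(node, "pci") )
        return 0;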

How difficult would it be to introduce a new bus?

> TODO: Related to the above there are also some buses for which we
> cannot use dt_bus_default_{map,translate}. We might want to pull in
> the of_bus_pci stuff from Linux, although I think that would be
> orthogonal to this fix.
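
Pulling in of_bus_pci would presumably mean teaching the bus table used
by dt_translate_address about PCI. A rough sketch of the two easy hooks,
assuming Xen's table still mirrors Linux's of_bus (the map/translate/
get_flags hooks would need porting too):

    /* Sketch: PCI bus recognition, modeled on Linux's of_bus_pci. */
    static bool_t dt_bus_pci_match(const struct dt_device_node *np)
    {
        return dt_device_type_is_equal(np, "pci");
    }

    static void dt_bus_pci_count_cells(const struct dt_device_node *np,
                                       int *addrc, int *sizec)
    {
        /* PCI child addresses are phys.hi/mid/lo plus a 64-bit size
         * (IEEE 1275, ePAPR 6.2.2). */
        if ( addrc )
            *addrc = 3;
        if ( sizec )
            *sizec = 2;
    }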
> 
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> ---
> v2: This is essentially a complete reworking, which actually parses
> things properly (obeying #{address,size,interrupt}-cells on the
> appropriate nodes) and includes handling of interrupt-map too.
> ---
>  xen/arch/arm/domain_build.c          |  156 ++++++++++++++++++++++++++++++++++
>  xen/arch/arm/platforms/xgene-storm.c |  143 -------------------------------
>  2 files changed, 156 insertions(+), 143 deletions(-)
> 
> diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
> index 2a2fc2b..704c2aa 100644
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -23,6 +23,21 @@
>  #include <xen/irq.h>
>  #include "kernel.h"
>  
> +/*
> + * Definitions for implementing parts of the OpenFirmware PCI Bus Binding
> + * Specification (IEEE Std 1275-1994).
> + */
> +
> +struct of_pci_unit_address {
> +     __be32 hi, mid, lo;
> +} __attribute__((packed));
> +
> +struct of_pci_ranges_entry {
> +     struct of_pci_unit_address      pci_addr;
> +     __be64                          cpu_addr;
> +     __be64                          length;
> +} __attribute__((packed));
> +
>  static unsigned int __initdata opt_dom0_max_vcpus;
>  integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
>  
> @@ -911,6 +926,139 @@ static int make_timer_node(const struct domain *d, void *fdt,
>      return res;
>  }
>  
> +static int map_pci_device_ranges(struct domain *d,
> +                                 const struct dt_device_node *dev,
> +                                 const struct of_pci_ranges_entry  *ranges,
> +                                 const u32 len)
> +{
> +    int parent_size_cells, parent_addr_cells;
> +    int i, nr, res;
> +
> +    parent_size_cells = dt_n_size_cells(dev);
> +    parent_addr_cells = dt_n_addr_cells(dev);
> +
> +    /*
> +     * Range is child address, host address (#address-cells), length
> +     * (#size-cells), see ePAPR 2.3.8.
> +     *
> +     * PCI child address is u32 space + u64 address, see ePAPR 6.2.2.
> +     *
> +     */
> +    nr = len / sizeof(*ranges);
> +
> +    for ( i = 0; i < nr ; i++ )
> +    {
> +        const struct of_pci_ranges_entry *range = &ranges[i];
> +        u64 addr, len;
> +
> +        len = fdt64_to_cpu(range->length);
> +
> +        addr = dt_translate_address(dev, (const __be32 *)&range->cpu_addr);
> +        DPRINT("PCI SPACE 0x%08x.%08x.%08x 0x%"PRIx64" size 0x%"PRIx64"\n",
> +               fdt32_to_cpu(range->pci_addr.hi),
> +               fdt32_to_cpu(range->pci_addr.mid),
> +               fdt32_to_cpu(range->pci_addr.lo),
> +               addr, len);
> +
> +        res = map_mmio_regions(d,
> +                               paddr_to_pfn(addr & PAGE_MASK),
> +                               DIV_ROUND_UP(len, PAGE_SIZE),
> +                               paddr_to_pfn(addr & PAGE_MASK));
> +        if ( res < 0 )
> +        {
> +            printk(XENLOG_ERR "Unable to map 0x%"PRIx64
> +                   " - 0x%"PRIx64" in domain %d\n",
> +                   addr & PAGE_MASK, PAGE_ALIGN(addr + len) - 1,
> +                   d->domain_id);
> +            return res;
> +        }
> +    }
> +    return 0;
> +}
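
One more remark on this hunk: if addr is not page-aligned,
DIV_ROUND_UP(len, PAGE_SIZE) can fall one page short of covering
addr + len (the error message in this function already uses
PAGE_ALIGN(addr + len) - 1). Untested, but something like:

    res = map_mmio_regions(d,
                           paddr_to_pfn(addr & PAGE_MASK),
                           DIV_ROUND_UP(len + (addr & ~PAGE_MASK), PAGE_SIZE),
                           paddr_to_pfn(addr & PAGE_MASK));

would account for the in-page offset.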
> +
> +static int map_device_ranges(struct domain *d, const struct dt_device_node *dev)
> +{
> +    const void *ranges;
> +    u32 len;
> +
> +    ranges = dt_get_property(dev, "ranges", &len);
> +    /* No ranges, nothing to do */
> +    if ( !ranges )
> +        return 0;
> +
> +    if ( dt_device_type_is_equal(dev, "pci") )
> +        return map_pci_device_ranges(d, dev, ranges, len);
> +
> +    printk("Cannot handle ranges for non-PCI device %s type %s\n",
> +           dt_node_name(dev), dev->type);
> +

Is the printk really necessary? It will add a spurious log message on
platforms where ranges is not empty (midway, arndale, foundation model...).

In general, we should not handle ranges by default, as we may overlap
mappings already done during domain creation (such as the GIC).
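
FWIW, the tail of map_device_ranges could then simply be (sketch):

    if ( dt_device_type_is_equal(dev, "pci") )
        return map_pci_device_ranges(d, dev, ranges, len);

    /* Non-PCI buses (e.g. simple-bus) legitimately carry a ranges
     * property too; skip them silently rather than logging. */
    return 0;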

> +    /* Let's not worry for now... */
> +    return 0;
> +}
> +
> +static int map_interrupt_to_domain(const struct dt_device_node *dev,
> +                                   const struct dt_raw_irq *dt_raw_irq,
> +                                   void *data)
> +{
> +    struct domain *d = data;
> +    struct dt_irq dt_irq;
> +    int res;
> +
> +    res = dt_irq_translate(dt_raw_irq, &dt_irq);
> +    if ( res < 0 )
> +    {
> +        printk(XENLOG_ERR "%s: Failed to translate IRQ: %d\n",
> +               dt_node_name(dev), res);
> +        return res;
> +    }
> +
> +    if ( dt_irq.irq < NR_LOCAL_IRQS )
> +    {
> +        printk(XENLOG_ERR "%s: IRQ%"PRId32" is not a SPI\n",
> +               dt_node_name(dev), dt_irq.irq);
> +        return -EINVAL;
> +    }
> +
> +    /* Setup the IRQ type */
> +    res = irq_set_spi_type(dt_irq.irq, dt_irq.type);
> +    if ( res )
> +    {
> +        printk(XENLOG_ERR
> +               "%s: Unable to setup IRQ%"PRId32" to dom%d\n",
> +               dt_node_name(dev), dt_irq.irq, d->domain_id);
> +        return res;
> +    }
> +
> +    res = route_irq_to_guest(d, dt_irq.irq, dt_node_name(dev));
> +    if ( res < 0 )
> +    {
> +        printk(XENLOG_ERR "Unable to map IRQ%"PRId32" to dom%d\n",
> +               dt_irq.irq, d->domain_id);
> +        return res;
> +    }
> +
> +    DPRINT("PCI IRQ %u mapped to guest\n", dt_irq.irq);
> +
> +    return 0;
> +}
> +
> +static int map_device_interrupts(struct domain *d, const struct dt_device_node *dev)
> +{
> +
> +    if ( !dt_property_read_bool(dev, "interrupt-map") )

It's strange to use dt_property_read_bool here but dt_get_property above
to check whether the property is present.

I prefer the dt_get_property version, which is less confusing.

Anyway, why do you need to check for interrupt-map at all? Can't your new
helper deal with a missing or empty property?
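
Something along these lines (untested) would be consistent with
map_device_ranges:

    const void *map;
    u32 len;

    map = dt_get_property(dev, "interrupt-map", &len);
    if ( !map )
        return 0; /* No interrupt-map to handle */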

> +        return 0; /* No interrupt map to handle */
> +
> +    if ( dt_device_type_is_equal(dev, "pci") )
> +        return dt_for_each_irq_map(dev, &map_interrupt_to_domain, d);
> +
> +    printk("Cannot handle interrupt-map for non-PCI device %s type %s\n",
> +           dt_node_name(dev), dev->type);
> +
> +    /* Let's not worry for now... */
> +    return 0;
> +}
> +
> +
>  /* Map the device in the domain */
>  static int map_device(struct domain *d, struct dt_device_node *dev)
>  {
> @@ -1025,6 +1173,14 @@ static int map_device(struct domain *d, struct dt_device_node *dev)
>          }
>      }
>  
> +    res = map_device_ranges(d, dev);
> +    if ( res )
> +        return res;
> +
> +    res = map_device_interrupts(d, dev);

The name of the function doesn't make much sense: we already map
interrupts above (see the platform get_irq handling).

Regards,

-- 
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel