
[Xen-devel] [PATCH 16/16] HACK: xen: arm: map PCI controller "ranges" MMIO regions to dom0.



The ranges property of a node with device_type = "pci" is defined in ePAPR
2.3.8. Map the appropriate MMIO regions through to dom0.
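
For example (illustrative values, not taken from a real Xgene DT) a host
bridge node might contain:

    ranges = <0x02000000 0x0 0x40000000  0xe0 0x00000000  0x0 0x10000000>;

i.e. PCI space code 0x02000000 (32-bit non-prefetchable memory), child
address 0x40000000, mapped at host (CPU) address 0xe000000000, size
0x10000000.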

This is a hack/PoC since it actually crashes for some reason. Hence it
contains a hacked-in hardcoded list suitable for Xgene while I figure this
out.

This should also eventually handle interrupt-map (ePAPR 2.4.3.1) and
possibly dma-ranges (ePAPR 2.3.9) and msi-ranges (unspecified?) too.
---
 xen/arch/arm/domain_build.c |  112 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 112 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index aa7e3d2..e778c06 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -702,6 +702,74 @@ static int make_xen_node(const struct domain *d, void *fdt,
     return res;
 }
 
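+/*
+ * Parse the "ranges" property (ePAPR 2.3.8) of a PCI host controller
+ * node and map the MMIO windows it describes through to the domain.
+ */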
+static int map_pci_device_ranges(struct domain *d,
+                                 const struct dt_device_node *dev,
+                                 const struct dt_property *ranges)
+{
+    const __be32 *cells;
+
+    int size_cells, addr_cells, i, nr;
+
+    u32 pci_space;
+    u64 child_addr;
+    u64 host_addr;
+    u64 length;
+
+    printk("%s(%p, %p, %p)\n", __func__, d, dev, ranges);
+    printk("%s device %s\n", __func__, dt_node_full_name(dev));
+    return 0; /* XXX parsing below crashes, see commit message */
+
+    cells = ranges->value;
+    printk("%s ranges at %p, length %d\n", __func__, cells, ranges->length);
+    size_cells = dt_n_size_cells(dev);
+    addr_cells = dt_n_addr_cells(dev);
+
+    /*
+     * Each range is: child address, host address (#address-cells) and
+     * length (#size-cells), see ePAPR 2.3.8.
+     *
+     * A PCI child address is a u32 space code plus a u64 address, see
+     * ePAPR 6.2.2, i.e. each entry is 3 + addr_cells + size_cells cells.
+     */
+    nr = ranges->length / (( 3 + addr_cells + size_cells ) * sizeof(__be32));
+    printk("PCI device %s: #address-cells %d, #size-cells %d. len %d, entries 
%d\n",
+           dt_node_name(dev), addr_cells, size_cells, ranges->length, nr);
+
+    for ( i = 0; i < nr ; i++ )
+    {
+        pci_space = (u32)dt_next_cell(1, &cells);
+        child_addr = dt_next_cell(2, &cells);
+        host_addr = dt_next_cell(addr_cells, &cells);
+        length = dt_next_cell(size_cells, &cells);
+        printk("PCI SPACE 0x%08x, 0x%"PRIx64" maps to 0x%"PRIx64" size 
0x%"PRIx64"\n",
+               pci_space, child_addr, host_addr, length);
+    }
+    return 0;
+}
+
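+/* Map the MMIO windows in the "ranges" property of dev through to d. */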
+static int map_device_ranges(struct domain *d, const struct dt_device_node *dev)
+{
+    const struct dt_property *ranges;
+    u32 len;
+
+    ranges = dt_get_property(dev, "ranges", &len);
+    /* No ranges, nothing to do */
+    if ( !ranges )
+        return 0;
+
+    if ( dt_device_type_is_equal(dev, "pci") )
+        return map_pci_device_ranges(d, dev, ranges);
+
+    printk("Cannot handle ranges for non-PCI device type %s\n", dev->type);
+    /* Let's not worry for now... */
+    return 0;
+}
+
 /* Map the device in the domain */
 static int map_device(struct domain *d, const struct dt_device_node *dev)
 {
@@ -767,6 +835,9 @@ static int map_device(struct domain *d, const struct dt_device_node *dev)
         DPRINT("addr %u = 0x%"PRIx64" - 0x%"PRIx64"\n",
                i, addr, addr + size - 1);
 
+        if ( size == 0 )
+            continue;
+
         res = map_mmio_regions(d, addr & PAGE_MASK,
                                PAGE_ALIGN(addr + size) - 1,
                                addr & PAGE_MASK);
@@ -779,6 +850,10 @@ static int map_device(struct domain *d, const struct dt_device_node *dev)
         }
     }
 
+    res = map_device_ranges(d, dev);
+    if ( res )
+        return res;
+
     return 0;
 }
 
@@ -903,6 +978,43 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo)
     if ( ret )
         goto err;
 
+    {
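+        /* XXX hardcoded Xgene PCI windows and legacy INTA-INTD routing,
+         * see commit message; these should come from the DT instead. */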
+        struct dt_irq irq;
+
+        ret = map_mmio_regions(d,
+                               0xe000000000UL,
+                               0xe00fffffffUL,
+                               0xe000000000UL);
+        if ( ret ) printk("PCI REGION 0 failed\n");
+        ret = map_mmio_regions(d,
+                               0xe080000000UL,
+                               0xe08fffffffUL,
+                               0xe080000000UL);
+        if ( ret ) printk("PCI REGION 1 failed\n");
+        ret = map_mmio_regions(d,
+                               0xe010000000UL,
+                               0xe01000ffffUL,
+                               0xe010000000UL);
+        if ( ret ) printk("PCI REGION 2 failed\n");
+
+        irq.type = 0x4; /* DT_IRQ_TYPE_LEVEL_HIGH */
+
+        irq.irq = 0xc2 + 32; /* SPI 0xc2; SPIs are GIC IRQs 32 and up */
+        ret = gic_route_irq_to_guest(d, &irq, "PCI#INTA");
+        if ( ret ) printk("PCI INTA failed\n");
+        irq.irq = 0xc3 + 32;
+        ret = gic_route_irq_to_guest(d, &irq, "PCI#INTB");
+        if ( ret ) printk("PCI INTB failed\n");
+        irq.irq = 0xc4 + 32;
+        ret = gic_route_irq_to_guest(d, &irq, "PCI#INTC");
+        if ( ret ) printk("PCI INTC failed\n");
+        irq.irq = 0xc5 + 32;
+        ret = gic_route_irq_to_guest(d, &irq, "PCI#INTD");
+        if ( ret ) printk("PCI INTD failed\n");
+    }
+
     ret = fdt_finish(kinfo->fdt);
     if ( ret < 0 )
         goto err;
-- 
1.7.10.4

