
[Xen-devel] [PATCH 2/2] xen/arm: smmu: Parse generic iommu binding for PCI devices



The legacy SMMU binding will be deprecated in favour of the generic
"iommus" binding, so we need a new helper to parse the generic iommu
bindings. When the system detects that the SMMU is using the generic
binding, this helper will be called when a PCI device is assigned to
a guest.
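
For reference, a device node using the generic binding points at its
SMMU with an "iommus" phandle list, e.g. iommus = <&smmu 0x100>;,
where the extra cell (for #iommu-cells = <1>) is the Stream ID. Below
is a minimal sketch of walking such specifiers with Xen's device tree
helpers; it is illustrative only, and the actual parse done by
arm_smmu_platform_iommu_init may differ:

    /* Illustrative sketch: enumerate the "iommus" specifiers of a node. */
    static int example_walk_iommus(const struct dt_device_node *np)
    {
            struct dt_phandle_args spec;
            int idx = 0;

            while (!dt_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                               idx, &spec)) {
                    /* spec.np is the SMMU node, spec.args[0] the Stream ID */
                    idx++;
            }

            return idx > 0 ? 0 : -ENODEV;
    }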

This patch is based on Linux of_iommu.c:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/iommu/of_iommu.c
The commit id is:
2a0c57545a291f257cd231b1c4b18285b84608d8

Signed-off-by: Wei Chen <Wei.Chen@xxxxxxx>
---
 xen/drivers/passthrough/arm/smmu.c | 73 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 71 insertions(+), 2 deletions(-)

diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 25f2207..50ff997 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2750,7 +2750,72 @@ static int arm_smmu_platform_iommu_init(struct device *dev)
        return 0;
 }
 
-static int arm_smmu_xen_add_device(u8 devfn, struct device*dev)
+/*
+ * PCI devices are not yet supported on ARM, so this is a temporary
+ * function to get the device node of the PCI bridge device, for
+ * functional verification only.
+ */
+static struct device_node *pci_get_bridge_device_node(struct device *dev)
+{
+       struct device_node *dt_dev;
+       const char *type_str;
+
+       dt_for_each_device_node(dt_host, dt_dev) {
+               /* Simply return the first PCI bridge device node */
+               if (!dt_property_read_string(dt_dev, "device_type", &type_str) &&
+                       !strcmp(type_str, "pci"))
+                       return dt_dev;
+       }
+
+       return NULL;
+}
+
+#define PCI_DEVID(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))
+
+static int arm_smmu_pci_iommu_init(struct device *dev, u8 devfn)
+{
+       struct device_node *bridge_np;
+       struct of_phandle_args iommu_spec;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int ret;
+
+       bridge_np = pci_get_bridge_device_node(dev);
+       if (!bridge_np) {
+               dev_err(dev, "Cloud not find the pci bridge device node!\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Start by tracing the RID alias down the PCI topology as
+        * far as the host bridge whose OF node we have...
+        * (we're not even attempting to handle multi-alias devices yet)
+        */
+       iommu_spec.args_count = 1;
+       iommu_spec.np = bridge_np;
+       ret = __arm_smmu_get_pci_sid(pdev, PCI_DEVID(pdev->bus, devfn),
+                                       &iommu_spec.args[0]);
+       if (ret) {
+               dev_err(dev, "Get pci requester ID failed, err=%d!\n", ret);
+               return ret;
+       }
+
+       /*
+        * ...then find out what that becomes once it escapes the PCI
+        * bus into the system beyond, and which IOMMU it ends up at.
+        */
+       iommu_spec.np = NULL;
+       ret = dt_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
+                                       "iommu-map-mask", &iommu_spec.np,
+                                       iommu_spec.args);
+       if (ret) {
+               dev_err(dev, "Do pci map rid failed, err=%d\n", ret);
+               return ret;
+       }
+
+       return arm_smmu_of_xlate(dev, &iommu_spec);
+}
+
+static int arm_smmu_xen_add_device(u8 devfn, struct device *dev)
 {
        int ret;
 
@@ -2760,7 +2825,11 @@ static int arm_smmu_xen_add_device(u8 devfn, struct device*dev)
         * register Master IDs while this function had been invoked.
         */
        if (using_generic_binding) {
-               ret = arm_smmu_platform_iommu_init(dev);
+               if (dev_is_pci(dev))
+                       ret = arm_smmu_pci_iommu_init(dev, devfn);
+               else
+                       ret = arm_smmu_platform_iommu_init(dev);
+
                if (ret)
                        return ret;
        }
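
For reviewers unfamiliar with the translation dt_pci_map_rid() performs:
the bridge node's "iommu-map" property holds entries of the form
<rid-base &iommu iommu-base length>, and the requester ID (PCI_DEVID()
above packs the 8-bit bus number and devfn into a 16-bit RID) is masked
with "iommu-map-mask" and then offset into the matching entry. Below is
a self-contained sketch of that arithmetic, using illustrative types
rather than the Xen helpers:

    #include <stdint.h>

    /* One "iommu-map" entry, after the IOMMU phandle has been resolved. */
    struct iommu_map_entry {
            uint32_t rid_base;   /* first RID covered by this entry */
            uint32_t iommu_base; /* stream ID that rid_base maps to */
            uint32_t length;     /* number of consecutive RIDs covered */
    };

    /*
     * Translate a masked requester ID to a stream ID, mirroring the
     * lookup dt_pci_map_rid() performs over the "iommu-map" entries.
     * Returns 0 and fills *sid on success, -1 if no entry matches.
     */
    static int example_map_rid(const struct iommu_map_entry *map, int nr,
                               uint32_t mask, uint32_t rid, uint32_t *sid)
    {
            uint32_t masked = rid & mask; /* mask defaults to ~0 if absent */
            int i;

            for (i = 0; i < nr; i++) {
                    if (masked >= map[i].rid_base &&
                        masked < map[i].rid_base + map[i].length) {
                            *sid = masked - map[i].rid_base + map[i].iommu_base;
                            return 0;
                    }
            }

            return -1;
    }

For example, with iommu-map = <0x0 &smmu 0x400 0x10000> and no
"iommu-map-mask" property, the device at bus 0, devfn 0x08 (RID 0x0008)
would resolve to stream ID 0x0408.
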
-- 
2.7.4

