[Xen-devel] [PATCH 1/1 V3] x86/AMD: Fix setup ssss:bb:dd:f for d0 failed
From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

The host bridge device (i.e. 0x18 for AMD) does not require an IOMMU, and
is therefore not included in the IVRS. The current logic tries to map all
PCI devices to an IOMMU. In this case, "xl dmesg" shows the following
messages on an AMD system:

(XEN) setup 0000:00:18.0 for d0 failed (-19)
(XEN) setup 0000:00:18.1 for d0 failed (-19)
(XEN) setup 0000:00:18.2 for d0 failed (-19)
(XEN) setup 0000:00:18.3 for d0 failed (-19)
(XEN) setup 0000:00:18.4 for d0 failed (-19)
(XEN) setup 0000:00:18.5 for d0 failed (-19)

This patch adds a new device type (DEV_TYPE_PCI_HOST_BRIDGE), corresponding
to PCI class code 0x06 and sub-class 0x00, and uses this new type to filter
out host bridges when trying to map devices to an IOMMU.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Reported-by: Stefan Bader <stefan.bader@xxxxxxxxxxxxx>
---
Changes for V3:
    - Change how the Intel code handles mapping/unmapping of host bridge devices
    - Clean up debug messages
    - Tested on an Intel system
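For reference, the classification keys off the 16-bit class/sub-class word in
PCI config space (offset 0x0a), which reads 0x0600 for a host bridge. Below is
a minimal, self-contained sketch of that check; read_pci_conf16() is a
hypothetical stand-in for the hypervisor's config-space accessor and is shown
only to illustrate the 0x0600 match, not as part of this patch:

#include <stdint.h>
#include <stdbool.h>

#define PCI_CLASS_DEVICE      0x0a    /* 16-bit class + sub-class word */
#define PCI_CLASS_BRIDGE_HOST 0x0600  /* base class 0x06, sub-class 0x00 */

/* Hypothetical config-space accessor (stand-in for the real one). */
uint16_t read_pci_conf16(uint16_t seg, uint8_t bus, uint8_t dev, uint8_t fn,
                         unsigned int reg);

/* A device is a host bridge when its class/sub-class word is 0x0600. */
static bool is_host_bridge(uint16_t seg, uint8_t bus, uint8_t dev, uint8_t fn)
{
    return read_pci_conf16(seg, bus, dev, fn, PCI_CLASS_DEVICE) ==
           PCI_CLASS_BRIDGE_HOST;
}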
 xen/drivers/passthrough/amd/pci_amd_iommu.c | 14 ++++++++++++--
 xen/drivers/passthrough/pci.c               |  9 +++++----
 xen/drivers/passthrough/vtd/intremap.c      |  3 +++
 xen/drivers/passthrough/vtd/iommu.c         | 15 ++++++++++++++-
 xen/include/xen/pci.h                       |  1 +
 5 files changed, 35 insertions(+), 7 deletions(-)

diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 9684ae8..dc7ca73 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -147,9 +147,10 @@ static void amd_iommu_setup_domain_device(

     amd_iommu_flush_device(iommu, req_id);

-    AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, "
+    AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, type = %x, "
                     "root table = %#"PRIx64", "
-                    "domain = %d, paging mode = %d\n", req_id,
+                    "domain = %d, paging mode = %d\n",
+                    req_id, pdev->type,
                     page_to_maddr(hd->root_table),
                     hd->domain_id, hd->paging_mode);
 }
@@ -175,6 +176,15 @@ static int __init amd_iommu_setup_dom0_device(u8 devfn, struct pci_dev *pdev)

     if ( unlikely(!iommu) )
     {
+        /* Filter the bridge devices */
+        if ( (pdev->type == DEV_TYPE_PCI_HOST_BRIDGE) )
+        {
+            AMD_IOMMU_DEBUG("Skipping host bridge %04x:%02x:%02x.%u (type %x)\n",
+                            pdev->seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf),
+                            pdev->type);
+            return 0;
+        }
+
         AMD_IOMMU_DEBUG("No iommu for device %04x:%02x:%02x.%u\n",
                         pdev->seg, pdev->bus,
                         PCI_SLOT(devfn), PCI_FUNC(devfn));
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index b488e2a..c151bc8 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -203,9 +203,6 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
         u16 cap;
         u8 sec_bus, sub_bus;

-    case DEV_TYPE_PCIe_BRIDGE:
-        break;
-
     case DEV_TYPE_PCIe2PCI_BRIDGE:
     case DEV_TYPE_LEGACY_PCI_BRIDGE:
         sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
@@ -253,6 +250,8 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
         break;

     case DEV_TYPE_PCI:
+    case DEV_TYPE_PCIe_BRIDGE:
+    case DEV_TYPE_PCI_HOST_BRIDGE:
         break;

     default:
@@ -706,6 +705,7 @@ void pci_release_devices(struct domain *d)
     spin_unlock(&pcidevs_lock);
 }

+#define PCI_CLASS_BRIDGE_HOST 0x0600
 #define PCI_CLASS_BRIDGE_PCI 0x0604

 enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn)
@@ -729,7 +729,8 @@ enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn)
             return DEV_TYPE_PCI2PCIe_BRIDGE;
         }
         return DEV_TYPE_PCIe_BRIDGE;
-
+    case PCI_CLASS_BRIDGE_HOST:
+        return DEV_TYPE_PCI_HOST_BRIDGE;
     case 0x0000: case 0xffff:
         return DEV_TYPE_PCI_UNKNOWN;
     }
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index f3bb31b..9be7341 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -439,6 +439,9 @@ static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire)
     {
         unsigned int sq;

+    case DEV_TYPE_PCI_HOST_BRIDGE:
+        break;
+
     case DEV_TYPE_PCIe_ENDPOINT:
     case DEV_TYPE_PCIe_BRIDGE:
     case DEV_TYPE_PCIe2PCI_BRIDGE:
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index fd3abcb..6492fd1 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1433,6 +1433,12 @@ static int domain_context_mapping(

     switch ( pdev->type )
     {
+    case DEV_TYPE_PCI_HOST_BRIDGE:
+        if (iommu_verbose)
+            dprintk(VTDPREFIX, "d%d:Hostbridge map skip %04x:%02x:%02x.%u\n",
+                    domain->domain_id, seg, bus,
+                    PCI_SLOT(devfn), PCI_FUNC(devfn));
+
     case DEV_TYPE_PCIe_BRIDGE:
     case DEV_TYPE_PCIe2PCI_BRIDGE:
     case DEV_TYPE_LEGACY_PCI_BRIDGE:
@@ -1563,6 +1569,12 @@ static int domain_context_unmap(

     switch ( pdev->type )
     {
+    case DEV_TYPE_PCI_HOST_BRIDGE:
+        if (iommu_verbose)
+            dprintk(VTDPREFIX, "d%d:Hostbridge unmap skip %04x:%02x:%02x.%u\n",
+                    domain->domain_id, seg, bus,
+                    PCI_SLOT(devfn), PCI_FUNC(devfn));
+
     case DEV_TYPE_PCIe_BRIDGE:
     case DEV_TYPE_PCIe2PCI_BRIDGE:
     case DEV_TYPE_LEGACY_PCI_BRIDGE:
@@ -1885,7 +1897,8 @@ static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
     ret = domain_context_mapping(pdev->domain, devfn, pdev);
     if ( ret )
     {
-        dprintk(XENLOG_ERR VTDPREFIX, "d%d: context mapping failed\n",
+        if ( ret != -EPERM )
+            dprintk(XENLOG_ERR VTDPREFIX, "d%d: context mapping failed\n",
                 pdev->domain->domain_id);
         return ret;
     }
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index c367736..d33a15d 100644
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -63,6 +63,7 @@ struct pci_dev {
         DEV_TYPE_PCIe2PCI_BRIDGE,   // PCIe-to-PCI/PCIx bridge
         DEV_TYPE_PCI2PCIe_BRIDGE,   // PCI/PCIx-to-PCIe bridge
         DEV_TYPE_LEGACY_PCI_BRIDGE, // Legacy PCI bridge
+        DEV_TYPE_PCI_HOST_BRIDGE,   // PCI Host bridge
         DEV_TYPE_PCI,
     } type;

--
1.8.1.2

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel