
Re: [Xen-devel] [PATCH v5] AMD IOMMU: fix Dom0 device setup failure for host bridges



On 12.09.2013 10:17, Jan Beulich wrote:
> The host bridge device (i.e. device 0x18 on AMD) does not require an
> IOMMU, and is therefore not included in the IVRS. The current logic
> tries to map all PCI devices to an IOMMU. In this case, "xl dmesg"
> shows the following messages (-19 being -ENODEV) on an AMD system.
> 
> (XEN) setup 0000:00:18.0 for d0 failed (-19)
> (XEN) setup 0000:00:18.1 for d0 failed (-19)
> (XEN) setup 0000:00:18.2 for d0 failed (-19)
> (XEN) setup 0000:00:18.3 for d0 failed (-19)
> (XEN) setup 0000:00:18.4 for d0 failed (-19)
> (XEN) setup 0000:00:18.5 for d0 failed (-19)
> 
> This patch adds a new device type (i.e. DEV_TYPE_PCI_HOST_BRIDGE) which
> corresponds to PCI class code 0x06 and sub-class 0x00. It then uses
> this new type to filter out host bridges when trying to map devices to
> an IOMMU.
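
Side note, not part of the patch: the class-code check that the new
DEV_TYPE_PCI_HOST_BRIDGE case keys off amounts to reading the 16-bit
base-class/sub-class word at config space offset 0x0a and comparing it
against 0x0600. A minimal stand-alone sketch of that check, where
read_pci_config16() and is_host_bridge() are illustrative names rather
than the actual Xen helpers:

#include <stdint.h>
#include <stdbool.h>

#define PCI_CLASS_DEVICE        0x0a    /* 16-bit base-class/sub-class word */
#define PCI_CLASS_BRIDGE_HOST   0x0600  /* base class 0x06, sub-class 0x00 */

/* Illustrative stand-in for Xen's pci_conf_read16(). */
extern uint16_t read_pci_config16(uint16_t seg, uint8_t bus,
                                  uint8_t slot, uint8_t func, int reg);

static bool is_host_bridge(uint16_t seg, uint8_t bus,
                           uint8_t slot, uint8_t func)
{
    /* Host bridges report class 0x06 (bridge), sub-class 0x00 (host). */
    return read_pci_config16(seg, bus, slot, func, PCI_CLASS_DEVICE) ==
           PCI_CLASS_BRIDGE_HOST;
}

The pdev_type() hunk below does the same thing through the existing
config space accessors and the new PCI_CLASS_BRIDGE_HOST define.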
> 
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> Reported-by: Stefan Bader <stefan.bader@xxxxxxxxxxxxx>
> 
> On VT-d, refuse (un)mapping host bridges for domains other than the
> hardware domain.
> 
> Coding style cleanup.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

As this looks to be about final:

Tested-by: Stefan Bader <stefan.bader@xxxxxxxxxxxxx>
> 
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -147,9 +147,10 @@ static void amd_iommu_setup_domain_devic
>  
>          amd_iommu_flush_device(iommu, req_id);
>  
> -        AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, "
> +        AMD_IOMMU_DEBUG("Setup I/O page table: device id = %#x, type = %#x, "
>                          "root table = %#"PRIx64", "
> -                        "domain = %d, paging mode = %d\n", req_id,
> +                        "domain = %d, paging mode = %d\n",
> +                        req_id, pdev->type,
>                          page_to_maddr(hd->root_table),
>                          hd->domain_id, hd->paging_mode);
>      }
> @@ -175,6 +176,15 @@ static int __init amd_iommu_setup_dom0_d
>  
>      if ( unlikely(!iommu) )
>      {
> +        /* Filter out host bridge devices */
> +        if ( pdev->type == DEV_TYPE_PCI_HOST_BRIDGE )
> +        {
> +            AMD_IOMMU_DEBUG("Skipping host bridge %04x:%02x:%02x.%u\n",
> +                            pdev->seg, PCI_BUS(bdf), PCI_SLOT(bdf),
> +                            PCI_FUNC(bdf));
> +            return 0;
> +        }
> +
>          AMD_IOMMU_DEBUG("No iommu for device %04x:%02x:%02x.%u\n",
>                          pdev->seg, pdev->bus,
>                          PCI_SLOT(devfn), PCI_FUNC(devfn));
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -194,9 +194,6 @@ static struct pci_dev *alloc_pdev(struct
>          u16 cap;
>          u8 sec_bus, sub_bus;
>  
> -        case DEV_TYPE_PCIe_BRIDGE:
> -            break;
> -
>          case DEV_TYPE_PCIe2PCI_BRIDGE:
>          case DEV_TYPE_LEGACY_PCI_BRIDGE:
>              sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
> @@ -244,6 +241,8 @@ static struct pci_dev *alloc_pdev(struct
>              break;
>  
>          case DEV_TYPE_PCI:
> +        case DEV_TYPE_PCIe_BRIDGE:
> +        case DEV_TYPE_PCI_HOST_BRIDGE:
>              break;
>  
>          default:
> @@ -697,6 +696,7 @@ void pci_release_devices(struct domain *
>      spin_unlock(&pcidevs_lock);
>  }
>  
> +#define PCI_CLASS_BRIDGE_HOST    0x0600
>  #define PCI_CLASS_BRIDGE_PCI     0x0604
>  
>  enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn)
> @@ -720,6 +720,8 @@ enum pdev_type pdev_type(u16 seg, u8 bus
>              return DEV_TYPE_PCI2PCIe_BRIDGE;
>          }
>          return DEV_TYPE_PCIe_BRIDGE;
> +    case PCI_CLASS_BRIDGE_HOST:
> +        return DEV_TYPE_PCI_HOST_BRIDGE;
>  
>      case 0x0000: case 0xffff:
>          return DEV_TYPE_PCI_UNKNOWN;
> --- a/xen/drivers/passthrough/vtd/intremap.c
> +++ b/xen/drivers/passthrough/vtd/intremap.c
> @@ -442,6 +442,7 @@ static void set_msi_source_id(struct pci
>      case DEV_TYPE_PCIe_ENDPOINT:
>      case DEV_TYPE_PCIe_BRIDGE:
>      case DEV_TYPE_PCIe2PCI_BRIDGE:
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
>          switch ( pdev->phantom_stride )
>          {
>          case 1: sq = SQ_13_IGNORE_3; break;
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1433,6 +1433,15 @@ static int domain_context_mapping(
>  
>      switch ( pdev->type )
>      {
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
> +        if ( iommu_verbose )
> +            dprintk(VTDPREFIX, "d%d:Hostbridge: skip %04x:%02x:%02x.%u map\n",
> +                    domain->domain_id, seg, bus,
> +                    PCI_SLOT(devfn), PCI_FUNC(devfn));
> +        if ( !is_hardware_domain(domain) )
> +            return -EPERM;
> +        break;
> +
>      case DEV_TYPE_PCIe_BRIDGE:
>      case DEV_TYPE_PCIe2PCI_BRIDGE:
>      case DEV_TYPE_LEGACY_PCI_BRIDGE:
> @@ -1563,6 +1572,15 @@ static int domain_context_unmap(
>  
>      switch ( pdev->type )
>      {
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
> +        if ( iommu_verbose )
> +            dprintk(VTDPREFIX, "d%d:Hostbridge: skip %04x:%02x:%02x.%u unmap\n",
> +                    domain->domain_id, seg, bus,
> +                    PCI_SLOT(devfn), PCI_FUNC(devfn));
> +        if ( !is_hardware_domain(domain) )
> +            return -EPERM;
> +        goto out;
> +
>      case DEV_TYPE_PCIe_BRIDGE:
>      case DEV_TYPE_PCIe2PCI_BRIDGE:
>      case DEV_TYPE_LEGACY_PCI_BRIDGE:
> --- a/xen/include/xen/pci.h
> +++ b/xen/include/xen/pci.h
> @@ -63,6 +63,7 @@ struct pci_dev {
>          DEV_TYPE_PCIe2PCI_BRIDGE,   // PCIe-to-PCI/PCIx bridge
>          DEV_TYPE_PCI2PCIe_BRIDGE,   // PCI/PCIx-to-PCIe bridge
>          DEV_TYPE_LEGACY_PCI_BRIDGE, // Legacy PCI bridge
> +        DEV_TYPE_PCI_HOST_BRIDGE,   // PCI Host bridge
>          DEV_TYPE_PCI,
>      } type;
>  
> 
> 

