
Re: [Xen-devel] [v8][PATCH 16/17] xen/vtd: group assigned device with RMRR



> From: Chen, Tiejun
> Sent: Monday, December 01, 2014 5:25 PM
> 
> Sometimes different devices share an RMRR range, in which case we
> shouldn't assign those devices to different VMs: doing so could
> leak data, or even cause damage, across VMs.
> 
> So we group all devices sharing an RMRR range to make sure they
> can only ever be assigned to the same VM.
> 
> Here we introduce two fields, gid and domid, in struct
> acpi_rmrr_unit:
>  gid: indicates which group this device belongs to. "0" is
>       invalid, so ids start from "1".
>  domid: indicates which domain currently owns this device. The
>         hardware domain owns it initially.
> 
> Signed-off-by: Tiejun Chen <tiejun.chen@xxxxxxxxx>
> ---
>  xen/drivers/passthrough/vtd/dmar.c  | 28 ++++++++++++++-
>  xen/drivers/passthrough/vtd/dmar.h  |  2 ++
>  xen/drivers/passthrough/vtd/iommu.c | 68 +++++++++++++++++++++++++++++++++----
>  3 files changed, 91 insertions(+), 7 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
> index c5bc8d6..8d3406f 100644
> --- a/xen/drivers/passthrough/vtd/dmar.c
> +++ b/xen/drivers/passthrough/vtd/dmar.c
> @@ -572,10 +572,11 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header)
>  {
>      struct acpi_dmar_reserved_memory *rmrr =
>         container_of(header, struct acpi_dmar_reserved_memory, header);
> -    struct acpi_rmrr_unit *rmrru;
> +    struct acpi_rmrr_unit *rmrru, *cur_rmrr;
>      void *dev_scope_start, *dev_scope_end;
>      u64 base_addr = rmrr->base_address, end_addr = rmrr->end_address;
>      int ret;
> +    static unsigned int group_id = 0;
> 
>      if ( (ret = acpi_dmar_check_length(header, sizeof(*rmrr))) != 0 )
>          return ret;
> @@ -611,6 +612,8 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header)
>      rmrru->base_address = base_addr;
>      rmrru->end_address = end_addr;
>      rmrru->segment = rmrr->segment;
> +    /* "0" is an invalid group id. */
> +    rmrru->gid = 0;
> 
>      dev_scope_start = (void *)(rmrr + 1);
>      dev_scope_end   = ((void *)rmrr) + header->length;
> @@ -682,7 +685,30 @@ acpi_parse_one_rmrr(struct acpi_dmar_header *header)
>                      "So please set pci_rdmforce to reserve these ranges"
>                      " if you need such a device in hotplug case.\n");
> 
> +            list_for_each_entry(cur_rmrr, &acpi_rmrr_units, list)
> +            {
> +                /*
> +                 * Identical or overlapping ranges mean the
> +                 * devices belong in the same group.
> +                 */
> +                if ( ((base_addr >= cur_rmrr->base_address) &&
> +                     (end_addr <= cur_rmrr->end_address)) ||
> +                     ((base_addr <= cur_rmrr->base_address) &&
> +                     (end_addr >= cur_rmrr->end_address)) )

Again, this overlap detection is incomplete: the two conditions only
catch full containment (one range entirely inside the other) and miss
partially overlapping ranges.
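
A complete check is just the standard interval-overlap test. An
untested sketch, reusing the patch's variable names:

    /*
     * Closed intervals [base_addr, end_addr] and
     * [cur_rmrr->base_address, cur_rmrr->end_address] overlap
     * iff each one starts no later than the other one ends.
     * This also covers the partial-overlap cases missed above.
     */
    if ( (base_addr <= cur_rmrr->end_address) &&
         (end_addr >= cur_rmrr->base_address) )
    {
        rmrru->gid = cur_rmrr->gid;
        break;
    }

(break rather than continue, on the assumption that a single match
settles the group; a new range bridging two existing groups would
require merging them, which neither version handles.)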

> +                {
> +                    rmrru->gid = cur_rmrr->gid;
> +                    continue;
> +                }
> +            }
> +
>              acpi_register_rmrr_unit(rmrru);
> +
> +            /* Group ids are allocated starting from 1. */
> +            if ( !rmrru->gid )
> +            {
> +                group_id++;
> +                rmrru->gid = group_id;
> +            }
>          }
>      }
> 
> diff --git a/xen/drivers/passthrough/vtd/dmar.h b/xen/drivers/passthrough/vtd/dmar.h
> index af1feef..a57c0d4 100644
> --- a/xen/drivers/passthrough/vtd/dmar.h
> +++ b/xen/drivers/passthrough/vtd/dmar.h
> @@ -76,6 +76,8 @@ struct acpi_rmrr_unit {
>      u64    end_address;
>      u16    segment;
>      u8     allow_all:1;
> +    int    gid;
> +    domid_t    domid;
>  };
> 
>  struct acpi_atsr_unit {
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index a54c6eb..ba40209 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1882,9 +1882,9 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
> 
>  static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
>  {
> -    struct acpi_rmrr_unit *rmrr;
> -    u16 bdf;
> -    int ret, i;
> +    struct acpi_rmrr_unit *rmrr, *g_rmrr;
> +    u16 bdf, g_bdf;
> +    int ret, i, j;
> 
>      ASSERT(spin_is_locked(&pcidevs_lock));
> 
> @@ -1905,6 +1905,32 @@ static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
>               PCI_BUS(bdf) == pdev->bus &&
>               PCI_DEVFN2(bdf) == devfn )
>          {
> +            if ( rmrr->domid == hardware_domain->domain_id )
> +            {
> +                for_each_rmrr_device ( g_rmrr, g_bdf, j )
> +                {
> +                    if ( g_rmrr->gid == rmrr->gid )
> +                    {
> +                        if ( g_rmrr->domid == hardware_domain->domain_id )
> +                            g_rmrr->domid = pdev->domain->domain_id;
> +                        else if ( g_rmrr->domid != pdev->domain->domain_id )
> +                        {
> +                            rmrr->domid = g_rmrr->domid;
> +                            continue;
> +                        }
> +                    }
> +                }
> +            }
> +
> +            if ( rmrr->domid != pdev->domain->domain_id )
> +            {
> +                domain_context_unmap(pdev->domain, devfn, pdev);
> +                dprintk(XENLOG_ERR VTDPREFIX, "d%d: this is a group device owned by d%d\n",
> +                        pdev->domain->domain_id, rmrr->domid);
> +                rmrr->domid = 0;

0, or release ownership back to the hardware domain (as
intel_iommu_remove_device does below)?
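
If the latter, the remove path already has the pattern; an untested
sketch of that alternative:

    /*
     * Hand ownership back to the hardware domain instead of
     * leaving behind a domid that no domain actually holds.
     */
    rmrr->domid = hardware_domain->domain_id;
    return -EINVAL;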

> +                return -EINVAL;
> +            }
> +
>              ret = rmrr_identity_mapping(pdev->domain, 1, rmrr);
>              if ( ret )
>                  dprintk(XENLOG_ERR VTDPREFIX, "d%d: RMRR mapping failed\n",
> @@ -1946,6 +1972,8 @@ static int intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
>               PCI_DEVFN2(bdf) != devfn )
>              continue;
> 
> +        /* Release ownership back to the hardware domain. */
> +        rmrr->domid = hardware_domain->domain_id;
>          rmrr_identity_mapping(pdev->domain, 0, rmrr);
>      }
> 
> @@ -2104,6 +2132,8 @@ static void __hwdom_init setup_hwdom_rmrr(struct domain *d)
>      spin_lock(&pcidevs_lock);
>      for_each_rmrr_device ( rmrr, bdf, i )
>      {
> +        /* The hardware domain owns all devices initially. */
> +        rmrr->domid = d->domain_id;
>          ret = rmrr_identity_mapping(d, 1, rmrr);
>          if ( ret )
>              dprintk(XENLOG_ERR VTDPREFIX,
> @@ -2273,9 +2303,9 @@ static int reassign_device_ownership(
>  static int intel_iommu_assign_device(
>      struct domain *d, u8 devfn, struct pci_dev *pdev)
>  {
> -    struct acpi_rmrr_unit *rmrr;
> -    int ret = 0, i;
> -    u16 bdf, seg;
> +    struct acpi_rmrr_unit *rmrr, *g_rmrr;
> +    int ret = 0, i, j;
> +    u16 bdf, seg, g_bdf;
>      u8 bus;
> 
>      if ( list_empty(&acpi_drhd_units) )
> @@ -2300,6 +2330,32 @@ static int intel_iommu_assign_device(
>               PCI_BUS(bdf) == bus &&
>               PCI_DEVFN2(bdf) == devfn )
>          {
> +            if ( rmrr->domid == hardware_domain->domain_id )
> +            {
> +                for_each_rmrr_device ( g_rmrr, g_bdf, j )
> +                {
> +                    if ( g_rmrr->gid == rmrr->gid )
> +                    {
> +                        if ( g_rmrr->domid == hardware_domain->domain_id )
> +                            g_rmrr->domid = pdev->domain->domain_id;
> +                        else if ( g_rmrr->domid != pdev->domain->domain_id )
> +                        {
> +                            rmrr->domid = g_rmrr->domid;
> +                            continue;
> +                        }
> +                    }
> +                }
> +            }
> +
> +            if ( rmrr->domid != pdev->domain->domain_id )
> +            {
> +                domain_context_unmap(pdev->domain, devfn, pdev);
> +                dprintk(XENLOG_ERR VTDPREFIX, "d%d: this is a group device owned by d%d\n",
> +                        pdev->domain->domain_id, rmrr->domid);
> +                rmrr->domid = 0;
> +                return -EINVAL;
> +            }
> +
>              ret = rmrr_identity_mapping(d, 1, rmrr);
>              if ( ret )
>              {
> --
> 1.9.1

