
Re: [Xen-devel] [PATCH] VT-d: don't crash when PTE bits 52 and up are non-zero


  • To: Jan Beulich <JBeulich@xxxxxxxx>, xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • Date: Tue, 23 Dec 2014 06:52:14 +0000
  • Accept-language: en-US
  • Cc: "Zhang, Yang Z" <yang.z.zhang@xxxxxxxxx>
  • Delivery-date: Tue, 23 Dec 2014 06:53:39 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: AQHQG36pw80e8IN0Eke5kWNKFh6X6Jycwptw
  • Thread-topic: [PATCH] VT-d: don't crash when PTE bits 52 and up are non-zero

> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: Friday, December 19, 2014 7:26 PM
> 
> This can (and will) be legitimately the case when sharing page tables
> with EPT (more of a problem before p2m_access_rwx became zero, but
> still possible even now when other than that is the default for a
> guest), leading to an unconditional crash (in print_vtd_entries())
> when a DMA remapping fault occurs.

could you elaborate on the scenarios in which bits 52 and up end up non-zero?

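Just to confirm my reading of the dma_pte_addr() change, here is a quick
standalone sketch (my own illustration only, not the Xen code; the two masks
are redefined locally to mirror the Xen values as I read them):

    /* sketch: effect of adding PADDR_MASK to dma_pte_addr() */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK_4K  (~0xfffULL)          /* clears bits 0..11 only */
    #define PADDR_MASK    ((1ULL << 52) - 1)   /* keeps bits 0..51       */

    int main(void)
    {
        /* hypothetical entry: MFN 0x12345 plus non-zero bits above 51 */
        uint64_t pte = (0x12345ULL << 12) | (0x5ULL << 52);

        printf("old: %#llx\n",
               (unsigned long long)(pte & PAGE_MASK_4K));
        printf("new: %#llx\n",
               (unsigned long long)(pte & PADDR_MASK & PAGE_MASK_4K));
        return 0;
    }

With only PAGE_MASK_4K applied, whatever sits in bits 52 and up (the
software-defined bits you mention for the shared-EPT case) leaks into the
returned address; masking with PADDR_MASK as well keeps bits 12..51 only.
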
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

but the changes look reasonable to me.

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
> Regarding the release, if the changes to iommu.c are deemed too
> extensive, then _I think_ they could be split off (that code ought to
> come into play only when not sharing page tables between VT-d and EPT,
> and VT-d code never sets the offending bits to non-zero values afaict).
> 
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -258,8 +258,7 @@ static u64 addr_to_dma_page_maddr(struct
>      struct dma_pte *parent, *pte = NULL;
>      int level = agaw_to_level(hd->arch.agaw);
>      int offset;
> -    u64 pte_maddr = 0, maddr;
> -    u64 *vaddr = NULL;
> +    u64 pte_maddr = 0;
> 
>      addr &= (((u64)1) << addr_width) - 1;
>      ASSERT(spin_is_locked(&hd->arch.mapping_lock));
> @@ -281,19 +280,19 @@ static u64 addr_to_dma_page_maddr(struct
>          offset = address_level_offset(addr, level);
>          pte = &parent[offset];
> 
> -        if ( dma_pte_addr(*pte) == 0 )
> +        pte_maddr = dma_pte_addr(*pte);
> +        if ( !pte_maddr )
>          {
>              if ( !alloc )
>                  break;
> 
>              pdev = pci_get_pdev_by_domain(domain, -1, -1, -1);
>              drhd = acpi_find_matched_drhd_unit(pdev);
> -            maddr = alloc_pgtable_maddr(drhd, 1);
> -            if ( !maddr )
> +            pte_maddr = alloc_pgtable_maddr(drhd, 1);
> +            if ( !pte_maddr )
>                  break;
> 
> -            dma_set_pte_addr(*pte, maddr);
> -            vaddr = map_vtd_domain_page(maddr);
> +            dma_set_pte_addr(*pte, pte_maddr);
> 
>              /*
>               * high level table always sets r/w, last level
> @@ -303,21 +302,12 @@ static u64 addr_to_dma_page_maddr(struct
>              dma_set_pte_writable(*pte);
>              iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
>          }
> -        else
> -        {
> -            vaddr = map_vtd_domain_page(pte->val);
> -        }
> 
>          if ( level == 2 )
> -        {
> -            pte_maddr = pte->val & PAGE_MASK_4K;
> -            unmap_vtd_domain_page(vaddr);
>              break;
> -        }
> 
>          unmap_vtd_domain_page(parent);
> -        parent = (struct dma_pte *)vaddr;
> -        vaddr = NULL;
> +        parent = map_vtd_domain_page(pte_maddr);
>          level--;
>      }
> 
> @@ -2449,7 +2439,7 @@ static void vtd_dump_p2m_table_level(pad
>              printk("%*sgfn: %08lx mfn: %08lx\n",
>                     indent, "",
>                     (unsigned long)(address >> PAGE_SHIFT_4K),
> -                   (unsigned long)(pte->val >> PAGE_SHIFT_4K));
> +                   (unsigned long)(dma_pte_addr(*pte) >> PAGE_SHIFT_4K));
>      }
> 
>      unmap_vtd_domain_page(pt_vaddr);
> --- a/xen/drivers/passthrough/vtd/iommu.h
> +++ b/xen/drivers/passthrough/vtd/iommu.h
> @@ -276,7 +276,7 @@ struct dma_pte {
>  #define dma_set_pte_snp(p)  do {(p).val |= DMA_PTE_SNP;} while(0)
>  #define dma_set_pte_prot(p, prot) \
>              do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
> -#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
> +#define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
>  #define dma_set_pte_addr(p, addr) do {\
>              (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
>  #define dma_pte_present(p) (((p).val & 3) != 0)
> --- a/xen/drivers/passthrough/vtd/utils.c
> +++ b/xen/drivers/passthrough/vtd/utils.c
> @@ -170,16 +170,16 @@ void print_vtd_entries(struct iommu *iom
>          l_index = get_level_index(gmfn, level);
>          printk("    l%d_index = %x\n", level, l_index);
> 
> -        pte.val = val = l[l_index];
> +        pte.val = l[l_index];
>          unmap_vtd_domain_page(l);
> -        printk("    l%d[%x] = %"PRIx64"\n", level, l_index, val);
> +        printk("    l%d[%x] = %"PRIx64"\n", level, l_index, pte.val);
> 
> -        pte.val = val;
>          if ( !dma_pte_present(pte) )
>          {
>              printk("    l%d[%x] not present\n", level, l_index);
>              break;
>          }
> +        val = dma_pte_addr(pte);
>      } while ( --level );
>  }
> 
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

