
Re: [Xen-devel] [PATCH] dump_p2m_table: For IOMMU



Hi Santosh,
Please see some sample output below; the gfn still looks incorrect -- it stays fixed across long runs of consecutive leaf entries while the mfn changes.
Thanks,
Wei


(XEN)     gfn: 000000f0  mfn: 001023ac
(XEN)     gfn: 000000f0  mfn: 0023f83d
(XEN)     gfn: 000000f0  mfn: 001023ab
(XEN)     gfn: 000000f0  mfn: 0023f83c
(XEN)     gfn: 000000f0  mfn: 001023aa
(XEN)     gfn: 000000f0  mfn: 0023f83b
(XEN)     gfn: 000000f0  mfn: 001023a9
(XEN)     gfn: 000000f0  mfn: 0023f83a
(XEN)     gfn: 000000f0  mfn: 001023a8
(XEN)     gfn: 000000f0  mfn: 0023f839
(XEN)     gfn: 000000f0  mfn: 001023a7
(XEN)     gfn: 000000f0  mfn: 0023f838
(XEN)     gfn: 000000f0  mfn: 001023a6
(XEN)     gfn: 000000f0  mfn: 0023f837
(XEN)     gfn: 000000f0  mfn: 001023a5
(XEN)     gfn: 000000f0  mfn: 0023f836
(XEN)     gfn: 000000f0  mfn: 001023a4
(XEN)     gfn: 000000f0  mfn: 0023f835
(XEN)     gfn: 000000f0  mfn: 001023a3
(XEN)     gfn: 000000f0  mfn: 0023f834
(XEN)     gfn: 000000f0  mfn: 001023a2
(XEN)     gfn: 000000f0  mfn: 0023f833
(XEN)     gfn: 000000f0  mfn: 001023a1
(XEN)     gfn: 000000f0  mfn: 0023f832
(XEN)     gfn: 000000f0  mfn: 001023a0
(XEN)     gfn: 000000f0  mfn: 0023f831
(XEN)     gfn: 000000f0  mfn: 0010239f
(XEN)     gfn: 000000f0  mfn: 0023f830
(XEN)     gfn: 000000f0  mfn: 0010239e
(XEN)     gfn: 000000f0  mfn: 0023f82f
(XEN)     gfn: 000000f0  mfn: 0010239d
(XEN)     gfn: 000000f0  mfn: 0023f82e
(XEN)     gfn: 000000f0  mfn: 0010239c
(XEN)     gfn: 000000f0  mfn: 0023f82d
(XEN)     gfn: 000000f0  mfn: 0010239b
(XEN)     gfn: 000000f0  mfn: 0023f82c
(XEN)     gfn: 000000f0  mfn: 0010239a
(XEN)     gfn: 000000f0  mfn: 0023f82b
(XEN)     gfn: 000000f0  mfn: 00102399
(XEN)     gfn: 000000f0  mfn: 0023f82a
(XEN)     gfn: 000000f0  mfn: 00102398
(XEN)     gfn: 000000f0  mfn: 0023f829
(XEN)     gfn: 000000f0  mfn: 00102397
(XEN)     gfn: 000000f0  mfn: 0023f828
(XEN)     gfn: 000000f0  mfn: 00102396
(XEN)     gfn: 000000f0  mfn: 0023f827
(XEN)     gfn: 000000f0  mfn: 00102395
(XEN)     gfn: 000000f0  mfn: 0023f826
(XEN)     gfn: 000000f0  mfn: 00102394
(XEN)     gfn: 000000f0  mfn: 0023f825
(XEN)     gfn: 000000f0  mfn: 00102393
(XEN)     gfn: 000000f0  mfn: 0023f824
(XEN)     gfn: 000000f0  mfn: 00102392
(XEN)     gfn: 000000f0  mfn: 0023f823
(XEN)     gfn: 000000f0  mfn: 00102391
(XEN)     gfn: 000000f0  mfn: 0023f822
(XEN)     gfn: 000000f0  mfn: 00102390
(XEN)     gfn: 000000f0  mfn: 0023f821
(XEN)     gfn: 000000f0  mfn: 0010238f
(XEN)     gfn: 000000f0  mfn: 0023f820
(XEN)     gfn: 000000f0  mfn: 0010238e
(XEN)     gfn: 000000f0  mfn: 0023f81f
(XEN)     gfn: 000000f0  mfn: 0010238d
(XEN)     gfn: 000000f0  mfn: 0023f81e
(XEN)     gfn: 000000f0  mfn: 0010238c
(XEN)     gfn: 000000f0  mfn: 0023f81d
(XEN)     gfn: 000000f0  mfn: 0010238b
(XEN)     gfn: 000000f0  mfn: 0023f81c
(XEN)     gfn: 000000f0  mfn: 0010238a
(XEN)     gfn: 000000f0  mfn: 0023f81b
(XEN)     gfn: 000000f0  mfn: 00102389
(XEN)     gfn: 000000f0  mfn: 0023f81a
(XEN)     gfn: 000000f0  mfn: 00102388
(XEN)     gfn: 000000f0  mfn: 0023f819
(XEN)     gfn: 000000f0  mfn: 00102387
(XEN)     gfn: 000000f0  mfn: 0023f818
(XEN)     gfn: 000000f0  mfn: 00102386
(XEN)     gfn: 000000f0  mfn: 0023f817
(XEN)     gfn: 000000f0  mfn: 00102385
(XEN)     gfn: 000000f0  mfn: 0023f816
(XEN)     gfn: 000000f0  mfn: 00102384
(XEN)     gfn: 000000f0  mfn: 0023f815
(XEN)     gfn: 000000f0  mfn: 00102383
(XEN)     gfn: 000000f0  mfn: 0023f814
(XEN)     gfn: 000000f0  mfn: 00102382
(XEN)     gfn: 000000f0  mfn: 0023f813
(XEN)     gfn: 000000f0  mfn: 00102381
(XEN)     gfn: 000000f0  mfn: 0023f812
(XEN)     gfn: 000000f0  mfn: 00102380
(XEN)     gfn: 000000f0  mfn: 0023f811
(XEN)     gfn: 000000f0  mfn: 0010217f
(XEN)     gfn: 000000f0  mfn: 0023f810
(XEN)     gfn: 000000f0  mfn: 0010217e
(XEN)     gfn: 000000f0  mfn: 0023f80f
(XEN)     gfn: 000000f0  mfn: 0010217d
(XEN)     gfn: 000000f0  mfn: 0023f80e
(XEN)     gfn: 000000f0  mfn: 0010217c
(XEN)     gfn: 000000f0  mfn: 0023f80d
(XEN)     gfn: 000000f0  mfn: 0010217b
(XEN)     gfn: 000000f0  mfn: 0023f80c
(XEN)     gfn: 000000f0  mfn: 0010217a
(XEN)     gfn: 000000f0  mfn: 0023f80b
(XEN)     gfn: 000000f0  mfn: 00102179
(XEN)     gfn: 000000fc  mfn: 0023f806
(XEN)     gfn: 000000fc  mfn: 0023f809
(XEN)     gfn: 000000fc  mfn: 001025f1
(XEN)     gfn: 000000fc  mfn: 0023f807
(XEN)     gfn: 000000fc  mfn: 001025f0
(XEN)     gfn: 000000fc  mfn: 001025ef
(XEN)     gfn: 000000fc  mfn: 0023f805
(XEN)     gfn: 000000fc  mfn: 001025ee
(XEN)     gfn: 000000fc  mfn: 0023f804
(XEN)     gfn: 000000fc  mfn: 001025ed
(XEN)     gfn: 000000fc  mfn: 0023f803
(XEN)     gfn: 000000fc  mfn: 001025ec
(XEN)     gfn: 000000fc  mfn: 0023f802
(XEN)     gfn: 000000fc  mfn: 001025eb
(XEN)     gfn: 000000fc  mfn: 0023f801
(XEN)     gfn: 000000fc  mfn: 001025ea
(XEN)     gfn: 000000fc  mfn: 0023f800
(XEN)     gfn: 000000fe  mfn: 000812b0
(XEN)     gfn: 000000fe  mfn: 0010a085
(XEN)     gfn: 000000fe  mfn: 0021f60c
(XEN)     gfn: 000000fe  mfn: 0010a084
(XEN)     gfn: 000000fe  mfn: 0021f60b
(XEN)     gfn: 000000fe  mfn: 0010a083
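
One plausible reading of this output (hedged -- just my guess, to be confirmed): the gfn stays fixed for an entire leaf table and only changes when a higher-level index advances, which is what you would see if the computed address were missing the 12-bit page shift, since PFN_DOWN() then maps a whole leaf table onto a single gfn. A minimal standalone illustration of that collapse (hypothetical values, not Xen code):

    /* Standalone sketch: with 4K pages, entry i of a level-1 table should
     * cover byte address base + (i << 12), giving a distinct gfn per entry.
     * If the per-index offset omits the page shift, the address advances by
     * only i per entry and PFN_DOWN() prints the same gfn for the whole
     * table. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
        uint64_t base = 0xf0000;  /* hypothetical gpa covered by one table */
        uint64_t i;

        for ( i = 0; i < 4; i++ )
        {
            uint64_t with_shift = base + (i << PAGE_SHIFT); /* f0 f1 f2 f3 */
            uint64_t without    = base + i;                 /* f0 f0 f0 f0 */
            printf("i=%llu  gfn(with shift)=%08llx  gfn(without)=%08llx\n",
                   (unsigned long long)i,
                   (unsigned long long)PFN_DOWN(with_shift),
                   (unsigned long long)PFN_DOWN(without));
        }
        return 0;
    }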



On 08/10/2012 09:14 PM, Santosh Jodh wrote:
New key handler 'o' to dump the IOMMU p2m table for each domain.
Skips dumping the table for domain0.
Intel and AMD specific iommu_ops handlers for dumping the p2m table.

Signed-off-by: Santosh Jodh <santosh.jodh@xxxxxxxxxx>
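
(For anyone trying this: the handler can be exercised from the Xen serial console by pressing 'o', or from dom0 with "xl debug-keys o", assuming the xl toolstack; the output then shows up via "xl dmesg".)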

diff -r 472fc515a463 -r 9c7609a4fbc1 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Aug 07 18:37:31 2012 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Aug 10 08:19:58 2012 -0700
@@ -22,6 +22,7 @@
  #include <xen/pci.h>
  #include <xen/pci_regs.h>
  #include <xen/paging.h>
+#include <xen/softirq.h>
  #include <asm/hvm/iommu.h>
  #include <asm/amd-iommu.h>
  #include <asm/hvm/svm/amd-iommu-proto.h>
@@ -512,6 +513,80 @@ static int amd_iommu_group_id(u16 seg, u

  #include <asm/io_apic.h>

+static void amd_dump_p2m_table_level(struct page_info* pg, int level,
+                                     paddr_t gpa, int indent)
+{
+    paddr_t address;
+    void *table_vaddr, *pde;
+    paddr_t next_table_maddr;
+    int index, next_level, present;
+    u32 *entry;
+
+    if ( level < 1 )
+        return;
+
+    table_vaddr = __map_domain_page(pg);
+    if ( table_vaddr == NULL )
+    {
+        printk("Failed to map IOMMU domain page %"PRIpaddr"\n",
+                page_to_maddr(pg));
+        return;
+    }
+
+    for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
+    {
+        if ( !(index % 2) )
+            process_pending_softirqs();
+
+        pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
+        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
+        entry = (u32*)pde;
+
+        present = get_field_from_reg_u32(entry[0],
+                                         IOMMU_PDE_PRESENT_MASK,
+                                         IOMMU_PDE_PRESENT_SHIFT);
+
+        if ( !present )
+            continue;
+
+        next_level = get_field_from_reg_u32(entry[0],
+                                            IOMMU_PDE_NEXT_LEVEL_MASK,
+                                            IOMMU_PDE_NEXT_LEVEL_SHIFT);
+
+        address = gpa + amd_offset_level_address(index, level);
+        if ( (next_table_maddr != 0) && (next_level != 0) )
+        {
+            amd_dump_p2m_table_level(
+                maddr_to_page(next_table_maddr), level - 1,
+                address, indent + 1);
+        }
+        else
+        {
+            int i;
+
+            for ( i = 0; i < indent; i++ )
+                printk("  ");
+
+            printk("gfn: %08lx  mfn: %08lx\n",
+                   (unsigned long)PFN_DOWN(address),
+                   (unsigned long)PFN_DOWN(next_table_maddr));
+        }
+    }
+
+    unmap_domain_page(table_vaddr);
+}
+
+static void amd_dump_p2m_table(struct domain *d)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !hd->root_table )
+        return;
+
+    printk("p2m table has %d levels\n", hd->paging_mode);
+    amd_dump_p2m_table_level(hd->root_table, hd->paging_mode, 0, 0);
+}
+
  const struct iommu_ops amd_iommu_ops = {
      .init = amd_iommu_domain_init,
      .dom0_init = amd_iommu_dom0_init,
@@ -531,4 +606,5 @@ const struct iommu_ops amd_iommu_ops = {
      .resume = amd_iommu_resume,
      .share_p2m = amd_iommu_share_p2m,
      .crash_shutdown = amd_iommu_suspend,
+    .dump_p2m_table = amd_dump_p2m_table,
  };
diff -r 472fc515a463 -r 9c7609a4fbc1 xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Tue Aug 07 18:37:31 2012 +0100
+++ b/xen/drivers/passthrough/iommu.c   Fri Aug 10 08:19:58 2012 -0700
@@ -18,11 +18,13 @@
  #include <asm/hvm/iommu.h>
  #include <xen/paging.h>
  #include <xen/guest_access.h>
+#include <xen/keyhandler.h>
  #include <xen/softirq.h>
  #include <xsm/xsm.h>

  static void parse_iommu_param(char *s);
  static int iommu_populate_page_table(struct domain *d);
+static void iommu_dump_p2m_table(unsigned char key);

  /*
   * The 'iommu' parameter enables the IOMMU.  Optional comma separated
@@ -54,6 +56,12 @@ bool_t __read_mostly amd_iommu_perdev_in

  DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);

+static struct keyhandler iommu_p2m_table = {
+    .diagnostic = 0,
+    .u.fn = iommu_dump_p2m_table,
+    .desc = "dump iommu p2m table"
+};
+
  static void __init parse_iommu_param(char *s)
  {
      char *ss;
@@ -119,6 +127,7 @@ void __init iommu_dom0_init(struct domai
      if ( !iommu_enabled )
          return;

+    register_keyhandler('o', &iommu_p2m_table);
      d->need_iommu = !!iommu_dom0_strict;
      if ( need_iommu(d) )
      {
@@ -654,6 +663,34 @@ int iommu_do_domctl(
      return ret;
  }

+static void iommu_dump_p2m_table(unsigned char key)
+{
+    struct domain *d;
+    const struct iommu_ops *ops;
+
+    if ( !iommu_enabled )
+    {
+        printk("IOMMU not enabled!\n");
+        return;
+    }
+
+    ops = iommu_get_ops();
+    for_each_domain(d)
+    {
+        if ( !d->domain_id )
+            continue;
+
+        if ( iommu_use_hap_pt(d) )
+        {
+            printk("\ndomain%d IOMMU p2m table shared with MMU: \n", 
d->domain_id);
+            continue;
+        }
+
+        printk("\ndomain%d IOMMU p2m table: \n", d->domain_id);
+        ops->dump_p2m_table(d);
+    }
+}
+
  /*
   * Local variables:
   * mode: C
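
One possible hardening for iommu_dump_p2m_table() (my suggestion, untested): ops->dump_p2m_table is called unconditionally, so an iommu_ops instance that leaves the new member NULL would crash the key handler. A guard right after ops = iommu_get_ops() would cover it, e.g.:

    if ( !ops->dump_p2m_table )
    {
        printk("IOMMU p2m table dump not implemented\n");
        return;
    }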
diff -r 472fc515a463 -r 9c7609a4fbc1 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Tue Aug 07 18:37:31 2012 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Aug 10 08:19:58 2012 -0700
@@ -31,6 +31,7 @@
  #include <xen/pci.h>
  #include <xen/pci_regs.h>
  #include <xen/keyhandler.h>
+#include <xen/softirq.h>
  #include <asm/msi.h>
  #include <asm/irq.h>
  #if defined(__i386__) || defined(__x86_64__)
@@ -2365,6 +2366,71 @@ static void vtd_resume(void)
      }
  }

+static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
+                                     int indent)
+{
+    paddr_t address;
+    int i;
+    struct dma_pte *pt_vaddr, *pte;
+    int next_level;
+
+    if ( pt_maddr == 0 )
+        return;
+
+    pt_vaddr = map_vtd_domain_page(pt_maddr);
+    if ( pt_vaddr == NULL )
+    {
+        printk("Failed to map VT-D domain page %"PRIpaddr"\n", pt_maddr);
+        return;
+    }
+
+    next_level = level - 1;
+    for ( i = 0; i < PTE_NUM; i++ )
+    {
+        if ( !(i % 2) )
+            process_pending_softirqs();
+
+        pte = &pt_vaddr[i];
+        if ( !dma_pte_present(*pte) )
+            continue;
+
+        address = gpa + offset_level_address(i, level);
+        if ( next_level >= 1 )
+        {
+            vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level,
+                                     address, indent + 1);
+        }
+        else
+        {
+            int j;
+
+            for ( j = 0; j < indent; j++ )
+                printk("  ");
+
+            printk("gfn: %08lx mfn: %08lx super=%d rd=%d wr=%d\n",
+                   (unsigned long)(address>>  PAGE_SHIFT_4K),
+                   (unsigned long)(pte->val>>  PAGE_SHIFT_4K),
+                   dma_pte_superpage(*pte)? 1 : 0,
+                   dma_pte_read(*pte)? 1 : 0,
+                   dma_pte_write(*pte)? 1 : 0);
+        }
+    }
+
+    unmap_vtd_domain_page(pt_vaddr);
+}
+
+static void vtd_dump_p2m_table(struct domain *d)
+{
+    struct hvm_iommu *hd;
+
+    if ( list_empty(&acpi_drhd_units) )
+        return;
+
+    hd = domain_hvm_iommu(d);
+    printk("p2m table has %d levels\n", agaw_to_level(hd->agaw));
+    vtd_dump_p2m_table_level(hd->pgd_maddr, agaw_to_level(hd->agaw), 0, 0);
+}
+
  const struct iommu_ops intel_iommu_ops = {
      .init = intel_iommu_domain_init,
      .dom0_init = intel_iommu_dom0_init,
@@ -2387,6 +2453,7 @@ const struct iommu_ops intel_iommu_ops =
      .crash_shutdown = vtd_crash_shutdown,
      .iotlb_flush = intel_iommu_iotlb_flush,
      .iotlb_flush_all = intel_iommu_iotlb_flush_all,
+    .dump_p2m_table = vtd_dump_p2m_table,
  };

  /*
diff -r 472fc515a463 -r 9c7609a4fbc1 xen/drivers/passthrough/vtd/iommu.h
--- a/xen/drivers/passthrough/vtd/iommu.h       Tue Aug 07 18:37:31 2012 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.h       Fri Aug 10 08:19:58 2012 -0700
@@ -248,6 +248,8 @@ struct context_entry {
  #define level_to_offset_bits(l) (12 + (l - 1) * LEVEL_STRIDE)
  #define address_level_offset(addr, level) \
              ((addr >> level_to_offset_bits(level)) & LEVEL_MASK)
+#define offset_level_address(offset, level) \
+            ((u64)(offset) << level_to_offset_bits(level))
  #define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
  #define level_size(l) (1 << level_to_offset_bits(l))
  #define align_to_level(addr, l) ((addr + level_size(l) - 1) & level_mask(l))
@@ -277,6 +279,9 @@ struct dma_pte {
  #define dma_set_pte_addr(p, addr) do {\
              (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
  #define dma_pte_present(p) (((p).val & 3) != 0)
+#define dma_pte_superpage(p) (((p).val & (1 << 7)) != 0)
+#define dma_pte_read(p) (((p).val & DMA_PTE_READ) != 0)
+#define dma_pte_write(p) (((p).val & DMA_PTE_WRITE) != 0)

  /* interrupt remap entry */
  struct iremap_entry {
diff -r 472fc515a463 -r 9c7609a4fbc1 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Tue Aug 07 18:37:31 2012 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Fri Aug 10 08:19:58 2012 -0700
@@ -38,6 +38,10 @@
  #define PTE_PER_TABLE_ALLOC(entries)  \
        PAGE_SIZE * (PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT)

+#define amd_offset_level_address(offset, level) \
+       ((u64)(offset) << ((PTE_PER_TABLE_SHIFT * \
+                             (level - IOMMU_PAGING_MODE_LEVEL_1))))
+
  #define PCI_MIN_CAP_OFFSET    0x40
  #define PCI_MAX_CAP_BLOCKS    48
  #define PCI_CAP_PTR_MASK      0xFC
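
This macro looks like the likely culprit for the repeated gfn values in the output above (hedged -- just my reading): unlike level_to_offset_bits() on the VT-d side, which is 12 + (l - 1) * LEVEL_STRIDE, it omits the 12-bit page shift, so the per-index offset is an index rather than a byte address and PFN_DOWN() collapses every entry of a leaf table onto one gfn. A possible fix, assuming PAGE_SHIFT is visible in this header (untested sketch):

    #define amd_offset_level_address(offset, level) \
           ((u64)(offset) << (PAGE_SHIFT + (PTE_PER_TABLE_SHIFT * \
                                 ((level) - IOMMU_PAGING_MODE_LEVEL_1))))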
diff -r 472fc515a463 -r 9c7609a4fbc1 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Tue Aug 07 18:37:31 2012 +0100
+++ b/xen/include/xen/iommu.h   Fri Aug 10 08:19:58 2012 -0700
@@ -141,6 +141,7 @@ struct iommu_ops {
      void (*crash_shutdown)(void);
      void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
      void (*iotlb_flush_all)(struct domain *d);
+    void (*dump_p2m_table)(struct domain *d);
  };

  void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);



