RE: [PATCH v4 11/14] iommu: stop calling IOMMU page tables 'p2m tables'
> From: Paul Durrant <paul@xxxxxxx>
> Sent: Tuesday, August 4, 2020 9:42 PM
>
> From: Paul Durrant <pdurrant@xxxxxxxxxx>
>
> It's confusing and not consistent with the terminology introduced with
> 'dfn_t'. Just call them IOMMU page tables.
>
> Also remove a pointless check of the 'acpi_drhd_units' list in
> vtd_dump_page_tables(). If the list is empty then IOMMU mappings would
> not have been enabled for the domain in the first place.
>
> NOTE: All calls to printk() have also been removed from
>       iommu_dump_page_tables(); the implementation-specific code is now
>       responsible for all output.
>       The check for the global 'iommu_enabled' has also been replaced by
>       an ASSERT since iommu_dump_page_tables() is not registered as a key
>       handler unless IOMMU mappings are enabled.
>
> Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>

with Jan's comments addressed:

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Paul Durrant <paul@xxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
>
> v2:
>  - Moved all output into implementation specific code
> ---
>  xen/drivers/passthrough/amd/pci_amd_iommu.c | 16 ++++++-------
>  xen/drivers/passthrough/iommu.c             | 21 ++++-------------
>  xen/drivers/passthrough/vtd/iommu.c         | 26 +++++++++++----------
>  xen/include/xen/iommu.h                     |  2 +-
>  4 files changed, 28 insertions(+), 37 deletions(-)
>
> diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> index 3390c22cf3..be578607b1 100644
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -491,8 +491,8 @@ static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
>
>  #include <asm/io_apic.h>
>
> -static void amd_dump_p2m_table_level(struct page_info* pg, int level,
> -                                     paddr_t gpa, int indent)
> +static void amd_dump_page_table_level(struct page_info* pg, int level,
> +                                      paddr_t gpa, int indent)
>  {
>      paddr_t address;
>      struct amd_iommu_pte *table_vaddr;
> @@ -529,7 +529,7 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
>
>          address = gpa + amd_offset_level_address(index, level);
>          if ( pde->next_level >= 1 )
> -            amd_dump_p2m_table_level(
> +            amd_dump_page_table_level(
>                  mfn_to_page(_mfn(pde->mfn)), pde->next_level,
>                  address, indent + 1);
>          else
> @@ -542,16 +542,16 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
>      unmap_domain_page(table_vaddr);
>  }
>
> -static void amd_dump_p2m_table(struct domain *d)
> +static void amd_dump_page_tables(struct domain *d)
>  {
>      const struct domain_iommu *hd = dom_iommu(d);
>
>      if ( !hd->arch.amd.root_table )
>          return;
>
> -    printk("p2m table has %u levels\n", hd->arch.amd.paging_mode);
> -    amd_dump_p2m_table_level(hd->arch.amd.root_table,
> -                             hd->arch.amd.paging_mode, 0, 0);
> +    printk("AMD IOMMU table has %u levels\n", hd->arch.amd.paging_mode);
> +    amd_dump_page_table_level(hd->arch.amd.root_table,
> +                              hd->arch.amd.paging_mode, 0, 0);
>  }
>
>  static const struct iommu_ops __initconstrel _iommu_ops = {
> @@ -578,7 +578,7 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
>      .suspend = amd_iommu_suspend,
>      .resume = amd_iommu_resume,
>      .crash_shutdown = amd_iommu_crash_shutdown,
> -    .dump_p2m_table = amd_dump_p2m_table,
> +    .dump_page_tables = amd_dump_page_tables,
>  };
>
>  static const struct iommu_init_ops __initconstrel _iommu_init_ops = {
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index 7464f10d1c..0f468379e1 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -22,7 +22,7 @@
>  #include <xen/keyhandler.h>
>  #include <xsm/xsm.h>
>
> -static void iommu_dump_p2m_table(unsigned char key);
> +static void iommu_dump_page_tables(unsigned char key);
>
>  unsigned int __read_mostly iommu_dev_iotlb_timeout = 1000;
>  integer_param("iommu_dev_iotlb_timeout", iommu_dev_iotlb_timeout);
> @@ -212,7 +212,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
>      if ( !is_iommu_enabled(d) )
>          return;
>
> -    register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
> +    register_keyhandler('o', &iommu_dump_page_tables, "dump iommu page tables", 0);
>
>      hd->platform_ops->hwdom_init(d);
>  }
> @@ -533,16 +533,12 @@ bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
>      return is_iommu_enabled(d) && test_bit(feature, dom_iommu(d)->features);
>  }
>
> -static void iommu_dump_p2m_table(unsigned char key)
> +static void iommu_dump_page_tables(unsigned char key)
>  {
>      struct domain *d;
>      const struct iommu_ops *ops;
>
> -    if ( !iommu_enabled )
> -    {
> -        printk("IOMMU not enabled!\n");
> -        return;
> -    }
> +    ASSERT(iommu_enabled);
>
>      ops = iommu_get_ops();
>
> @@ -553,14 +549,7 @@ static void iommu_dump_p2m_table(unsigned char key)
>          if ( is_hardware_domain(d) || !is_iommu_enabled(d) )
>              continue;
>
> -        if ( iommu_use_hap_pt(d) )
> -        {
> -            printk("\ndomain%d IOMMU p2m table shared with MMU: \n", d->domain_id);
> -            continue;
> -        }
> -
> -        printk("\ndomain%d IOMMU p2m table: \n", d->domain_id);
> -        ops->dump_p2m_table(d);
> +        ops->dump_page_tables(d);
>      }
>
>      rcu_read_unlock(&domlist_read_lock);
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index a532d9e88c..f8da4fe0e7 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -2582,8 +2582,8 @@ static void vtd_resume(void)
>      }
>  }
>
> -static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
> -                                     int indent)
> +static void vtd_dump_page_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
> +                                      int indent)
>  {
>      paddr_t address;
>      int i;
> @@ -2612,8 +2612,8 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
>
>          address = gpa + offset_level_address(i, level);
>          if ( next_level >= 1 )
> -            vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level,
> -                                     address, indent + 1);
> +            vtd_dump_page_table_level(dma_pte_addr(*pte), next_level,
> +                                      address, indent + 1);
>          else
>              printk("%*sdfn: %08lx mfn: %08lx\n",
>                     indent, "",
> @@ -2624,17 +2624,19 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
>      unmap_vtd_domain_page(pt_vaddr);
>  }
>
> -static void vtd_dump_p2m_table(struct domain *d)
> +static void vtd_dump_page_tables(struct domain *d)
>  {
> -    const struct domain_iommu *hd;
> +    const struct domain_iommu *hd = dom_iommu(d);
>
> -    if ( list_empty(&acpi_drhd_units) )
> +    if ( iommu_use_hap_pt(d) )
> +    {
> +        printk("VT-D sharing EPT table\n");
>          return;
> +    }
>
> -    hd = dom_iommu(d);
> -    printk("p2m table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
> -    vtd_dump_p2m_table_level(hd->arch.vtd.pgd_maddr,
> -                             agaw_to_level(hd->arch.vtd.agaw), 0, 0);
> +    printk("VT-D table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
> +    vtd_dump_page_table_level(hd->arch.vtd.pgd_maddr,
> +                              agaw_to_level(hd->arch.vtd.agaw), 0, 0);
>  }
>
>  static int __init intel_iommu_quarantine_init(struct domain *d)
> @@ -2734,7 +2736,7 @@ static struct iommu_ops __initdata vtd_ops = {
>      .iotlb_flush = iommu_flush_iotlb_pages,
>      .iotlb_flush_all = iommu_flush_iotlb_all,
>      .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
> -    .dump_p2m_table = vtd_dump_p2m_table,
> +    .dump_page_tables = vtd_dump_page_tables,
>  };
>
>  const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index 1f25d2082f..23e884f54b 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -277,7 +277,7 @@ struct iommu_ops {
>                                         unsigned int flush_flags);
>      int __must_check (*iotlb_flush_all)(struct domain *d);
>      int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
> -    void (*dump_p2m_table)(struct domain *d);
> +    void (*dump_page_tables)(struct domain *d);
>
>  #ifdef CONFIG_HAS_DEVICE_TREE
>      /*
> --
> 2.20.1
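A side note on why the ASSERT(iommu_enabled) is sound: the 'o' keyhandler is
only registered from iommu_hwdom_init(), after the is_iommu_enabled() check,
so the handler cannot run unless the IOMMU is enabled. Below is a minimal
standalone C sketch of that register-then-dispatch pattern. It is not Xen
code; the names key_handler and dump_page_tables are illustrative stand-ins
for Xen's keyhandler table and the real handler.

/*
 * Sketch: a handler registered only under a condition may ASSERT that
 * condition instead of re-checking it at run time.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool iommu_enabled = true;          /* stands in for Xen's global */
static void (*key_handler)(unsigned char); /* stands in for the key table */

static void dump_page_tables(unsigned char key)
{
    /* Reachable only if the registration below happened. */
    assert(iommu_enabled);
    printf("dumping IOMMU page tables (key '%c')\n", key);
}

int main(void)
{
    if ( iommu_enabled )         /* mirrors the guard in iommu_hwdom_init() */
        key_handler = dump_page_tables;

    if ( key_handler )           /* mirrors the 'o' debug-key dispatch */
        key_handler('o');

    return 0;
}

In a running system the dump is triggered via the 'o' debug key (e.g.
"xl debug-keys o", with the output visible in "xl dmesg").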