IOMMU: only try to share IOMMU and HAP tables for domains with P2M. This makes the check more precise, and brings VT-d in line with AMD code. Signed-off-by: Tim Deegan diff -r e856f75d327c xen/include/xen/iommu.h --- a/xen/include/xen/iommu.h Mon Aug 15 11:22:52 2011 +0100 +++ b/xen/include/xen/iommu.h Mon Aug 15 11:24:18 2011 +0100 @@ -33,6 +33,9 @@ extern bool_t iommu_snoop, iommu_qinval, extern bool_t iommu_hap_pt_share; extern bool_t iommu_debug; +/* Does this domain have a P2M table we can use as its IOMMU pagetable? */ +#define iommu_use_hap_pt(d) (paging_mode_hap(d) && iommu_hap_pt_share) + extern struct rangeset *mmio_ro_ranges; #define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu) diff -r e856f75d327c xen/drivers/passthrough/amd/iommu_map.c --- a/xen/drivers/passthrough/amd/iommu_map.c Mon Aug 15 11:22:52 2011 +0100 +++ b/xen/drivers/passthrough/amd/iommu_map.c Mon Aug 15 11:24:18 2011 +0100 @@ -581,7 +581,7 @@ int amd_iommu_map_page(struct domain *d, BUG_ON( !hd->root_table ); - if ( iommu_hap_pt_share && is_hvm_domain(d) ) + if ( iommu_use_hap_pt(d) ) return 0; spin_lock(&hd->mapping_lock); @@ -624,7 +624,7 @@ int amd_iommu_unmap_page(struct domain * BUG_ON( !hd->root_table ); - if ( iommu_hap_pt_share && is_hvm_domain(d) ) + if ( iommu_use_hap_pt(d) ) return 0; spin_lock(&hd->mapping_lock); @@ -723,7 +723,7 @@ void amd_iommu_share_p2m(struct domain * ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled ); - if ( !iommu_hap_pt_share ) + if ( !iommu_use_hap_pt(d) ) return; pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))); diff -r e856f75d327c xen/drivers/passthrough/amd/pci_amd_iommu.c --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Mon Aug 15 11:22:52 2011 +0100 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Mon Aug 15 11:24:18 2011 +0100 @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -359,7 +360,7 @@ static void deallocate_iommu_page_tables { struct hvm_iommu *hd = 
domain_hvm_iommu(d); - if ( iommu_hap_pt_share ) + if ( iommu_use_hap_pt(d) ) return; spin_lock(&hd->mapping_lock); diff -r e856f75d327c xen/drivers/passthrough/iommu.c --- a/xen/drivers/passthrough/iommu.c Mon Aug 15 11:22:52 2011 +0100 +++ b/xen/drivers/passthrough/iommu.c Mon Aug 15 11:24:18 2011 +0100 @@ -177,7 +177,7 @@ int assign_device(struct domain *d, u8 b if ( has_arch_pdevs(d) && !need_iommu(d) ) { d->need_iommu = 1; - if ( !iommu_hap_pt_share ) + if ( !iommu_use_hap_pt(d) ) rc = iommu_populate_page_table(d); goto done; } diff -r e856f75d327c xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.c Mon Aug 15 11:22:52 2011 +0100 +++ b/xen/drivers/passthrough/vtd/iommu.c Mon Aug 15 11:24:18 2011 +0100 @@ -1613,7 +1613,7 @@ void iommu_domain_teardown(struct domain if ( list_empty(&acpi_drhd_units) ) return; - if ( iommu_hap_pt_share ) + if ( iommu_use_hap_pt(d) ) return; spin_lock(&hd->mapping_lock); @@ -1635,7 +1635,7 @@ static int intel_iommu_map_page( int iommu_domid; /* Do nothing if VT-d shares EPT page table */ - if ( iommu_hap_pt_share ) + if ( iommu_use_hap_pt(d) ) return 0; /* do nothing if dom0 and iommu supports pass thru */ @@ -1760,7 +1760,7 @@ void iommu_set_pgd(struct domain *d) ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled ); - if ( !iommu_hap_pt_share ) + if ( !iommu_use_hap_pt(d) ) return; pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));