[PATCH 5/6] iommu: remove the share_p2m operation
From: Paul Durrant <pdurrant@xxxxxxxxxx>
Sharing of HAP tables is VT-d specific, so the operation is never defined for
the AMD IOMMU. There is also no need to proactively set vtd.pgd_maddr when
using shared EPT, since it is straightforward to define a helper function that
returns the appropriate value in both the shared and non-shared cases.
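[Note: the following is an illustrative standalone sketch, not part of the
patch. The names and types (mock_domain, mock_alloc_root,
mock_domain_pgd_maddr) are stand-ins for Xen's real structures; the real
helper is domain_pgd_maddr() in the VT-d diff below. It only demonstrates the
two cases: hand back the EPT root when page tables are shared with the IOMMU,
otherwise lazily allocate and return a separate root.]

/* Illustrative sketch only: mock types, not Xen code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_domain {
    int      use_shared_ept;  /* stands in for iommu_use_hap_pt(d) */
    uint64_t ept_root;        /* stands in for the host p2m root */
    uint64_t vtd_pgd_maddr;   /* stands in for hd->arch.vtd.pgd_maddr */
};

/* Non-shared case: allocate a root table on first use (stand-in). */
static uint64_t mock_alloc_root(struct mock_domain *d)
{
    if ( !d->vtd_pgd_maddr )
        d->vtd_pgd_maddr = (uint64_t)(uintptr_t)aligned_alloc(4096, 4096);

    return d->vtd_pgd_maddr;
}

/* Shared case: return the EPT root directly; otherwise lazily allocate. */
static uint64_t mock_domain_pgd_maddr(struct mock_domain *d)
{
    if ( d->use_shared_ept )
        return d->ept_root;

    return mock_alloc_root(d);
}

int main(void)
{
    struct mock_domain shared = { .use_shared_ept = 1, .ept_root = 0x1000 };
    struct mock_domain separate = { 0 };

    printf("shared root:   %#" PRIx64 "\n", mock_domain_pgd_maddr(&shared));
    printf("separate root: %#" PRIx64 "\n", mock_domain_pgd_maddr(&separate));

    return 0;
}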
Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Wei Liu <wl@xxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
---
xen/arch/x86/mm/p2m.c               |  3 --
xen/drivers/passthrough/iommu.c     |  8 -----
xen/drivers/passthrough/vtd/iommu.c | 55 ++++++++++++++---------------
xen/include/xen/iommu.h             |  3 --
4 files changed, 27 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c5f52a4118..95b5055648 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -726,9 +726,6 @@ int p2m_alloc_table(struct p2m_domain *p2m)
 
     p2m->phys_table = pagetable_from_mfn(top_mfn);
 
-    if ( hap_enabled(d) )
-        iommu_share_p2m_table(d);
-
     p2m_unlock(p2m);
     return 0;
 }
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index f32d8e25a8..6a3803ff2c 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -478,14 +478,6 @@ int iommu_do_domctl(
     return ret;
 }
 
-void iommu_share_p2m_table(struct domain* d)
-{
-    ASSERT(hap_enabled(d));
-
-    if ( iommu_use_hap_pt(d) )
-        iommu_get_ops()->share_p2m(d);
-}
-
 void iommu_crash_shutdown(void)
 {
     if ( !iommu_crash_disable )
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 149d7122c3..d09ca3fb3d 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -313,6 +313,26 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
     return pte_maddr;
 }
 
+static u64 domain_pgd_maddr(struct domain *d)
+{
+    struct domain_iommu *hd = dom_iommu(d);
+
+    ASSERT(spin_is_locked(&hd->arch.mapping_lock));
+
+    if ( iommu_use_hap_pt(d) )
+    {
+        mfn_t pgd_mfn =
+            pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
+
+        return pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+    }
+
+    if ( !hd->arch.vtd.pgd_maddr )
+        addr_to_dma_page_maddr(d, 0, 1);
+
+    return hd->arch.vtd.pgd_maddr;
+}
+
 static void iommu_flush_write_buffer(struct vtd_iommu *iommu)
 {
     u32 val;
@@ -1347,22 +1367,17 @@ int domain_context_mapping_one(
     {
         spin_lock(&hd->arch.mapping_lock);
 
-        /* Ensure we have pagetables allocated down to leaf PTE. */
-        if ( hd->arch.vtd.pgd_maddr == 0 )
+        pgd_maddr = domain_pgd_maddr(domain);
+        if ( !pgd_maddr )
         {
-            addr_to_dma_page_maddr(domain, 0, 1);
-            if ( hd->arch.vtd.pgd_maddr == 0 )
-            {
-            nomem:
-                spin_unlock(&hd->arch.mapping_lock);
-                spin_unlock(&iommu->lock);
-                unmap_vtd_domain_page(context_entries);
-                return -ENOMEM;
-            }
+        nomem:
+            spin_unlock(&hd->arch.mapping_lock);
+            spin_unlock(&iommu->lock);
+            unmap_vtd_domain_page(context_entries);
+            return -ENOMEM;
         }
 
         /* Skip top levels of page tables for 2- and 3-level DRHDs. */
-        pgd_maddr = hd->arch.vtd.pgd_maddr;
         for ( agaw = level_to_agaw(4);
               agaw != level_to_agaw(iommu->nr_pt_levels);
               agaw-- )
@@ -1727,9 +1742,6 @@ static void iommu_domain_teardown(struct domain *d)
 
     ASSERT(is_iommu_enabled(d));
 
-    if ( iommu_use_hap_pt(d) )
-        return;
-
     hd->arch.vtd.pgd_maddr = 0;
 }
 
@@ -1821,18 +1833,6 @@ static int __init vtd_ept_page_compatible(struct vtd_iommu *iommu)
            (ept_has_1gb(ept_cap) && opt_hap_1gb) <= cap_sps_1gb(vtd_cap);
 }
 
-/*
- * set VT-d page table directory to EPT table if allowed
- */
-static void iommu_set_pgd(struct domain *d)
-{
-    mfn_t pgd_mfn;
-
-    pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
-    dom_iommu(d)->arch.vtd.pgd_maddr =
-        pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
-}
-
 static int rmrr_identity_mapping(struct domain *d, bool_t map,
                                  const struct acpi_rmrr_unit *rmrr,
                                  u32 flag)
@@ -2682,7 +2682,6 @@ static struct iommu_ops __initdata vtd_ops = {
     .adjust_irq_affinities = adjust_vtd_irq_affinities,
     .suspend = vtd_suspend,
     .resume = vtd_resume,
-    .share_p2m = iommu_set_pgd,
     .crash_shutdown = vtd_crash_shutdown,
     .iotlb_flush = iommu_flush_iotlb_pages,
     .iotlb_flush_all = iommu_flush_iotlb_all,
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index ec639ba128..cc122fd10b 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -266,7 +266,6 @@ struct iommu_ops {
 
     int __must_check (*suspend)(void);
     void (*resume)(void);
-    void (*share_p2m)(struct domain *d);
     void (*crash_shutdown)(void);
     int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                     unsigned int page_count,
@@ -343,8 +342,6 @@ void iommu_resume(void);
 void iommu_crash_shutdown(void);
 int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
 
-void iommu_share_p2m_table(struct domain *d);
-
 #ifdef CONFIG_HAS_PCI
 int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
                         XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
--
2.20.1