|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 7/7] amd-iommu: add lookup_page method to iommu_ops
This patch adds a new method to the AMD IOMMU implementation to find the
MFN currently mapped by the specified DFN. This is analogous to the
method added for VT-d IOMMU by commit 43d1622b.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Brian Woods <brian.woods@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/drivers/passthrough/amd/iommu_map.c | 50 +++++++++++++++++++++++++++
xen/drivers/passthrough/amd/pci_amd_iommu.c | 1 +
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 2 ++
3 files changed, 53 insertions(+)
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index fecde9d645..309720743f 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -69,6 +69,20 @@ static void get_iommu_pde_info(uint32_t *pde, uint64_t *maddr, bool *iw,
IOMMU_PDE_IO_READ_PERMISSION_SHIFT);
}
+/*
+ * Read the PTE for @dfn from the level-1 page table page @l1_mfn and
+ * extract the mapped address and read/write permissions via
+ * get_iommu_pde_info().
+ *
+ * NOTE(review): assumes @l1_mfn names a valid level-1 table page; a zero
+ * or stale MFN here would be mapped and read — callers must guarantee it.
+ */
+static void get_iommu_pte_info(unsigned long l1_mfn, unsigned long dfn,
+ uint64_t *maddr, bool *iw, bool *ir)
+{
+ uint64_t *table;
+ uint32_t *pde;
+
+ /* Transiently map the table page; must be paired with unmap below. */
+ table = map_domain_page(_mfn(l1_mfn));
+ pde = (uint32_t *)(table + pfn_to_pde_idx(dfn,
+ IOMMU_PAGING_MODE_LEVEL_1));
+
+ get_iommu_pde_info(pde, maddr, iw, ir);
+ unmap_domain_page(table);
+}
+
static bool set_iommu_pde_info(uint32_t *pde, uint64_t maddr, bool iw,
bool ir)
{
@@ -793,6 +807,42 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn)
return 0;
}
+/*
+ * iommu_ops.lookup_page implementation for the AMD IOMMU: return the MFN
+ * and IOMMUF_* permission flags currently mapped at @dfn in domain @d's
+ * IOMMU page tables.
+ *
+ * Returns 0 on success, -EOPNOTSUPP when the IOMMU shares the HAP page
+ * tables (no separate tables to walk) or no root table exists, or an
+ * error propagated from iommu_pde_from_dfn().
+ */
+int amd_iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
+ unsigned int *flags)
+{
+ unsigned long pt_mfn[7] = {};
+ struct domain_iommu *hd = dom_iommu(d);
+ uint64_t maddr;
+ bool iw, ir;
+ int rc;
+
+ if ( iommu_use_hap_pt(d) )
+ return -EOPNOTSUPP;
+
+ /* The mapping lock serializes against concurrent map/unmap. */
+ spin_lock(&hd->arch.mapping_lock);
+
+ if ( !hd->arch.root_table )
+ {
+ spin_unlock(&hd->arch.mapping_lock);
+ return -EOPNOTSUPP;
+ }
+
+ rc = iommu_pde_from_dfn(d, dfn_x(dfn), pt_mfn);
+ if ( rc )
+ {
+ spin_unlock(&hd->arch.mapping_lock);
+ return rc;
+ }
+
+ /*
+ * NOTE(review): pt_mfn[1] is not checked for zero before use. If the
+ * walk succeeded without populating a level-1 table this would map and
+ * read MFN 0 — confirm iommu_pde_from_dfn() guarantees a non-zero
+ * pt_mfn[1] on rc == 0, or add an explicit check here.
+ */
+ get_iommu_pte_info(pt_mfn[1], dfn_x(dfn), &maddr, &iw, &ir);
+ spin_unlock(&hd->arch.mapping_lock);
+
+ *mfn = _mfn(maddr >> PAGE_SHIFT);
+ *flags = (iw ? IOMMUF_writable : 0) | (ir ? IOMMUF_readable : 0);
+
+ return 0;
+}
+
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
uint64_t phys_addr,
unsigned long size, int iw, int ir)
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 5e99b6988e..86b97d7eaf 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -616,6 +616,7 @@ const struct iommu_ops amd_iommu_ops = {
.teardown = amd_iommu_domain_destroy,
.map_page = amd_iommu_map_page,
.unmap_page = amd_iommu_unmap_page,
+ .lookup_page = amd_iommu_lookup_page,
.free_page_table = deallocate_page_table,
.reassign_device = reassign_device,
.get_device_group_id = amd_iommu_group_id,
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 3083d625bd..d451acc28c 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -55,6 +55,8 @@ int amd_iommu_update_ivrs_mapping_acpi(void);
int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
mfn_t mfn, unsigned int flags);
int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn);
+int __must_check amd_iommu_lookup_page(struct domain *d, dfn_t dfn,
+ mfn_t *mfn, unsigned int *flags);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |