[Xen-devel] [V3 PATCH 1/9] PVH dom0: iommu related changes
- For now, an iommu is required for a PVH dom0; check for that.
- For PVH, we need to do mfn_to_gmfn before calling the mapping functions
  intel_iommu_map_page/amd_iommu_map_page, which expect a gfn.
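
For reference (not part of this patch): for a classic PV dom0 the old
map_page(d, mfn, mfn, ...) call was already passing a gfn, because
mfn_to_gmfn collapses to the identity for non-translated guests; a PVH
dom0 is auto-translated, so the gfn has to come from the M2P lookup.
A rough sketch of that distinction, assuming the usual x86
paging_mode_translate()/get_gpfn_from_mfn() helpers (the macro name
below is illustrative only, not a quote of the tree):

    /* Sketch: identity for PV, M2P lookup for translated (PVH/HVM) domains. */
    #define MFN_TO_GMFN_SKETCH(d, mfn)                          \
        (paging_mode_translate(d) ? get_gpfn_from_mfn(mfn)      \
                                  : (mfn))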
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
xen/drivers/passthrough/iommu.c | 17 ++++++++++++++++-
1 files changed, 16 insertions(+), 1 deletions(-)
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 93ad122..f6c7ad6 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -125,10 +125,24 @@ int iommu_domain_init(struct domain *d)
     return hd->platform_ops->init(d);
 }
 
+static __init void check_dom0_pvh_reqs(struct domain *d)
+{
+    if ( !iommu_enabled )
+        panic("Presently, iommu must be enabled for pvh dom0\n");
+
+    if ( iommu_passthrough )
+        panic("For pvh dom0, dom0-passthrough must not be enabled\n");
+
+    iommu_dom0_strict = 1;
+}
+
 void __init iommu_dom0_init(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
 
+    if ( is_pvh_domain(d) )
+        check_dom0_pvh_reqs(d);
+
     if ( !iommu_enabled )
         return;
 
@@ -141,12 +155,13 @@ void __init iommu_dom0_init(struct domain *d)
         page_list_for_each ( page, &d->page_list )
         {
             unsigned long mfn = page_to_mfn(page);
+            unsigned long gfn = mfn_to_gmfn(d, mfn);
             unsigned int mapping = IOMMUF_readable;
             if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
                  ((page->u.inuse.type_info & PGT_type_mask)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
-            hd->platform_ops->map_page(d, mfn, mfn, mapping);
+            hd->platform_ops->map_page(d, gfn, mfn, mapping);
             if ( !(i++ & 0xfffff) )
                 process_pending_softirqs();
         }
--
1.7.2.3