[Xen-devel] [PATCH v2 07/13] iommu: track reserved ranges using a rangeset
Ranges that should be considered reserved in the IOMMU are not necessarily
limited to RMRRs. If iommu_inclusive_mapping is set then any frame number
falling within an E820 reserved region should also be considered reserved
in the IOMMU.
This patch adds a rangeset to the domain_iommu structure that is then used
to track all reserved ranges. This will be needed by a subsequent patch
to test whether it is safe to modify a particular IOMMU entry.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
v2:
- New in v2.
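
For reviewers, an illustrative sketch (not part of this patch) of how a
subsequent patch might consult the rangeset before modifying an entry.
iommu_modify_entry() is a hypothetical helper invented for illustration;
the rangeset calls are the existing API from xen/include/xen/rangeset.h:

  /* Hypothetical sketch only: refuse to remap a reserved BFN. */
  static int iommu_modify_entry(struct domain *d, bfn_t bfn, mfn_t mfn,
                                unsigned int flags)
  {
      const struct domain_iommu *hd = dom_iommu(d);

      /* BFNs recorded in reserved_ranges must not be modified. */
      if ( rangeset_contains_singleton(hd->reserved_ranges, bfn_x(bfn)) )
          return -EPERM;

      return iommu_map_page(d, bfn, mfn, flags);
  }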
---
xen/drivers/passthrough/iommu.c | 10 +++++++++-
xen/drivers/passthrough/vtd/iommu.c | 20 +++++++++++++-------
xen/drivers/passthrough/vtd/x86/vtd.c | 24 ++++++++++++++++++++----
xen/include/xen/iommu.h | 6 ++++++
4 files changed, 48 insertions(+), 12 deletions(-)
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 7e59211a59..3fbd3ebaf6 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -147,6 +147,10 @@ int iommu_domain_init(struct domain *d)
     if ( !iommu_enabled )
         return 0;
 
+    hd->reserved_ranges = rangeset_new(d, NULL, 0);
+    if ( !hd->reserved_ranges )
+        return -ENOMEM;
+
     hd->platform_ops = iommu_get_ops();
     return hd->platform_ops->init(d);
 }
@@ -249,12 +253,16 @@ int iommu_construct(struct domain *d)
 
 void iommu_domain_destroy(struct domain *d)
 {
-    if ( !iommu_enabled || !dom_iommu(d)->platform_ops )
+    const struct domain_iommu *hd = dom_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops )
         return;
 
     iommu_teardown(d);
 
     arch_iommu_domain_destroy(d);
+
+    rangeset_destroy(hd->reserved_ranges);
 }
 
 int iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 2aa97c9efe..7cd3813b9f 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1911,6 +1911,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
     unsigned long end_pfn = PAGE_ALIGN_4K(rmrr->end_address) >> PAGE_SHIFT_4K;
     struct mapped_rmrr *mrmrr;
     struct domain_iommu *hd = dom_iommu(d);
+    int err = 0;
 
     ASSERT(pcidevs_locked());
     ASSERT(rmrr->base_address < rmrr->end_address);
@@ -1924,8 +1925,6 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
         if ( mrmrr->base == rmrr->base_address &&
              mrmrr->end == rmrr->end_address )
         {
-            int ret = 0;
-
             if ( map )
             {
                 ++mrmrr->count;
@@ -1935,28 +1934,35 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
             if ( --mrmrr->count )
                 return 0;
 
-            while ( base_pfn < end_pfn )
+            err = rangeset_remove_range(hd->reserved_ranges,
+                                        base_pfn, end_pfn);
+            while ( !err && base_pfn < end_pfn )
             {
                 if ( clear_identity_p2m_entry(d, base_pfn) )
-                    ret = -ENXIO;
+                    err = -ENXIO;
+
                 base_pfn++;
             }
 
             list_del(&mrmrr->list);
             xfree(mrmrr);
-            return ret;
+            return err;
         }
     }
 
     if ( !map )
         return -ENOENT;
 
+    err = rangeset_add_range(hd->reserved_ranges, base_pfn, end_pfn);
+    if ( err )
+        return err;
+
     while ( base_pfn < end_pfn )
     {
-        int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
-
+        err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
         if ( err )
             return err;
+
         base_pfn++;
     }
 
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index dc37dce4b6..d65d3c60fb 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -155,16 +155,32 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
         {
-            int ret = iommu_map_page(d, _bfn(pfn * tmp + j),
-                                     _mfn(pfn * tmp + j),
-                                     IOMMUF_readable|IOMMUF_writable);
+            bfn_t bfn = _bfn(pfn * tmp + j);
+            mfn_t mfn = _mfn(pfn * tmp + j);
+            int ret = iommu_map_page(d, bfn, mfn,
+                                     IOMMUF_readable | IOMMUF_writable);
 
             if ( !rc )
                 rc = ret;
+
+            /*
+             * The only reason a reserved page would be mapped is that
+             * iommu_inclusive_mapping is set, in which case the BFN
+             * needs to be marked as reserved in the IOMMU.
+             */
+            if ( page_is_ram_type(pfn, RAM_TYPE_RESERVED) )
+            {
+                ASSERT(iommu_inclusive_mapping);
+
+                ret = rangeset_add_singleton(dom_iommu(d)->reserved_ranges,
+                                             bfn_x(bfn));
+                if ( !rc )
+                    rc = ret;
+            }
         }
 
         if ( rc )
-            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
+            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping/reservation failed: %d\n",
                    d->domain_id, rc);
 
         if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 624784fec8..cc0be81b4e 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -122,6 +122,12 @@ struct domain_iommu {
 
     /* Features supported by the IOMMU */
     DECLARE_BITMAP(features, IOMMU_FEAT_count);
+
+    /*
+     * BFN ranges that are reserved in the domain IOMMU mappings and
+     * must not be modified after initialization.
+     */
+    struct rangeset *reserved_ranges;
 };
 
 #define dom_iommu(d) (&(d)->iommu)
--
2.11.0