[PATCH v7 01/14] iommu: add preemption support to iommu_{un,}map()
From: Roger Pau Monné <roger.pau@xxxxxxxxxx>
The loop in iommu_{,un}map() can be arbitrarily large, and as such it
needs to handle preemption. Introduce a new flag that signals whether
the function should do preemption checks, returning the number of pages
that have been processed in case a need for preemption was actually
found.
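To illustrate the intended use (a rough sketch only, not code from this
patch; d, dfn and page_count stand for hypothetical caller-local
variables), a preemptible unmap of a large range could be driven like
this, resuming from the reported offset:

    unsigned int flush_flags = 0;
    long rc;

    while ( (rc = iommu_unmap(d, dfn, page_count, IOMMUF_preempt,
                              &flush_flags)) > 0 )
    {
        /*
         * Preempted: record progress; a real caller would typically
         * arrange for a continuation before coming back here.
         */
        dfn = dfn_add(dfn, rc);
        page_count -= rc;
    }

    /* rc == 0: whole range unmapped; rc < 0: error from the driver. */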
Note that the cleanup done in iommu_map() can now be incomplete if
preemption has happened, and hence callers would need to take care of
unmapping the whole range (ie: ranges already mapped by previously
preempted calls). So far none of the callers care about having those
ranges unmapped, so error handling in arch_iommu_hwdom_init() can be
kept as-is.
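Should a caller ever need that, its error path would have to cover the
full range itself, roughly along these lines (again only a sketch, not
code from this patch; d, dfn0, mfn0, page_count and flags are
hypothetical caller-local variables):

    unsigned long done = 0;
    unsigned int flush_flags = 0;
    long rc;

    while ( (rc = iommu_map(d, dfn_add(dfn0, done), mfn_add(mfn0, done),
                            page_count - done, flags | IOMMUF_preempt,
                            &flush_flags)) > 0 )
        done += rc; /* preempted - resume from the recorded offset */

    if ( rc < 0 )
    {
        /*
         * iommu_map() has undone only its own partial work; also undo
         * what earlier, preempted invocations already mapped.
         */
        /* while statement to satisfy __must_check */
        while ( iommu_unmap(d, dfn0, done, 0, &flush_flags) )
            break;
    }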
Note that iommu_legacy_{un,}map() are left without preemption handling:
callers of those interfaces aren't going to be modified to pass bigger
chunks, and hence the functions themselves won't be modified either; they
are legacy, and uses should be replaced with iommu_{un,}map() instead if
preemption is required.
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v7: Integrate into series, with quite a few adjustments (beyond mere re-
basing).
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -308,13 +308,13 @@ static unsigned int mapping_order(const
return order;
}
-int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
- unsigned long page_count, unsigned int flags,
- unsigned int *flush_flags)
+long iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
+ unsigned long page_count, unsigned int flags,
+ unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
- unsigned int order;
+ unsigned int order, j = 0;
int rc = 0;
if ( !is_iommu_enabled(d) )
@@ -329,6 +329,11 @@ int iommu_map(struct domain *d, dfn_t df
order = mapping_order(hd, dfn, mfn, page_count - i);
+ if ( (flags & IOMMUF_preempt) &&
+ ((!(++j & 0xfff) && general_preempt_check()) ||
+ i > LONG_MAX - (1UL << order)) )
+ return i;
+
rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
flags | IOMMUF_order(order), flush_flags);
@@ -341,7 +346,7 @@ int iommu_map(struct domain *d, dfn_t df
d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
/* while statement to satisfy __must_check */
- while ( iommu_unmap(d, dfn0, i, flush_flags) )
+ while ( iommu_unmap(d, dfn0, i, 0, flush_flags) )
break;
if ( !is_hardware_domain(d) )
@@ -365,7 +370,10 @@ int iommu_legacy_map(struct domain *d, d
unsigned long page_count, unsigned int flags)
{
unsigned int flush_flags = 0;
- int rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
+ int rc;
+
+ ASSERT(!(flags & IOMMUF_preempt));
+ rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
@@ -373,25 +381,33 @@ int iommu_legacy_map(struct domain *d, d
return rc;
}
-int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
- unsigned int *flush_flags)
+long iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
+ unsigned int flags, unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
- unsigned int order;
+ unsigned int order, j = 0;
int rc = 0;
if ( !is_iommu_enabled(d) )
return 0;
+ ASSERT(!(flags & ~IOMMUF_preempt));
+
for ( i = 0; i < page_count; i += 1UL << order )
{
dfn_t dfn = dfn_add(dfn0, i);
int err;
order = mapping_order(hd, dfn, _mfn(0), page_count - i);
+
+ if ( (flags & IOMMUF_preempt) &&
+ ((!(++j & 0xfff) && general_preempt_check()) ||
+ i > LONG_MAX - (1UL << order)) )
+ return i;
+
err = iommu_call(hd->platform_ops, unmap_page, d, dfn,
- order, flush_flags);
+ flags | IOMMUF_order(order), flush_flags);
if ( likely(!err) )
continue;
@@ -425,7 +441,7 @@ int iommu_unmap(struct domain *d, dfn_t
int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned long page_count)
{
unsigned int flush_flags = 0;
- int rc = iommu_unmap(d, dfn, page_count, &flush_flags);
+ int rc = iommu_unmap(d, dfn, page_count, 0, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -124,14 +124,15 @@ void arch_iommu_check_autotranslated_hwd
void arch_iommu_hwdom_init(struct domain *d);
/*
- * The following flags are passed to map operations and passed by lookup
- * operations.
+ * The following flags are passed to map (applicable ones also to unmap)
+ * operations, while some are passed back by lookup operations.
*/
#define IOMMUF_order(n) ((n) & 0x3f)
#define _IOMMUF_readable 6
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 7
#define IOMMUF_writable (1u<<_IOMMUF_writable)
+#define IOMMUF_preempt (1u << 8)
/*
* flush_flags:
@@ -153,12 +154,18 @@ enum
#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
#define IOMMU_FLUSHF_all (1u << _IOMMU_FLUSHF_all)
-int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned long page_count, unsigned int flags,
- unsigned int *flush_flags);
-int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
- unsigned long page_count,
- unsigned int *flush_flags);
+/*
+ * For both of these: Negative return values are error indicators. Zero
+ * indicates full successful completion of the request, while positive
+ * values indicate partial completion, which is possible only with
+ * IOMMUF_preempt passed in.
+ */
+long __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned long page_count, unsigned int flags,
+ unsigned int *flush_flags);
+long __must_check iommu_unmap(struct domain *d, dfn_t dfn,
+ unsigned long page_count, unsigned int flags,
+ unsigned int *flush_flags);
int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
unsigned long page_count,