[Xen-devel] [PATCH v2 2/4] iommu: rename wrapper functions
A subsequent patch will add semantically different versions of
iommu_map/unmap() so, in advance of that change, this patch renames the
existing functions to iommu_legacy_map/unmap() and modifies all call-sites.
The patch also renames iommu_iotlb_flush[_all]() to the shorter
iommu_flush[_all]() (renaming VT-d's internal iommu_flush_all() to
flush_all_iommus() to avoid the resulting name clash) and co-locates the
declarations with those of iommu_legacy_map/unmap().
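In summary, the renames are:

  iommu_map()             -> iommu_legacy_map()
  iommu_unmap()           -> iommu_legacy_unmap()
  iommu_iotlb_flush()     -> iommu_flush()
  iommu_iotlb_flush_all() -> iommu_flush_all()
  iommu_flush_all()       -> flush_all_iommus()   (VT-d internal)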
The only changes in this patch that are not purely cosmetic are in
arch_iommu_populate_page_table() and iommu_hwdom_init(), which now call
iommu_legacy_map() rather than calling the map_page() iommu_ops method
directly.
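For reference, the renamed wrapper keeps its existing shape; the sketch
below is illustrative only (the iommu_enabled checks, alignment asserts
and error unwinding are elided) and is not the exact function body:

/*
 * Illustrative sketch, not the exact implementation: the legacy
 * wrapper iterates over the order-sized range, invoking the
 * per-page iommu_ops method for each dfn/mfn pair.
 */
int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                     unsigned int page_order, unsigned int flags)
{
    const struct domain_iommu *hd = dom_iommu(d);
    unsigned long i;
    int rc = 0;

    for ( i = 0; !rc && i < (1ul << page_order); i++ )
        rc = hd->platform_ops->map_page(d, dfn_add(dfn, i),
                                        mfn_add(mfn, i), flags);

    return rc;
}

This is why the two call-sites above must now pass PAGE_ORDER_4K
explicitly where they previously invoked the single-page map_page()
method directly.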
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
v2:
- New in v2.
---
 xen/arch/arm/p2m.c                  |  4 ++--
 xen/arch/x86/mm.c                   | 11 ++++++-----
 xen/arch/x86/mm/p2m-ept.c           |  4 ++--
 xen/arch/x86/mm/p2m-pt.c            |  5 +++--
 xen/arch/x86/mm/p2m.c               | 12 ++++++------
 xen/arch/x86/x86_64/mm.c            |  9 +++++----
 xen/common/grant_table.c            | 14 +++++++-------
 xen/common/memory.c                 |  4 ++--
 xen/drivers/passthrough/iommu.c     | 14 +++++++-------
 xen/drivers/passthrough/vtd/iommu.c | 10 +++++-----
 xen/drivers/passthrough/x86/iommu.c | 13 ++++++-------
 xen/include/xen/iommu.h             | 19 ++++++++++---------
 12 files changed, 61 insertions(+), 58 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 6c76298ebc..e8b7624492 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -971,8 +971,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
if ( need_iommu_pt_sync(p2m->domain) &&
(lpae_is_valid(orig_pte) || lpae_is_valid(*entry)) )
- rc = iommu_iotlb_flush(p2m->domain, _dfn(gfn_x(sgfn)),
- 1UL << page_order);
+ rc = iommu_flush(p2m->domain, _dfn(gfn_x(sgfn)),
+ 1UL << page_order);
else
rc = 0;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 28a003063e..746f0b0258 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2801,12 +2801,13 @@ static int _get_page_type(struct page_info *page, unsigned long type,
mfn_t mfn = page_to_mfn(page);
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_ret = iommu_unmap(d, _dfn(mfn_x(mfn)),
- PAGE_ORDER_4K);
+ iommu_ret = iommu_legacy_unmap(d, _dfn(mfn_x(mfn)),
+ PAGE_ORDER_4K);
else if ( type == PGT_writable_page )
- iommu_ret = iommu_map(d, _dfn(mfn_x(mfn)), mfn,
- PAGE_ORDER_4K,
- IOMMUF_readable | IOMMUF_writable);
+ iommu_ret = iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn,
+ PAGE_ORDER_4K,
+ IOMMUF_readable |
+ IOMMUF_writable);
}
}
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 6e4e375bad..64a49c07b7 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -882,8 +882,8 @@ out:
rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order,
vtd_pte_present);
else if ( need_iommu_pt_sync(d) )
rc = iommu_flags ?
- iommu_map(d, _dfn(gfn), mfn, order, iommu_flags) :
- iommu_unmap(d, _dfn(gfn), order);
+ iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) :
+ iommu_legacy_unmap(d, _dfn(gfn), order);
}
unmap_domain_page(table);
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 17a6b61f12..69ffb08179 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -686,8 +686,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
if ( need_iommu_pt_sync(p2m->domain) )
rc = iommu_pte_flags ?
- iommu_map(d, _dfn(gfn), mfn, page_order, iommu_pte_flags) :
- iommu_unmap(d, _dfn(gfn), page_order);
+ iommu_legacy_map(d, _dfn(gfn), mfn, page_order,
+ iommu_pte_flags) :
+ iommu_legacy_unmap(d, _dfn(gfn), page_order);
else if ( iommu_use_hap_pt(d) && iommu_old_flags )
amd_iommu_flush_pages(p2m->domain, gfn, page_order);
}
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index fea4497910..ed76e96d33 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -733,7 +733,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
if ( !paging_mode_translate(p2m->domain) )
return need_iommu_pt_sync(p2m->domain) ?
- iommu_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
+ iommu_legacy_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
ASSERT(gfn_locked_by_me(p2m, gfn));
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
@@ -780,8 +780,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
if ( !paging_mode_translate(d) )
return (need_iommu_pt_sync(d) && t == p2m_ram_rw) ?
- iommu_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
- IOMMUF_readable | IOMMUF_writable) : 0;
+ iommu_legacy_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
+ IOMMUF_readable | IOMMUF_writable) : 0;
/* foreign pages are added thru p2m_add_foreign */
if ( p2m_is_foreign(t) )
@@ -1151,8 +1151,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
{
if ( !need_iommu_pt_sync(d) )
return 0;
- return iommu_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
- IOMMUF_readable | IOMMUF_writable);
+ return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable);
}
gfn_lock(p2m, gfn, 0);
@@ -1242,7 +1242,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
{
if ( !need_iommu_pt_sync(d) )
return 0;
- return iommu_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
+ return iommu_legacy_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
}
gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 11977f2671..8056679de0 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1436,15 +1436,16 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
!need_iommu_pt_sync(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map(hardware_domain, _dfn(i), _mfn(i),
- PAGE_ORDER_4K,
- IOMMUF_readable | IOMMUF_writable) )
+ if ( iommu_legacy_map(hardware_domain, _dfn(i), _mfn(i),
+ PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable) )
break;
if ( i != epfn )
{
while (i-- > old_max)
/* If statement to satisfy __must_check. */
- if ( iommu_unmap(hardware_domain, _dfn(i), PAGE_ORDER_4K) )
+ if ( iommu_legacy_unmap(hardware_domain, _dfn(i),
+ PAGE_ORDER_4K) )
continue;
goto destroy_m2p;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index b67ae9e3f5..fd099a8f25 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1134,14 +1134,14 @@ map_grant_ref(
!(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
{
if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
- IOMMUF_readable | IOMMUF_writable);
+ err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+ IOMMUF_readable | IOMMUF_writable);
}
else if ( act_pin && !old_pin )
{
if ( !kind )
- err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
- IOMMUF_readable);
+ err = iommu_legacy_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+ IOMMUF_readable);
}
if ( err )
{
@@ -1389,10 +1389,10 @@ unmap_common(
kind = mapkind(lgt, rd, op->mfn);
if ( !kind )
- err = iommu_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
+ err = iommu_legacy_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
else if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
- IOMMUF_readable);
+ err = iommu_legacy_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
+ IOMMUF_readable);
double_gt_unlock(lgt, rgt);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 175bd62c11..7b668077d8 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -865,11 +865,11 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
this_cpu(iommu_dont_flush_iotlb) = 0;
- ret = iommu_iotlb_flush(d, _dfn(xatp->idx - done), done);
+ ret = iommu_flush(d, _dfn(xatp->idx - done), done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
- ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - done), done);
+ ret = iommu_flush(d, _dfn(xatp->gpfn - done), done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
}
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index c1cce08551..6d231bec94 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -226,8 +226,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- ret = hd->platform_ops->map_page(d, _dfn(dfn), _mfn(mfn),
- mapping);
+ ret = iommu_legacy_map(d, _dfn(dfn), _mfn(mfn), PAGE_ORDER_4K,
+ mapping);
if ( !rc )
rc = ret;
@@ -304,8 +304,8 @@ void iommu_domain_destroy(struct domain *d)
arch_iommu_domain_destroy(d);
}
-int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags)
+int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned int page_order, unsigned int flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
@@ -345,7 +345,7 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
return rc;
}
-int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
+int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
@@ -409,7 +409,7 @@ static void iommu_free_pagetables(unsigned long unused)
cpumask_cycle(smp_processor_id(),
&cpu_online_map));
}
-int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
+int iommu_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -436,7 +436,7 @@ int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count)
return rc;
}
-int iommu_iotlb_flush_all(struct domain *d)
+int iommu_flush_all(struct domain *d)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index d2fa5e2b25..8727e242e2 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -546,7 +546,7 @@ static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did,
return status;
}
-static int __must_check iommu_flush_all(void)
+static int __must_check flush_all_iommus(void)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
@@ -1310,7 +1310,7 @@ static void __hwdom_init intel_iommu_hwdom_init(struct domain *d)
/* Make sure workarounds are applied before enabling the IOMMU(s). */
arch_iommu_hwdom_init(d);
- if ( iommu_flush_all() )
+ if ( flush_all_iommus() )
printk(XENLOG_WARNING VTDPREFIX
" IOMMU flush all failed for hardware domain\n");
@@ -2250,7 +2250,7 @@ static int __must_check init_vtd_hw(void)
}
}
- return iommu_flush_all();
+ return flush_all_iommus();
}
static void __hwdom_init setup_hwdom_rmrr(struct domain *d)
@@ -2554,7 +2554,7 @@ static int __must_check vtd_suspend(void)
if ( !iommu_enabled )
return 0;
- rc = iommu_flush_all();
+ rc = flush_all_iommus();
if ( unlikely(rc) )
{
printk(XENLOG_WARNING VTDPREFIX
@@ -2602,7 +2602,7 @@ static void vtd_crash_shutdown(void)
if ( !iommu_enabled )
return;
- if ( iommu_flush_all() )
+ if ( flush_all_iommus() )
printk(XENLOG_WARNING VTDPREFIX
" crash shutdown: IOMMU flush all failed\n");
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index c68a72279d..c1f3e2442e 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -46,7 +46,6 @@ int __init iommu_setup_hpet_msi(struct msi_desc *msi)
int arch_iommu_populate_page_table(struct domain *d)
{
- const struct domain_iommu *hd = dom_iommu(d);
struct page_info *page;
int rc = 0, n = 0;
@@ -68,9 +67,9 @@ int arch_iommu_populate_page_table(struct domain *d)
{
ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
BUG_ON(SHARED_M2P(gfn));
- rc = hd->platform_ops->map_page(d, _dfn(gfn), _mfn(mfn),
- IOMMUF_readable |
- IOMMUF_writable);
+ rc = iommu_legacy_map(d, _dfn(gfn), _mfn(mfn),
+ PAGE_ORDER_4K, IOMMUF_readable |
+ IOMMUF_writable);
}
if ( rc )
{
@@ -107,7 +106,7 @@ int arch_iommu_populate_page_table(struct domain *d)
this_cpu(iommu_dont_flush_iotlb) = 0;
if ( !rc )
- rc = iommu_iotlb_flush_all(d);
+ rc = iommu_flush_all(d);
if ( rc && rc != -ERESTART )
iommu_teardown(d);
@@ -241,8 +240,8 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
if ( paging_mode_translate(d) )
rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
else
- rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
- IOMMUF_readable | IOMMUF_writable);
+ rc = iommu_legacy_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+ IOMMUF_readable | IOMMUF_writable);
if ( rc )
printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
d->domain_id, rc);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index da8294bac8..6773d605a9 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -88,17 +88,22 @@ int iommu_construct(struct domain *d);
/* Function used internally, use iommu_domain_destroy */
void iommu_teardown(struct domain *d);
-/* iommu_map_page() takes flags to direct the mapping operation. */
#define _IOMMUF_readable 0
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags);
-int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
- unsigned int page_order);
+
+int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned int page_order,
+ unsigned int flags);
+int __must_check iommu_legacy_unmap(struct domain *d, dfn_t dfn,
+ unsigned int page_order);
+
int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
unsigned int *flags);
+int __must_check iommu_flush(struct domain *d, dfn_t dfn,
+ unsigned int page_count);
+int __must_check iommu_flush_all(struct domain *d);
enum iommu_feature
{
@@ -252,10 +252,6 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count);
-int __must_check iommu_iotlb_flush_all(struct domain *d);
-
void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev);
/*
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel