|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 2/7] iommu: make use of type-safe BFN and MFN in exported functions
This patch modifies the declaration of the entry points to the IOMMU
sub-system to use bfn_t and mfn_t in place of unsigned long. A subsequent
patch will similarly modify the methods in the iommu_ops structure.
NOTE: Since (with this patch applied) bfn_t is now in use, the patch also
introduces the 'cscope/grep fodder' to allow the type declaration to
be easily found.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
xen/arch/arm/p2m.c | 3 ++-
xen/arch/x86/mm.c | 7 +++----
xen/arch/x86/mm/p2m-ept.c | 8 +++++---
xen/arch/x86/mm/p2m-pt.c | 8 ++++----
xen/arch/x86/mm/p2m.c | 15 +++++++++------
xen/arch/x86/x86_64/mm.c | 5 +++--
xen/common/grant_table.c | 10 ++++++----
xen/common/memory.c | 4 ++--
xen/drivers/passthrough/iommu.c | 25 ++++++++++++-------------
xen/drivers/passthrough/vtd/x86/vtd.c | 3 ++-
xen/include/xen/iommu.h | 23 +++++++++++++++++++----
11 files changed, 67 insertions(+), 44 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 65e8b9c6ea..25e9af6b05 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -957,7 +957,8 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
if ( need_iommu(p2m->domain) &&
(lpae_valid(orig_pte) || lpae_valid(*entry)) )
- rc = iommu_iotlb_flush(p2m->domain, gfn_x(sgfn), 1UL << page_order);
+ rc = iommu_iotlb_flush(p2m->domain, _bfn(gfn_x(sgfn)),
+ 1UL << page_order);
else
rc = 0;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 35f204369b..69ce57914b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2676,13 +2676,12 @@ static int _get_page_type(struct page_info *page, unsigned long type,
struct domain *d = page_get_owner(page);
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
- gfn_t gfn = _gfn(mfn_to_gmfn(d, mfn_x(page_to_mfn(page))));
+ bfn_t bfn = _bfn(mfn_to_gmfn(d, mfn_x(page_to_mfn(page))));
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_ret = iommu_unmap_page(d, gfn_x(gfn));
+ iommu_ret = iommu_unmap_page(d, bfn);
else if ( type == PGT_writable_page )
- iommu_ret = iommu_map_page(d, gfn_x(gfn),
- mfn_x(page_to_mfn(page)),
+ iommu_ret = iommu_map_page(d, bfn, page_to_mfn(page),
IOMMUF_readable|IOMMUF_writable);
}
}
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 66dbb3e83a..e1ebd25e57 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -873,12 +873,14 @@ out:
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
{
- rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ rc = iommu_map_page(d, _bfn(gfn + i), mfn_add(mfn, i),
+ iommu_flags);
if ( unlikely(rc) )
{
while ( i-- )
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain, gfn + i) )
+ if ( iommu_unmap_page(p2m->domain,
+ _bfn(gfn + i)) )
continue;
break;
@@ -887,7 +889,7 @@ out:
else
for ( i = 0; i < (1 << order); i++ )
{
- ret = iommu_unmap_page(d, gfn + i);
+ ret = iommu_unmap_page(d, _bfn(gfn + i));
if ( !rc )
rc = ret;
}
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index ad6f9ef10d..0e6392a959 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -696,13 +696,13 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
else if ( iommu_pte_flags )
for ( i = 0; i < (1UL << page_order); i++ )
{
- rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
+ rc = iommu_map_page(p2m->domain, _bfn(gfn + i),
+ mfn_add(mfn, i), iommu_pte_flags);
if ( unlikely(rc) )
{
while ( i-- )
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(p2m->domain, gfn + i) )
+ if ( iommu_unmap_page(p2m->domain, _bfn(gfn + i)) )
continue;
break;
@@ -711,7 +711,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
else
for ( i = 0; i < (1UL << page_order); i++ )
{
- int ret = iommu_unmap_page(p2m->domain, gfn + i);
+ int ret = iommu_unmap_page(p2m->domain, _bfn(gfn + i));
if ( !rc )
rc = ret;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index dccd1425b4..115956bcec 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -722,7 +722,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
{
for ( i = 0; i < (1 << page_order); i++ )
{
- int ret = iommu_unmap_page(p2m->domain, mfn + i);
+ int ret = iommu_unmap_page(p2m->domain, _bfn(mfn + i));
if ( !rc )
rc = ret;
@@ -781,14 +781,14 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
{
for ( i = 0; i < (1 << page_order); i++ )
{
- rc = iommu_map_page(d, mfn_x(mfn_add(mfn, i)),
- mfn_x(mfn_add(mfn, i)),
+ rc = iommu_map_page(d, _bfn(mfn_x(mfn) + i),
+ mfn_add(mfn, i),
IOMMUF_readable|IOMMUF_writable);
if ( rc != 0 )
{
while ( i-- > 0 )
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(d, mfn_x(mfn_add(mfn, i))) )
+ if ( iommu_unmap_page(d, _bfn(mfn_x(mfn) + i)) )
continue;
return rc;
@@ -1164,7 +1164,9 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
{
if ( !need_iommu(d) )
return 0;
- return iommu_map_page(d, gfn_l, gfn_l, IOMMUF_readable|IOMMUF_writable);
+
+ return iommu_map_page(d, _bfn(gfn_l), _mfn(gfn_l),
+ IOMMUF_readable|IOMMUF_writable);
}
gfn_lock(p2m, gfn, 0);
@@ -1254,7 +1256,8 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
{
if ( !need_iommu(d) )
return 0;
- return iommu_unmap_page(d, gfn_l);
+
+ return iommu_unmap_page(d, _bfn(gfn_l));
}
gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 9b37da6698..5af3164b8d 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1428,13 +1428,14 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
+ if ( iommu_map_page(hardware_domain, _bfn(i), _mfn(i),
+ IOMMUF_readable|IOMMUF_writable) )
break;
if ( i != epfn )
{
while (i-- > old_max)
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(hardware_domain, i) )
+ if ( iommu_unmap_page(hardware_domain, _bfn(i)) )
continue;
goto destroy_m2p;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 48c547930c..97dc371f4b 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1108,13 +1108,14 @@ map_grant_ref(
!(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
{
if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, frame, frame,
+ err = iommu_map_page(ld, _bfn(frame), _mfn(frame),
IOMMUF_readable|IOMMUF_writable);
}
else if ( act_pin && !old_pin )
{
if ( !kind )
- err = iommu_map_page(ld, frame, frame, IOMMUF_readable);
+ err = iommu_map_page(ld, _bfn(frame), _mfn(frame),
+ IOMMUF_readable);
}
if ( err )
{
@@ -1376,9 +1377,10 @@ unmap_common(
kind = mapkind(lgt, rd, op->frame);
if ( !kind )
- err = iommu_unmap_page(ld, op->frame);
+ err = iommu_unmap_page(ld, _bfn(op->frame));
else if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, op->frame, op->frame, IOMMUF_readable);
+ err = iommu_map_page(ld, _bfn(op->frame), _mfn(op->frame),
+ IOMMUF_readable);
double_gt_unlock(lgt, rgt);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 59d23a2a98..5f9152a817 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -823,11 +823,11 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
this_cpu(iommu_dont_flush_iotlb) = 0;
- ret = iommu_iotlb_flush(d, xatp->idx - done, done);
+ ret = iommu_iotlb_flush(d, _bfn(xatp->idx - done), done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
- ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+ ret = iommu_iotlb_flush(d, _bfn(xatp->gpfn - done), done);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
}
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index df7c22f39c..b25d9e3707 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -255,7 +255,7 @@ void iommu_domain_destroy(struct domain *d)
arch_iommu_domain_destroy(d);
}
-int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
+int iommu_map_page(struct domain *d, bfn_t bfn, mfn_t mfn,
unsigned int flags)
{
const struct domain_iommu *hd = dom_iommu(d);
@@ -264,13 +264,13 @@ int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->map_page(d, bfn, mfn, flags);
+ rc = hd->platform_ops->map_page(d, bfn_x(bfn), mfn_x(mfn), flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU mapping bfn %#lx to mfn %#lx failed: %d\n",
- d->domain_id, bfn, mfn, rc);
+ "d%d: IOMMU mapping bfn %"PRI_bfn" to mfn %"PRI_mfn" failed: %d\n",
+ d->domain_id, bfn_x(bfn), mfn_x(mfn), rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -279,7 +279,7 @@ int iommu_map_page(struct domain *d, unsigned long bfn, unsigned long mfn,
return rc;
}
-int iommu_unmap_page(struct domain *d, unsigned long bfn)
+int iommu_unmap_page(struct domain *d, bfn_t bfn)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -287,13 +287,13 @@ int iommu_unmap_page(struct domain *d, unsigned long bfn)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->unmap_page(d, bfn);
+ rc = hd->platform_ops->unmap_page(d, bfn_x(bfn));
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU unmapping bfn %#lx failed: %d\n",
- d->domain_id, bfn, rc);
+ "d%d: IOMMU unmapping bfn %"PRI_bfn" failed: %d\n",
+ d->domain_id, bfn_x(bfn), rc);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -319,8 +319,7 @@ static void iommu_free_pagetables(unsigned long unused)
cpumask_cycle(smp_processor_id(),
&cpu_online_map));
}
-int iommu_iotlb_flush(struct domain *d, unsigned long bfn,
- unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, bfn_t bfn, unsigned int page_count)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -328,13 +327,13 @@ int iommu_iotlb_flush(struct domain *d, unsigned long bfn,
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
return 0;
- rc = hd->platform_ops->iotlb_flush(d, bfn, page_count);
+ rc = hd->platform_ops->iotlb_flush(d, bfn_x(bfn), page_count);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU IOTLB flush failed: %d, bfn %#lx, page count %u\n",
- d->domain_id, rc, bfn, page_count);
+ "d%d: IOMMU IOTLB flush failed: %d, bfn %"PRI_bfn", page count %u\n",
+ d->domain_id, rc, bfn_x(bfn), page_count);
if ( !is_hardware_domain(d) )
domain_crash(d);
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 88a60b3307..16f900f451 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -143,7 +143,8 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
{
- int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
+ int ret = iommu_map_page(d, _bfn(pfn * tmp + j),
+ _mfn(pfn * tmp + j),
IOMMUF_readable|IOMMUF_writable);
if ( !rc )
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index de1c581cdd..3d19918301 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -24,14 +24,29 @@
#include <xen/spinlock.h>
#include <xen/pci.h>
#include <xen/typesafe.h>
+#include <xen/mm.h>
#include <public/hvm/ioreq.h>
#include <public/domctl.h>
#include <asm/device.h>
#include <asm/iommu.h>
TYPE_SAFE(unsigned long, bfn);
+#define PRI_bfn "05lx"
#define INVALID_BFN _bfn(~0UL)
+/*
+ * The definitions below are purely for the benefit of grep/cscope. The
+ * real definitions come from the TYPE_SAFE macro above.
+ */
+#ifndef bfn_t
+#define bfn_t
+#define _bfn
+#define bfn_x
+#undef bfn_t
+#undef _bfn
+#undef bfn_x
+#endif
+
extern bool_t iommu_enable, iommu_enabled;
extern bool_t force_iommu, iommu_verbose;
extern bool_t iommu_workaround_bios_bug, iommu_igfx, iommu_passthrough;
@@ -64,9 +79,9 @@ void iommu_teardown(struct domain *d);
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, unsigned long bfn,
- unsigned long mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, unsigned long bfn);
+int __must_check iommu_map_page(struct domain *d, bfn_t bfn,
+ mfn_t mfn, unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, bfn_t bfn);
enum iommu_feature
{
@@ -191,7 +206,7 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-int __must_check iommu_iotlb_flush(struct domain *d, unsigned long bfn,
+int __must_check iommu_iotlb_flush(struct domain *d, bfn_t bfn,
unsigned int page_count);
int __must_check iommu_iotlb_flush_all(struct domain *d);
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |