[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] iommu / p2m: add a page_order parameter to iommu_map/unmap_page()



The P2M code currently contains many loops to deal with the fact that,
while it may be required to handle page sizes greater than 4K, the
IOMMU map and unmap functions do not.
This patch adds a page_order parameter to those functions and implements
the necessary loops within. This allows the P2M code to be substantially
simplified.

NOTE: This patch does not modify the underlying vendor IOMMU
      implementations to deal with page sizes of more than 4K.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/mm.c                   |  4 ++-
 xen/arch/x86/mm/p2m-ept.c           | 30 ++---------------
 xen/arch/x86/mm/p2m-pt.c            | 47 ++++++---------------------
 xen/arch/x86/mm/p2m.c               | 49 ++++------------------------
 xen/arch/x86/x86_64/mm.c            |  4 ++-
 xen/common/grant_table.c            |  7 ++--
 xen/drivers/passthrough/iommu.c     | 65 ++++++++++++++++++++++++-------------
 xen/drivers/passthrough/x86/iommu.c |  2 +-
 xen/include/xen/iommu.h             |  8 +++--
 9 files changed, 80 insertions(+), 136 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c53bc86a68..f0ae016e7a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2794,9 +2794,11 @@ static int _get_page_type(struct page_info *page, 
unsigned long type,
             mfn_t mfn = page_to_mfn(page);
 
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_ret = iommu_unmap_page(d, _dfn(mfn_x(mfn)));
+                iommu_ret = iommu_unmap_page(d, _dfn(mfn_x(mfn)),
+                                             PAGE_ORDER_4K);
             else if ( type == PGT_writable_page )
                 iommu_ret = iommu_map_page(d, _dfn(mfn_x(mfn)), mfn,
+                                           PAGE_ORDER_4K,
                                            IOMMUF_readable |
                                            IOMMUF_writable);
         }
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 407e299e50..656c8dd3ac 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -880,33 +880,9 @@ out:
         if ( iommu_use_hap_pt(d) )
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, 
vtd_pte_present);
         else if ( need_iommu_pt_sync(d) )
-        {
-            dfn_t dfn = _dfn(gfn);
-
-            if ( iommu_flags )
-                for ( i = 0; i < (1 << order); i++ )
-                {
-                    rc = iommu_map_page(d, dfn_add(dfn, i),
-                                        mfn_add(mfn, i), iommu_flags);
-                    if ( unlikely(rc) )
-                    {
-                        while ( i-- )
-                            /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain,
-                                                  dfn_add(dfn, i)) )
-                                continue;
-
-                        break;
-                    }
-                }
-            else
-                for ( i = 0; i < (1 << order); i++ )
-                {
-                    ret = iommu_unmap_page(d, dfn_add(dfn, i));
-                    if ( !rc )
-                        rc = ret;
-                }
-        }
+            rc = iommu_flags ?
+                iommu_map_page(d, _dfn(gfn), mfn, order, iommu_flags) :
+                iommu_unmap_page(d, _dfn(gfn), order);
     }
 
     unmap_domain_page(table);
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 55df18501e..b264a97bd8 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -477,10 +477,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, 
mfn_t mfn,
                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
                  int sve)
 {
+    struct domain *d = p2m->domain;
     /* XXX -- this might be able to be faster iff current->domain == d */
     void *table;
     unsigned long gfn = gfn_x(gfn_);
-    unsigned long i, gfn_remainder = gfn;
+    unsigned long gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry, entry_content;
     /* Intermediate table to free if we're replacing it with a superpage. */
     l1_pgentry_t intermediate_entry = l1e_empty();
@@ -515,7 +516,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t 
mfn,
         t.gfn = gfn;
         t.mfn = mfn_x(mfn);
         t.p2mt = p2mt;
-        t.d = p2m->domain->domain_id;
+        t.d = d->domain_id;
         t.order = page_order;
 
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
@@ -683,41 +684,13 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, 
mfn_t mfn,
     {
         ASSERT(rc == 0);
 
-        if ( iommu_use_hap_pt(p2m->domain) )
-        {
-            if ( iommu_old_flags )
-                amd_iommu_flush_pages(p2m->domain, gfn, page_order);
-        }
-        else if ( need_iommu_pt_sync(p2m->domain) )
-        {
-            dfn_t dfn = _dfn(gfn);
-
-            if ( iommu_pte_flags )
-                for ( i = 0; i < (1UL << page_order); i++ )
-                {
-                    rc = iommu_map_page(p2m->domain, dfn_add(dfn, i),
-                                        mfn_add(mfn, i), iommu_pte_flags);
-                    if ( unlikely(rc) )
-                    {
-                        while ( i-- )
-                            /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain,
-                                                  dfn_add(dfn, i)) )
-                                continue;
-
-                        break;
-                    }
-                }
-            else
-                for ( i = 0; i < (1UL << page_order); i++ )
-                {
-                    int ret = iommu_unmap_page(p2m->domain,
-                                               dfn_add(dfn, i));
-
-                    if ( !rc )
-                        rc = ret;
-                }
-        }
+        if ( need_iommu_pt_sync(p2m->domain) )
+            rc = iommu_pte_flags ?
+                iommu_map_page(d, _dfn(gfn), mfn, page_order,
+                               iommu_pte_flags) :
+                iommu_unmap_page(d, _dfn(gfn), page_order);
+        else if ( iommu_use_hap_pt(d) && iommu_old_flags )
+            amd_iommu_flush_pages(p2m->domain, gfn, page_order);
     }
 
     /*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index f1df1debc7..3fa559da01 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -718,24 +718,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long 
gfn_l, unsigned long mfn,
     p2m_access_t a;
 
     if ( !paging_mode_translate(p2m->domain) )
-    {
-        int rc = 0;
-
-        if ( need_iommu_pt_sync(p2m->domain) )
-        {
-            dfn_t dfn = _dfn(mfn);
-
-            for ( i = 0; i < (1 << page_order); i++ )
-            {
-                int ret = iommu_unmap_page(p2m->domain, dfn_add(dfn, i));
-
-                if ( !rc )
-                    rc = ret;
-            }
-        }
-
-        return rc;
-    }
+        return need_iommu_pt_sync(p2m->domain) ?
+            iommu_unmap_page(p2m->domain, _dfn(mfn), page_order) : 0;
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
@@ -781,28 +765,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t 
mfn,
     int rc = 0;
 
     if ( !paging_mode_translate(d) )
-    {
-        if ( need_iommu_pt_sync(d) && t == p2m_ram_rw )
-        {
-            dfn_t dfn = _dfn(mfn_x(mfn));
-
-            for ( i = 0; i < (1 << page_order); i++ )
-            {
-                rc = iommu_map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
-                                    IOMMUF_readable|IOMMUF_writable);
-                if ( rc != 0 )
-                {
-                    while ( i-- > 0 )
-                        /* If statement to satisfy __must_check. */
-                        if ( iommu_unmap_page(d, dfn_add(dfn, i)) )
-                            continue;
-
-                    return rc;
-                }
-            }
-        }
-        return 0;
-    }
+        return (need_iommu_pt_sync(d) && t == p2m_ram_rw) ?
+            iommu_map_page(d, _dfn(mfn_x(mfn)), mfn, PAGE_ORDER_4K,
+                           IOMMUF_readable | IOMMUF_writable) : 0;
 
     /* foreign pages are added thru p2m_add_foreign */
     if ( p2m_is_foreign(t) )
@@ -1173,7 +1138,7 @@ int set_identity_p2m_entry(struct domain *d, unsigned 
long gfn_l,
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l),
+        return iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
                               IOMMUF_readable | IOMMUF_writable);
     }
 
@@ -1264,7 +1229,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned 
long gfn_l)
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_unmap_page(d, _dfn(gfn_l));
+        return iommu_unmap_page(d, _dfn(gfn_l), PAGE_ORDER_4K);
     }
 
     gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 543ea030e3..c0ce5673ba 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1437,13 +1437,15 @@ int memory_add(unsigned long spfn, unsigned long epfn, 
unsigned int pxm)
     {
         for ( i = spfn; i < epfn; i++ )
             if ( iommu_map_page(hardware_domain, _dfn(i), _mfn(i),
+                                PAGE_ORDER_4K,
                                 IOMMUF_readable | IOMMUF_writable) )
                 break;
         if ( i != epfn )
         {
             while (i-- > old_max)
                 /* If statement to satisfy __must_check. */
-                if ( iommu_unmap_page(hardware_domain, _dfn(i)) )
+                if ( iommu_unmap_page(hardware_domain, _dfn(i),
+                                      PAGE_ORDER_4K) )
                     continue;
 
             goto destroy_m2p;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 878e668bf5..971c6b100c 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1142,12 +1142,14 @@ map_grant_ref(
         {
             if ( !(kind & MAPKIND_WRITE) )
                 err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
+                                     PAGE_ORDER_4K,
                                      IOMMUF_readable | IOMMUF_writable);
         }
         else if ( act_pin && !old_pin )
         {
             if ( !kind )
                 err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
+                                     PAGE_ORDER_4K,
                                      IOMMUF_readable);
         }
         if ( err )
@@ -1396,10 +1398,11 @@ unmap_common(
 
         kind = mapkind(lgt, rd, op->mfn);
         if ( !kind )
-            err = iommu_unmap_page(ld, _dfn(mfn_x(op->mfn)));
+            err = iommu_unmap_page(ld, _dfn(mfn_x(op->mfn)),
+                                   PAGE_ORDER_4K);
         else if ( !(kind & MAPKIND_WRITE) )
             err = iommu_map_page(ld, _dfn(mfn_x(op->mfn)), op->mfn,
-                                 IOMMUF_readable);
+                                 PAGE_ORDER_4K, IOMMUF_readable);
 
         double_gt_unlock(lgt, rgt);
 
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 8b438ae4bc..40db9e7849 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -305,50 +305,71 @@ void iommu_domain_destroy(struct domain *d)
 }
 
 int iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
-                   unsigned int flags)
+                   unsigned int page_order, unsigned int flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
-    int rc;
+    unsigned long i;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
-    if ( unlikely(rc) )
+    for ( i = 0; i < (1ul << page_order); i++ )
     {
-        if ( !d->is_shutting_down && printk_ratelimit() )
-            printk(XENLOG_ERR
-                   "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" 
failed: %d\n",
-                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
+        int ignored, rc = hd->platform_ops->map_page(d, dfn_add(dfn, i),
+                                                     mfn_add(mfn, i),
+                                                     flags);
 
-        if ( !is_hardware_domain(d) )
-            domain_crash(d);
+        if ( unlikely(rc) )
+        {
+            while (i--)
+            {
+                /* assign to something to avoid compiler warning */
+                ignored = hd->platform_ops->unmap_page(d, dfn_add(dfn, i));
+            }
+
+            if ( !d->is_shutting_down && printk_ratelimit() )
+                printk(XENLOG_ERR
+                       "d%d: IOMMU order %u mapping dfn %"PRI_dfn" to mfn 
%"PRI_mfn" failed: %d\n",
+                       d->domain_id, page_order, dfn_x(dfn), mfn_x(mfn),
+                       rc);
+
+            if ( !is_hardware_domain(d) )
+                domain_crash(d);
+
+            return rc;
+        }
     }
 
-    return rc;
+    return 0;
 }
 
-int iommu_unmap_page(struct domain *d, dfn_t dfn)
+int iommu_unmap_page(struct domain *d, dfn_t dfn, unsigned int page_order)
 {
     const struct domain_iommu *hd = dom_iommu(d);
-    int rc;
+    unsigned long i;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->unmap_page(d, dfn);
-    if ( unlikely(rc) )
+    for ( i = 0; i < (1ul << page_order); i++ )
     {
-        if ( !d->is_shutting_down && printk_ratelimit() )
-            printk(XENLOG_ERR
-                   "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
-                   d->domain_id, dfn_x(dfn), rc);
+        int rc = hd->platform_ops->unmap_page(d, dfn_add(dfn, i));
 
-        if ( !is_hardware_domain(d) )
-            domain_crash(d);
+        if ( unlikely(rc) )
+        {
+            if ( !d->is_shutting_down && printk_ratelimit() )
+                printk(XENLOG_ERR
+                       "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
+                       d->domain_id, dfn_x(dfn), rc);
+
+            if ( !is_hardware_domain(d) )
+                domain_crash(d);
+
+            return rc;
+        }
     }
 
-    return rc;
+    return 0;
 }
 
 int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
diff --git a/xen/drivers/passthrough/x86/iommu.c 
b/xen/drivers/passthrough/x86/iommu.c
index b20bad17de..4fd3eb1dca 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -239,7 +239,7 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         if ( paging_mode_translate(d) )
             rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
         else
-            rc = iommu_map_page(d, _dfn(pfn), _mfn(pfn),
+            rc = iommu_map_page(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
                                 IOMMUF_readable | IOMMUF_writable);
         if ( rc )
             printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index c75333c077..a003d82204 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -89,9 +89,11 @@ void iommu_teardown(struct domain *d);
 #define IOMMUF_readable  (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 1
 #define IOMMUF_writable  (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
-                                mfn_t mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);
+int __must_check iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
+                                unsigned int page_order,
+                                unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn,
+                                  unsigned int page_order);
 int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                    unsigned int *flags);
 
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.