[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] common/domain_page: Drop domain_mmap_cache infrastructure



This infrastructure is used exclusively by the x86 do_mmu_update() hypercall.
Mapping and unmapping domain pages is probably not the slow part of that
function, but even with an open-coded caching implementation, Bloat-o-meter
reports:

  function                                     old     new   delta
  do_mmu_update                               6815    6573    -242

The !CONFIG_DOMAIN_PAGE stub code has a mismatch between mapping and
unmapping, which is a latent bug.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
---
 xen/arch/x86/mm.c             | 22 ++++++++------
 xen/include/xen/domain_page.h | 68 -------------------------------------------
 2 files changed, 13 insertions(+), 77 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 97b3b4b..88bf4f6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3440,13 +3440,13 @@ long do_mmu_update(
     unsigned int foreigndom)
 {
     struct mmu_update req;
-    void *va;
+    void *va = NULL;
     unsigned long gpfn, gmfn, mfn;
     struct page_info *page;
     unsigned int cmd, i = 0, done = 0, pt_dom;
     struct vcpu *curr = current, *v = curr;
     struct domain *d = v->domain, *pt_owner = d, *pg_owner;
-    struct domain_mmap_cache mapcache;
+    mfn_t map_mfn = INVALID_MFN;
     uint32_t xsm_needed = 0;
     uint32_t xsm_checked = 0;
     int rc = put_old_guest_table(curr);
@@ -3503,8 +3503,6 @@ long do_mmu_update(
         goto out;
     }
 
-    domain_mmap_cache_init(&mapcache);
-
     for ( i = 0; i < count; i++ )
     {
         if ( curr->arch.old_guest_table || (i && hypercall_preempt_check()) )
@@ -3573,9 +3571,15 @@ long do_mmu_update(
             }
 
             mfn = page_to_mfn(page);
-            va = map_domain_page_with_cache(mfn, &mapcache);
-            va = (void *)((unsigned long)va +
-                          (unsigned long)(req.ptr & ~PAGE_MASK));
+
+            if ( !mfn_eq(_mfn(mfn), map_mfn) )
+            {
+                if ( va )
+                    unmap_domain_page(va);
+                va = map_domain_page(_mfn(mfn));
+                map_mfn = _mfn(mfn);
+            }
+            va = _p(((unsigned long)va & PAGE_MASK) + (req.ptr & ~PAGE_MASK));
 
             if ( page_lock(page) )
             {
@@ -3653,7 +3657,6 @@ long do_mmu_update(
                 put_page_type(page);
             }
 
-            unmap_domain_page_with_cache(va, &mapcache);
             put_page(page);
         }
         break;
@@ -3734,7 +3737,8 @@ long do_mmu_update(
 
     put_pg_owner(pg_owner);
 
-    domain_mmap_cache_destroy(&mapcache);
+    if ( va )
+        unmap_domain_page(va);
 
     perfc_add(num_page_updates, i);
 
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index c1d630c..93f2a5a 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -51,66 +51,6 @@ static inline void *__map_domain_page_global(const struct page_info *pg)
     return map_domain_page_global(_mfn(__page_to_mfn(pg)));
 }
 
-#define DMCACHE_ENTRY_VALID 1U
-#define DMCACHE_ENTRY_HELD  2U
-
-struct domain_mmap_cache {
-    unsigned long mfn;
-    void         *va;
-    unsigned int  flags;
-};
-
-static inline void
-domain_mmap_cache_init(struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    cache->flags = 0;
-    cache->mfn = 0;
-    cache->va = NULL;
-}
-
-static inline void *
-map_domain_page_with_cache(unsigned long mfn, struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);
-
-    if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
-    {
-        cache->flags |= DMCACHE_ENTRY_HELD;
-        if ( likely(mfn == cache->mfn) )
-            goto done;
-        unmap_domain_page(cache->va);
-    }
-
-    cache->mfn   = mfn;
-    cache->va    = map_domain_page(_mfn(mfn));
-    cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
-
- done:
-    return cache->va;
-}
-
-static inline void
-unmap_domain_page_with_cache(const void *va, struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    cache->flags &= ~DMCACHE_ENTRY_HELD;
-}
-
-static inline void
-domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
-{
-    ASSERT(cache != NULL);
-    BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);
-
-    if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
-    {
-        unmap_domain_page(cache->va);
-        cache->flags = 0;
-    }
-}
-
 #else /* !CONFIG_DOMAIN_PAGE */
 
 #define map_domain_page(mfn)                mfn_to_virt(mfn_x(mfn))
@@ -130,14 +70,6 @@ static inline void *__map_domain_page_global(const struct page_info *pg)
 
 static inline void unmap_domain_page_global(const void *va) {};
 
-struct domain_mmap_cache { 
-};
-
-#define domain_mmap_cache_init(c)           ((void)(c))
-#define map_domain_page_with_cache(mfn,c)   (map_domain_page(mfn))
-#define unmap_domain_page_with_cache(va,c)  ((void)(va))
-#define domain_mmap_cache_destroy(c)        ((void)(c))
-
 #endif /* !CONFIG_DOMAIN_PAGE */
 
 #endif /* __XEN_DOMAIN_PAGE_H__ */
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.