[Xen-devel] [PATCH 1/3] xen/domain_page: Convert map_domain_page_global() to using mfn_t

From: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

The sh_map/unmap wrappers can be dropped, and the opportunity is taken to
turn some #defines into static inlines, for added type safety.
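
For illustration only (not part of the patch), a minimal standalone sketch of
the class of error the typesafe prototype now catches.  The mfn_t here mimics
Xen's TYPE_SAFE struct wrapper from xen/mm.h, and map_global() is a
hypothetical stand-in for map_domain_page_global():

    /* Stand-in for Xen's TYPE_SAFE mfn_t wrapper (debug builds). */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    /* Hypothetical stand-in with the new typesafe prototype. */
    static inline void *map_global(mfn_t mfn)
    {
        return (void *)(mfn_x(mfn) << 12); /* illustrative body only */
    }

    void demo(unsigned long raw_mfn)
    {
        void *p = map_global(_mfn(raw_mfn)); /* fine: explicit _mfn() */
        (void)p;
        /* map_global(raw_mfn) no longer compiles, whereas the old
         * unsigned long prototype accepted any integer silently. */
    }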

As part of adding the type safety, GCC highlights a problematic include
cycle: arm/mm.h includes domain_page.h, which in turn includes xen/mm.h, and
compilation falls over __page_to_mfn being used before it is declared.
Simply dropping the inclusion of domain_page.h from arm/mm.h fixes the
compilation issue.
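
The ordering hazard is inherent to the conversion: a macro body is only
expanded at its point of use, so a #define may reference __page_to_mfn before
any declaration is visible, while a static inline compiles the call at its
definition.  A minimal sketch of the difference (hypothetical helper names,
not the actual headers):

    struct page_info;

    /* Macro form: legal even though __page_to_mfn is not yet declared;
     * the body is only expanded where the macro is used. */
    #define macro_page_to_mfn(pg) __page_to_mfn(pg)

    /* The inline form compiles its call immediately, so without the
     * declaration below, C99 rejects it as an implicit declaration. */
    unsigned long __page_to_mfn(const struct page_info *pg);

    static inline unsigned long inline_page_to_mfn(const struct page_info *pg)
    {
        return __page_to_mfn(pg); /* error if declared after this point */
    }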

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Ian Campbell <ian.campbell@xxxxxxxxxx>
CC: Stefano Stabellini <stefano.stabellini@xxxxxxxxxx>
---
 xen/arch/arm/mm.c                |  6 ++----
 xen/arch/x86/domain_page.c       |  9 ++++-----
 xen/arch/x86/mm/shadow/multi.c   | 10 +++++-----
 xen/arch/x86/mm/shadow/private.h | 12 ------------
 xen/include/asm-arm/mm.h         |  1 -
 xen/include/xen/domain_page.h    | 22 +++++++++++++++++-----
 6 files changed, 28 insertions(+), 32 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index ff1b330..d479048 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -271,11 +271,9 @@ void clear_fixmap(unsigned map)
 }
 
 #ifdef CONFIG_DOMAIN_PAGE
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
 {
-    mfn_t m = _mfn(mfn);
-
-    return vmap(&m, 1);
+    return vmap(&mfn, 1);
 }
 
 void unmap_domain_page_global(const void *va)
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index d684b2f..0f7548b 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -302,17 +302,16 @@ int mapcache_vcpu_init(struct vcpu *v)
     return 0;
 }
 
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
 {
-    mfn_t m = _mfn(mfn);
     ASSERT(!in_irq() && local_irq_is_enabled());
 
 #ifdef NDEBUG
-    if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn);
+    if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+        return mfn_to_virt(mfn_x(mfn));
 #endif
 
-    return vmap(&m, 1);
+    return vmap(&mfn, 1);
 }
 
 void unmap_domain_page_global(const void *ptr)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 42204d9..54d0bd3 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3806,7 +3806,7 @@ sh_detach_old_tables(struct vcpu *v)
     if ( v->arch.paging.shadow.guest_vtable )
     {
         if ( shadow_mode_external(d) || shadow_mode_translate(d) )
-            sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+            unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = NULL;
     }
 #endif // !NDEBUG
@@ -3977,8 +3977,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
     {
         if ( v->arch.paging.shadow.guest_vtable )
-            sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
-        v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+            unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+        v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
         /* PAGING_LEVELS==4 implies 64-bit, which means that
          * map_domain_page_global can't fail */
         BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
@@ -4010,8 +4010,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
     {
         if ( v->arch.paging.shadow.guest_vtable )
-            sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
-        v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+            unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+        v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
         /* Does this really need map_domain_page_global?  Handle the
          * error properly if so. */
         BUG_ON(v->arch.paging.shadow.guest_vtable == NULL); /* XXX */
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index f72ea9f..eff39dc 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -517,18 +517,6 @@ sh_unmap_domain_page(void *p)
     unmap_domain_page(p);
 }
 
-static inline void *
-sh_map_domain_page_global(mfn_t mfn)
-{
-    return map_domain_page_global(mfn_x(mfn));
-}
-
-static inline void
-sh_unmap_domain_page_global(void *p)
-{
-    unmap_domain_page_global(p);
-}
-
 /**************************************************************************/
 /* Shadow-page refcounting. */
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 3601140..2e1f21a 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -5,7 +5,6 @@
 #include <xen/kernel.h>
 #include <asm/page.h>
 #include <public/xen.h>
-#include <xen/domain_page.h>
 #include <xen/pdx.h>
 
 /* Align Xen to a 2 MiB boundary. */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index b7a710b..1aac0eb 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -41,11 +41,15 @@ unsigned long domain_page_map_to_mfn(const void *va);
  * address spaces (not just within the VCPU that created the mapping). Global
  * mappings can also be unmapped from any context.
  */
-void *map_domain_page_global(unsigned long mfn);
+void *map_domain_page_global(mfn_t mfn);
 void unmap_domain_page_global(const void *va);
 
 #define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
-#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
+
+static inline void *__map_domain_page_global(struct page_info *pg)
+{
+    return map_domain_page_global(_mfn(__page_to_mfn(pg)));
+}
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
@@ -117,9 +121,17 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
                                                       mfn_to_virt(smfn))
 #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
 
-#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
-#define __map_domain_page_global(pg)        page_to_virt(pg)
-#define unmap_domain_page_global(va)        ((void)(va))
+static inline void *map_domain_page_global(mfn_t mfn)
+{
+    return mfn_to_virt(mfn_x(mfn));
+}
+
+static inline void *__map_domain_page_global(struct page_info *pg)
+{
+    return page_to_virt(pg);
+}
+
+static inline void unmap_domain_page_global(const void *va) {}
 
 struct domain_mmap_cache { 
 };
-- 
2.1.4

