
[Xen-devel] [PATCH v2 4/5] x86/mm: build map_domain_gfn() just once



map_domain_gfn() doesn't depend on GUEST_PAGING_LEVELS, so there is no
need to emit a separate copy of it for each guest paging mode. Moving
the function to p2m.c at the same time allows a bogus #define/#include
pair to be removed from hap/nested_ept.c.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: To ensure no dependency on GUEST_PAGING_LEVELS, move the function
    to p2m.c.
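
(Not part of the patch, just context for reviewers.) A minimal sketch of
the GPT_RENAME mechanism in asm-x86/guest_pt.h: guest_walk.c is compiled
once per GUEST_PAGING_LEVELS (guest_walk_2.o/3.o/4.o), and every symbol
wrapped by GPT_RENAME gets a per-level suffix, so until now three
identical copies of the level-independent map_domain_gfn() were emitted:

    /* Illustration only, mirroring the macros kept in guest_pt.h. */
    #define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
    #define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)

    #define GUEST_PAGING_LEVELS 4
    #define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)
    /*
     * "guest_walk_tables" now expands to guest_walk_tables_4_levels; before
     * this patch "map_domain_gfn" was renamed the same way, which is why
     * hap/nested_ept.c had to define GUEST_PAGING_LEVELS merely to get at
     * the declaration.
     */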

--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -88,46 +88,6 @@ static uint32_t set_ad_bits(void *guest_
     return 0;
 }
 
-/* If the map is non-NULL, we leave this function having 
- * acquired an extra ref on mfn_to_page(*mfn) */
-void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc)
-{
-    struct page_info *page;
-    void *map;
-
-    /* Translate the gfn, unsharing if shared */
-    page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL,
-                                 q);
-    if ( p2m_is_paging(*p2mt) )
-    {
-        ASSERT(p2m_is_hostp2m(p2m));
-        if ( page )
-            put_page(page);
-        p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
-        *rc = _PAGE_PAGED;
-        return NULL;
-    }
-    if ( p2m_is_shared(*p2mt) )
-    {
-        if ( page )
-            put_page(page);
-        *rc = _PAGE_SHARED;
-        return NULL;
-    }
-    if ( !page )
-    {
-        *rc |= _PAGE_PRESENT;
-        return NULL;
-    }
-    *mfn = _mfn(page_to_mfn(page));
-    ASSERT(mfn_valid(mfn_x(*mfn)));
-
-    map = map_domain_page(*mfn);
-    return map;
-}
-
-
 /* Walk the guest pagetables, after the manner of a hardware walker. */
 /* Because the walk is essentially random, it can cause a deadlock 
  * warning in the p2m locking code. Highly unlikely this is an actual
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -34,10 +34,6 @@
 #include <asm/hvm/vmx/vmx.h>
 #include <asm/hvm/vmx/vvmx.h>
 
-/* EPT always use 4-level paging structure */
-#define GUEST_PAGING_LEVELS 4
-#include <asm/guest_pt.h>
-
 /* Must reserved bits in all level entries  */
 #define EPT_MUST_RSV_BITS (((1ull << PADDR_BITS) - 1) & \
                            ~((1ull << paddr_bits) - 1))
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2053,6 +2053,44 @@ unsigned long paging_gva_to_gfn(struct v
     return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
 }
 
+/*
+ * If the map is non-NULL, we leave this function having
+ * acquired an extra ref on mfn_to_page(*mfn).
+ */
+void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc)
+{
+    struct page_info *page;
+
+    /* Translate the gfn, unsharing if shared. */
+    page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL, q);
+    if ( p2m_is_paging(*p2mt) )
+    {
+        ASSERT(p2m_is_hostp2m(p2m));
+        if ( page )
+            put_page(page);
+        p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+        *rc = _PAGE_PAGED;
+        return NULL;
+    }
+    if ( p2m_is_shared(*p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        *rc = _PAGE_SHARED;
+        return NULL;
+    }
+    if ( !page )
+    {
+        *rc |= _PAGE_PRESENT;
+        return NULL;
+    }
+    *mfn = _mfn(page_to_mfn(page));
+    ASSERT(mfn_valid(mfn_x(*mfn)));
+
+    return map_domain_page(*mfn);
+}
+
 int map_mmio_regions(struct domain *d,
                      unsigned long start_gfn,
                      unsigned long nr,
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -305,10 +305,6 @@ guest_walk_to_page_order(walk_t *gw)
 #define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
 #define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)
 #define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)
-#define map_domain_gfn GPT_RENAME(map_domain_gfn, GUEST_PAGING_LEVELS)
-
-void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc);
 
 extern uint32_t 
 guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va,
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -680,6 +680,9 @@ int p2m_set_entry(struct p2m_domain *p2m
 /* Set up function pointers for PT implementation: only for use by p2m code */
 extern void p2m_pt_init(struct p2m_domain *p2m);
 
+void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc);
+
 /* Debugging and auditing of the P2M code? */
 #ifndef NDEBUG
 #define P2M_AUDIT     1
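
(Again not part of the patch.) A hedged usage sketch of the calling
convention the moved comment describes: on a non-NULL return the caller
owns both the mapping and an extra reference on the underlying page, and
must release both. The helper name below is made up for illustration and
the error handling is simplified:

    static uint32_t use_guest_frame(struct p2m_domain *p2m, gfn_t gfn,
                                    p2m_query_t q)
    {
        uint32_t rc = 0;
        p2m_type_t p2mt;
        mfn_t mfn;
        void *ptr = map_domain_gfn(p2m, gfn, &mfn, &p2mt, q, &rc);

        if ( !ptr )
            return rc; /* _PAGE_PAGED, _PAGE_SHARED, or _PAGE_PRESENT set */

        /* ... read or modify the mapped guest frame here ... */

        unmap_domain_page(ptr);
        put_page(mfn_to_page(mfn_x(mfn)));
        return 0;
    }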


